Diffstat (limited to 'drivers/net/wireless/ath')
117 files changed, 26007 insertions, 4349 deletions
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index bc6330b43795..3b2771ff2796 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1472,12 +1472,12 @@ static int ar5523_init_modes(struct ar5523 *ar)
 	memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
 	memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
 
-	ar->band.band = IEEE80211_BAND_2GHZ;
+	ar->band.band = NL80211_BAND_2GHZ;
 	ar->band.channels = ar->channels;
 	ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
 	ar->band.bitrates = ar->rates;
 	ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
-	ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+	ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 65ef483ebf50..da7a7c8dafb2 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -185,7 +185,7 @@ struct ath_common {
 	bool bt_ant_diversity;
 
 	int last_rssi;
-	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 };
 
 static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 72acb822bb11..f0c831b4b3d9 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -2,6 +2,7 @@ config ATH10K
 	tristate "Atheros 802.11ac wireless cards support"
 	depends on MAC80211 && HAS_DMA
 	select ATH_COMMON
+	select CRC32
 	---help---
 	  This module adds support for wireless adapters based on
 	  Atheros IEEE 802.11ac family of chipsets.
@@ -14,6 +15,32 @@ config ATH10K_PCI
 	---help---
 	  This module adds support for PCIE bus
 
+config ATH10K_AHB
+	bool "Atheros ath10k AHB support"
+	depends on ATH10K_PCI && OF && RESET_CONTROLLER
+	---help---
+	  This module adds support for AHB bus
+
+config ATH10K_TARGET_SNOC
+	tristate "Atheros ath10k SNOC support"
+	depends on ATH10K
+	---help---
+	  This module adds support for the integrated WCN3990 WLAN module.
+	  WCN3990 is an integrated 802.11ac chipset with an SNOC bus
+	  interface. This module also registers the WCN3990 WLAN module
+	  with the MAC80211 network subsystem.
+
+config ATH10K_SNOC
+	bool "Enable/disable Atheros ath10k SNOC bus interface support"
+	depends on ATH10K
+	depends on ATH10K_TARGET_SNOC
+	---help---
+	  This module adds support for WLAN SNOC bus registration, copy
+	  engine configuration for the WCN3990 chipset, hardware shadow
+	  register configuration, the host-to-target communication
+	  interface used to interact with the WLAN firmware, and WLAN
+	  module interface control and data receive (RX)/transmit (TX) control.
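[Editorial note] The new bus symbols gate how much glue code is built: the AHB and SNOC layers are only compiled when their Kconfig options are set, and the core can still call the bus init/exit hooks unconditionally because the bus header provides no-op stubs when the option is off (the real version of this pattern appears in ahb.h later in this patch). Below is a minimal, self-contained sketch of that pattern; CONFIG_EXAMPLE_BUS and the example_bus_* names are illustrative, not part of the patch.

/* Sketch only: mirrors the config-gated stub pattern used by ahb.h. */
#include <stdio.h>

#ifdef CONFIG_EXAMPLE_BUS
int example_bus_init(void);	/* would register the platform driver */
void example_bus_exit(void);
#else
/* Bus support compiled out: no-op stubs keep callers free of #ifdefs. */
static inline int example_bus_init(void)
{
	return 0;
}

static inline void example_bus_exit(void)
{
}
#endif

int main(void)
{
	/* With the option disabled this succeeds via the stub. */
	printf("example_bus_init() = %d\n", example_bus_init());
	example_bus_exit();
	return 0;
}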
+ config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index c04fb00e7930..27a6c75682c4 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -24,6 +24,13 @@ ath10k_core-$(CONFIG_PM) += wow.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o ath10k_pci-y += pci.o \ ce.o +obj-$(CONFIG_ATH10K_TARGET_SNOC) += ath10k_snoc.o +ath10k_snoc-y += snoc.o \ + qmi.o \ + wcn3990_qmi_service_v01.o \ + ce.o + +ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c new file mode 100644 index 000000000000..766c63bf05c4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/reset.h> +#include "core.h" +#include "debug.h" +#include "pci.h" +#include "ahb.h" + +static const struct of_device_id ath10k_ahb_of_match[] = { + { .compatible = "qcom,ipq4019-wifi", + .data = (void *)ATH10K_HW_QCA4019 + }, + { } +}; + +MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); + +static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar) +{ + return &((struct ath10k_pci *)ar->drv_priv)->ahb[0]; +} + +static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->gcc_mem + offset); +} + +static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +static int ath10k_ahb_get_num_banks(struct ath10k *ar) +{ + if (ar->hw_rev == ATH10K_HW_QCA4019) + return 1; + + ath10k_warn(ar, "unknown number of banks, assuming 1\n"); + return 1; +} + +static int ath10k_ahb_clock_init(struct ath10k *ar) 
+{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd"); + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) { + ath10k_err(ar, "failed to get cmd clk: %ld\n", + PTR_ERR(ar_ahb->cmd_clk)); + return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV; + } + + ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref"); + if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) { + ath10k_err(ar, "failed to get ref clk: %ld\n", + PTR_ERR(ar_ahb->ref_clk)); + return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV; + } + + ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc"); + if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "failed to get rtc clk: %ld\n", + PTR_ERR(ar_ahb->rtc_clk)); + return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV; + } + + return 0; +} + +static void ath10k_ahb_clock_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->cmd_clk = NULL; + ar_ahb->ref_clk = NULL; + ar_ahb->rtc_clk = NULL; +} + +static int ath10k_ahb_clock_enable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + int ret; + + dev = &ar_ahb->pdev->dev; + + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) || + IS_ERR_OR_NULL(ar_ahb->ref_clk) || + IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "clock(s) is/are not initialized\n"); + ret = -EIO; + goto out; + } + + ret = clk_prepare_enable(ar_ahb->cmd_clk); + if (ret) { + ath10k_err(ar, "failed to enable cmd clk: %d\n", ret); + goto out; + } + + ret = clk_prepare_enable(ar_ahb->ref_clk); + if (ret) { + ath10k_err(ar, "failed to enable ref clk: %d\n", ret); + goto err_cmd_clk_disable; + } + + ret = clk_prepare_enable(ar_ahb->rtc_clk); + if (ret) { + ath10k_err(ar, "failed to enable rtc clk: %d\n", ret); + goto err_ref_clk_disable; + } + + return 0; + +err_ref_clk_disable: + clk_disable_unprepare(ar_ahb->ref_clk); + +err_cmd_clk_disable: + clk_disable_unprepare(ar_ahb->cmd_clk); + +out: + return ret; +} + +static void ath10k_ahb_clock_disable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) + clk_disable_unprepare(ar_ahb->cmd_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) + clk_disable_unprepare(ar_ahb->ref_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) + clk_disable_unprepare(ar_ahb->rtc_clk); +} + +static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold"); + if (IS_ERR(ar_ahb->core_cold_rst)) { + ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->core_cold_rst)); + return PTR_ERR(ar_ahb->core_cold_rst); + } + + ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold"); + if (IS_ERR(ar_ahb->radio_cold_rst)) { + ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_cold_rst)); + return PTR_ERR(ar_ahb->radio_cold_rst); + } + + ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm"); + if (IS_ERR(ar_ahb->radio_warm_rst)) { + ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_warm_rst)); + return PTR_ERR(ar_ahb->radio_warm_rst); + } + + ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif"); + if (IS_ERR(ar_ahb->radio_srif_rst)) { + ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_srif_rst)); + 
return PTR_ERR(ar_ahb->radio_srif_rst); + } + + ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init"); + if (IS_ERR(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", + PTR_ERR(ar_ahb->cpu_init_rst)); + return PTR_ERR(ar_ahb->cpu_init_rst); + } + + return 0; +} + +static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->core_cold_rst = NULL; + ar_ahb->radio_cold_rst = NULL; + ar_ahb->radio_warm_rst = NULL; + ar_ahb->radio_srif_rst = NULL; + ar_ahb->cpu_init_rst = NULL; +} + +static int ath10k_ahb_release_reset(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return -EINVAL; + } + + ret = reset_control_deassert(ar_ahb->radio_cold_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_warm_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_srif_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->cpu_init_rst); + if (ret) { + ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg, + u32 haltack_reg) +{ + unsigned long timeout; + u32 val; + + /* Issue halt axi bus request */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val |= AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + /* Wait for axi bus halted ack */ + timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT); + do { + val = ath10k_ahb_tcsr_read32(ar, haltack_reg); + if (val & AHB_AXI_BUS_HALT_ACK) + break; + + mdelay(1); + } while (time_before(jiffies, timeout)); + + if (!(val & AHB_AXI_BUS_HALT_ACK)) { + ath10k_err(ar, "failed to halt axi bus: %d\n", val); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n"); +} + +static void ath10k_ahb_halt_chip(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg; + u32 val; + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return; + } + + core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG); + + switch (core_id) { + case 0: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK; + break; + case 1: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK; + break; + default: + ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n", + core_id); + return; + } + + ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret 
= reset_control_assert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert core cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_warm_rst); + if (ret) + ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_srif_rst); + if (ret) + ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->cpu_init_rst); + if (ret) + ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret); + msleep(10); + + /* Clear halt req and core clock disable req before + * deasserting wifi core reset. + */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val &= ~AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret = reset_control_deassert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret); + + ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id); +} + +static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + + if (!ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_legacy_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + napi_schedule(&ar->napi); + + return IRQ_HANDLED; +} + +static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + ret = request_irq(ar_ahb->irq, + ath10k_ahb_interrupt_handler, + IRQF_SHARED, "ath10k_ahb", ar); + if (ret) { + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", + ar_ahb->irq, ret); + return ret; + } + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; + + return 0; +} + +static void ath10k_ahb_release_irq_legacy(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + free_irq(ar_ahb->irq, ar); +} + +static void ath10k_ahb_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); + ath10k_pci_disable_and_clear_legacy_irq(ar); +} + +static int ath10k_ahb_resource_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct platform_device *pdev; + struct device *dev; + struct resource *res; + int ret; + + pdev = ar_ahb->pdev; + dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ath10k_err(ar, "failed to get memory resource\n"); + ret = -ENXIO; + goto out; + } + + ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ar_ahb->mem)) { + ath10k_err(ar, "mem ioremap error\n"); + ret = PTR_ERR(ar_ahb->mem); + goto out; + } + + ar_ahb->mem_len = resource_size(res); + + ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE, + ATH10K_GCC_REG_SIZE); + if (!ar_ahb->gcc_mem) { + ath10k_err(ar, "gcc mem ioremap error\n"); + ret = -ENOMEM; + goto err_mem_unmap; + } + + ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE, + ATH10K_TCSR_REG_SIZE); + if (!ar_ahb->tcsr_mem) { + ath10k_err(ar, "tcsr mem ioremap error\n"); + ret = -ENOMEM; + goto err_gcc_mem_unmap; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret); + goto err_tcsr_mem_unmap; + } + + ret = 
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n", + ret); + goto err_tcsr_mem_unmap; + } + + ret = ath10k_ahb_clock_init(ar); + if (ret) + goto err_tcsr_mem_unmap; + + ret = ath10k_ahb_rst_ctrl_init(ar); + if (ret) + goto err_clock_deinit; + + ar_ahb->irq = platform_get_irq_byname(pdev, "legacy"); + if (ar_ahb->irq < 0) { + ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq); + ret = ar_ahb->irq; + goto err_clock_deinit; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n", + ar_ahb->mem, ar_ahb->mem_len, + ar_ahb->gcc_mem, ar_ahb->tcsr_mem); + return 0; + +err_clock_deinit: + ath10k_ahb_clock_deinit(ar); + +err_tcsr_mem_unmap: + iounmap(ar_ahb->tcsr_mem); + +err_gcc_mem_unmap: + ar_ahb->tcsr_mem = NULL; + iounmap(ar_ahb->gcc_mem); + +err_mem_unmap: + ar_ahb->gcc_mem = NULL; + devm_iounmap(&pdev->dev, ar_ahb->mem); + +out: + ar_ahb->mem = NULL; + return ret; +} + +static void ath10k_ahb_resource_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + if (ar_ahb->mem) + devm_iounmap(dev, ar_ahb->mem); + + if (ar_ahb->gcc_mem) + iounmap(ar_ahb->gcc_mem); + + if (ar_ahb->tcsr_mem) + iounmap(ar_ahb->tcsr_mem); + + ar_ahb->mem = NULL; + ar_ahb->gcc_mem = NULL; + ar_ahb->tcsr_mem = NULL; + + ath10k_ahb_clock_deinit(ar); + ath10k_ahb_rst_ctrl_deinit(ar); +} + +static int ath10k_ahb_prepare_device(struct ath10k *ar) +{ + u32 val; + int ret; + + ret = ath10k_ahb_clock_enable(ar); + if (ret) { + ath10k_err(ar, "failed to enable clocks\n"); + return ret; + } + + /* Clock for the target is supplied from outside of target (ie, + * external clock module controlled by the host). Target needs + * to know what frequency target cpu is configured which is needed + * for target internal use. Read target cpu frequency info from + * gcc register and write into target's scratch register where + * target expects this information. 
+ */ + val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV); + ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val); + + ret = ath10k_ahb_release_reset(ar); + if (ret) + goto err_clk_disable; + + ath10k_ahb_irq_disable(ar); + + ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) + goto err_halt_chip; + + return 0; + +err_halt_chip: + ath10k_ahb_halt_chip(ar); + +err_clk_disable: + ath10k_ahb_clock_disable(ar); + + return ret; +} + +static int ath10k_ahb_chip_reset(struct ath10k *ar) +{ + int ret; + + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + return ret; + + return 0; +} + +static int ath10k_ahb_wake_target_cpu(struct ath10k *ar) +{ + u32 addr, val; + + addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + val = ath10k_ahb_read32(ar, addr); + val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK; + ath10k_ahb_write32(ar, addr, val); + + return 0; +} + +static int ath10k_ahb_hif_start(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n"); + + ath10k_ce_enable_interrupts(ar); + ath10k_pci_enable_legacy_irq(ar); + + ath10k_pci_rx_post(ar); + + return 0; +} + +static void ath10k_ahb_hif_stop(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n"); + + ath10k_ahb_irq_disable(ar); + synchronize_irq(ar_ahb->irq); + + ath10k_pci_flush(ar); + + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); +} + +static int ath10k_ahb_hif_power_up(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n"); + + ret = ath10k_ahb_chip_reset(ar); + if (ret) { + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_config(ar); + if (ret) { + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce_deinit; + } + + ret = ath10k_ahb_wake_target_cpu(ar); + if (ret) { + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce_deinit; + } + napi_enable(&ar->napi); + + return 0; + +err_ce_deinit: + ath10k_pci_ce_deinit(ar); +out: + return ret; +} + +static const struct ath10k_hif_ops ath10k_ahb_hif_ops = { + .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, + .start = ath10k_ahb_hif_start, + .stop = ath10k_ahb_hif_stop, + .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, + .get_default_pipe = ath10k_pci_hif_get_default_pipe, + .send_complete_check = ath10k_pci_hif_send_complete_check, + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, + .power_up = ath10k_ahb_hif_power_up, + .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, +}; + +static const struct ath10k_bus_ops ath10k_ahb_bus_ops = { + .read32 = ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, + .get_num_banks = ath10k_ahb_get_num_banks, +}; + +static int ath10k_ahb_probe(struct platform_device *pdev) +{ + struct ath10k *ar; + struct ath10k_ahb *ar_ahb; + struct ath10k_pci *ar_pci; + const struct of_device_id *of_id; + enum ath10k_hw_rev hw_rev; + size_t size; + int ret; + u32 chip_id; + + of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "failed to find 
matching device tree id\n"); + return -EINVAL; + } + + hw_rev = (enum ath10k_hw_rev)of_id->data; + + size = sizeof(*ar_pci) + sizeof(*ar_ahb); + ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, + hw_rev, &ath10k_ahb_hif_ops); + if (!ar) { + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n"); + + ar_pci = ath10k_pci_priv(ar); + ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->pdev = pdev; + platform_set_drvdata(pdev, ar); + + ret = ath10k_ahb_resource_init(ar); + if (ret) + goto err_core_destroy; + + ar->dev_id = 0; + ar_pci->mem = ar_ahb->mem; + ar_pci->mem_len = ar_ahb->mem_len; + ar_pci->ar = ar; + ar_pci->bus_ops = &ath10k_ahb_bus_ops; + + ret = ath10k_pci_setup_resource(ar); + if (ret) { + ath10k_err(ar, "failed to setup resource: %d\n", ret); + goto err_resource_deinit; + } + + ath10k_pci_init_napi(ar); + + ret = ath10k_ahb_request_irq_legacy(ar); + if (ret) + goto err_free_pipes; + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + goto err_free_irq; + + ath10k_pci_ce_deinit(ar); + + chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (chip_id == 0xffffffff) { + ath10k_err(ar, "failed to get chip id\n"); + ret = -ENODEV; + goto err_halt_device; + } + + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_halt_device; + } + + return 0; + +err_halt_device: + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + +err_free_irq: + ath10k_ahb_release_irq_legacy(ar); + +err_free_pipes: + ath10k_pci_free_pipes(ar); + +err_resource_deinit: + ath10k_ahb_resource_deinit(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + platform_set_drvdata(pdev, NULL); + + return ret; +} + +static int ath10k_ahb_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_ahb *ar_ahb; + + if (!ar) + return -EINVAL; + + ar_ahb = ath10k_ahb_priv(ar); + + if (!ar_ahb) + return -EINVAL; + + ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n"); + + ath10k_core_unregister(ar); + ath10k_ahb_irq_disable(ar); + ath10k_ahb_release_irq_legacy(ar); + ath10k_pci_release_resource(ar); + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + ath10k_ahb_resource_deinit(ar); + ath10k_core_destroy(ar); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ath10k_ahb_driver = { + .driver = { + .name = "ath10k_ahb", + .of_match_table = ath10k_ahb_of_match, + }, + .probe = ath10k_ahb_probe, + .remove = ath10k_ahb_remove, +}; + +int ath10k_ahb_init(void) +{ + int ret; + + ret = platform_driver_register(&ath10k_ahb_driver); + if (ret) + printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", + ret); + return ret; +} + +void ath10k_ahb_exit(void) +{ + platform_driver_unregister(&ath10k_ahb_driver); +} diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h new file mode 100644 index 000000000000..d43e375215c8 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AHB_H_ +#define _AHB_H_ + +#include <linux/platform_device.h> + +struct ath10k_ahb { + struct platform_device *pdev; + void __iomem *mem; + unsigned long mem_len; + void __iomem *gcc_mem; + void __iomem *tcsr_mem; + + int irq; + + struct clk *cmd_clk; + struct clk *ref_clk; + struct clk *rtc_clk; + + struct reset_control *core_cold_rst; + struct reset_control *radio_cold_rst; + struct reset_control *radio_warm_rst; + struct reset_control *radio_srif_rst; + struct reset_control *cpu_init_rst; +}; + +#ifdef CONFIG_ATH10K_AHB + +#define ATH10K_GCC_REG_BASE 0x1800000 +#define ATH10K_GCC_REG_SIZE 0x60000 + +#define ATH10K_TCSR_REG_BASE 0x1900000 +#define ATH10K_TCSR_REG_SIZE 0x80000 + +#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020 +#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014 + +#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 + +#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 +#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004 +#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25) + +#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000 +#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010 +#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004 +#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014 + +#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */ +#define AHB_AXI_BUS_HALT_REQ 1 +#define AHB_AXI_BUS_HALT_ACK 1 + +#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1 + +int ath10k_ahb_init(void); +void ath10k_ahb_exit(void); + +#else /* CONFIG_ATH10K_AHB */ + +static inline int ath10k_ahb_init(void) +{ + return 0; +} + +static inline void ath10k_ahb_exit(void) +{ +} + +#endif /* CONFIG_ATH10K_AHB */ + +#endif /* _AHB_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 3d29b0875b3e..2872d347ea78 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) u32 txlen; int ret; - ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n", buffer, length); if (ar->bmi.done_sent) { @@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar, int ret; ath10k_dbg(ar, ATH10K_DBG_BMI, - "bmi fast download address 0x%x buffer 0x%p length %d\n", + "bmi fast download address 0x%x buffer 0x%pK length %d\n", address, buffer, length); ret = ath10k_bmi_lz_stream_start(ar, address); diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index edf3629288bc..9ccd6212b54a 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -16,13 +16,12 @@ */ #include "hif.h" -#include "pci.h" #include "ce.h" #include "debug.h" /* * Support for Copy Engine hardware, which is mainly used for - * communication between Host and Target over a PCIe interconnect. + * communication between Host and Target over a PCIe/SNOC/AHB interconnect. 
*/ /* @@ -33,13 +32,13 @@ * Each ring consists of a number of descriptors which specify * an address, length, and meta-data. * - * Typically, one side of the PCIe interconnect (Host or Target) + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target) * controls one ring and the other side controls the other ring. * The source side chooses when to initiate a transfer and it * chooses what to send (buffer address, length). The destination * side keeps a supply of "anonymous receive buffers" available and * it handles incoming data as it arrives (when the destination - * recieves an interrupt). + * receives an interrupt). * * The sender may send a simple buffer (address/length) or it may * send a small list of buffers. When a small list is sent, hardware @@ -63,201 +62,401 @@ static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->dst_wr_index_addr, n); } static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->dst_wr_index_addr); } static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->sr_wr_index_addr, n); } static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->sr_wr_index_addr); +} + +static inline u32 ath10k_ce_src_ring_read_index_get_from_ddr( + struct ath10k *ar, u32 ce_id) +{ + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->vaddr_rri_on_ddr[ce_id] & CE_DDR_RRI_MASK; } static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; + u32 index; + + if (ar->rri_on_ddr && (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_src_ring_read_index_get_from_ddr(ar, ce_id); + else + index = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->current_srri_addr); + + return index; +} + +static inline void ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, shadow_sr_wr_ind_addr(ar, + ce_ctrl_addr), n); +} + +static inline void ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, shadow_dst_wr_ind_addr(ar, + ce_ctrl_addr), + n); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int addr) { - ath10k_pci_write32(ar, 
ce_ctrl_addr + SR_BA_ADDRESS, addr); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->sr_base_addr, addr); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->sr_size_addr, n); } static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32((ar), - (ce_ctrl_addr) + CE_CTRL1_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) | - CE_CTRL1_DMAX_LENGTH_SET(n)); + u32 ctrl1_addr = ar_opaque->bus_ops->read32((ar), + (ce_ctrl_addr) + ctrl_regs->addr); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | + ctrl_regs->dmax->set(n, ctrl_regs->dmax)); } static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + + u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)); + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | + ctrl_regs->src_ring->set(n, ctrl_regs->src_ring)); } static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | - CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)); + u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | + ctrl_regs->dst_ring->set(n, ctrl_regs->dst_ring)); } static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; + u32 index; + + if (ar->rri_on_ddr && (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = (ar_opaque->vaddr_rri_on_ddr[ce_id] >> + CE_DDR_RRI_SHIFT) & + CE_DDR_RRI_MASK; + else + index = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->current_drri_addr); + + return index; } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, u32 addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->dr_base_addr, addr); } static 
inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->dr_size_addr, n); } static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, - (addr & ~SRC_WATERMARK_HIGH_MASK) | - SRC_WATERMARK_HIGH_SET(n)); + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_high->mask)) | + (srcr_wm->wm_high->set(n, srcr_wm->wm_high))); } static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, - (addr & ~SRC_WATERMARK_LOW_MASK) | - SRC_WATERMARK_LOW_SET(n)); + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_low->mask)) | + (srcr_wm->wm_low->set(n, srcr_wm->wm_low))); } static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, - (addr & ~DST_WATERMARK_HIGH_MASK) | - DST_WATERMARK_HIGH_SET(n)); + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_high->mask)) | + (dstr_wm->wm_high->set(n, dstr_wm->wm_high))); } static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, - (addr & ~DST_WATERMARK_LOW_MASK) | - DST_WATERMARK_LOW_SET(n)); + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_low->mask)) | + (dstr_wm->wm_low->set(n, dstr_wm->wm_low))); } static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr | HOST_IE_COPY_COMPLETE_MASK); + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + 
host_ie_addr | host_ie->copy_complete->mask); } static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK); + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(host_ie->copy_complete->mask)); } static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr & ~CE_WATERMARK_MASK); + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(wm_regs->wm_mask)); } static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 misc_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + MISC_IE_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; + + u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, - misc_ie_addr | CE_ERROR_MASK); + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr | misc_regs->err_mask); } static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 misc_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + MISC_IE_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, - misc_ie_addr & ~CE_ERROR_MASK); + u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); + + ar_opaque->bus_ops->write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr & ~(misc_regs->err_mask)); } static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int mask) { - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + wm_regs->addr, mask); +} + +u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 0: + addr = SHADOW_VALUE0; + break; + case 3: + addr = SHADOW_VALUE3; + break; + case 4: + addr = SHADOW_VALUE4; + break; + case 5: + addr = SHADOW_VALUE5; + break; + case 7: + addr = SHADOW_VALUE7; + break; + default: + ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce); + WARN_ON(1); + } + return addr; +} + +u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 1: + addr = SHADOW_VALUE13; + break; + case 2: + addr = SHADOW_VALUE14; 
+ break; + case 5: + addr = SHADOW_VALUE17; + break; + case 7: + addr = SHADOW_VALUE19; + break; + case 8: + addr = SHADOW_VALUE20; + break; + case 9: + addr = SHADOW_VALUE21; + break; + case 10: + addr = SHADOW_VALUE22; + break; + case 11: + addr = SHADOW_VALUE23; + break; + default: + ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce); + WARN_ON(1); + } + + return addr; +} + +static inline void ath10k_ce_snoc_addr_config(struct ce_desc *sdesc, + dma_addr_t buffer, + unsigned int flags) +{ + __le32 *addr = (__le32 *)&sdesc->addr; + + flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; + addr[0] = __cpu_to_le32(buffer); + addr[1] = flags; + if (flags & CE_SEND_FLAG_GATHER) + addr[1] |= CE_WCN3990_DESC_FLAGS_GATHER; + else + addr[1] &= ~CE_WCN3990_DESC_FLAGS_GATHER; } /* @@ -267,7 +466,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, */ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int transfer_id, unsigned int flags) @@ -276,16 +475,20 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, struct ath10k_ce_ring *src_ring = ce_state->src_ring; struct ce_desc *desc, sdesc; unsigned int nentries_mask = src_ring->nentries_mask; - unsigned int sw_index = src_ring->sw_index; + unsigned int sw_index; unsigned int write_index = src_ring->write_index; u32 ctrl_addr = ce_state->ctrl_addr; u32 desc_flags = 0; int ret = 0; + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + if (nbytes > ce_state->src_sz_max) ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); + sw_index = ath10k_ce_src_ring_read_index_get_from_ddr(ar, ce_state->id); if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -299,10 +502,15 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, if (flags & CE_SEND_FLAG_GATHER) desc_flags |= CE_DESC_FLAGS_GATHER; + if (flags & CE_SEND_FLAG_BYTE_SWAP) desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; - sdesc.addr = __cpu_to_le32(buffer); + if (QCA_REV_WCN3990(ar)) + ath10k_ce_snoc_addr_config(&sdesc, buffer, flags); + else + sdesc.addr = __cpu_to_le32(buffer); + sdesc.nbytes = __cpu_to_le16(nbytes); sdesc.flags = __cpu_to_le16(desc_flags); @@ -314,8 +522,14 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (QCA_REV_WCN3990(ar)) + ath10k_ce_shadow_src_ring_write_index_set(ar, ctrl_addr, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -325,11 +539,11 @@ exit: void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *src_ring = pipe->src_ring; u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); /* * This function must be called only if there is an incomplete @@ -351,19 +565,19 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int 
transfer_id, unsigned int flags) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -371,14 +585,14 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int delta; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, pipe->src_ring->write_index, pipe->src_ring->sw_index - 1); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return delta; } @@ -386,21 +600,22 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; unsigned int sw_index = dest_ring->sw_index; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } -int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; @@ -409,12 +624,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); - if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + if ((pipe->id != 5) && + CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) return -ENOSPC; - desc->addr = __cpu_to_le32(paddr); + if (QCA_REV_WCN3990(ar)) { + desc->addr = paddr; + desc->addr &= CE_DESC_37BIT_ADDR_MASK; + } else { + desc->addr = __cpu_to_le32(paddr); + } + desc->nbytes = 0; dest_ring->per_transfer_context[write_index] = ctx; @@ -425,15 +647,29 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) return 0; } -int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + u32 ctrl_addr = pipe->ctrl_addr; + + write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; +} + +int 
ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) +{ + struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -444,14 +680,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) */ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) + unsigned int *nbytesp) { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; - struct ath10k *ar = ce_state->ar; unsigned int sw_index = dest_ring->sw_index; struct ce_desc *base = dest_ring->base_addr_owner_space; @@ -476,21 +708,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, desc->nbytes = 0; /* Return data from completed destination descriptor */ - *bufferp = __le32_to_cpu(sdesc.addr); *nbytesp = nbytes; - *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA); - - if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP) - *flagsp = CE_RECV_FLAG_SWAPPED; - else - *flagsp = 0; if (per_transfer_contextp) *per_transfer_contextp = dest_ring->per_transfer_context[sw_index]; - /* sanity */ - dest_ring->per_transfer_context[sw_index] = NULL; + /* Copy engine 5 (HTT Rx) will reuse the same transfer context. + * So update transfer context all CEs except CE5. + */ + if (ce_state->id != 5) + dest_ring->per_transfer_context[sw_index] = NULL; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); @@ -501,21 +729,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) + unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, - bufferp, nbytesp, - transfer_idp, flagsp); - spin_unlock_bh(&ar_pci->ce_lock); + nbytesp); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -530,7 +754,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct bus_opaque *ar_opaque; dest_ring = ce_state->dest_ring; @@ -538,9 +762,9 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ar_opaque = ath10k_bus_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); nentries_mask = dest_ring->nentries_mask; sw_index = dest_ring->sw_index; @@ -568,7 +792,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -636,7 +860,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct bus_opaque *ar_opaque; src_ring = 
ce_state->src_ring; @@ -644,9 +868,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ar_opaque = ath10k_bus_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -677,7 +901,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -686,13 +910,13 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_completed_send_next_nolock(ce_state, per_transfer_contextp); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -705,17 +929,18 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, */ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; + struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); /* Clear the copy-complete interrupts that will be handled here. */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, - HOST_IS_COPY_COMPLETE_MASK); + wm_regs->cc_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); if (ce_state->recv_cb) ce_state->recv_cb(ce_state); @@ -723,15 +948,15 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) if (ce_state->send_cb) ce_state->send_cb(ce_state); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); /* * Misc CE interrupts are not being handled, but still need * to be cleared. 
*/ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); } /* @@ -744,8 +969,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) { int ce_id; u32 intr_summary; + struct ath10k_ce_pipe *ce_state; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); - intr_summary = CE_INTERRUPT_SUMMARY(ar); + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return; + + if (ar->target_version == ATH10K_HW_WCN3990) + intr_summary = 0xFFF; + else + intr_summary = CE_INTERRUPT_SUMMARY(ar, ar_opaque); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { if (intr_summary & (1 << ce_id)) @@ -754,8 +987,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) /* no intr pending on this CE */ continue; - ath10k_ce_per_engine_service(ar, ce_id); + ce_state = &ar_opaque->ce_states[ce_id]; + if (ce_state->send_cb || ce_state->recv_cb) + ath10k_ce_per_engine_service(ar, ce_id); } + } /* @@ -797,22 +1033,53 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ce_id; + struct ath10k_ce_pipe *ce_state; + u8 ce_count; + if (QCA_REV_WCN3990(ar)) + ce_count = CE_COUNT; + else /* Skip the last copy engine, CE7 the diagnostic window, as that * uses polling and isn't initialized for interrupts. */ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) - ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); + ce_count = CE_COUNT - 1; + + for (ce_id = 0; ce_id < ce_count; ce_id++) { + ce_state = &ar_opaque->ce_states[ce_id]; + ath10k_ce_per_engine_handler_adjust(ce_state); + } +} + +void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id) +{ + u32 offset; + u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + offset = ar->hw_ce_regs->host_ie_addr + ctrl_addr; + ar_opaque->bus_ops->write32(ar, offset, 1); + ar_opaque->bus_ops->read32(ar, offset); +} + +void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id) +{ + u32 offset; + u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + offset = ar->hw_ce_regs->host_ie_addr + ctrl_addr; + ar_opaque->bus_ops->write32(ar, offset, 0); + ar_opaque->bus_ops->read32(ar, offset); } static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -838,7 +1105,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot init ce src ring id %d entries %d base_addr %p\n", + "boot init ce src ring id %d entries %d base_addr %pK\n", ce_id, nentries, src_ring->base_addr_owner_space); return 0; @@ -848,8 +1115,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = 
&ar_pci->ce_states[ce_id]; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -872,7 +1139,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot ce dest ring id %d entries %d base_addr %p\n", + "boot ce dest ring id %d entries %d base_addr %pK\n", ce_id, nentries, dest_ring->base_addr_owner_space); return 0; @@ -921,6 +1188,24 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + src_ring->shadow_base_unaligned = kzalloc( + nentries * sizeof(struct ce_desc), + GFP_KERNEL); + + if (!src_ring->shadow_base_unaligned) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(-ENOMEM); + } + + src_ring->shadow_base = (struct ce_desc *)PTR_ALIGN( + src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + return src_ring; } @@ -977,6 +1262,52 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, return dest_ring; } +void ce_config_rri_on_ddr(struct ath10k *ar) +{ + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 hi_paddr, low_paddr; + u32 ce_base_addr; + u32 ctrl1_regs; + int i; + + ar_opaque->vaddr_rri_on_ddr = + (u32 *)dma_alloc_coherent(ar->dev, + (CE_COUNT * sizeof(u32)), + &ar_opaque->paddr_rri_on_ddr, GFP_KERNEL); + + if (!ar_opaque->vaddr_rri_on_ddr) + return; + + low_paddr = lower_32_bits(ar_opaque->paddr_rri_on_ddr); + hi_paddr = upper_32_bits(ar_opaque->paddr_rri_on_ddr) & + CE_DESC_FLAGS_GET_MASK; + + ar_opaque->bus_ops->write32(ar, ar->hw_ce_regs->ce_rri_low, low_paddr); + ar_opaque->bus_ops->write32(ar, ar->hw_ce_regs->ce_rri_high, hi_paddr); + + for (i = 0; i < CE_COUNT; i++) { + ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; + ce_base_addr = ath10k_ce_base_address(ar, i); + ar_opaque->bus_ops->write32(ar, ce_base_addr + ctrl1_regs, + ar_opaque->bus_ops->read32(ar, ce_base_addr + ctrl1_regs) | + ar->hw_ce_regs->upd->mask); + } + + memset(ar_opaque->vaddr_rri_on_ddr, 0, CE_COUNT * sizeof(u32)); +} + +void ce_remove_rri_on_ddr(struct ath10k *ar) +{ + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + if (!ar_opaque->vaddr_rri_on_ddr) + return; + + dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), + ar_opaque->vaddr_rri_on_ddr, + ar_opaque->paddr_rri_on_ddr); +} + /* * Initialize a Copy Engine based on caller-supplied attributes. * This may be called once to initialize both source and destination @@ -1038,8 +1369,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; int ret; /* @@ -1048,11 +1379,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, * * For the lack of a better place do the check here. 
*/ - BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); ce_state->ar = ar; @@ -1095,10 +1426,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; if (ce_state->src_ring) { + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc) + diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 47b734ce7ecf..fe5f5680ca3d 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -22,7 +22,7 @@ /* Maximum number of Copy Engine's supported */ #define CE_COUNT_MAX 12 -#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096 +#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192 /* Descriptor rings must be aligned to this boundary */ #define CE_DESC_RING_ALIGN 8 @@ -38,6 +38,12 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) +#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31) + +#define CE_DESC_FLAGS_GET_MASK 0x1F +#define CE_DESC_37BIT_ADDR_MASK 0x1FFFFFFFFF +#define CE_DDR_RRI_MASK 0xFFFF +#define CE_DDR_RRI_SHIFT 16 /* Following desc flags are used in QCA99X0 */ #define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) @@ -46,11 +52,20 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask #define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb +#ifndef CONFIG_ATH10K_SNOC struct ce_desc { __le32 addr; __le16 nbytes; __le16 flags; /* %CE_DESC_FLAGS_ */ }; +#else +struct ce_desc { + __le64 addr; + u16 nbytes; /* length in register map */ + u16 flags; /* fw_metadata_high */ + u32 toeplitz_hash_result; +}; +#endif struct ath10k_ce_ring { /* Number of entries in this ring; must be power of 2 */ @@ -101,6 +116,9 @@ struct ath10k_ce_ring { /* CE address space */ u32 base_addr_ce_space; + char *shadow_base_unaligned; + struct ce_desc *shadow_base; + /* keep last */ void *per_transfer_context[0]; }; @@ -124,6 +142,81 @@ struct ath10k_ce_pipe { /* Copy Engine settable attributes */ struct ce_attr; +#define SHADOW_VALUE0 (ar->shadow_reg_value->shadow_reg_value_0) +#define SHADOW_VALUE1 (ar->shadow_reg_value->shadow_reg_value_1) +#define SHADOW_VALUE2 (ar->shadow_reg_value->shadow_reg_value_2) +#define SHADOW_VALUE3 (ar->shadow_reg_value->shadow_reg_value_3) +#define SHADOW_VALUE4 (ar->shadow_reg_value->shadow_reg_value_4) +#define SHADOW_VALUE5 (ar->shadow_reg_value->shadow_reg_value_5) +#define SHADOW_VALUE6 (ar->shadow_reg_value->shadow_reg_value_6) +#define SHADOW_VALUE7 (ar->shadow_reg_value->shadow_reg_value_7) +#define SHADOW_VALUE8 (ar->shadow_reg_value->shadow_reg_value_8) +#define SHADOW_VALUE9 (ar->shadow_reg_value->shadow_reg_value_9) +#define SHADOW_VALUE10 (ar->shadow_reg_value->shadow_reg_value_10) +#define SHADOW_VALUE11 (ar->shadow_reg_value->shadow_reg_value_11) +#define SHADOW_VALUE12 (ar->shadow_reg_value->shadow_reg_value_12) +#define SHADOW_VALUE13 
(ar->shadow_reg_value->shadow_reg_value_13) +#define SHADOW_VALUE14 (ar->shadow_reg_value->shadow_reg_value_14) +#define SHADOW_VALUE15 (ar->shadow_reg_value->shadow_reg_value_15) +#define SHADOW_VALUE16 (ar->shadow_reg_value->shadow_reg_value_16) +#define SHADOW_VALUE17 (ar->shadow_reg_value->shadow_reg_value_17) +#define SHADOW_VALUE18 (ar->shadow_reg_value->shadow_reg_value_18) +#define SHADOW_VALUE19 (ar->shadow_reg_value->shadow_reg_value_19) +#define SHADOW_VALUE20 (ar->shadow_reg_value->shadow_reg_value_20) +#define SHADOW_VALUE21 (ar->shadow_reg_value->shadow_reg_value_21) +#define SHADOW_VALUE22 (ar->shadow_reg_value->shadow_reg_value_22) +#define SHADOW_VALUE23 (ar->shadow_reg_value->shadow_reg_value_23) +#define SHADOW_ADDRESS0 (ar->shadow_reg_address->shadow_reg_address_0) +#define SHADOW_ADDRESS1 (ar->shadow_reg_address->shadow_reg_address_1) +#define SHADOW_ADDRESS2 (ar->shadow_reg_address->shadow_reg_address_2) +#define SHADOW_ADDRESS3 (ar->shadow_reg_address->shadow_reg_address_3) +#define SHADOW_ADDRESS4 (ar->shadow_reg_address->shadow_reg_address_4) +#define SHADOW_ADDRESS5 (ar->shadow_reg_address->shadow_reg_address_5) +#define SHADOW_ADDRESS6 (ar->shadow_reg_address->shadow_reg_address_6) +#define SHADOW_ADDRESS7 (ar->shadow_reg_address->shadow_reg_address_7) +#define SHADOW_ADDRESS8 (ar->shadow_reg_address->shadow_reg_address_8) +#define SHADOW_ADDRESS9 (ar->shadow_reg_address->shadow_reg_address_9) +#define SHADOW_ADDRESS10 (ar->shadow_reg_address->shadow_reg_address_10) +#define SHADOW_ADDRESS11 (ar->shadow_reg_address->shadow_reg_address_11) +#define SHADOW_ADDRESS12 (ar->shadow_reg_address->shadow_reg_address_12) +#define SHADOW_ADDRESS13 (ar->shadow_reg_address->shadow_reg_address_13) +#define SHADOW_ADDRESS14 (ar->shadow_reg_address->shadow_reg_address_14) +#define SHADOW_ADDRESS15 (ar->shadow_reg_address->shadow_reg_address_15) +#define SHADOW_ADDRESS16 (ar->shadow_reg_address->shadow_reg_address_16) +#define SHADOW_ADDRESS17 (ar->shadow_reg_address->shadow_reg_address_17) +#define SHADOW_ADDRESS18 (ar->shadow_reg_address->shadow_reg_address_18) +#define SHADOW_ADDRESS19 (ar->shadow_reg_address->shadow_reg_address_19) +#define SHADOW_ADDRESS20 (ar->shadow_reg_address->shadow_reg_address_20) +#define SHADOW_ADDRESS21 (ar->shadow_reg_address->shadow_reg_address_21) +#define SHADOW_ADDRESS22 (ar->shadow_reg_address->shadow_reg_address_22) +#define SHADOW_ADDRESS23 (ar->shadow_reg_address->shadow_reg_address_23) + +#define SHADOW_ADDRESS(i) (SHADOW_ADDRESS0 + \ + i * (SHADOW_ADDRESS1 - SHADOW_ADDRESS0)) + +u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr); +u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr); + +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + +static inline struct bus_opaque *ath10k_bus_priv(struct ath10k *ar) +{ + return (struct bus_opaque *)ar->drv_priv; +} + +struct bus_opaque { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + u32 *vaddr_rri_on_ddr; + dma_addr_t paddr_rri_on_ddr; +}; + /*==================Send====================*/ /* ath10k_ce_send flags */ @@ -144,7 +237,7 @@ struct ce_attr; */ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_send_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, /* 14 bits */ unsigned int transfer_id, @@ -152,7 +245,7 @@ int 
ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int transfer_id, unsigned int flags); @@ -164,8 +257,11 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); /*==================Recv=======================*/ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); -int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); -int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries); /* recv flags */ /* Data is byte-swapped */ @@ -177,10 +273,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); */ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp); + unsigned int *nbytesp); /* * Supply data for the next completed unprocessed send descriptor. * Pops 1 completed send buffer from Source ring. @@ -199,6 +292,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr); void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); +void ce_config_rri_on_ddr(struct ath10k *ar); +void ce_remove_rri_on_ddr(struct ath10k *ar); /*==================CE Engine Shutdown=======================*/ /* @@ -212,10 +307,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp); + unsigned int *nbytesp); /* * Support clean shutdown by allowing the caller to cancel @@ -233,6 +325,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); +void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id); +void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? 
*/ @@ -268,143 +362,14 @@ struct ce_attr { void (*recv_cb)(struct ath10k_ce_pipe *); }; -#define SR_BA_ADDRESS 0x0000 -#define SR_SIZE_ADDRESS 0x0004 -#define DR_BA_ADDRESS 0x0008 -#define DR_SIZE_ADDRESS 0x000c -#define CE_CMD_ADDRESS 0x0018 - -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ - (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ - CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) - -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \ - (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \ - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ - (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) - -#define CE_CTRL1_DMAX_LENGTH_MSB 15 -#define CE_CTRL1_DMAX_LENGTH_LSB 0 -#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff -#define CE_CTRL1_DMAX_LENGTH_GET(x) \ - (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB) -#define CE_CTRL1_DMAX_LENGTH_SET(x) \ - (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) - -#define CE_CTRL1_ADDRESS 0x0010 -#define CE_CTRL1_HW_MASK 0x0007ffff -#define CE_CTRL1_SW_MASK 0x0007ffff -#define CE_CTRL1_HW_WRITE_MASK 0x00000000 -#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff -#define CE_CTRL1_RSTMASK 0xffffffff -#define CE_CTRL1_RESET 0x00000080 - -#define CE_CMD_HALT_STATUS_MSB 3 -#define CE_CMD_HALT_STATUS_LSB 3 -#define CE_CMD_HALT_STATUS_MASK 0x00000008 -#define CE_CMD_HALT_STATUS_GET(x) \ - (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB) -#define CE_CMD_HALT_STATUS_SET(x) \ - (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK) -#define CE_CMD_HALT_STATUS_RESET 0 -#define CE_CMD_HALT_MSB 0 -#define CE_CMD_HALT_MASK 0x00000001 - -#define HOST_IE_COPY_COMPLETE_MSB 0 -#define HOST_IE_COPY_COMPLETE_LSB 0 -#define HOST_IE_COPY_COMPLETE_MASK 0x00000001 -#define HOST_IE_COPY_COMPLETE_GET(x) \ - (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB) -#define HOST_IE_COPY_COMPLETE_SET(x) \ - (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK) -#define HOST_IE_COPY_COMPLETE_RESET 0 -#define HOST_IE_ADDRESS 0x002c - -#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 -#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 -#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 -#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 -#define HOST_IS_COPY_COMPLETE_MASK 0x00000001 -#define HOST_IS_ADDRESS 0x0030 - -#define MISC_IE_ADDRESS 0x0034 - -#define MISC_IS_AXI_ERR_MASK 0x00000400 - -#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200 -#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100 -#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 -#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 -#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 - -#define MISC_IS_ADDRESS 0x0038 - -#define SR_WR_INDEX_ADDRESS 0x003c - -#define DST_WR_INDEX_ADDRESS 0x0040 - -#define CURRENT_SRRI_ADDRESS 0x0044 - -#define CURRENT_DRRI_ADDRESS 0x0048 - -#define SRC_WATERMARK_LOW_MSB 31 -#define SRC_WATERMARK_LOW_LSB 16 -#define SRC_WATERMARK_LOW_MASK 0xffff0000 -#define SRC_WATERMARK_LOW_GET(x) \ - (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB) -#define SRC_WATERMARK_LOW_SET(x) \ - (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) -#define 
SRC_WATERMARK_LOW_RESET 0 -#define SRC_WATERMARK_HIGH_MSB 15 -#define SRC_WATERMARK_HIGH_LSB 0 -#define SRC_WATERMARK_HIGH_MASK 0x0000ffff -#define SRC_WATERMARK_HIGH_GET(x) \ - (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB) -#define SRC_WATERMARK_HIGH_SET(x) \ - (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) -#define SRC_WATERMARK_HIGH_RESET 0 -#define SRC_WATERMARK_ADDRESS 0x004c - -#define DST_WATERMARK_LOW_LSB 16 -#define DST_WATERMARK_LOW_MASK 0xffff0000 -#define DST_WATERMARK_LOW_SET(x) \ - (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) -#define DST_WATERMARK_LOW_RESET 0 -#define DST_WATERMARK_HIGH_MSB 15 -#define DST_WATERMARK_HIGH_LSB 0 -#define DST_WATERMARK_HIGH_MASK 0x0000ffff -#define DST_WATERMARK_HIGH_GET(x) \ - (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB) -#define DST_WATERMARK_HIGH_SET(x) \ - (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) -#define DST_WATERMARK_HIGH_RESET 0 -#define DST_WATERMARK_ADDRESS 0x0050 +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \ + - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) { return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; } -#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ - HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ - HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ - HOST_IS_DST_RING_HIGH_WATERMARK_MASK) - -#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ - MISC_IS_DST_ADDR_ERR_MASK | \ - MISC_IS_SRC_LEN_ERR_MASK | \ - MISC_IS_DST_MAX_LEN_VIO_MASK | \ - MISC_IS_DST_RING_OVERFLOW_MASK | \ - MISC_IS_SRC_RING_OVERFLOW_MASK) - #define CE_SRC_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) @@ -413,9 +378,11 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ - (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + (((int)(toidx) - (int)(fromidx)) & (nentries_mask)) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) +#define CE_RING_IDX_ADD(nentries_mask, idx, num) \ + (((idx) + (num)) & (nentries_mask)) #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ ar->regs->ce_wrap_intr_sum_host_msi_lsb @@ -426,9 +393,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ +#define CE_INTERRUPT_SUMMARY(ar, ar_opaque) \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ + ar_opaque->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + \ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index ef9fb9ddde5e..784a11a74443 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <asm/byteorder.h> #include "core.h" #include "mac.h" @@ -55,18 +56,38 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, - .fw = QCA988X_HW_2_0_FW_FILE, - .otp = QCA988X_HW_2_0_OTP_FILE, .board = QCA988X_HW_2_0_BOARD_DATA_FILE, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + }, + { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board = QCA9887_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -78,14 +99,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .fw = QCA6174_HW_2_1_FW_FILE, - .otp = QCA6174_HW_2_1_OTP_FILE, .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -97,14 +118,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .fw = QCA6174_HW_2_1_FW_FILE, - .otp = QCA6174_HW_2_1_OTP_FILE, .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -116,14 +137,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, - .fw = QCA6174_HW_3_0_FW_FILE, - .otp = QCA6174_HW_3_0_OTP_FILE, .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -135,15 +156,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, - .fw = QCA6174_HW_3_0_FW_FILE, - .otp = QCA6174_HW_3_0_OTP_FILE, .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -154,16 +175,70 @@ static const struct 
ath10k_hw_params ath10k_hw_params_list[] = { .uart_pin = 7, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, - .fw = QCA99X0_HW_2_0_FW_FILE, - .otp = QCA99X0_HW_2_0_OTP_FILE, .board = QCA99X0_HW_2_0_BOARD_DATA_FILE, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + }, + { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + }, + { + .id = QCA9888_HW_2_0_DEV_VERSION, + .dev_id = QCA9888_2_0_DEVICE_ID, + .name = "qca9888 hw2.0", + .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 3, + .rx_chain_mask = 3, + .max_spatial_stream = 2, + .cal_data_len = 12064, + .fw = { + .dir = QCA9888_HW_2_0_FW_DIR, + .board = QCA9888_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, }, { @@ -175,14 +250,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .fw = QCA9377_HW_1_0_FW_FILE, - .otp = QCA9377_HW_1_0_OTP_FILE, .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, { @@ -194,16 +269,57 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .fw = QCA9377_HW_1_0_FW_FILE, - .otp = QCA9377_HW_1_0_OTP_FILE, .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, + .hw_ops = &qca988x_ops, .decap_align_bytes = 4, }, + { + .id = QCA4019_HW_1_0_DEV_VERSION, + .dev_id = 0, + .name = "qca4019 hw1.0", + .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, + .otp_exe_param = 0x0010000, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 125000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0x3, + .rx_chain_mask = 0x3, + .max_spatial_stream = 2, + .cal_data_len = 12064, + .fw = { + .dir = QCA4019_HW_1_0_FW_DIR, + .board = QCA4019_HW_1_0_BOARD_DATA_FILE, + .board_size = 
QCA4019_BOARD_DATA_SZ, + .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + }, + { + .id = ATH10K_HW_WCN3990, + .dev_id = 0, + .name = "wcn3990 hw1.0", + .continuous_frag_desc = true, + .tx_chain_mask = 0x7, + .rx_chain_mask = 0x7, + .max_spatial_stream = 4, + .fw = { + .dir = WCN3990_HW_1_0_FW_DIR, + }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &wcn3990_ops, + .decap_align_bytes = 1, + }, }; static const char *const ath10k_core_fw_feature_str[] = { @@ -219,6 +335,10 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", + [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", + [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", + [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -245,7 +365,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar, int i; for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { - if (test_bit(i, ar->fw_features)) { + if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) { if (len > 0) len += scnprintf(buf + len, buf_len - len, ","); @@ -437,18 +557,18 @@ exit: return ret; } -static int ath10k_download_cal_file(struct ath10k *ar) +static int ath10k_download_cal_file(struct ath10k *ar, + const struct firmware *file) { int ret; - if (!ar->cal_file) + if (!file) return -ENOENT; - if (IS_ERR(ar->cal_file)) - return PTR_ERR(ar->cal_file); + if (IS_ERR(file)) + return PTR_ERR(file); - ret = ath10k_download_board_data(ar, ar->cal_file->data, - ar->cal_file->size); + ret = ath10k_download_board_data(ar, file->data, file->size); if (ret) { ath10k_err(ar, "failed to download cal_file data: %d\n", ret); return ret; @@ -459,7 +579,7 @@ static int ath10k_download_cal_file(struct ath10k *ar) return 0; } -static int ath10k_download_cal_dt(struct ath10k *ar) +static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) { struct device_node *node; int data_len; @@ -473,13 +593,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar) */ return -ENOENT; - if (!of_get_property(node, "qcom,ath10k-calibration-data", - &data_len)) { + if (!of_get_property(node, dt_name, &data_len)) { /* The calibration data node is optional */ return -ENOENT; } - if (data_len != QCA988X_CAL_DATA_LEN) { + if (data_len != ar->hw_params.cal_data_len) { ath10k_warn(ar, "invalid calibration data length in DT: %d\n", data_len); ret = -EMSGSIZE; @@ -492,8 +611,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar) goto out; } - ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data", - data, data_len); + ret = of_property_read_u8_array(node, dt_name, data, data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data from DT: %d\n", ret); @@ -516,6 +634,35 @@ out: return ret; } +static int ath10k_download_cal_eeprom(struct ath10k *ar) +{ + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + 
+out_free: + kfree(data); + + return ret; +} + static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; @@ -524,7 +671,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) address = ar->hw_params.patch_load_addr; - if (!ar->otp_data || !ar->otp_len) { + if (!ar->normal_mode_fw.fw_file.otp_data || + !ar->normal_mode_fw.fw_file.otp_len) { ath10k_warn(ar, "failed to retrieve board id because of invalid otp\n"); return -ENODATA; @@ -532,9 +680,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd for board id\n", - address, ar->otp_len); + address, ar->normal_mode_fw.fw_file.otp_len); - ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); + ret = ath10k_bmi_fast_download(ar, address, + ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp for board id check: %d\n", ret); @@ -575,7 +725,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; - ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); + ret = ath10k_download_board_data(ar, + ar->running_fw->board_data, + ar->running_fw->board_len); if (ret) { ath10k_err(ar, "failed to download board data: %d\n", ret); return ret; @@ -583,16 +735,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* OTP is optional */ - if (!ar->otp_data || !ar->otp_len) { - ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", - ar->otp_data, ar->otp_len); + if (!ar->running_fw->fw_file.otp_data || + !ar->running_fw->fw_file.otp_len) { + ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n", + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); return 0; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", - address, ar->otp_len); + address, ar->running_fw->fw_file.otp_len); - ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); + ret = ath10k_bmi_fast_download(ar, address, + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp (%d)\n", ret); return ret; @@ -607,7 +763,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, - ar->fw_features)) && + ar->running_fw->fw_file.fw_features)) && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; @@ -616,46 +772,32 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) return 0; } -static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) +static int ath10k_download_fw(struct ath10k *ar) { u32 address, data_len; - const char *mode_name; const void *data; int ret; address = ar->hw_params.patch_load_addr; - switch (mode) { - case ATH10K_FIRMWARE_MODE_NORMAL: - data = ar->firmware_data; - data_len = ar->firmware_len; - mode_name = "normal"; - ret = ath10k_swap_code_seg_configure(ar, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); - if (ret) { - ath10k_err(ar, "failed to configure fw code swap: %d\n", - ret); - return ret; - } - break; - case ATH10K_FIRMWARE_MODE_UTF: - data = ar->testmode.utf_firmware_data; - data_len = ar->testmode.utf_firmware_len; - mode_name = "utf"; - break; - default: - ath10k_err(ar, "unknown firmware mode: 
%d\n", mode); - return -EINVAL; + data = ar->running_fw->fw_file.firmware_data; + data_len = ar->running_fw->fw_file.firmware_len; + + ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file); + if (ret) { + ath10k_err(ar, "failed to configure fw code swap: %d\n", + ret); + return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot uploading firmware image %p len %d mode %s\n", - data, data_len, mode_name); + "boot uploading firmware image %pK len %d\n", + data, data_len); ret = ath10k_bmi_fast_download(ar, address, data, data_len); if (ret) { - ath10k_err(ar, "failed to download %s firmware: %d\n", - mode_name, ret); + ath10k_err(ar, "failed to download firmware: %d\n", + ret); return ret; } @@ -664,42 +806,50 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) static void ath10k_core_free_board_files(struct ath10k *ar) { - if (!IS_ERR(ar->board)) - release_firmware(ar->board); + if (!IS_ERR(ar->normal_mode_fw.board)) + release_firmware(ar->normal_mode_fw.board); - ar->board = NULL; - ar->board_data = NULL; - ar->board_len = 0; + ar->normal_mode_fw.board = NULL; + ar->normal_mode_fw.board_data = NULL; + ar->normal_mode_fw.board_len = 0; } static void ath10k_core_free_firmware_files(struct ath10k *ar) { - if (!IS_ERR(ar->otp)) - release_firmware(ar->otp); - - if (!IS_ERR(ar->firmware)) - release_firmware(ar->firmware); + if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware)) + release_firmware(ar->normal_mode_fw.fw_file.firmware); if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); - ath10k_swap_code_seg_release(ar); + if (!IS_ERR(ar->pre_cal_file)) + release_firmware(ar->pre_cal_file); - ar->otp = NULL; - ar->otp_data = NULL; - ar->otp_len = 0; + ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file); - ar->firmware = NULL; - ar->firmware_data = NULL; - ar->firmware_len = 0; + ar->normal_mode_fw.fw_file.otp_data = NULL; + ar->normal_mode_fw.fw_file.otp_len = 0; + + ar->normal_mode_fw.fw_file.firmware = NULL; + ar->normal_mode_fw.fw_file.firmware_data = NULL; + ar->normal_mode_fw.fw_file.firmware_len = 0; ar->cal_file = NULL; + ar->pre_cal_file = NULL; } static int ath10k_fetch_cal_file(struct ath10k *ar) { char filename[100]; + /* pre-cal-<bus>-<id>.bin */ + scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (!IS_ERR(ar->pre_cal_file)) + goto success; + /* cal-<bus>-<id>.bin */ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); @@ -708,7 +858,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar) if (IS_ERR(ar->cal_file)) /* calibration file is optional, don't print any warnings */ return PTR_ERR(ar->cal_file); - +success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", ATH10K_FW_DIR, filename); @@ -722,14 +872,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar) return -EINVAL; } - ar->board = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(ar->board)) - return PTR_ERR(ar->board); + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); - ar->board_data = ar->board->data; - ar->board_len = ar->board->size; + ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; + ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; return 0; } 
@@ -789,8 +939,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, "boot found board data for '%s'", boardname); - ar->board_data = board_ie_data; - ar->board_len = board_ie_len; + ar->normal_mode_fw.board_data = board_ie_data; + ar->normal_mode_fw.board_len = board_ie_len; ret = 0; goto out; @@ -823,12 +973,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const u8 *data; int ret, ie_id; - ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename); - if (IS_ERR(ar->board)) - return PTR_ERR(ar->board); + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + filename); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); - data = ar->board->data; - len = ar->board->size; + data = ar->normal_mode_fw.board->data; + len = ar->normal_mode_fw.board->size; /* magic has extra null byte padded */ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1; @@ -895,10 +1047,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, } out: - if (!ar->board_data || !ar->board_len) { + if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", - ar->hw_params.fw.dir, boardname, filename); + boardname, ar->hw_params.fw.dir, filename); ret = -ENODATA; goto err; } @@ -963,51 +1115,8 @@ success: return 0; } -static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) -{ - int ret = 0; - - if (ar->hw_params.fw.fw == NULL) { - ath10k_err(ar, "firmware file not defined\n"); - return -EINVAL; - } - - ar->firmware = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.fw); - if (IS_ERR(ar->firmware)) { - ret = PTR_ERR(ar->firmware); - ath10k_err(ar, "could not fetch firmware (%d)\n", ret); - goto err; - } - - ar->firmware_data = ar->firmware->data; - ar->firmware_len = ar->firmware->size; - - /* OTP may be undefined. 
If so, don't fetch it at all */ - if (ar->hw_params.fw.otp == NULL) - return 0; - - ar->otp = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.otp); - if (IS_ERR(ar->otp)) { - ret = PTR_ERR(ar->otp); - ath10k_err(ar, "could not fetch otp (%d)\n", ret); - goto err; - } - - ar->otp_data = ar->otp->data; - ar->otp_len = ar->otp->size; - - return 0; - -err: - ath10k_core_free_firmware_files(ar); - return ret; -} - -static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file) { size_t magic_len, len, ie_len; int ie_id, i, index, bit, ret; @@ -1016,15 +1125,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) __le32 *timestamp, *version; /* first fetch the firmware file (firmware-*.bin) */ - ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); - if (IS_ERR(ar->firmware)) { + fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + name); + if (IS_ERR(fw_file->firmware)) { ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", - ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); - return PTR_ERR(ar->firmware); + ar->hw_params.fw.dir, name, + PTR_ERR(fw_file->firmware)); + return PTR_ERR(fw_file->firmware); } - data = ar->firmware->data; - len = ar->firmware->size; + data = fw_file->firmware->data; + len = fw_file->firmware->size; /* magic also includes the null byte, check that as well */ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; @@ -1067,15 +1178,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) switch (ie_id) { case ATH10K_FW_IE_FW_VERSION: - if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1) + if (ie_len > sizeof(fw_file->fw_version) - 1) break; - memcpy(ar->hw->wiphy->fw_version, data, ie_len); - ar->hw->wiphy->fw_version[ie_len] = '\0'; + memcpy(fw_file->fw_version, data, ie_len); + fw_file->fw_version[ie_len] = '\0'; ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw version %s\n", - ar->hw->wiphy->fw_version); + fw_file->fw_version); break; case ATH10K_FW_IE_TIMESTAMP: if (ie_len != sizeof(u32)) @@ -1102,21 +1213,21 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ath10k_dbg(ar, ATH10K_DBG_BOOT, "Enabling feature bit: %i\n", i); - __set_bit(i, ar->fw_features); + __set_bit(i, fw_file->fw_features); } } ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", - ar->fw_features, - sizeof(ar->fw_features)); + fw_file->fw_features, + sizeof(fw_file->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw image ie (%zd B)\n", ie_len); - ar->firmware_data = data; - ar->firmware_len = ie_len; + fw_file->firmware_data = data; + fw_file->firmware_len = ie_len; break; case ATH10K_FW_IE_OTP_IMAGE: @@ -1124,8 +1235,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) "found otp image ie (%zd B)\n", ie_len); - ar->otp_data = data; - ar->otp_len = ie_len; + fw_file->otp_data = data; + fw_file->otp_len = ie_len; break; case ATH10K_FW_IE_WMI_OP_VERSION: @@ -1134,10 +1245,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) version = (__le32 *)data; - ar->wmi.op_version = le32_to_cpup(version); + fw_file->wmi_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", - ar->wmi.op_version); + fw_file->wmi_op_version); break; case ATH10K_FW_IE_HTT_OP_VERSION: if (ie_len != sizeof(u32)) 
@@ -1145,17 +1256,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) version = (__le32 *)data; - ar->htt.op_version = le32_to_cpup(version); + fw_file->htt_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", - ar->htt.op_version); + fw_file->htt_op_version); break; case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw code swap image ie (%zd B)\n", ie_len); - ar->swap.firmware_codeswap_data = data; - ar->swap.firmware_codeswap_len = ie_len; + fw_file->codeswap_data = data; + fw_file->codeswap_len = ie_len; break; default: ath10k_warn(ar, "Unknown FW IE: %u\n", @@ -1170,11 +1281,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) data += ie_len; } - if (!ar->firmware_data || !ar->firmware_len) { - ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", - ar->hw_params.fw.dir, name); - ret = -ENOMEDIUM; - goto err; + if (ar->is_bmi) { + if (!fw_file->firmware_data || + !fw_file->firmware_len) { + ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", + ar->hw_params.fw.dir, name); + ret = -ENOMEDIUM; + goto err; + } } return 0; @@ -1187,47 +1301,117 @@ err: static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret; + struct ath10k_fw_file *fw_file; + + if (!ar->is_bmi) { + fw_file = &ar->normal_mode_fw.fw_file; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + __set_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + fw_file->fw_features); + __set_bit(WMI_SERVICE_WOW, ar->wmi.svc_map); + __set_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + fw_file->fw_features); + return 0; + } - /* calibration file is optional, don't check for any errors */ - ath10k_fetch_cal_file(ar); + if (ar->is_bmi) { + /* calibration file is optional, don't check for any errors */ + ath10k_fetch_cal_file(ar); + } ar->fw_api = 5; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 4; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 3; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE); + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE, + &ar->normal_mode_fw.fw_file); if (ret == 0) goto success; ar->fw_api = 2; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); - if (ret == 0) + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE, + &ar->normal_mode_fw.fw_file); + if (ret) + return ret; + +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + + return 0; +} + +static int ath10k_core_pre_cal_download(struct ath10k *ar) +{ + int ret; + + ret = ath10k_download_cal_file(ar, ar->pre_cal_file); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; goto success; + } - ar->fw_api = 1; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre 
calibration file, try DT next: %d\n", + ret); - ret = ath10k_core_fetch_firmware_api_1(ar); - if (ret) + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "unable to load pre cal data from DT: %d\n", ret); return ret; + } + ar->cal_mode = ATH10K_PRE_CAL_MODE_DT; success: - ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + + return 0; +} + +static int ath10k_core_pre_cal_config(struct ath10k *ar) +{ + int ret; + + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "failed to load pre cal data: %d\n", ret); + return ret; + } + + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret) { + ath10k_err(ar, "failed to get board id: %d\n", ret); + return ret; + } + + ret = ath10k_download_and_run_otp(ar); + if (ret) { + ath10k_err(ar, "failed to run otp: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal configuration done successfully\n"); return 0; } @@ -1236,7 +1420,15 @@ static int ath10k_download_cal_data(struct ath10k *ar) { int ret; - ret = ath10k_download_cal_file(ar); + ret = ath10k_core_pre_cal_config(ar); + if (ret == 0) + return 0; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal download procedure failed, try cal file: %d\n", + ret); + + ret = ath10k_download_cal_file(ar, ar->cal_file); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; goto done; @@ -1246,14 +1438,24 @@ static int ath10k_download_cal_data(struct ath10k *ar) "boot did not find a calibration file, try DT next: %d\n", ret); - ret = ath10k_download_cal_dt(ar); + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_DT; goto done; } ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot did not find DT entry, try OTP next: %d\n", + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", ret); ret = ath10k_download_and_run_otp(ar); @@ -1350,13 +1552,15 @@ static void ath10k_core_restart(struct work_struct *work) ieee80211_stop_queues(ar->hw); ath10k_drain_tx(ar); - complete_all(&ar->scan.started); - complete_all(&ar->scan.completed); - complete_all(&ar->scan.on_channel); - complete_all(&ar->offchan_tx_completed); - complete_all(&ar->install_key_done); - complete_all(&ar->vdev_setup_done); - complete_all(&ar->thermal.wmi_sync); + complete(&ar->scan.started); + complete(&ar->scan.completed); + complete(&ar->scan.on_channel); + complete(&ar->offchan_tx_completed); + complete(&ar->install_key_done); + complete(&ar->vdev_setup_done); + complete(&ar->vdev_delete_done); + complete(&ar->thermal.wmi_sync); + complete(&ar->bss_survey_done); wake_up(&ar->htt.empty_tx_wq); wake_up(&ar->wmi.tx_credits_wq); wake_up(&ar->peer_mapping_wq); @@ -1394,15 +1598,17 @@ static void ath10k_core_restart(struct work_struct *work) static int ath10k_core_init_firmware_features(struct ath10k *ar) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) && - !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) && + !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { ath10k_err(ar, 
"feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); return -EINVAL; } - if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { + if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n", - ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version); + ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version); return -EINVAL; } @@ -1414,7 +1620,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) break; case ATH10K_CRYPT_MODE_SW: if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, - ar->fw_features)) { + fw_file->fw_features)) { ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); return -EINVAL; } @@ -1433,7 +1639,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) if (rawmode) { if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, - ar->fw_features)) { + fw_file->fw_features)) { ath10k_err(ar, "rawmode = 1 requires support from firmware"); return -EINVAL; } @@ -1458,19 +1664,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) /* Backwards compatibility for firmwares without * ATH10K_FW_IE_WMI_OP_VERSION. */ - if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, - ar->fw_features)) - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2; + fw_file->fw_features)) + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2; else - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; } else { - ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; } } - switch (ar->wmi.op_version) { + switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->max_num_peers = TARGET_NUM_PEERS; ar->max_num_stations = TARGET_NUM_STATIONS; @@ -1483,8 +1689,13 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: - ar->max_num_peers = TARGET_10X_NUM_PEERS; - ar->max_num_stations = TARGET_10X_NUM_STATIONS; + if (ath10k_peer_stats_enabled(ar)) { + ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; + ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; + } else { + ar->max_num_peers = TARGET_10X_NUM_PEERS; + ar->max_num_stations = TARGET_10X_NUM_STATIONS; + } ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PEER; @@ -1495,11 +1706,16 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS; - ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; + if (QCA_REV_WCN3990(ar)) + ar->htt.max_num_pending_tx = + TARGET_HL_1_0_NUM_MSDU_DESC; + else + ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_10_4: ar->max_num_peers = TARGET_10_4_NUM_PEERS; @@ -1507,9 +1723,15 @@ static 
int ath10k_core_init_firmware_features(struct ath10k *ar) ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; - ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; - ar->fw_stats_req_mask = WMI_STAT_PEER; - ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM; + ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | + WMI_10_4_STAT_PEER_EXTD; + ar->max_spatial_stream = ar->hw_params.max_spatial_stream; + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + fw_file->fw_features)) + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC; + else + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: @@ -1520,23 +1742,23 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) /* Backwards compatibility for firmwares without * ATH10K_FW_IE_HTT_OP_VERSION. */ - if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { - switch (ar->wmi.op_version) { + if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { + switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; break; case ATH10K_FW_WMI_OP_VERSION_TLV: - ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; break; case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: - WARN_ON(1); + ath10k_err(ar, "htt op version not found from fw meta data"); return -EINVAL; } } @@ -1544,48 +1766,105 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) return 0; } -int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) +static int ath10k_core_reset_rx_filter(struct ath10k *ar) +{ + int ret; + int vdev_id; + int vdev_type; + int vdev_subtype; + const u8 *vdev_addr; + + vdev_id = 0; + vdev_type = WMI_VDEV_TYPE_STA; + vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); + vdev_addr = ar->mac_addr; + + ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype, + vdev_addr); + if (ret) { + ath10k_err(ar, "failed to create dummy vdev: %d\n", ret); + return ret; + } + + ret = ath10k_wmi_vdev_delete(ar, vdev_id); + if (ret) { + ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret); + return ret; + } + + /* WMI and HTT may use separate HIF pipes and are not guaranteed to be + * serialized properly implicitly. + * + * Moreover (most) WMI commands have no explicit acknowledges. It is + * possible to infer it implicitly by poking firmware with echo + * command - getting a reply means all preceding comments have been + * (mostly) processed. + * + * In case of vdev create/delete this is sufficient. + * + * Without this it's possible to end up with a race when HTT Rx ring is + * started before vdev create/delete hack is complete allowing a short + * window of opportunity to receive (and Tx ACK) a bunch of frames. 
+ */ + ret = ath10k_wmi_barrier(ar); + if (ret) { + ath10k_err(ar, "failed to ping firmware: %d\n", ret); + return ret; + } + + return 0; +} + +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw) { int status; + u32 val; lockdep_assert_held(&ar->conf_mutex); clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); - ath10k_bmi_start(ar); + ar->running_fw = fw; - if (ath10k_init_configure_target(ar)) { - status = -EINVAL; - goto err; - } + if (ar->is_bmi) { - status = ath10k_download_cal_data(ar); - if (status) - goto err; + ath10k_bmi_start(ar); - /* Some of of qca988x solutions are having global reset issue - * during target initialization. Bypassing PLL setting before - * downloading firmware and letting the SoC run on REF_CLK is - * fixing the problem. Corresponding firmware change is also needed - * to set the clock source once the target is initialized. - */ - if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, - ar->fw_features)) { - status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); - if (status) { - ath10k_err(ar, "could not write to skip_clock_init: %d\n", - status); + if (ath10k_init_configure_target(ar)) { + status = -EINVAL; goto err; } - } - status = ath10k_download_fw(ar, mode); - if (status) - goto err; + status = ath10k_download_cal_data(ar); + if (status) + goto err; - status = ath10k_init_uart(ar); - if (status) - goto err; + /* Some of of qca988x solutions are having global reset issue + * during target initialization. Bypassing PLL setting before + * downloading firmware and letting the SoC run on REF_CLK is + * fixing the problem. Corresponding firmware change is also + * needed to set the clock source once the target is + * initialized. + */ + if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); + if (status) { + ath10k_err(ar, "skip_clock_init failed: %d\n", + status); + goto err; + } + } + + status = ath10k_download_fw(ar); + if (status) + goto err; + + status = ath10k_init_uart(ar); + if (status) + goto err; + } ar->htc.htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; @@ -1596,9 +1875,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err; } - status = ath10k_bmi_done(ar); - if (status) - goto err; + if (ar->is_bmi) { + status = ath10k_bmi_done(ar); + if (status) + goto err; + } status = ath10k_wmi_attach(ar); if (status) { @@ -1656,6 +1937,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err_hif_stop; } + status = ath10k_pktlog_connect(ar); + if (status) { + ath10k_err(ar, "could not connect pktlog: %d\n", status); + goto err_hif_stop; + } + status = ath10k_htc_start(&ar->htc); if (status) { ath10k_err(ar, "failed to start htc: %d\n", status); @@ -1673,6 +1960,33 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) { + val = 0; + if (ath10k_peer_stats_enabled(ar)) + val = WMI_10_4_PEER_STATS; + + if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) + val |= WMI_10_4_BSS_CHANNEL_INFO_64; + + /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. To support Bluetooth coexistence pdev param, + * WMI_COEX_GPIO_SUPPORT of extended resource config should be + * enabled always. 
+ */ + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + + status = ath10k_mac_ext_resource_config(ar, val); + if (status) { + ath10k_err(ar, + "failed to send ext resource cfg command : %d\n", + status); + goto err_hif_stop; + } + } + status = ath10k_wmi_cmd_init(ar); if (status) { ath10k_err(ar, "could not send WMI init command (%d)\n", @@ -1686,13 +2000,38 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err_hif_stop; } + /* Some firmware revisions do not properly set up hardware rx filter + * registers. + * + * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK + * is filled with 0s instead of 1s allowing HW to respond with ACKs to + * any frames that matches MAC_PCU_RX_FILTER which is also + * misconfigured to accept anything. + * + * The ADDR1 is programmed using internal firmware structure field and + * can't be (easily/sanely) reached from the driver explicitly. It is + * possible to implicitly make it correct by creating a dummy vdev and + * then deleting it. + */ + if (!QCA_REV_WCN3990(ar)) { + status = ath10k_core_reset_rx_filter(ar); + if (status) { + ath10k_err(ar, "failed to reset rx filter: %d\n", + status); + goto err_hif_stop; + } + } + status = ath10k_htt_rx_ring_refill(ar); if (status) { ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); goto err_hif_stop; } - ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; + if (ar->max_num_vdevs >= 64) + ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL; + else + ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; INIT_LIST_HEAD(&ar->arvifs); @@ -1755,7 +2094,7 @@ void ath10k_core_stop(struct ath10k *ar) /* try to suspend target */ if (ar->state != ATH10K_STATE_RESTARTING && ar->state != ATH10K_STATE_UTF) - ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); + ath10k_wait_for_suspend(ar, ar->hw_values->pdev_suspend_option); ath10k_hif_stop(ar); ath10k_htt_tx_free(&ar->htt); @@ -1779,16 +2118,17 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } - memset(&target_info, 0, sizeof(target_info)); - ret = ath10k_bmi_get_target_info(ar, &target_info); - if (ret) { - ath10k_err(ar, "could not get target info (%d)\n", ret); - goto err_power_down; + if (ar->is_bmi) { + memset(&target_info, 0, sizeof(target_info)); + ret = ath10k_bmi_get_target_info(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; } - ar->target_version = target_info.version; - ar->hw->wiphy->hw_version = target_info.version; - ret = ath10k_init_hw_params(ar); if (ret) { ath10k_err(ar, "could not get hw params (%d)\n", ret); @@ -1801,17 +2141,37 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_power_down; } - ret = ath10k_core_get_board_id_from_otp(ar); - if (ret && ret != -EOPNOTSUPP) { - ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n", - ret); - goto err_free_firmware_files; - } + BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) != + sizeof(ar->normal_mode_fw.fw_file.fw_version)); + memcpy(ar->hw->wiphy->fw_version, + ar->normal_mode_fw.fw_file.fw_version, + sizeof(ar->hw->wiphy->fw_version)); + ath10k_debug_print_hwfw_info(ar); - ret = ath10k_core_fetch_board_file(ar); - if (ret) { - ath10k_err(ar, "failed to fetch board file: %d\n", ret); - goto err_free_firmware_files; + if 
(ar->is_bmi) { + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + /* pre calibration data download is not necessary + * for all the chipsets. Ignore failures and continue. + */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "could not load pre cal data: %d\n", ret); + } + + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret && ret != -EOPNOTSUPP) { + ath10k_err(ar, "failed to get board id from otp: %d\n", + ret); + goto err_free_firmware_files; + } + + ret = ath10k_core_fetch_board_file(ar); + if (ret) { + ath10k_err(ar, "failed to fetch board file: %d\n", ret); + goto err_free_firmware_files; + } + + ath10k_debug_print_board_info(ar); } ret = ath10k_core_init_firmware_features(ar); @@ -1821,22 +2181,26 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } - ret = ath10k_swap_code_seg_init(ar); - if (ret) { - ath10k_err(ar, "failed to initialize code swap segment: %d\n", - ret); - goto err_free_firmware_files; + if (ar->is_bmi) { + ret = ath10k_swap_code_seg_init(ar, + &ar->normal_mode_fw.fw_file); + if (ret) { + ath10k_err(ar, "failed to init code swap segment: %d\n", + ret); + goto err_free_firmware_files; + } } mutex_lock(&ar->conf_mutex); - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); if (ret) { ath10k_err(ar, "could not init core (%d)\n", ret); goto err_unlock; } - ath10k_print_driver_info(ar); + ath10k_debug_print_boot_info(ar); ath10k_core_stop(ar); mutex_unlock(&ar->conf_mutex); @@ -1861,6 +2225,9 @@ static void ath10k_core_register_work(struct work_struct *work) struct ath10k *ar = container_of(work, struct ath10k, register_work); int status; + /* peer stats are enabled by default */ + set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + status = ath10k_core_probe_fw(ar); if (status) { ath10k_err(ar, "could not probe fw (%d)\n", status); @@ -1966,20 +2333,52 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->hif.ops = hif_ops; ar->hif.bus = bus; + /* By default, assume bmi is set, as most of the existing + * chip sets are based on this, set to false explicitly + * when current chip set does not support. 
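The is_bmi default explained in the comment above is consumed as a plain early-out in BMI-only code paths, as the ar->is_bmi checks in ath10k_core_start() and ath10k_core_probe_fw() earlier in this patch show. A trivial hedged sketch of such a guard; the helper name and return code are made up for illustration.

/* Hedged sketch only: non-BMI targets such as WCN3990 bring their
 * firmware up out of band, so host-driven BMI steps are skipped.
 */
static int ath10k_bmi_guard_sketch(struct ath10k *ar)
{
	if (!ar->is_bmi) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "target has no BMI\n");
		return -EOPNOTSUPP;
	}

	return 0;
}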
+ */ + ar->is_bmi = true; + switch (hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; + ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca988x_values; break; case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: ar->regs = &qca6174_regs; + ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; + ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca99x0_values; break; + case ATH10K_HW_QCA9888: + ar->regs = &qca99x0_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca9888_values; + break; + case ATH10K_HW_QCA4019: + ar->regs = &qca4019_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca4019_values; + break; + case ATH10K_HW_WCN3990: + ar->regs = &wcn3990_regs; + ar->hw_ce_regs = &wcn3990_ce_regs; + ar->hw_values = &wcn3990_values; + /* WCN3990 chip set is non bmi based */ + ar->is_bmi = false; + ar->fw_flags = &wcn3990_fw_flags; + ar->shadow_reg_value = &wcn3990_shadow_reg_value; + ar->shadow_reg_address = &wcn3990_shadow_reg_address; + ar->rri_on_ddr = true; + break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); @@ -1995,7 +2394,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, init_completion(&ar->install_key_done); init_completion(&ar->vdev_setup_done); + init_completion(&ar->vdev_delete_done); init_completion(&ar->thermal.wmi_sync); + init_completion(&ar->bss_survey_done); + init_completion(&ar->peer_delete_done); INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); @@ -2009,7 +2411,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); + spin_lock_init(&ar->txqs_lock); + spin_lock_init(&ar->datapath_rx_stat_lock); + INIT_LIST_HEAD(&ar->txqs); INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq); init_waitqueue_head(&ar->htt.empty_tx_wq); @@ -2025,6 +2430,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, INIT_WORK(&ar->register_work, ath10k_core_register_work); INIT_WORK(&ar->restart_work, ath10k_core_restart); + init_dummy_netdev(&ar->napi_dev); + ret = ath10k_debug_create(ar); if (ret) goto err_free_aux_wq; @@ -2058,5 +2465,5 @@ void ath10k_core_destroy(struct ath10k *ar) EXPORT_SYMBOL(ath10k_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index a7fab3b0a443..aeeeac2388fe 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,6 +24,8 @@ #include <linux/pci.h> #include <linux/uuid.h> #include <linux/time.h> +#include <linux/inetdevice.h> +#include <soc/qcom/socinfo.h> #include "htt.h" #include "htc.h" @@ -44,8 +46,8 @@ #define ATH10K_SCAN_ID 0 #define WMI_READY_TIMEOUT (5 * HZ) -#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) -#define ATH10K_CONNECTION_LOSS_HZ (3*HZ) +#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) +#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) #define ATH10K_NUM_CHANS 39 /* Antenna noise floor */ @@ -65,10 +67,30 @@ #define ATH10K_KEEPALIVE_MAX_IDLE 3895 #define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900 +/* NAPI poll budget */ +#define ATH10K_NAPI_BUDGET 64 +#define ATH10K_NAPI_QUOTA_LIMIT 60 + +#define ATH10K_RX_MCS_MIN 0 +#define ATH10K_RX_HT_MCS_MAX 32 +#define ATH10K_RX_VHT_RATEIDX_MAX 9 +#define ATH10K_RX_VHT_MCS_MAX 20 /* For 2x2 */ +#define ATH10K_RX_NSS_MIN 0 +#define ATH10K_RX_NSS_MAX 5 + +enum ath10k_datapath_rx_band { + ATH10K_BAND_MIN, + ATH10K_BAND_2GHZ = ATH10K_BAND_MIN, + ATH10K_BAND_5GHZ, + ATH10K_BAND_MAX, +}; + struct ath10k; enum ath10k_bus { ATH10K_BUS_PCI, + ATH10K_BUS_AHB, + ATH10K_BUS_SNOC, }; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -76,31 +98,30 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) switch (bus) { case ATH10K_BUS_PCI: return "pci"; + case ATH10K_BUS_AHB: + return "ahb"; + case ATH10K_BUS_SNOC: + return "snoc"; } return "unknown"; } +enum ath10k_skb_flags { + ATH10K_SKB_F_NO_HWCRYPT = BIT(0), + ATH10K_SKB_F_DTIM_ZERO = BIT(1), + ATH10K_SKB_F_DELIVER_CAB = BIT(2), + ATH10K_SKB_F_MGMT = BIT(3), + ATH10K_SKB_F_QOS = BIT(4), +}; + struct ath10k_skb_cb { dma_addr_t paddr; + u8 flags; u8 eid; - u8 vdev_id; - enum ath10k_hw_txrx_mode txmode; - bool is_protected; - - struct { - u8 tid; - u16 freq; - bool is_offchan; - bool nohwcrypt; - struct ath10k_htt_txbuf *txbuf; - u32 txbuf_paddr; - } __packed htt; - - struct { - bool dtim_zero; - bool deliver_cab; - } bcn; + u16 msdu_id; + struct ieee80211_vif *vif; + struct ieee80211_txq *txq; } __packed; struct ath10k_skb_rxcb { @@ -141,17 +162,22 @@ struct ath10k_mem_chunk { }; struct ath10k_wmi { - enum ath10k_fw_wmi_op_version op_version; enum ath10k_htc_ep_id eid; struct completion service_ready; struct completion unified_ready; + struct completion barrier; wait_queue_head_t tx_credits_wq; DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); struct wmi_cmd_map *cmd; struct wmi_vdev_param_map *vdev_param; struct wmi_pdev_param_map *pdev_param; const struct wmi_ops *ops; + const struct wmi_peer_flags_map *peer_flags; + u32 mgmt_max_num_pending_tx; + struct idr mgmt_pending_tx; + /* Protects access to mgmt_pending_tx, mgmt_max_num_pending_tx */ + spinlock_t mgmt_tx_lock; u32 num_mem_chunks; u32 rx_decap_mode; struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; @@ -164,6 +190,14 @@ struct ath10k_fw_stats_peer { u32 peer_rssi; u32 peer_tx_rate; u32 peer_rx_rate; /* 10x only */ + u32 rx_duration; +}; + +struct ath10k_fw_extd_stats_peer { + struct list_head list; + + u8 peer_macaddr[ETH_ALEN]; + u32 rx_duration; }; struct ath10k_fw_stats_vdev { @@ -190,10 +224,10 @@ struct ath10k_fw_stats_pdev { /* PDEV stats */ s32 ch_noise_floor; - u32 tx_frame_count; - u32 rx_frame_count; - u32 rx_clear_count; - u32 cycle_count; + u32 tx_frame_count; /* Cycles spent transmitting frames */ + u32 rx_frame_count; /* Cycles spent receiving frames */ + u32 rx_clear_count; /* Total 
channel busy time, evidently */ + u32 cycle_count; /* Total on-channel time */ u32 phy_err_count; u32 chan_tx_power; u32 ack_rx_bad; @@ -257,9 +291,11 @@ struct ath10k_fw_stats_pdev { }; struct ath10k_fw_stats { + bool extended; struct list_head pdevs; struct list_head vdevs; struct list_head peers; + struct list_head peers_extd; }; #define ATH10K_TPC_TABLE_TYPE_FLAG 1 @@ -298,6 +334,9 @@ struct ath10k_dfs_stats { struct ath10k_peer { struct list_head list; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); @@ -306,6 +345,12 @@ struct ath10k_peer { struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; }; +struct ath10k_txq { + struct list_head list; + unsigned long num_fw_queued; + unsigned long num_push_allowed; +}; + struct ath10k_sta { struct ath10k_vif *arvif; @@ -314,16 +359,19 @@ struct ath10k_sta { u32 bw; u32 nss; u32 smps; + u16 peer_id; struct work_struct update_wk; #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ bool aggr_mode; + u64 rx_duration; #endif }; -#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) +#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ) +#define ATH10K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ) enum ath10k_beacon_state { ATH10K_BEACON_SCHEDULED = 0, @@ -335,6 +383,7 @@ struct ath10k_vif { struct list_head list; u32 vdev_id; + u16 peer_id; enum wmi_vdev_type vdev_type; enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; @@ -386,6 +435,9 @@ struct ath10k_vif { struct work_struct ap_csa_work; struct delayed_work connection_loss_work; struct cfg80211_bitrate_mask bitrate_mask; + struct wmi_ns_arp_offload_req arp_offload; + struct wmi_ns_arp_offload_req ns_offload; + struct wmi_gtk_rekey_data gtk_rekey_data; }; struct ath10k_vif_iter { @@ -420,11 +472,13 @@ struct ath10k_debug { struct completion tpc_complete; /* protected by conf_mutex */ - u32 fw_dbglog_mask; + u64 fw_dbglog_mask; u32 fw_dbglog_level; u32 pktlog_filter; + enum ath10k_htc_ep_id eid; u32 reg_addr; u32 nf_cal_period; + void *cal_data; struct ath10k_fw_crash_data *fw_crash_data; }; @@ -512,6 +566,32 @@ enum ath10k_fw_features { /* Firmware Supports Adaptive CCA*/ ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11, + /* Firmware supports management frame protection */ + ATH10K_FW_FEATURE_MFP_SUPPORT = 12, + + /* Firmware supports pull-push model where host shares it's software + * queue state with firmware and firmware generates fetch requests + * telling host which queues to dequeue tx from. + * + * Primary function of this is improved MU-MIMO performance with + * multiple clients. + */ + ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, + + /* Firmware supports BT-Coex without reloading firmware via pdev param. + * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of + * extended resource config should be enabled always. This firmware IE + * is used to configure WMI_COEX_GPIO_SUPPORT. + */ + ATH10K_FW_FEATURE_BTCOEX_PARAM = 14, + + /* Older firmware with HTT delivers incorrect tx status for null func + * frames to driver, but this fixed in 10.2 and 10.4 firmware versions. + * Also this workaround results in reporting of incorrect null func + * status for 10.4. This flag is used to skip the workaround. 
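The ATH10K_FW_FEATURE_PEER_FLOW_CONTROL description above pairs with the num_fw_queued/num_push_allowed counters added to struct ath10k_txq in this header and with the txqs_lock introduced further down. The hunks that actually use them live outside this file, so the sketch below only illustrates the intended gating; the helper name and the simplified locking are assumptions.

/* Hedged sketch of push-credit gating under the pull-push model. */
static bool ath10k_mac_tx_can_push_sketch(struct ath10k *ar,
					  struct ieee80211_txq *txq)
{
	struct ath10k_txq *artxq = (void *)txq->drv_priv;
	bool can_push;

	spin_lock_bh(&ar->txqs_lock);
	/* Firmware fetch indications raise num_push_allowed; the host only
	 * dequeues from mac80211 while it still has push credit left.
	 */
	can_push = artxq->num_fw_queued < artxq->num_push_allowed;
	spin_unlock_bh(&ar->txqs_lock);

	return can_push;
}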
+ */ + ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -534,12 +614,21 @@ enum ath10k_dev_flags { /* Disable HW crypto engine */ ATH10K_FLAG_HW_CRYPTO_DISABLED, + + /* Bluetooth coexistance enabled */ + ATH10K_FLAG_BTCOEX, + + /* Per Station statistics service */ + ATH10K_FLAG_PEER_STATS, }; enum ath10k_cal_mode { ATH10K_CAL_MODE_FILE, ATH10K_CAL_MODE_OTP, ATH10K_CAL_MODE_DT, + ATH10K_PRE_CAL_MODE_FILE, + ATH10K_PRE_CAL_MODE_DT, + ATH10K_CAL_MODE_EEPROM, }; enum ath10k_crypt_mode { @@ -558,6 +647,12 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) return "otp"; case ATH10K_CAL_MODE_DT: return "dt"; + case ATH10K_PRE_CAL_MODE_FILE: + return "pre-cal-file"; + case ATH10K_PRE_CAL_MODE_DT: + return "pre-cal-dt"; + case ATH10K_CAL_MODE_EEPROM: + return "eeprom"; } return "unknown"; @@ -591,11 +686,68 @@ enum ath10k_tx_pause_reason { ATH10K_TX_PAUSE_MAX, }; +struct fw_flag { + u32 flags; +}; + +struct ath10k_fw_file { + const struct firmware *firmware; + + char fw_version[ETHTOOL_FWVERS_LEN]; + + DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); + + enum ath10k_fw_wmi_op_version wmi_op_version; + enum ath10k_fw_htt_op_version htt_op_version; + + const void *firmware_data; + size_t firmware_len; + + const void *otp_data; + size_t otp_len; + + const void *codeswap_data; + size_t codeswap_len; + + /* The original idea of struct ath10k_fw_file was that it only + * contains struct firmware and pointers to various parts (actual + * firmware binary, otp, metadata etc) of the file. This seg_info + * is actually created separate but as this is used similarly as + * the other firmware components it's more convenient to have it + * here. + */ + struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; +}; + +struct ath10k_fw_components { + const struct firmware *board; + const void *board_data; + size_t board_len; + + struct ath10k_fw_file fw_file; +}; + +struct datapath_rx_stats { + u32 no_of_packets; + u32 short_gi_pkts; + u32 ht_rate_indx[ATH10K_RX_HT_MCS_MAX + 1]; + u32 vht_rate_indx[ATH10K_RX_VHT_MCS_MAX + 1]; + u32 ht_rate_packets; + u32 vht_rate_packets; + u32 legacy_pkt; + u32 nss[ATH10K_RX_NSS_MAX + 1]; + u32 num_pkts_40Mhz; + u32 num_pkts_80Mhz; + u32 band[ATH10K_BAND_MAX + 1]; +}; + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct device *dev; u8 mac_addr[ETH_ALEN]; + u8 base_mac_addr[ETH_ALEN]; enum ath10k_hw_rev hw_rev; u16 dev_id; @@ -615,9 +767,7 @@ struct ath10k { u32 max_spatial_stream; /* protected by conf_mutex */ bool ani_enabled; - - DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); - + bool sifs_burst_enabled; bool p2p; struct { @@ -628,75 +778,29 @@ struct ath10k { struct completion target_suspend; const struct ath10k_hw_regs *regs; + const struct ath10k_hw_ce_regs *hw_ce_regs; const struct ath10k_hw_values *hw_values; + struct ath10k_shadow_reg_value *shadow_reg_value; + struct ath10k_shadow_reg_address *shadow_reg_address; struct ath10k_bmi bmi; struct ath10k_wmi wmi; struct ath10k_htc htc; struct ath10k_htt htt; - struct ath10k_hw_params { - u32 id; - u16 dev_id; - const char *name; - u32 patch_load_addr; - int uart_pin; - u32 otp_exe_param; - - /* This is true if given HW chip has a quirky Cycle Counter - * wraparound which resets to 0x7fffffff instead of 0. All - * other CC related counters (e.g. Rx Clear Count) are divided - * by 2 so they never wraparound themselves. 
- */ - bool has_shifted_cc_wraparound; - - /* Some of chip expects fragment descriptor to be continuous - * memory for any TX operation. Set continuous_frag_desc flag - * for the hardware which have such requirement. - */ - bool continuous_frag_desc; - - u32 channel_counters_freq_hz; - - /* Mgmt tx descriptors threshold for limiting probe response - * frames. - */ - u32 max_probe_resp_desc_thres; - - struct ath10k_hw_params_fw { - const char *dir; - const char *fw; - const char *otp; - const char *board; - size_t board_size; - size_t board_ext_size; - } fw; - - /* Number of bytes used for alignment in rx_hdr_status */ - int decap_align_bytes; - - } hw_params; - - const struct firmware *board; - const void *board_data; - size_t board_len; + struct ath10k_hw_params hw_params; - const struct firmware *otp; - const void *otp_data; - size_t otp_len; + /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */ + struct ath10k_fw_components normal_mode_fw; - const struct firmware *firmware; - const void *firmware_data; - size_t firmware_len; + /* READ-ONLY images of the running firmware, which can be either + * normal or UTF. Do not modify, release etc! + */ + const struct ath10k_fw_components *running_fw; + const struct firmware *pre_cal_file; const struct firmware *cal_file; struct { - const void *firmware_codeswap_data; - size_t firmware_codeswap_len; - struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; - } swap; - - struct { u32 vendor; u32 device; u32 subsystem_vendor; @@ -724,7 +828,7 @@ struct ath10k { } scan; struct { - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; } mac; /* should never be NULL; needed for regular htt rx */ @@ -736,6 +840,9 @@ struct ath10k { /* current operating channel definition */ struct cfg80211_chan_def chandef; + /* currently configured operating channel in firmware */ + struct ieee80211_channel *tgt_oper_chan; + unsigned long long free_vdev_map; struct ath10k_vif *monitor_arvif; bool monitor; @@ -757,6 +864,7 @@ struct ath10k { int last_wmi_vdev_start_status; struct completion vdev_setup_done; + struct completion vdev_delete_done; struct workqueue_struct *workqueue; /* Auxiliary workqueue */ @@ -767,9 +875,13 @@ struct ath10k { /* protects shared structure data */ spinlock_t data_lock; + /* protects: ar->txqs, artxq->list */ + spinlock_t txqs_lock; + struct list_head txqs; struct list_head arvifs; struct list_head peers; + struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS]; wait_queue_head_t peer_mapping_wq; /* protected by conf_mutex */ @@ -812,6 +924,7 @@ struct ath10k { * avoid reporting garbage data. 
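The normal_mode_fw / running_fw split above also changes the boot calling convention: firmware images are fetched into an ath10k_fw_components and then handed to ath10k_core_start(), whose updated prototype appears further down in this header. A hedged sketch of that flow; the firmware file name is a placeholder, and device power-up plus error unwinding are omitted.

/* Hedged sketch of the new ath10k_fw_components calling pattern;
 * testmode follows the same shape with testmode.utf_mode_fw.
 */
static int ath10k_boot_normal_fw_sketch(struct ath10k *ar)
{
	int ret;

	ret = ath10k_core_fetch_firmware_api_n(ar, "firmware-5.bin",
					       &ar->normal_mode_fw.fw_file);
	if (ret)
		return ret;

	mutex_lock(&ar->conf_mutex);
	/* core start publishes the component set as the read-only
	 * ar->running_fw for the rest of the driver to consume.
	 */
	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
				&ar->normal_mode_fw);
	mutex_unlock(&ar->conf_mutex);

	return ret;
}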
*/ bool ch_info_can_report_survey; + struct completion bss_survey_done; struct dfs_pattern_detector *dfs_detector; @@ -819,8 +932,6 @@ struct ath10k { #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; -#endif - struct { /* relay(fs) channel for spectral scan */ struct rchan *rfs_chan_spec_scan; @@ -829,16 +940,15 @@ struct ath10k { enum ath10k_spectral_mode mode; struct ath10k_spec_scan config; } spectral; + struct datapath_rx_stats *rx_stats; +#endif + /* prevent concurrency histogram for receiving data packet */ + spinlock_t datapath_rx_stat_lock; struct { /* protected by conf_mutex */ - const struct firmware *utf; - char utf_version[32]; - const void *utf_firmware_data; - size_t utf_firmware_len; - DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); - enum ath10k_fw_wmi_op_version orig_wmi_op_version; - enum ath10k_fw_wmi_op_version op_version; + struct ath10k_fw_components utf_mode_fw; + /* protected by data_lock */ bool utf_monitor; } testmode; @@ -853,10 +963,29 @@ struct ath10k { struct ath10k_thermal thermal; struct ath10k_wow wow; + /* NAPI */ + struct net_device napi_dev; + struct napi_struct napi; + + struct fw_flag *fw_flags; + /* set for bmi chip sets */ + struct completion peer_delete_done; + bool is_bmi; + enum ieee80211_sta_state sta_state; + bool rri_on_ddr; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; +static inline bool ath10k_peer_stats_enabled(struct ath10k *ar) +{ + if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) && + test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + return true; + + return false; +} + struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_bus bus, enum ath10k_hw_rev hw_rev, @@ -865,8 +994,11 @@ void ath10k_core_destroy(struct ath10k *ar); void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t max_len); +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file); -int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode); +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw_components); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); void ath10k_core_stop(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar, u32 chip_id); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 30c357567054..0836a81b93e0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -19,15 +19,20 @@ #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/utsname.h> +#include <linux/crc32.h> +#include <linux/firmware.h> #include "core.h" #include "debug.h" #include "hif.h" +#include "htt.h" #include "wmi-ops.h" /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 +#define ATH10K_DEBUG_CAL_DATA_LEN 12064 + #define ATH10K_FW_CRASH_DUMP_VERSION 1 /** @@ -122,43 +127,73 @@ void ath10k_info(struct ath10k *ar, const char *fmt, ...) 
} EXPORT_SYMBOL(ath10k_info); -void ath10k_print_driver_info(struct ath10k *ar) +void ath10k_debug_print_hwfw_info(struct ath10k *ar) { + const struct firmware *firmware; char fw_features[128] = {}; - char boardinfo[100]; + u32 crc = 0; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); - if (ar->id.bmi_ids_valid) - scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d", - ar->id.bmi_chip_id, ar->id.bmi_board_id); - else - scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x", - ar->id.subsystem_vendor, ar->id.subsystem_device); - - ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n", + ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", ar->hw_params.name, ar->target_version, ar->chip_id, - boardinfo, + ar->id.subsystem_vendor, ar->id.subsystem_device); + + ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", + IS_ENABLED(CONFIG_ATH10K_DEBUG), + IS_ENABLED(CONFIG_ATH10K_DEBUGFS), + IS_ENABLED(CONFIG_ATH10K_TRACING), + IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), + IS_ENABLED(CONFIG_NL80211_TESTMODE)); + + firmware = ar->normal_mode_fw.fw_file.firmware; + if (firmware) + crc = crc32_le(0, firmware->data, firmware->size); + + ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, ar->fw_api, + fw_features, + crc); +} + +void ath10k_debug_print_board_info(struct ath10k *ar) +{ + char boardinfo[100]; + + if (ar->id.bmi_ids_valid) + scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", + ar->id.bmi_chip_id, ar->id.bmi_board_id); + else + scnprintf(boardinfo, sizeof(boardinfo), "N/A"); + + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, + boardinfo, + crc32_le(0, ar->normal_mode_fw.board->data, + ar->normal_mode_fw.board->size)); +} + +void ath10k_debug_print_boot_info(struct ath10k *ar) +{ + ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", ar->htt.target_version_major, ar->htt.target_version_minor, - ar->wmi.op_version, - ar->htt.op_version, + ar->normal_mode_fw.fw_file.wmi_op_version, + ar->normal_mode_fw.fw_file.htt_op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), - !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags), - fw_features); - ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", - config_enabled(CONFIG_ATH10K_DEBUG), - config_enabled(CONFIG_ATH10K_DEBUGFS), - config_enabled(CONFIG_ATH10K_TRACING), - config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), - config_enabled(CONFIG_NL80211_TESTMODE)); + !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)); +} + +void ath10k_print_driver_info(struct ath10k *ar) +{ + ath10k_debug_print_hwfw_info(ar); + ath10k_debug_print_board_info(ar); + ath10k_debug_print_boot_info(ar); } EXPORT_SYMBOL(ath10k_print_driver_info); @@ -251,7 +286,7 @@ static const struct file_operations fops_wmi_services = { .llseek = default_llseek, }; -static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) +static void ath10k_fw_stats_pdevs_free(struct list_head *head) { struct ath10k_fw_stats_pdev *i, *tmp; @@ -261,7 +296,7 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) } } -static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head) +static void ath10k_fw_stats_vdevs_free(struct list_head *head) { struct ath10k_fw_stats_vdev *i, *tmp; @@ -271,7 +306,7 @@ static void 
ath10k_debug_fw_stats_vdevs_free(struct list_head *head) } } -static void ath10k_debug_fw_stats_peers_free(struct list_head *head) +static void ath10k_fw_stats_peers_free(struct list_head *head) { struct ath10k_fw_stats_peer *i, *tmp; @@ -281,13 +316,25 @@ static void ath10k_debug_fw_stats_peers_free(struct list_head *head) } } +static void ath10k_fw_extd_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_extd_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + static void ath10k_debug_fw_stats_reset(struct ath10k *ar) { spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; - ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); - ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); - ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ar->debug.fw_stats.extended = false; + ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); spin_unlock_bh(&ar->data_lock); } @@ -302,6 +349,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.peers); + INIT_LIST_HEAD(&stats.peers_extd); spin_lock_bh(&ar->data_lock); ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); @@ -321,9 +369,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * b) consume stat update events until another one with pdev stats is * delivered which is treated as end-of-data and is itself discarded */ + if (ath10k_peer_stats_enabled(ar)) + ath10k_sta_update_rx_duration(ar, &stats); if (ar->debug.fw_stats_done) { - ath10k_warn(ar, "received unsolicited stats update event\n"); + if (!ath10k_peer_stats_enabled(ar)) + ath10k_warn(ar, "received unsolicited stats update event\n"); + goto free; } @@ -347,17 +399,21 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) /* Although this is unlikely impose a sane limit to * prevent firmware from DoS-ing the host. */ + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } if (num_vdevs >= BITS_PER_LONG) { + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_warn(ar, "dropping fw vdev stats\n"); goto free; } list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); + list_splice_tail_init(&stats.peers_extd, + &ar->debug.fw_stats.peers_extd); } complete(&ar->debug.fw_stats_complete); @@ -366,9 +422,10 @@ free: /* In some cases lists have been spliced and cleared. Free up * resources if that is not the case. 
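ath10k_sta_update_rx_duration(), called above when peer stats are enabled, is implemented outside this file; conceptually it folds the new peers_extd list back into per-station state. A hedged sketch of the matching step, assuming the basic peer entry also carries its MAC address as peer_macaddr (only the extended entry's layout is visible in this patch).

/* Hedged sketch: merge 10.4 extended peer stats (rx_duration) back into
 * the basic peer list by MAC address.
 */
static void
ath10k_fw_stats_merge_rx_duration_sketch(struct list_head *peers,
					 struct list_head *peers_extd)
{
	struct ath10k_fw_stats_peer *peer;
	struct ath10k_fw_extd_stats_peer *extd;

	list_for_each_entry(extd, peers_extd, list) {
		list_for_each_entry(peer, peers, list) {
			if (!ether_addr_equal(extd->peer_macaddr,
					      peer->peer_macaddr))
				continue;
			peer->rx_duration = extd->rx_duration;
			break;
		}
	}
}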
*/ - ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); - ath10k_debug_fw_stats_vdevs_free(&stats.vdevs); - ath10k_debug_fw_stats_peers_free(&stats.peers); + ath10k_fw_stats_pdevs_free(&stats.pdevs); + ath10k_fw_stats_vdevs_free(&stats.vdevs); + ath10k_fw_stats_peers_free(&stats.peers); + ath10k_fw_extd_stats_peers_free(&stats.peers_extd); spin_unlock_bh(&ar->data_lock); } @@ -481,6 +538,230 @@ static const struct file_operations fops_fw_stats = { .llseek = default_llseek, }; +static inline int is_vht_rate_valid(u32 rate_indx) +{ + if ((rate_indx >= ATH10K_RX_MCS_MIN) && + (rate_indx <= ATH10K_RX_VHT_RATEIDX_MAX)) + return 1; + else + return 0; +} + +void fill_datapath_stats(struct ath10k *ar, struct ieee80211_rx_status *status) +{ + struct datapath_rx_stats *stat_cnt = ar->rx_stats; + + spin_lock_bh(&ar->datapath_rx_stat_lock); + + stat_cnt->no_of_packets += 1; + if (!(stat_cnt->no_of_packets)) { + memset(stat_cnt, 0, sizeof(*stat_cnt)); + stat_cnt->no_of_packets += 1; + } + + if (status->flag & RX_FLAG_SHORT_GI) + stat_cnt->short_gi_pkts += 1; + + if ((status->vht_nss >= ATH10K_RX_NSS_MIN) && + (status->vht_nss < ATH10K_RX_NSS_MAX)) { + stat_cnt->nss[status->vht_nss] += 1; + if (status->flag & RX_FLAG_VHT) { + stat_cnt->vht_rate_packets += 1; + if (is_vht_rate_valid(status->rate_idx)) { + stat_cnt->vht_rate_indx[((status->vht_nss - 1) * + 10) + status->rate_idx] += 1; + } else { + /*if we get index other than (>=0 and <=9)*/ + stat_cnt->vht_rate_indx[ATH10K_RX_VHT_MCS_MAX] += 1; + } + } else if (status->flag & RX_FLAG_HT) { + stat_cnt->ht_rate_packets += 1; + if ((status->rate_idx >= ATH10K_RX_MCS_MIN) && + (status->rate_idx < ATH10K_RX_HT_MCS_MAX)) + stat_cnt->ht_rate_indx[status->rate_idx] += 1; + else { + /*if we get index other than (>=0 and <=31)*/ + stat_cnt->ht_rate_indx[ATH10K_RX_HT_MCS_MAX] += 1; + } + } else { + /* if pkt is other than HT and VHT */ + stat_cnt->legacy_pkt += 1; + } + } else { + stat_cnt->nss[ATH10K_RX_NSS_MAX] += 1; + } + + if (status->flag & RX_FLAG_40MHZ) + stat_cnt->num_pkts_40Mhz += 1; + if (status->vht_flag & RX_VHT_FLAG_80MHZ) + stat_cnt->num_pkts_80Mhz += 1; + if ((status->band >= ATH10K_BAND_MIN) && + (status->band < ATH10K_BAND_MAX)) { + stat_cnt->band[status->band] += 1; + } else { + /*if band is other than 0,1 */ + stat_cnt->band[ATH10K_BAND_MAX] += 1; + } + + spin_unlock_bh(&ar->datapath_rx_stat_lock); +} + +size_t get_datapath_stat(char *buf, struct ath10k *ar) +{ + u8 i; + struct datapath_rx_stats *stat_cnt = ar->rx_stats; + size_t j = 0; + + spin_lock(&ar->datapath_rx_stat_lock); + + j = snprintf(buf, ATH10K_DATAPATH_BUF_SIZE, "\nNo of packets: %u\t" + "No of short_gi packets: %u\n" + "\nHT Packets: %u \t VHT Packets: %u\n" + "\n40Mhz Packets: %u \t 80Mhz Packets: %u\n" + "\n2.4GHz: %u \t 5GHz: %u \t band-error: %u\n\n", + stat_cnt->no_of_packets, + stat_cnt->short_gi_pkts, + stat_cnt->ht_rate_packets, + stat_cnt->vht_rate_packets, + stat_cnt->num_pkts_40Mhz, + stat_cnt->num_pkts_80Mhz, + stat_cnt->band[ATH10K_BAND_2GHZ], + stat_cnt->band[ATH10K_BAND_5GHZ], + stat_cnt->band[ATH10K_BAND_MAX]); + + for (i = 0; i <= ATH10K_RX_NSS_MAX; i++) { + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "NSS-%u: %u\t", i, stat_cnt->nss[i]); + } + + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "\n\n----HT Rate index------\n"); + + for (i = ATH10K_RX_MCS_MIN; i < ATH10K_RX_HT_MCS_MAX; + i += 4) { + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "ht_rate_indx[%02u]: %10u\tht_rate_indx[%02u]: %10u\t" + "ht_rate_indx[%02u]: 
%10u\tht_rate_indx[%02u]: %10u\n", + i, stat_cnt->ht_rate_indx[i], + i + 1, stat_cnt->ht_rate_indx[i + 1], + i + 2, stat_cnt->ht_rate_indx[i + 2], + i + 3, stat_cnt->ht_rate_indx[i + 3]); + } + + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "ht_rate_indx[OOB]: %10u\n", + stat_cnt->ht_rate_indx[ATH10K_RX_HT_MCS_MAX]); + + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "\n----VHT Rate index------\n"); + + for (i = ATH10K_RX_MCS_MIN; + i <= ATH10K_RX_VHT_RATEIDX_MAX; i++) { + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "vht_rate_indx[%02u]: %10u\tvht_rate_indx[%02u]: %10u\n", + i, stat_cnt->vht_rate_indx[i], + i + 10, stat_cnt->vht_rate_indx[i + 10]); + } + + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "vht_rate_indx[%02u]: %10u\n", + i + 10, stat_cnt->vht_rate_indx[i + 10]); + + j += snprintf(buf + j, (ATH10K_DATAPATH_BUF_SIZE - j), + "\nnumber of pkt other than HT and VHT(legacy) : %u\n" + "----------------------\n", + stat_cnt->legacy_pkt); + + spin_unlock(&ar->datapath_rx_stat_lock); + + return j; +} + +static int ath10k_datapath_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + int ret; + + spin_lock(&ar->datapath_rx_stat_lock); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + file->private_data = ar; + + spin_unlock(&ar->datapath_rx_stat_lock); + return 0; + +err_unlock: + spin_unlock(&ar->datapath_rx_stat_lock); + return ret; +} + +static ssize_t ath10k_datapath_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t buf_len; + unsigned int ret; + void *buf = NULL; + + buf = vmalloc(ATH10K_DATAPATH_BUF_SIZE); + if (!buf) + return 0; + + buf_len = get_datapath_stat(buf, ar); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, buf_len); + vfree(buf); + + return ret; +} + +static ssize_t ath10k_datapath_stats_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -EINVAL; + + spin_lock(&ar->datapath_rx_stat_lock); + + if (ar->state != ATH10K_STATE_ON) { + ret = count; + goto err_unlock; + } + + if (!filter) + memset(ar->rx_stats, 0, sizeof(*ar->rx_stats)); + + ret = count; + +err_unlock: + spin_unlock(&ar->datapath_rx_stat_lock); + return ret; +} + +static int ath10k_datapath_stats_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations fops_datapath_stats = { + .open = ath10k_datapath_stats_open, + .read = ath10k_datapath_stats_read, + .write = ath10k_datapath_stats_write, + .release = ath10k_datapath_stats_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -571,25 +852,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, char buf[32]; int ret; - mutex_lock(&ar->conf_mutex); - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); /* make sure that buf is null terminated */ buf[sizeof(buf) - 1] = 0; + /* drop the possible '\n' from the end */ + if (buf[count - 1] == '\n') + buf[count - 1] = 0; + + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } - /* drop the possible '\n' from the end */ - if (buf[count - 1] 
== '\n') { - buf[count - 1] = 0; - count--; - } - if (!strcmp(buf, "soft")) { ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); @@ -1114,7 +1393,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, { struct ath10k *ar = file->private_data; char buf[64]; - u8 amsdu = 3, ampdu = 64; + u8 amsdu, ampdu; unsigned int len; mutex_lock(&ar->conf_mutex); @@ -1176,9 +1455,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file, { struct ath10k *ar = file->private_data; unsigned int len; - char buf[64]; + char buf[96]; - len = scnprintf(buf, sizeof(buf), "0x%08x %u\n", + len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); return simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -1190,15 +1469,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, { struct ath10k *ar = file->private_data; int ret; - char buf[64]; - unsigned int log_level, mask; + char buf[96]; + unsigned int log_level; + u64 mask; simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); /* make sure that buf is null terminated */ buf[sizeof(buf) - 1] = 0; - ret = sscanf(buf, "%x %u", &mask, &log_level); + ret = sscanf(buf, "%llx %u", &mask, &log_level); if (!ret) return -EINVAL; @@ -1398,74 +1678,68 @@ static const struct file_operations fops_fw_dbglog = { .llseek = default_llseek, }; -static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +static int ath10k_debug_cal_data_fetch(struct ath10k *ar) { - struct ath10k *ar = inode->i_private; - void *buf; u32 hi_addr; __le32 addr; int ret; - mutex_lock(&ar->conf_mutex); - - if (ar->state != ATH10K_STATE_ON && - ar->state != ATH10K_STATE_UTF) { - ret = -ENETDOWN; - goto err; - } + lockdep_assert_held(&ar->conf_mutex); - buf = vmalloc(QCA988X_CAL_DATA_LEN); - if (!buf) { - ret = -ENOMEM; - goto err; - } + if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) + return -EINVAL; hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); if (ret) { - ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); - goto err_vfree; + ath10k_warn(ar, "failed to read hi_board_data address: %d\n", + ret); + return ret; } - ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, - QCA988X_CAL_DATA_LEN); + ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, + ar->hw_params.cal_data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data: %d\n", ret); - goto err_vfree; + return ret; } - file->private_data = buf; + return 0; +} - mutex_unlock(&ar->conf_mutex); +static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; - return 0; + mutex_lock(&ar->conf_mutex); -err_vfree: - vfree(buf); + if (ar->state == ATH10K_STATE_ON || + ar->state == ATH10K_STATE_UTF) { + ath10k_debug_cal_data_fetch(ar); + } -err: + file->private_data = ar; mutex_unlock(&ar->conf_mutex); - return ret; + return 0; } static ssize_t ath10k_debug_cal_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - void *buf = file->private_data; + struct ath10k *ar = file->private_data; - return simple_read_from_buffer(user_buf, count, ppos, - buf, QCA988X_CAL_DATA_LEN); -} + mutex_lock(&ar->conf_mutex); -static int ath10k_debug_cal_data_release(struct inode *inode, - struct file *file) -{ - vfree(file->private_data); + count = 
simple_read_from_buffer(user_buf, count, ppos, + ar->debug.cal_data, + ar->hw_params.cal_data_len); - return 0; + mutex_unlock(&ar->conf_mutex); + + return count; } static ssize_t ath10k_write_ani_enable(struct file *file, @@ -1523,10 +1797,67 @@ static const struct file_operations fops_ani_enable = { .llseek = default_llseek, }; +static ssize_t ath10k_write_sifs_burst_enable(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + u8 enable; + + if (kstrtou8_from_user(user_buf, count, 0, &enable)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->sifs_burst_enabled == enable) { + ret = count; + goto exit; + } + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->burst_enable, + enable); + if (ret) { + ath10k_warn(ar, "sifs_burst_enable failed: %d\n", ret); + goto exit; + } + ar->sifs_burst_enabled = enable; + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_sifs_burst_enable(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + int len; + bool ret = false; + + if (ar->sifs_burst_enabled) + ret = true; + + len = scnprintf(buf, sizeof(buf), "%d\n", ret); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_sifs_burst_enable = { + .read = ath10k_read_sifs_burst_enable, + .write = ath10k_write_sifs_burst_enable, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static const struct file_operations fops_cal_data = { .open = ath10k_debug_cal_data_open, .read = ath10k_debug_cal_data_read, - .release = ath10k_debug_cal_data_release, .owner = THIS_MODULE, .llseek = default_llseek, }; @@ -1861,7 +2192,7 @@ int ath10k_debug_start(struct ath10k *ar) ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } - if (ar->debug.nf_cal_period) { + if (ar->debug.nf_cal_period && !QCA_REV_WCN3990(ar)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); @@ -1878,6 +2209,9 @@ void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); + if (!QCA_REV_WCN3990(ar)) + ath10k_debug_cal_data_fetch(ar); + /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid * warning from del_timer(). 
*/ @@ -2088,17 +2422,410 @@ static const struct file_operations fops_quiet_period = { .open = simple_open }; +static ssize_t ath10k_write_btcoex(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t buf_size; + int ret; + bool val; + u32 pdev_param; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, ubuf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) { + ret = count; + goto exit; + } + + pdev_param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); + if (ret) { + ath10k_warn(ar, "failed to enable btcoex: %d\n", ret); + ret = count; + goto exit; + } + } else { + ath10k_info(ar, "restarting firmware due to btcoex change"); + queue_work(ar->workqueue, &ar->restart_work); + } + + if (val) + set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_btcoex = { + .read = ath10k_read_btcoex, + .write = ath10k_write_btcoex, + .open = simple_open +}; + +static ssize_t ath10k_write_peer_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t buf_size; + int ret; + bool val; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, ubuf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) { + ret = count; + goto exit; + } + + if (val) + set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + + ath10k_info(ar, "restarting firmware due to Peer stats change"); + + queue_work(ar->workqueue, &ar->restart_work); + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_peer_stats = { + .read = ath10k_read_peer_stats, + .write = ath10k_write_peer_stats, + .open = simple_open +}; + +static ssize_t 
ath10k_debug_fw_checksums_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned int len = 0, buf_len = 4096; + ssize_t ret_cnt; + char *buf; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + len += scnprintf(buf + len, buf_len - len, + "firmware-N.bin\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data, + ar->normal_mode_fw.fw_file.firmware->size)); + len += scnprintf(buf + len, buf_len - len, + "athwlan\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data, + ar->normal_mode_fw.fw_file.firmware_len)); + len += scnprintf(buf + len, buf_len - len, + "otp\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len)); + len += scnprintf(buf + len, buf_len - len, + "codeswap\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data, + ar->normal_mode_fw.fw_file.codeswap_len)); + len += scnprintf(buf + len, buf_len - len, + "board-N.bin\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.board->data, + ar->normal_mode_fw.board->size)); + len += scnprintf(buf + len, buf_len - len, + "board\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.board_data, + ar->normal_mode_fw.board_len)); + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + mutex_unlock(&ar->conf_mutex); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_fw_checksums = { + .read = ath10k_debug_fw_checksums_read, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static struct txctl_frm_hdr frm_hdr; + +static void ath10k_extract_frame_header(u8 *addr1, u8 *addr2, u8 *addr3) +{ + frm_hdr.bssid_tail = (addr1[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE) + | (addr1[IEEE80211_ADDR_LEN - 1]); + frm_hdr.sa_tail = (addr2[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE) + | (addr2[IEEE80211_ADDR_LEN - 1]); + frm_hdr.da_tail = (addr3[IEEE80211_ADDR_LEN - 2] << BITS_PER_BYTE) + | (addr3[IEEE80211_ADDR_LEN - 1]); +} + +static void ath10k_process_ieee_hdr(void *data) +{ + u8 dir; + struct ieee80211_frame *wh; + + if (!data) + return; + + wh = (struct ieee80211_frame *)(data); + frm_hdr.framectrl = *(u_int16_t *)(wh->i_fc); + frm_hdr.seqctrl = *(u_int16_t *)(wh->i_seq); + dir = (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK); + + if (dir == IEEE80211_FC1_DIR_TODS) + ath10k_extract_frame_header(wh->i_addr1, wh->i_addr2, + wh->i_addr3); + else if (dir == IEEE80211_FC1_DIR_FROMDS) + ath10k_extract_frame_header(wh->i_addr2, wh->i_addr3, + wh->i_addr1); + else + ath10k_extract_frame_header(wh->i_addr3, wh->i_addr2, + wh->i_addr1); +} + +static void ath10k_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_pktlog_hdr *hdr = (void *)skb->data; + struct ath_pktlog_txctl pktlog_tx_ctrl; + + switch (hdr->log_type) { + case ATH10K_PKTLOG_TYPE_TX_CTRL: { + spin_lock_bh(&ar->htt.tx_lock); + + memcpy((void *)(&pktlog_tx_ctrl.hdr), (void *)hdr, + sizeof(pktlog_tx_ctrl.hdr)); + pktlog_tx_ctrl.frm_hdr = frm_hdr; + memcpy((void *)pktlog_tx_ctrl.txdesc_ctl, (void *)hdr->payload, + __le16_to_cpu(hdr->size)); + pktlog_tx_ctrl.hdr.size = sizeof(pktlog_tx_ctrl) - + sizeof(pktlog_tx_ctrl.hdr); + + spin_unlock_bh(&ar->htt.tx_lock); + + trace_ath10k_htt_pktlog(ar, (void *)&pktlog_tx_ctrl, + sizeof(pktlog_tx_ctrl)); + break; + } + case ATH10K_PKTLOG_TYPE_TX_MSDU_ID: + break; + case ATH10K_PKTLOG_TYPE_TX_FRM_HDR: { + ath10k_process_ieee_hdr((void *)(hdr->payload)); + 
trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) + + __le16_to_cpu(hdr->size)); + break; + } + case ATH10K_PKTLOG_TYPE_RX_STAT: + case ATH10K_PKTLOG_TYPE_RC_FIND: + case ATH10K_PKTLOG_TYPE_RC_UPDATE: + case ATH10K_PKTLOG_TYPE_DBG_PRINT: + case ATH10K_PKTLOG_TYPE_TX_STAT: + case ATH10K_PKTLOG_TYPE_SW_EVENT: + trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) + + __le16_to_cpu(hdr->size)); + break; + case ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR: { + u32 desc_id = (u32)*((u32 *)(hdr->payload)); + struct sk_buff *msdu; + + spin_lock_bh(&ar->htt.tx_lock); + msdu = ath10k_htt_tx_find_msdu_by_id(&ar->htt, desc_id); + + if (!msdu) { + ath10k_info(ar, + "Failed to get msdu, id: %d\n", + desc_id); + spin_unlock_bh(&ar->htt.tx_lock); + return; + } + ath10k_process_ieee_hdr((void *)msdu->data); + spin_unlock_bh(&ar->htt.tx_lock); + trace_ath10k_htt_pktlog(ar, hdr, sizeof(*hdr) + + __le16_to_cpu(hdr->size)); + break; + } + } +} + +int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb) +{ + struct sk_buff *pktlog_skb; + struct ath_pktlog_hdr *pl_hdr; + struct ath_pktlog_rx_info *pktlog_rx_info; + struct htt_rx_desc *rx_desc = (void *)skb->data - sizeof(*rx_desc); + + if (!ar->debug.pktlog_filter) + return 0; + + pktlog_skb = dev_alloc_skb(sizeof(struct ath_pktlog_hdr) + + sizeof(struct htt_rx_desc) - + sizeof(struct htt_host_fw_desc_base)); + if (!pktlog_skb) + return -ENOMEM; + + pktlog_rx_info = (struct ath_pktlog_rx_info *)pktlog_skb->data; + pl_hdr = &pktlog_rx_info->pl_hdr; + + pl_hdr->flags = (1 << ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr->missed_cnt = 0; + pl_hdr->mac_id = 0; + pl_hdr->log_type = ATH10K_PKTLOG_TYPE_RX_STAT; + pl_hdr->flags |= ATH10K_PKTLOG_HDR_SIZE_16; + pl_hdr->size = sizeof(*rx_desc) - + sizeof(struct htt_host_fw_desc_base); + + pl_hdr->timestamp = + cpu_to_le32(rx_desc->ppdu_end.wcn3990.rx_pkt_end.phy_timestamp_1); + + pl_hdr->type_specific_data = 0xDEADAA; + memcpy((void *)pktlog_rx_info + sizeof(struct ath_pktlog_hdr), + (void *)rx_desc + sizeof(struct htt_host_fw_desc_base), + pl_hdr->size); + + ath10k_pktlog_process_rx(ar, pktlog_skb); + dev_kfree_skb_any(pktlog_skb); + return 0; +} + +static void ath10k_pktlog_htc_tx_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_info(ar, "PKTLOG htc completed\n"); +} + +int ath10k_pktlog_connect(struct ath10k *ar) +{ + int status; + struct ath10k_htc_svc_conn_req conn_req; + struct ath10k_htc_svc_conn_resp conn_resp; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + conn_req.ep_ops.ep_tx_complete = ath10k_pktlog_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath10k_pktlog_process_rx; + conn_req.ep_ops.ep_tx_credits = NULL; + + /* connect to control service */ + conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG; + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); + if (status) { + ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n", + status); + return status; + } + + ar->debug.eid = conn_resp.eid; + + return 0; +} + int ath10k_debug_create(struct ath10k *ar) { ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); if (!ar->debug.fw_crash_data) return -ENOMEM; + ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); + if (!ar->debug.cal_data) + goto err_cal_data; + + ar->rx_stats = vzalloc(sizeof(*ar->rx_stats)); + if (!ar->rx_stats) + goto err_rx_stats; + INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); + 
INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd); return 0; + +err_rx_stats: + vfree(ar->debug.cal_data); + +err_cal_data: + vfree(ar->debug.fw_crash_data); + return -ENOMEM; } void ath10k_debug_destroy(struct ath10k *ar) @@ -2106,6 +2833,12 @@ void ath10k_debug_destroy(struct ath10k *ar) vfree(ar->debug.fw_crash_data); ar->debug.fw_crash_data = NULL; + vfree(ar->debug.cal_data); + ar->debug.cal_data = NULL; + + vfree(ar->rx_stats); + ar->rx_stats = NULL; + ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); @@ -2128,6 +2861,9 @@ int ath10k_debug_register(struct ath10k *ar) init_completion(&ar->debug.tpc_complete); init_completion(&ar->debug.fw_stats_complete); + debugfs_create_file("datapath_rx_stats", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_datapath_stats); + debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_stats); @@ -2137,8 +2873,8 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_wmi_services); - debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_simulate_fw_crash); + debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_crash_dump); @@ -2155,26 +2891,32 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_chip_id); - debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_htt_stats_mask); + debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_htt_stats_mask); debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_htt_max_amsdu_ampdu); - debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_fw_dbglog); + debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_fw_dbglog); - debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_cal_data); + if (!QCA_REV_WCN3990(ar)) { + debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_cal_data); + + debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); + } debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_ani_enable); - debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_nf_cal_period); + debugfs_create_file("sifs_burst_enable", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_sifs_burst_enable); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", S_IWUSR, ar->debug.debugfs_phy, ar, &fops_simulate_radar); @@ -2197,6 +2939,18 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("tpc_stats", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_tpc_stats); + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + debugfs_create_file("btcoex", S_IRUGO | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_btcoex); + + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_peer_stats); + + debugfs_create_file("fw_checksums", S_IRUSR, + ar->debug.debugfs_phy, ar, &fops_fw_checksums); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h 
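/*
 * Illustrative sketch (not part of the patch): the goto-based unwind used by
 * ath10k_debug_create()/ath10k_debug_destroy() above, reduced to plain
 * calloc()/free() so it compiles stand-alone. Each failure label releases
 * only what the earlier steps allocated, in reverse order; the destroy path
 * frees everything unconditionally.
 */
#include <stdlib.h>

struct debug_state {
	void *fw_crash_data;
	void *cal_data;
	void *rx_stats;
};

static int debug_state_create(struct debug_state *dbg)
{
	dbg->fw_crash_data = calloc(1, 4096);
	if (!dbg->fw_crash_data)
		return -1;

	dbg->cal_data = calloc(1, 4096);
	if (!dbg->cal_data)
		goto err_cal_data;

	dbg->rx_stats = calloc(1, 4096);
	if (!dbg->rx_stats)
		goto err_rx_stats;

	return 0;

err_rx_stats:
	free(dbg->cal_data);
err_cal_data:
	free(dbg->fw_crash_data);
	return -1;
}

static void debug_state_destroy(struct debug_state *dbg)
{
	free(dbg->fw_crash_data);
	free(dbg->cal_data);
	free(dbg->rx_stats);
}

int main(void)
{
	struct debug_state dbg;

	if (debug_state_create(&dbg))
		return 1;
	debug_state_destroy(&dbg);
	return 0;
}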
index 7de780c4ec8d..eb47720bbf91 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -37,6 +37,8 @@ enum ath10k_debug_mask { ATH10K_DBG_TESTMODE = 0x00001000, ATH10K_DBG_WMI_PRINT = 0x00002000, ATH10K_DBG_PCI_PS = 0x00004000, + ATH10K_DBG_AHB = 0x00008000, + ATH10K_DBG_SNOC = 0x00010000, ATH10K_DBG_ANY = 0xffffffff, }; @@ -55,14 +57,97 @@ enum ath10k_dbg_aggr_mode { ATH10K_DBG_AGGR_MODE_MAX, }; +#define IEEE80211_FC1_DIR_MASK 0x03 +#define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */ +#define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */ +#define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */ +#define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */ +#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */ + +#define MAX_PKT_INFO_MSDU_ID 192 +#define MSDU_ID_INFO_ID_OFFSET \ + ((MAX_PKT_INFO_MSDU_ID >> 3) + 4) + +#define PKTLOG_MAX_TXCTL_WORDS 57 /* +2 words for bitmap */ +#define HTT_TX_MSDU_LEN_MASK 0xffff + +struct txctl_frm_hdr { + __le16 framectrl; /* frame control field from header */ + __le16 seqctrl; /* frame control field from header */ + __le16 bssid_tail; /* last two octets of bssid */ + __le16 sa_tail; /* last two octets of SA */ + __le16 da_tail; /* last two octets of DA */ + __le16 resvd; +} __packed; + +struct ath_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + u8 log_type; + u8 mac_id; + __le16 size; + __le32 timestamp; + __le32 type_specific_data; +} __packed; + +/* generic definitions for IEEE 802.11 frames */ +struct ieee80211_frame { + u8 i_fc[2]; + u8 i_dur[2]; + union { + struct { + u8 i_addr1[IEEE80211_ADDR_LEN]; + u8 i_addr2[IEEE80211_ADDR_LEN]; + u8 i_addr3[IEEE80211_ADDR_LEN]; + }; + u8 i_addr_all[3 * IEEE80211_ADDR_LEN]; + }; + u8 i_seq[2]; +} __packed; + +struct fw_pktlog_msdu_info { + __le32 num_msdu; + u8 bound_bmap[MAX_PKT_INFO_MSDU_ID >> 3]; + __le16 id[MAX_PKT_INFO_MSDU_ID]; +} __packed; + +struct ath_pktlog_txctl { + struct ath_pktlog_hdr hdr; + struct txctl_frm_hdr frm_hdr; + __le32 txdesc_ctl[PKTLOG_MAX_TXCTL_WORDS]; +} __packed; + +struct ath_pktlog_msdu_id { + struct ath_pktlog_hdr hdr; + struct fw_pktlog_msdu_info msdu_info; +} __packed; + +struct ath_pktlog_rx_info { + struct ath_pktlog_hdr pl_hdr; + struct rx_attention attention; + struct rx_frag_info frag_info; + struct rx_mpdu_start mpdu_start; + struct rx_msdu_start msdu_start; + struct rx_msdu_end msdu_end; + struct rx_mpdu_end mpdu_end; + struct rx_ppdu_start ppdu_start; + struct rx_ppdu_end ppdu_end; + u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; +} __packed; + /* FIXME: How to calculate the buffer size sanely? 
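/*
 * Illustrative sketch (not part of the patch): how the packed, little-endian
 * ath_pktlog_hdr defined above maps onto raw bytes. User space has no
 * __le16_to_cpu()/__le32_to_cpu(), so the fields are decoded by hand; the
 * field order and widths follow the struct, while the sample byte values are
 * invented (apart from the 0xDEADAA type_specific_data value used earlier in
 * ath10k_rx_record_pktlog()).
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int get_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

static unsigned int get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((unsigned int)p[2] << 16) |
	       ((unsigned int)p[3] << 24);
}

int main(void)
{
	static const uint8_t raw[16] = {
		0x02, 0x00,		/* flags */
		0x00, 0x00,		/* missed_cnt */
		0x06,			/* log_type (example value) */
		0x00,			/* mac_id */
		0x40, 0x00,		/* size = 64 */
		0x78, 0x56, 0x34, 0x12,	/* timestamp */
		0xaa, 0xad, 0xde, 0x00,	/* type_specific_data = 0x00deadaa */
	};

	printf("log_type %u size %u timestamp 0x%08x data 0x%08x\n",
	       (unsigned int)raw[4], get_le16(raw + 6),
	       get_le32(raw + 8), get_le32(raw + 12));
	return 0;
}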
*/ -#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) +#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024) +#define ATH10K_DATAPATH_BUF_SIZE (1024 * 1024) extern unsigned int ath10k_debug_mask; __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...); __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...); + +void ath10k_debug_print_hwfw_info(struct ath10k *ar); +void ath10k_debug_print_board_info(struct ath10k *ar); +void ath10k_debug_print_boot_info(struct ath10k *ar); void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS @@ -79,6 +164,7 @@ struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); +int ath10k_rx_record_pktlog(struct ath10k *ar, struct sk_buff *skb); #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, @@ -89,6 +175,9 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data); +void fill_datapath_stats(struct ath10k *ar, struct ieee80211_rx_status *status); +size_t get_datapath_stat(char *buf, struct ath10k *ar); +int ath10k_pktlog_connect(struct ath10k *ar); #else static inline int ath10k_debug_start(struct ath10k *ar) { @@ -133,12 +222,32 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, { } +static inline int ath10k_rx_record_pktlog(struct ath10k *ar, + struct sk_buff *skb) +{ + return 0; +} + static inline struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) { return NULL; } +static inline void fill_datapath_stats(struct ath10k *ar, + struct ieee80211_rx_status *status) +{ +} + +static inline size_t get_datapath_stat(char *buf, struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_pktlog_connect(struct ath10k *ar) +{ + return 0; +} #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #define ath10k_debug_get_et_strings NULL @@ -149,6 +258,17 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) #ifdef CONFIG_MAC80211_DEBUGFS void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats); +void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); +#else +static inline +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ +} #endif /* CONFIG_MAC80211_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 95b5c49374e0..9955fea0802a 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -18,6 +18,67 @@ #include "wmi-ops.h" #include "debug.h" +static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + struct ath10k_fw_extd_stats_peer *peer; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, &stats->peers_extd, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta *)sta->drv_priv; + 
arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + +static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + struct ath10k_fw_stats_peer *peer; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, &stats->peers, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + if (stats->extended) + ath10k_sta_update_extd_stats_rx_duration(ar, stats); + else + ath10k_sta_update_stats_rx_duration(ar, stats); +} + +void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + + if (!ath10k_peer_stats_enabled(ar)) + return; + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; +} + static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 89e7076c919f..65723124985e 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -26,7 +26,7 @@ struct ath10k_hif_sg_item { u16 transfer_id; void *transfer_context; /* NULL = tx completion callback not called */ void *vaddr; /* for debugging mostly */ - u32 paddr; + dma_addr_t paddr; u16 len; }; @@ -87,6 +87,10 @@ struct ath10k_hif_ops { int (*suspend)(struct ath10k *ar); int (*resume)(struct ath10k *ar); + + /* fetch calibration data from target eeprom */ + int (*fetch_cal_eeprom)(struct ath10k *ar, void **data, + size_t *data_len); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar, ar->hif.ops->write32(ar, address, data); } +static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, + void **data, + size_t *data_len) +{ + if (!ar->hif.ops->fetch_cal_eeprom) + return -EOPNOTSUPP; + + return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 5b3c6bcf9598..aa20ebbefe94 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); - ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb); return skb; } @@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, { struct ath10k *ar = ep->htc->ar; - ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__, ep->eid, skb); ath10k_htc_restore_tx_skb(ep->htc, skb); @@ -86,7 +86,8 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, hdr->eid = ep->eid; hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); hdr->flags = 0; - hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; + if (ep->tx_credit_flow_enabled) + hdr->flags |= 
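/*
 * Illustrative sketch (not part of the patch): the optional-callback pattern
 * behind ath10k_hif_fetch_cal_eeprom() above. A bus backend that cannot read
 * calibration data from EEPROM simply leaves the hook NULL and the wrapper
 * reports -EOPNOTSUPP. The "pci"/"sdio" ops below are made-up stand-ins, not
 * the driver's real hif tables.
 */
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct hif_ops {
	/* optional: fetch calibration data from target eeprom */
	int (*fetch_cal_eeprom)(void **data, size_t *data_len);
};

static int hif_fetch_cal_eeprom(const struct hif_ops *ops,
				void **data, size_t *data_len)
{
	if (!ops->fetch_cal_eeprom)
		return -EOPNOTSUPP;

	return ops->fetch_cal_eeprom(data, data_len);
}

static int fake_pci_fetch_cal_eeprom(void **data, size_t *data_len)
{
	static char cal[16];	/* pretend calibration blob */

	*data = cal;
	*data_len = sizeof(cal);
	return 0;
}

int main(void)
{
	struct hif_ops pci_ops = { .fetch_cal_eeprom = fake_pci_fetch_cal_eeprom };
	struct hif_ops sdio_ops = { NULL };	/* hook not implemented */
	void *data;
	size_t len;

	printf("pci:  %d\n", hif_fetch_cal_eeprom(&pci_ops, &data, &len));
	printf("sdio: %d\n", hif_fetch_cal_eeprom(&sdio_ops, &data, &len));
	return 0;
}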
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; spin_lock_bh(&ep->htc->tx_lock); hdr->seq_no = ep->seq_no++; @@ -404,7 +405,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb) goto out; } - ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n", eid, skb); ep->ep_ops.ep_rx_complete(ar, skb); @@ -451,8 +452,16 @@ static const char *htc_service_name(enum ath10k_htc_svc_id id) return "NMI Data"; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG: + return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG: + return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_IPA_MSG: + return "IPA"; case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: return "RAW"; + case ATH10K_HTC_SVC_ID_HTT_LOG_MSG: + return "PKTLOG"; } return "Unknown"; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index e70aa38e6e05..5f28099b2c88 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -22,7 +22,6 @@ #include <linux/list.h> #include <linux/bug.h> #include <linux/skbuff.h> -#include <linux/semaphore.h> #include <linux/timer.h> struct ath10k; @@ -223,7 +222,8 @@ enum ath10k_htc_svc_gid { ATH10K_HTC_SVC_GRP_WMI = 1, ATH10K_HTC_SVC_GRP_NMI = 2, ATH10K_HTC_SVC_GRP_HTT = 3, - + ATH10K_IPA_SERVICE_GROUP = 5, + ATH10K_LOG_SERVICE_GROUP = 6, ATH10K_HTC_SVC_GRP_TEST = 254, ATH10K_HTC_SVC_GRP_LAST = 255, }; @@ -247,7 +247,10 @@ enum ath10k_htc_svc_id { ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1), ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0), - + ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1), + ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2), + ATH10K_HTC_SVC_ID_HTT_IPA_MSG = SVC(ATH10K_IPA_SERVICE_GROUP, 0), + ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0), /* raw stream service (i.e. 
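/*
 * Illustrative sketch (not part of the patch): the new HTC service IDs above
 * viewed as a (group, index) pair packed into one value, plus the string
 * mapping added to htc_service_name(). The (group << 8) | index packing
 * mirrors the SVC() macro used by the enum, but that macro is defined outside
 * this hunk, so treat the exact encoding as an assumption.
 */
#include <stdio.h>

enum svc_group {
	GRP_HTT = 3,	/* ATH10K_HTC_SVC_GRP_HTT */
	GRP_IPA = 5,	/* ATH10K_IPA_SERVICE_GROUP */
	GRP_LOG = 6,	/* ATH10K_LOG_SERVICE_GROUP */
};

#define SVC_ID(group, idx)	(((group) << 8) | (idx))

static const char *svc_name(int id)
{
	switch (id) {
	case SVC_ID(GRP_HTT, 0):
	case SVC_ID(GRP_HTT, 1):
	case SVC_ID(GRP_HTT, 2):
		return "HTT Data";
	case SVC_ID(GRP_IPA, 0):
		return "IPA";
	case SVC_ID(GRP_LOG, 0):
		return "PKTLOG";
	}
	return "Unknown";
}

int main(void)
{
	printf("0x%04x -> %s\n", (unsigned int)SVC_ID(GRP_LOG, 0),
	       svc_name(SVC_ID(GRP_LOG, 0)));
	printf("0x%04x -> %s\n", (unsigned int)SVC_ID(GRP_HTT, 2),
	       svc_name(SVC_ID(GRP_HTT, 2)));
	return 0;
}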
flash, tcmd, calibration apps) */ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0), }; @@ -297,10 +300,10 @@ struct ath10k_htc_svc_conn_resp { #define ATH10K_NUM_CONTROL_TX_BUFFERS 2 #define ATH10K_HTC_MAX_LEN 4096 #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256 -#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ) #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \ sizeof(struct ath10k_htc_hdr)) -#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ) struct ath10k_htc_ep { struct ath10k_htc *htc; diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 3e6ba63dfdff..130cd9502021 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -131,12 +131,12 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = { [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = HTT_T2H_MSG_TYPE_TX_FETCH_IND, - [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] = - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] = + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, - [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] = - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] = + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, }; int ath10k_htt_connect(struct ath10k_htt *htt) @@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt) memset(&conn_resp, 0, sizeof(conn_resp)); conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; - conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; + conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler; /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; @@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar) 8 + /* llc snap */ 2; /* ip4 dscp or ip6 priority */ - switch (ar->htt.op_version) { + switch (ar->running_fw->fw_file.htt_op_version) { case ATH10K_FW_HTT_OP_VERSION_10_4: ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types; ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS; @@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar) return 0; } -#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) +#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ) static int ath10k_htt_verify_version(struct ath10k_htt *htt) { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 2bad50e520b5..6377c4ef427c 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -22,6 +22,7 @@ #include <linux/interrupt.h> #include <linux/dmapool.h> #include <linux/hashtable.h> +#include <linux/kfifo.h> #include <net/mac80211.h> #include "htc.h" @@ -52,6 +53,7 @@ enum htt_h2t_msg_type { /* host-to-target */ /* This command is used for sending management frames in HTT < 3.0. * HTT >= 3.0 uses TX_FRM for everything. 
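/*
 * Illustrative sketch (not part of the patch): the translation-table idea
 * behind htt_10_4_t2h_msg_types[] above. Raw T2H message IDs differ between
 * firmware branches, so each branch provides an array indexed by the wire
 * value that yields one unified enum the rest of the driver switches on. The
 * wire values 0x16/0x17/0x30 are the 10.4 values from the enum later in this
 * header; everything else is simplified (the driver also bounds-checks
 * against t2h_msg_types_max, unlisted slots here simply default to 0).
 */
#include <stdio.h>

enum t2h_msg_type {			/* unified, driver-internal IDs */
	T2H_MSG_TYPE_TX_FETCH_IND,
	T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	T2H_NUM_MSGS,
};

static const enum t2h_msg_type htt_10_4_map[0x31] = {
	[0x16] = T2H_MSG_TYPE_TX_FETCH_IND,
	[0x17] = T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	[0x30] = T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
};

int main(void)
{
	unsigned int wire_type = 0x17;	/* as read from the HTT header */

	if (wire_type < sizeof(htt_10_4_map) / sizeof(htt_10_4_map[0]))
		printf("wire 0x%02x -> unified %d\n", wire_type,
		       (int)htt_10_4_map[wire_type]);
	return 0;
}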
*/ HTT_H2T_MSG_TYPE_MGMT_TX = 7, + HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11, HTT_H2T_NUM_MSGS /* keep this last */ }; @@ -97,7 +99,11 @@ struct htt_data_tx_desc_frag { } __packed; struct htt_msdu_ext_desc { +#ifdef CONFIG_ATH10K_SNOC + __le32 tso_flag[5]; +#else __le32 tso_flag[3]; +#endif __le16 ip_identification; u8 flags; u8 reserved; @@ -116,6 +122,19 @@ struct htt_msdu_ext_desc { | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) +#define HTT_TX_IPV4_CSUM_EN BIT(16) +#define HTT_TX_UDP_IPV4_CSUM_EN BIT(17) +#define HTT_TX_UDP_IPV6_CSUM_EN BIT(18) +#define HTT_TX_TCP_IPV4_CSUM_EN BIT(19) +#define HTT_TX_TCP_IPV6_CSUM_EN BIT(20) +#define HTT_TX_PARTIAL_CSUM_EN BIT(21) + +#define HTT_TX_CHECKSUM_ENABLE (HTT_TX_IPV4_CSUM_EN \ + | HTT_TX_UDP_IPV4_CSUM_EN \ + | HTT_TX_UDP_IPV6_CSUM_EN \ + | HTT_TX_TCP_IPV4_CSUM_EN \ + | HTT_TX_TCP_IPV6_CSUM_EN) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -165,9 +184,19 @@ struct htt_data_tx_desc { __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ __le16 len; __le16 id; +#ifdef CONFIG_ATH10K_SNOC + __le32 frags_paddr_lo; + __le32 frags_paddr_hi; +#else __le32 frags_paddr; - __le16 peerid; - __le16 freq; +#endif + union { + __le32 peerid; + struct { + __le16 peerid; + __le16 freq; + } __packed offchan_tx; + } __packed; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; @@ -194,8 +223,15 @@ enum htt_rx_ring_flags { #define HTT_RX_RING_SIZE_MAX 2048 struct htt_rx_ring_setup_ring { +#ifdef CONFIG_ATH10K_SNOC + __le32 fw_idx_shadow_reg_paddr_low; + __le32 fw_idx_shadow_reg_paddr_high; + __le32 rx_ring_base_paddr_low; + __le32 rx_ring_base_paddr_high; +#else __le32 fw_idx_shadow_reg_paddr; __le32 rx_ring_base_paddr; +#endif __le16 rx_ring_len; /* in 4-byte words */ __le16 rx_ring_bufsize; /* rx skb size - in bytes */ __le16 flags; /* %HTT_RX_RING_FLAGS_ */ @@ -408,10 +444,10 @@ enum htt_10_4_t2h_msg_type { HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, - HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17, HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, /* 0x19 to 0x2f are reserved */ - HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30, + HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30, /* keep this last */ HTT_10_4_T2H_NUM_MSGS }; @@ -444,8 +480,8 @@ enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_TEST, HTT_T2H_MSG_TYPE_EN_STATS, HTT_T2H_MSG_TYPE_TX_FETCH_IND, - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, /* keep this last */ HTT_T2H_NUM_MSGS }; @@ -478,10 +514,10 @@ struct htt_mgmt_tx_completion { __le32 status; } __packed; -#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) +#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) -#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) -#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) +#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) +#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 @@ -517,6 +553,9 @@ struct htt_rx_indication_hdr { #define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24 +#define HTT_WCN3990_PADDR_MASK 0x1F +#define 
HTT_WCN3990_ARCH_PADDR_MASK 0x1FFFFFFFFF + enum htt_rx_legacy_rate { HTT_RX_OFDM_48 = 0, HTT_RX_OFDM_24 = 1, @@ -588,7 +627,7 @@ enum htt_rx_mpdu_status { /* only accept EAPOL frames */ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER, HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC, - /* Non-data in promiscous mode */ + /* Non-data in promiscuous mode */ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL, HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR, HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR, @@ -839,7 +878,11 @@ struct htt_rx_offload_ind { } __packed; struct htt_rx_in_ord_msdu_desc { +#ifdef CONFIG_ATH10K_SNOC + __le64 msdu_paddr; +#else __le32 msdu_paddr; +#endif __le16 msdu_len; u8 fw_desc; u8 reserved; @@ -893,7 +936,7 @@ struct htt_rx_in_ord_ind { * Purpose: indicate how many 32-bit integers follow the message header * - NUM_CHARS * Bits 31:16 - * Purpose: indicate how many 8-bit charaters follow the series of integers + * Purpose: indicate how many 8-bit characters follow the series of integers */ struct htt_rx_test { u8 num_ints; @@ -1035,10 +1078,10 @@ struct htt_dbg_stats_wal_tx_stats { /* illegal rate phy errors */ __le32 illgl_rate_phy_err; - /* wal pdev continous xretry */ + /* wal pdev continuous xretry */ __le32 pdev_cont_xretry; - /* wal pdev continous xretry */ + /* wal pdev continuous xretry */ __le32 pdev_tx_timeout; /* wal pdev resets */ @@ -1301,16 +1344,175 @@ struct htt_frag_desc_bank_id { * so we use a conservatively safe value for now */ #define HTT_FRAG_DESC_BANK_MAX 4 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 -#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 +#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 + +enum htt_q_depth_type { + HTT_Q_DEPTH_TYPE_BYTES = 0, + HTT_Q_DEPTH_TYPE_MSDUS = 1, +}; + +#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ + TARGET_10_4_NUM_VDEVS) +#define HTT_TX_Q_STATE_NUM_TIDS 8 +#define HTT_TX_Q_STATE_ENTRY_SIZE 1 +#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 + +/** + * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config + * + * Defines host q state format and behavior. See htt_q_state. + * + * @record_size: Defines the size of each host q entry in bytes. In practice + * however firmware (at least 10.4.3-00191) ignores this host + * configuration value and uses hardcoded value of 1. + * @record_multiplier: This is valid only when q depth type is MSDUs. It + * defines the exponent for the power of 2 multiplication. 
+ */ +struct htt_q_state_conf { + __le32 paddr; + __le16 num_peers; + __le16 num_tids; + u8 record_size; + u8 record_multiplier; + u8 pad[2]; +} __packed; + +struct bank_base_addr { + __le32 low; + __le32 high; +}; struct htt_frag_desc_bank_cfg { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ u8 num_banks; u8 desc_size; +#ifdef CONFIG_ATH10K_SNOC + struct bank_base_addr bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; +#else __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; +#endif struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; + struct htt_q_state_conf q_state; +} __packed; + +#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 +#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f +#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 +#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 +#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 + +/** + * htt_q_state - shared between host and firmware via DMA + * + * This structure is used for the host to expose it's software queue state to + * firmware so that its rate control can schedule fetch requests for optimized + * performance. This is most notably used for MU-MIMO aggregation when multiple + * MU clients are connected. + * + * @count: Each element defines the host queue depth. When q depth type was + * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: + * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and + * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as + * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** + * record_multiplier (see htt_q_state_conf). + * @map: Used by firmware to quickly check which host queues are not empty. It + * is a bitmap simply saying. + * @seq: Used by firmware to quickly check if the host queues were updated + * since it last checked. + * + * FIXME: Is the q_state map[] size calculation really correct? 
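/*
 * Illustrative sketch (not part of the patch): decoding one byte of the
 * htt_q_state count[][] array documented above when the queue depth type is
 * HTT_Q_DEPTH_TYPE_BYTES. Per that comment the depth is FACTOR * 128 * 8^EXP,
 * with FACTOR in bits 0-5 and EXP in bits 6-7 (the ENTRY_FACTOR/EXP masks
 * defined above); the sample entry value is invented.
 */
#include <stdio.h>
#include <stdint.h>

#define Q_STATE_ENTRY_COEFFICIENT	128	/* HTT_TX_Q_STATE_ENTRY_COEFFICIENT */
#define Q_STATE_ENTRY_FACTOR_MASK	0x3f
#define Q_STATE_ENTRY_FACTOR_LSB	0
#define Q_STATE_ENTRY_EXP_MASK		0xc0
#define Q_STATE_ENTRY_EXP_LSB		6

static unsigned long q_state_entry_to_bytes(uint8_t entry)
{
	unsigned int factor = (entry & Q_STATE_ENTRY_FACTOR_MASK) >>
			      Q_STATE_ENTRY_FACTOR_LSB;
	unsigned int exp = (entry & Q_STATE_ENTRY_EXP_MASK) >>
			   Q_STATE_ENTRY_EXP_LSB;

	/* FACTOR * 128 * 8^EXP, with 8^EXP written as a shift by 3*EXP */
	return ((unsigned long)factor * Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
}

int main(void)
{
	/* factor = 5, exp = 2  ->  5 * 128 * 64 = 40960 bytes */
	uint8_t entry = (2 << Q_STATE_ENTRY_EXP_LSB) | 5;

	printf("entry 0x%02x -> %lu bytes\n", (unsigned int)entry,
	       q_state_entry_to_bytes(entry));
	return 0;
}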
+ */ +struct htt_q_state { + u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; + u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; + __le32 seq; +} __packed; + +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 +#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 +#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 + +struct htt_tx_fetch_record { + __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ + __le16 num_msdus; + __le32 num_bytes; +} __packed; + +struct htt_tx_fetch_ind { + u8 pad0; + __le16 fetch_seq_num; + __le32 token; + __le16 num_resp_ids; + __le16 num_records; + struct htt_tx_fetch_record records[0]; + __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ +} __packed; + +static inline void * +ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) +{ + return (void *)&ind->records[le16_to_cpu(ind->num_records)]; +} + +struct htt_tx_fetch_resp { + u8 pad0; + __le16 resp_id; + __le16 fetch_seq_num; + __le16 num_records; + __le32 token; + struct htt_tx_fetch_record records[0]; +} __packed; + +struct htt_tx_fetch_confirm { + u8 pad0; + __le16 num_resp_ids; + __le32 resp_ids[0]; +} __packed; + +enum htt_tx_mode_switch_mode { + HTT_TX_MODE_SWITCH_PUSH = 0, + HTT_TX_MODE_SWITCH_PUSH_PULL = 1, +}; + +#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 + +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 + +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 + +struct htt_tx_mode_switch_record { + __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ + __le16 num_max_msdus; +} __packed; + +struct htt_tx_mode_switch_ind { + u8 pad0; + __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ + __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ + u8 pad1[2]; + struct htt_tx_mode_switch_record records[0]; +} __packed; + +struct htt_channel_change { + u8 pad[3]; + __le32 freq; + __le32 center_freq1; + __le32 center_freq2; + __le32 phymode; } __packed; union htt_rx_pn_t { @@ -1318,10 +1520,10 @@ union htt_rx_pn_t { u32 pn24; /* TKIP or CCMP: 48-bit PN */ - u_int64_t pn48; + u64 pn48; /* WAPI: 128-bit PN */ - u_int64_t pn128[2]; + u64 pn128[2]; }; struct htt_cmd { @@ -1335,6 +1537,7 @@ struct htt_cmd { struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; + struct htt_tx_fetch_resp tx_fetch_resp; }; } __packed; @@ -1359,16 +1562,25 @@ struct htt_resp { struct htt_rx_pn_ind rx_pn_ind; struct htt_rx_offload_ind rx_offload_ind; struct htt_rx_in_ord_ind rx_in_ord_ind; + struct htt_tx_fetch_ind tx_fetch_ind; + struct htt_tx_fetch_confirm tx_fetch_confirm; + struct htt_tx_mode_switch_ind tx_mode_switch_ind; + struct htt_channel_change chan_change; }; } __packed; /*** host side structures follow ***/ struct htt_tx_done { - u32 msdu_id; - bool discard; - bool no_ack; - bool success; + u16 msdu_id; + u16 status; +}; + +enum htt_tx_compl_state { + HTT_TX_COMPL_STATE_NONE, + HTT_TX_COMPL_STATE_ACK, + HTT_TX_COMPL_STATE_NOACK, + HTT_TX_COMPL_STATE_DISCARD, }; struct htt_peer_map_event { @@ -1395,7 +1607,6 @@ 
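/*
 * Illustrative sketch (not part of the patch): why
 * ath10k_htt_get_tx_fetch_ind_resp_ids() above is just pointer arithmetic.
 * The fetch indication carries two variable-length tails back to back -
 * num_records fetch records followed by num_resp_ids response IDs - so the
 * second tail begins at &records[num_records]. Host-endian integers stand in
 * for __le16/__le32 to keep the sketch self-contained.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct fetch_record {
	uint16_t info;
	uint16_t num_msdus;
	uint32_t num_bytes;
};

struct fetch_ind {
	uint16_t num_resp_ids;
	uint16_t num_records;
	struct fetch_record records[];	/* resp_ids[] follows the records */
};

static uint32_t *fetch_ind_resp_ids(struct fetch_ind *ind)
{
	return (uint32_t *)&ind->records[ind->num_records];
}

int main(void)
{
	size_t num_records = 2, num_resp_ids = 3;
	size_t len = sizeof(struct fetch_ind) +
		     num_records * sizeof(struct fetch_record) +
		     num_resp_ids * sizeof(uint32_t);
	struct fetch_ind *ind = calloc(1, len);
	uint32_t *resp_ids;

	if (!ind)
		return 1;

	ind->num_records = (uint16_t)num_records;
	ind->num_resp_ids = (uint16_t)num_resp_ids;
	resp_ids = fetch_ind_resp_ids(ind);
	resp_ids[0] = 0x11;

	printf("resp_ids start %zu bytes into the message\n",
	       (size_t)((char *)resp_ids - (char *)ind));
	free(ind);
	return 0;
}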
struct ath10k_htt { u8 target_version_major; u8 target_version_minor; struct completion target_version_received; - enum ath10k_fw_htt_op_version op_version; u8 max_num_amsdu; u8 max_num_ampdu; @@ -1433,7 +1644,11 @@ struct ath10k_htt { * rx buffers the host SW provides for the MAC HW to * fill. */ +#ifdef CONFIG_ATH10K_SNOC + __le64 *paddrs_ring; +#else __le32 *paddrs_ring; +#endif /* * Base address of ring, as a "physical" device address @@ -1489,17 +1704,19 @@ struct ath10k_htt { struct idr pending_tx; wait_queue_head_t empty_tx_wq; + /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */ + DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); + /* set if host-fw communication goes haywire * used to avoid further failures */ bool rx_confused; - struct tasklet_struct rx_replenish_task; + atomic_t num_mpdus_ready; /* This is used to group tx/rx completions separately and process them * in batches to reduce cache stalls */ - struct tasklet_struct txrx_compl_task; - struct sk_buff_head tx_compl_q; struct sk_buff_head rx_compl_q; struct sk_buff_head rx_in_ord_compl_q; + struct sk_buff_head tx_fetch_ind_q; /* rx_status template */ struct ieee80211_rx_status rx_status; @@ -1513,20 +1730,36 @@ struct ath10k_htt { dma_addr_t paddr; struct ath10k_htt_txbuf *vaddr; } txbuf; + + struct { + bool enabled; + struct htt_q_state *vaddr; + dma_addr_t paddr; + u16 num_push_allowed; + u16 num_peers; + u16 num_tids; + enum htt_tx_mode_switch_mode mode; + enum htt_q_depth_type type; + } tx_q_state; }; #define RX_HTT_HDR_STATUS_LEN 64 -/* This structure layout is programmed via rx ring setup - * so that FW knows how to transfer the rx descriptor to the host. - * Buffers like this are placed on the rx ring. */ -struct htt_rx_desc { +struct htt_host_fw_desc_base { union { /* This field is filled on the host using the msdu buffer * from htt_rx_indication */ struct fw_rx_desc_base fw_desc; u32 pad; } __packed; +}; + +/* This structure layout is programmed via rx ring setup + * so that FW knows how to transfer the rx descriptor to the host. + * Buffers like this are placed on the rx ring. + */ +struct htt_rx_desc { + struct htt_host_fw_desc_base fw_desc_base; struct { struct rx_attention attention; struct rx_frag_info frag_info; @@ -1555,7 +1788,7 @@ struct htt_rx_desc { /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle * aggregated traffic more nicely. */ -#define ATH10K_HTT_MAX_NUM_REFILL 16 +#define ATH10K_HTT_MAX_NUM_REFILL 100 /* * DMA_MAP expects the buffer to be an integral number of cache lines. 
@@ -1583,7 +1816,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar); void ath10k_htt_rx_free(struct ath10k_htt *htt); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); @@ -1592,11 +1826,33 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records); + +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_sync(struct ath10k *ar); +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt); +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp); -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); +int ath10k_htt_tx(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu); +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb); +int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget); +struct sk_buff *ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt, + u16 msdu_id); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 78079ce1ad5a..23c43a0fa8df 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -27,15 +27,17 @@ #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) +#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1) /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 +#define HTT_RX_RING_REFILL_RESCHED_MS 5 + static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); -static void ath10k_htt_txrx_compl_task(unsigned long ptr); static struct sk_buff * -ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr) +ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) { struct ath10k_skb_rxcb *rxcb; @@ -136,13 +138,16 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) rxcb = ATH10K_SKB_RXCB(skb); rxcb->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; - htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); + if (QCA_REV_WCN3990(htt->ar)) + htt->rx_ring.paddrs_ring[idx] = __cpu_to_le64(paddr); + else + htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); htt->rx_ring.fill_cnt++; if (htt->rx_ring.in_ord_rx) { hash_add(htt->rx_ring.skb_table, &ATH10K_SKB_RXCB(skb)->hlist, - 
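/*
 * Illustrative sketch (not part of the patch): why the rx ring fill above
 * picks __cpu_to_le64() for WCN3990 and __cpu_to_le32() otherwise. WCN3990
 * buffer addresses can apparently exceed 32 bits (compare the 37-bit
 * HTT_WCN3990_ARCH_PADDR_MASK earlier in htt.h), so the ring entry width has
 * to follow the target. The byte-wise stores below stand in for the
 * __cpu_to_leXX() helpers.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void put_le(uint8_t *slot, uint64_t paddr, size_t entry_size)
{
	size_t i;

	for (i = 0; i < entry_size; i++)
		slot[i] = (uint8_t)(paddr >> (8 * i));
}

int main(void)
{
	uint8_t ring64[4 * 8], ring32[4 * 4];
	uint64_t paddr = 0x1234567890ull;	/* above 4 GB */
	int idx = 0;

	memset(ring64, 0, sizeof(ring64));
	memset(ring32, 0, sizeof(ring32));

	put_le(&ring64[idx * 8], paddr, 8);	/* WCN3990-style 64-bit entry */
	put_le(&ring32[idx * 4], paddr, 4);	/* legacy 32-bit entry */

	printf("64-bit entry keeps 0x%llx, 32-bit entry truncates to 0x%x\n",
	       (unsigned long long)paddr, (unsigned int)(paddr & 0xffffffffu));
	return 0;
}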
(u32)paddr); + paddr); } num--; @@ -200,7 +205,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); } else if (num_deficit > 0) { - tasklet_schedule(&htt->rx_replenish_task); + mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + + msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); } spin_unlock_bh(&htt->rx_ring.lock); } @@ -232,12 +238,10 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) void ath10k_htt_rx_free(struct ath10k_htt *htt) { del_timer_sync(&htt->rx_ring.refill_retry_timer); - tasklet_kill(&htt->rx_replenish_task); - tasklet_kill(&htt->txrx_compl_task); - skb_queue_purge(&htt->tx_compl_q); skb_queue_purge(&htt->rx_compl_q); skb_queue_purge(&htt->rx_in_ord_compl_q); + skb_queue_purge(&htt->tx_fetch_ind_q); spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_ring_free(htt); @@ -292,7 +296,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, - u8 **fw_desc, int *fw_desc_len, struct sk_buff_head *amsdu) { struct ath10k *ar = htt->ar; @@ -334,48 +337,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return -EIO; } - /* - * Copy the FW rx descriptor for this MSDU from the rx - * indication message into the MSDU's netbuf. HL uses the - * same rx indication message definition as LL, and simply - * appends new info (fields from the HW rx desc, and the - * MSDU payload itself). So, the offset into the rx - * indication message only has to account for the standard - * offset of the per-MSDU FW rx desc info within the - * message, and how many bytes of the per-MSDU FW rx desc - * info have already been consumed. (And the endianness of - * the host, since for a big-endian host, the rx ind - * message contents, including the per-MSDU rx desc bytes, - * were byteswapped during upload.) - */ - if (*fw_desc_len > 0) { - rx_desc->fw_desc.info0 = **fw_desc; - /* - * The target is expected to only provide the basic - * per-MSDU rx descriptors. Just to be sure, verify - * that the target has not attached extension data - * (e.g. LRO flow ID). - */ - - /* or more, if there's extension data */ - (*fw_desc)++; - (*fw_desc_len)--; - } else { - /* - * When an oversized AMSDU happened, FW will lost - * some of MSDU status - in this case, the FW - * descriptors provided will be less than the - * actual MSDUs inside this MPDU. Mark the FW - * descriptors so that it will still deliver to - * upper stack, if no CRC error for this MPDU. - * - * FIX THIS - the FW descriptors are actually for - * MSDUs in the end of this A-MSDU instead of the - * beginning. 
- */ - rx_desc->fw_desc.info0 = 0; - } - msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); @@ -434,15 +395,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return msdu_chaining; } -static void ath10k_htt_rx_replenish_task(unsigned long ptr) -{ - struct ath10k_htt *htt = (struct ath10k_htt *)ptr; - - ath10k_htt_rx_msdu_buff_replenish(htt); -} - static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, - u32 paddr) + u64 paddr) { struct ath10k *ar = htt->ar; struct ath10k_skb_rxcb *rxcb; @@ -477,7 +431,7 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt, struct sk_buff *msdu; int msdu_count; bool is_offload; - u32 paddr; + u64 paddr; lockdep_assert_held(&htt->rx_ring.lock); @@ -485,8 +439,12 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt, is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); while (msdu_count--) { +#ifdef CONFIG_ATH10K_SNOC + paddr = __le64_to_cpu(msdu_desc->msdu_paddr); + paddr &= HTT_WCN3990_ARCH_PADDR_MASK; +#else paddr = __le32_to_cpu(msdu_desc->msdu_paddr); - +#endif msdu = ath10k_htt_rx_pop_paddr(htt, paddr); if (!msdu) { __skb_queue_purge(list); @@ -532,7 +490,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) */ htt->rx_ring.size = HTT_RX_RING_SIZE; htt->rx_ring.size_mask = htt->rx_ring.size - 1; - htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL; + + switch (ar->hw_rev) { + case ATH10K_HW_WCN3990: + htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC; + break; + default: + htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL; + break; + } if (!is_power_of_2(htt->rx_ring.size)) { ath10k_warn(ar, "htt rx ring size is not power of 2\n"); @@ -547,7 +513,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring); - vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA); + vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); if (!vaddr) goto err_dma_ring; @@ -556,7 +522,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) vaddr = dma_alloc_coherent(htt->ar->dev, sizeof(*htt->rx_ring.alloc_idx.vaddr), - &paddr, GFP_DMA); + &paddr, GFP_KERNEL); if (!vaddr) goto err_dma_idx; @@ -574,15 +540,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) htt->rx_ring.sw_rd_idx.msdu_payld = 0; hash_init(htt->rx_ring.skb_table); - tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, - (unsigned long)htt); - - skb_queue_head_init(&htt->tx_compl_q); skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_in_ord_compl_q); - - tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, - (unsigned long)htt); + skb_queue_head_init(&htt->tx_fetch_ind_q); + atomic_set(&htt->num_mpdus_ready, 0); ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", htt->rx_ring.size, htt->rx_ring.fill_level); @@ -614,6 +575,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, return IEEE80211_TKIP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: return IEEE80211_CCMP_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + return IEEE80211_CCMP_256_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: + return IEEE80211_GCMP_HDR_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: break; @@ -639,6 +605,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, return IEEE80211_TKIP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: return 
IEEE80211_CCMP_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + return IEEE80211_CCMP_256_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: + return IEEE80211_GCMP_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: break; @@ -686,7 +657,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, rate &= ~RX_PPDU_START_RATE_FLAG; sband = &ar->mac.sbands[status->band]; - status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate); + status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); break; case HTT_RX_HT: case HTT_RX_HT_WITH_TXBF: @@ -815,7 +786,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) if (WARN_ON_ONCE(!arvif)) return NULL; - if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + if (ath10k_mac_vif_chan(arvif->vif, &def)) return NULL; return def.chan; @@ -877,6 +848,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); if (!ch) ch = ath10k_htt_rx_h_any_channel(ar); + if (!ch) + ch = ar->tgt_oper_chan; spin_unlock_bh(&ar->data_lock); if (!ch) @@ -994,9 +967,9 @@ static void ath10k_process_rx(struct ath10k *ar, status = IEEE80211_SKB_RXCB(skb); *status = *rx_status; - + fill_datapath_stats(ar, status); ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), @@ -1004,7 +977,8 @@ static void ath10k_process_rx(struct ath10k *ar, is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - status->flag == 0 ? "legacy" : "", + (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? + "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? 
"40" : "", @@ -1022,7 +996,8 @@ static void ath10k_process_rx(struct ath10k *ar, trace_ath10k_rx_hdr(ar, skb->data, skb->len); trace_ath10k_rx_payload(ar, skb->data, skb->len); - ieee80211_rx(ar->hw, skb); + ath10k_rx_record_pktlog(ar, skb); + ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); } static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, @@ -1031,7 +1006,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, int len = ieee80211_hdrlen(hdr->frame_control); if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, - ar->fw_features)) + ar->running_fw->fw_file.fw_features)) len = round_up(len, 4); return len; @@ -1110,17 +1085,20 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, } /* MMIC */ - if (!ieee80211_has_morefrags(hdr->frame_control) && + if ((status->flag & RX_FLAG_MMIC_STRIPPED) && + !ieee80211_has_morefrags(hdr->frame_control) && enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) skb_trim(msdu, msdu->len - 8); /* Head */ - hdr_len = ieee80211_hdrlen(hdr->frame_control); - crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + if (status->flag & RX_FLAG_IV_STRIPPED) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); - memmove((void *)msdu->data + crypto_len, - (void *)msdu->data, hdr_len); - skb_pull(msdu, crypto_len); + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); + } } static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, @@ -1130,9 +1108,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type enctype) { struct ieee80211_hdr *hdr; + struct htt_rx_desc *rxd; size_t hdr_len; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; + int l3_pad_bytes; int bytes_aligned = ar->hw_params.decap_align_bytes; /* Delivered decapped frame: @@ -1147,7 +1127,13 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, */ /* pull decapped header and copy SA & DA */ - hdr = (struct ieee80211_hdr *)msdu->data; + rxd = (void *)msdu->data - sizeof(*rxd); + + l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + skb_put(msdu, l3_pad_bytes); + + hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); + hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); @@ -1222,6 +1208,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, void *rfc1042; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; + int l3_pad_bytes; + struct htt_rx_desc *rxd; int bytes_aligned = ar->hw_params.decap_align_bytes; /* Delivered decapped frame: @@ -1233,6 +1221,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, if (WARN_ON_ONCE(!rfc1042)) return; + rxd = (void *)msdu->data - sizeof(*rxd); + l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + skb_put(msdu, l3_pad_bytes); + skb_pull(msdu, l3_pad_bytes); + /* pull decapped header and copy SA & DA */ eth = (struct ethhdr *)msdu->data; ether_addr_copy(da, eth->h_dest); @@ -1272,6 +1265,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, { struct ieee80211_hdr *hdr; size_t hdr_len; + int l3_pad_bytes; + struct htt_rx_desc *rxd; int bytes_aligned = ar->hw_params.decap_align_bytes; /* Delivered decapped frame: @@ -1280,7 +1275,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, * [payload] */ - skb_pull(msdu, sizeof(struct amsdu_subframe_hdr)); + rxd = (void *)msdu->data - sizeof(*rxd); + l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + 
+ skb_put(msdu, l3_pad_bytes); + skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); @@ -1393,6 +1392,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, bool has_tkip_err; bool has_peer_idx_invalid; bool is_decrypted; + bool is_mgmt; u32 attention; if (skb_queue_empty(amsdu)) @@ -1401,6 +1401,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, first = skb_peek(amsdu); rxd = (void *)first->data - sizeof(*rxd); + is_mgmt = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), RX_MPDU_START_INFO0_ENCRYPT_TYPE); @@ -1444,6 +1447,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | + RX_FLAG_ONLY_MONITOR | RX_FLAG_MMIC_STRIPPED); if (has_fcs_err) @@ -1452,9 +1456,19 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, if (has_tkip_err) status->flag |= RX_FLAG_MMIC_ERROR; + /* Firmware reports all necessary management frames via WMI already. + * They are not reported to monitor interfaces at all so pass the ones + * coming via HTT to monitor interfaces instead. This simplifies + * matters a lot. + */ + if (is_mgmt) + status->flag |= RX_FLAG_ONLY_MONITOR; + if (is_decrypted) { - status->flag |= RX_FLAG_DECRYPTED | - RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_DECRYPTED; + + if (likely(!is_mgmt)) + status->flag |= RX_FLAG_MMIC_STRIPPED; if (fill_crypt_header) status->flag |= RX_FLAG_MIC_STRIPPED | @@ -1474,6 +1488,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, */ if (!is_decrypted) continue; + if (is_mgmt) + continue; if (fill_crypt_header) continue; @@ -1587,14 +1603,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *rx_status) { - struct sk_buff *msdu; - struct htt_rx_desc *rxd; - bool is_mgmt; - bool has_fcs_err; - - msdu = skb_peek(amsdu); - rxd = (void *)msdu->data - sizeof(*rxd); - /* FIXME: It might be a good idea to do some fuzzy-testing to drop * invalid/dangerous frames. */ @@ -1604,23 +1612,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, return false; } - is_mgmt = !!(rxd->attention.flags & - __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); - has_fcs_err = !!(rxd->attention.flags & - __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR)); - - /* Management frames are handled via WMI events. The pros of such - * approach is that channel is explicitly provided in WMI events - * whereas HTT doesn't provide channel information for Rxed frames. - * - * However some firmware revisions don't report corrupted frames via - * WMI so don't drop them. 
- */ - if (is_mgmt && !has_fcs_err) { - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); - return false; - } - if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); return false; @@ -1642,25 +1633,50 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, __skb_queue_purge(amsdu); } -static void ath10k_htt_rx_handler(struct ath10k_htt *htt, - struct htt_rx_indication *rx) +static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; - struct htt_rx_indication_mpdu_range *mpdu_ranges; struct sk_buff_head amsdu; - int num_mpdu_ranges; - int fw_desc_len; - u8 *fw_desc; - int i, ret, mpdu_count = 0; + int ret, num_msdus; - lockdep_assert_held(&htt->rx_ring.lock); + __skb_queue_head_init(&amsdu); - if (htt->rx_confused) - return; + spin_lock_bh(&htt->rx_ring.lock); + if (htt->rx_confused) { + spin_unlock_bh(&htt->rx_ring.lock); + return -EIO; + } + ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); + spin_unlock_bh(&htt->rx_ring.lock); - fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); - fw_desc = (u8 *)&rx->fw_desc; + if (ret < 0) { + ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); + __skb_queue_purge(&amsdu); + /* FIXME: It's probably a good idea to reboot the + * device instead of leaving it inoperable. + */ + htt->rx_confused = true; + return ret; + } + + num_msdus = skb_queue_len(&amsdu); + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); + ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); + + return num_msdus; +} + +static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, + struct htt_rx_indication *rx) +{ + struct ath10k *ar = htt->ar; + struct htt_rx_indication_mpdu_range *mpdu_ranges; + int num_mpdu_ranges; + int i, mpdu_count = 0; num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); @@ -1674,80 +1690,10 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, for (i = 0; i < num_mpdu_ranges; i++) mpdu_count += mpdu_ranges[i].mpdu_count; - while (mpdu_count--) { - __skb_queue_head_init(&amsdu); - ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, - &fw_desc_len, &amsdu); - if (ret < 0) { - ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); - __skb_queue_purge(&amsdu); - /* FIXME: It's probably a good idea to reboot the - * device instead of leaving it inoperable. 
- */ - htt->rx_confused = true; - break; - } - - ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); - ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); - ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true); - ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); - } - - tasklet_schedule(&htt->rx_replenish_task); -} - -static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, - struct htt_rx_fragment_indication *frag) -{ - struct ath10k *ar = htt->ar; - struct ieee80211_rx_status *rx_status = &htt->rx_status; - struct sk_buff_head amsdu; - int ret; - u8 *fw_desc; - int fw_desc_len; - - fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); - fw_desc = (u8 *)frag->fw_msdu_rx_desc; - - __skb_queue_head_init(&amsdu); - - spin_lock_bh(&htt->rx_ring.lock); - ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, - &amsdu); - spin_unlock_bh(&htt->rx_ring.lock); - - tasklet_schedule(&htt->rx_replenish_task); - - ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); - - if (ret) { - ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", - ret); - __skb_queue_purge(&amsdu); - return; - } - - if (skb_queue_len(&amsdu) != 1) { - ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); - __skb_queue_purge(&amsdu); - return; - } - - ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); - ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true); - ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); - - if (fw_desc_len > 0) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "expecting more fragmented rx in one indication %d\n", - fw_desc_len); - } + atomic_add(mpdu_count, &htt->num_mpdus_ready); } -static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, +static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; @@ -1759,19 +1705,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, switch (status) { case HTT_DATA_TX_STATUS_NO_ACK: - tx_done.no_ack = true; + tx_done.status = HTT_TX_COMPL_STATE_NOACK; break; case HTT_DATA_TX_STATUS_OK: - tx_done.success = true; + tx_done.status = HTT_TX_COMPL_STATE_ACK; break; case HTT_DATA_TX_STATUS_DISCARD: case HTT_DATA_TX_STATUS_POSTPONE: case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; default: ath10k_warn(ar, "unhandled tx completion status %d\n", status); - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; } @@ -1781,7 +1727,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { msdu_id = resp->data_tx_completion.msdus[i]; tx_done.msdu_id = __le16_to_cpu(msdu_id); - ath10k_txrx_tx_unref(htt, &tx_done); + + /* kfifo_put: In practice firmware shouldn't fire off per-CE + * interrupt and main interrupt (MSI/-X range case) for the same + * HTC service so it should be safe to use kfifo_put w/o lock. + * + * From kfifo_put() documentation: + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. 
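/*
 * Illustrative sketch (not part of the patch): the deferred-processing scheme
 * behind num_mpdus_ready above. The rx indication handler no longer walks the
 * MPDU ranges inline; it only adds the announced count to an atomic counter,
 * and a poll routine (cf. ath10k_htt_txrx_compl_task(ar, budget) declared in
 * htt.h) later drains at most 'budget' units per call. The drain loop below
 * is a schematic stand-in, not the driver's actual poll handler.
 */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int num_mpdus_ready;

/* indication path: just record how many MPDUs the target announced */
static void rx_proc_rx_ind(int mpdu_count)
{
	atomic_fetch_add(&num_mpdus_ready, mpdu_count);
}

/* poll path: handle at most 'budget' MPDUs, leave the rest for the next poll */
static int rx_poll(int budget)
{
	int done = 0;

	while (done < budget) {
		int ready = atomic_load(&num_mpdus_ready);

		if (ready <= 0)
			break;
		if (atomic_compare_exchange_weak(&num_mpdus_ready, &ready,
						 ready - 1))
			done++;	/* the real code would pop and deliver an A-MSDU here */
	}
	return done;
}

int main(void)
{
	rx_proc_rx_ind(5);
	printf("first poll handled %d\n", rx_poll(4));	/* 4 */
	printf("second poll handled %d\n", rx_poll(4));	/* 1 */
	return 0;
}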
+ */ + if (!kfifo_put(&htt->txdone_fifo, tx_done)) { + ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", + tx_done.msdu_id, tx_done.status); + ath10k_txrx_tx_unref(htt, &tx_done); + } } } @@ -1865,7 +1824,8 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) spin_unlock_bh(&ar->data_lock); } -static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, +static int ath10k_htt_rx_extract_amsdu(struct ath10k *ar, + struct sk_buff_head *list, struct sk_buff_head *amsdu) { struct sk_buff *msdu; @@ -1886,6 +1846,9 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, break; } + if (QCA_REV_WCN3990(ar)) + return 0; + msdu = skb_peek_tail(amsdu); rxd = (void *)msdu->data - sizeof(*rxd); if (!(rxd->msdu_end.common.info0 & @@ -1916,14 +1879,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, RX_FLAG_MMIC_STRIPPED; } -static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, - struct sk_buff_head *list) +static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar, + struct sk_buff_head *list) { struct ath10k_htt *htt = &ar->htt; struct ieee80211_rx_status *status = &htt->rx_status; struct htt_rx_offload_msdu *rx; struct sk_buff *msdu; size_t offset; + int num_msdu = 0; while ((msdu = __skb_dequeue(list))) { /* Offloaded frames don't have Rx descriptor. Instead they have @@ -1963,10 +1927,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, ath10k_htt_rx_h_rx_offload_prot(status, msdu); ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); ath10k_process_rx(ar, status, msdu); + num_msdu++; } + return num_msdu; } -static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (void *)skb->data; @@ -1979,12 +1945,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) u8 tid; bool offload; bool frag; - int ret; + int ret, num_msdus = 0; lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_confused) - return; + return -EIO; skb_pull(skb, sizeof(resp->hdr)); skb_pull(skb, sizeof(resp->rx_in_ord_ind)); @@ -2003,7 +1969,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) { ath10k_warn(ar, "dropping invalid in order rx indication\n"); - return; + return -EINVAL; } /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later @@ -2014,18 +1980,18 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) if (ret < 0) { ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); htt->rx_confused = true; - return; + return -EIO; } /* Offloaded frames are very different and need to be handled * separately. */ if (offload) - ath10k_htt_rx_h_rx_offload(ar, &list); + num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list); while (!skb_queue_empty(&list)) { __skb_queue_head_init(&amsdu); - ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); + ret = ath10k_htt_rx_extract_amsdu(ar, &list, &amsdu); switch (ret) { case 0: /* Note: The in-order indication may report interleaved @@ -2034,6 +2000,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * better to report something than nothing though. This * should still give an idea about rx rate to the user. 
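/*
 * Illustrative sketch (not part of the patch): the single-producer /
 * single-consumer property the kfifo_put() comment above relies on. With
 * exactly one writer and one reader, each side only ever stores to its own
 * index, so no lock is required; the indices just need untorn, ordered
 * accesses (C11 atomics below; the kernel's kfifo takes care of this itself).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

#define FIFO_SIZE 8			/* power of two, like the kfifo */

struct tx_done { uint16_t msdu_id; uint16_t status; };

static struct tx_done slots[FIFO_SIZE];
static atomic_uint fifo_in;		/* written only by the producer */
static atomic_uint fifo_out;		/* written only by the consumer */

static bool fifo_put(struct tx_done d)	/* producer: tx completion indication */
{
	unsigned int in = atomic_load(&fifo_in), out = atomic_load(&fifo_out);

	if (in - out == FIFO_SIZE)
		return false;		/* full; the driver warns and falls back */
	slots[in % FIFO_SIZE] = d;
	atomic_store(&fifo_in, in + 1);
	return true;
}

static bool fifo_get(struct tx_done *d)	/* consumer: txrx completion task */
{
	unsigned int in = atomic_load(&fifo_in), out = atomic_load(&fifo_out);

	if (in == out)
		return false;		/* empty */
	*d = slots[out % FIFO_SIZE];
	atomic_store(&fifo_out, out + 1);
	return true;
}

int main(void)
{
	struct tx_done d = { .msdu_id = 42, .status = 1 }, got;

	fifo_put(d);
	if (fifo_get(&got))
		printf("msdu_id %u status %u\n", (unsigned int)got.msdu_id,
		       (unsigned int)got.status);
	return 0;
}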
*/ + num_msdus += skb_queue_len(&amsdu); ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); ath10k_htt_rx_h_filter(ar, &amsdu, status); ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false); @@ -2046,14 +2013,299 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); htt->rx_confused = true; __skb_queue_purge(&list); - return; + return -EIO; + } + } + return num_msdus; +} + +static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, + const __le32 *resp_ids, + int num_resp_ids) +{ + int i; + u32 resp_id; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", + num_resp_ids); + + for (i = 0; i < num_resp_ids; i++) { + resp_id = le32_to_cpu(resp_ids[i]); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", + resp_id); + + /* TODO: free resp_id */ + } +} + +static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_txq *txq; + struct htt_resp *resp = (struct htt_resp *)skb->data; + struct htt_tx_fetch_record *record; + size_t len; + size_t max_num_bytes; + size_t max_num_msdus; + size_t num_bytes; + size_t num_msdus; + const __le32 *resp_ids; + u16 num_records; + u16 num_resp_ids; + u16 peer_id; + u8 tid; + int ret; + int i; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); + return; + } + + num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); + num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); + + len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; + len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", + num_records, num_resp_ids, + le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); + + if (!ar->htt.tx_q_state.enabled) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); + return; + } + + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); + return; + } + + rcu_read_lock(); + + for (i = 0; i < num_records; i++) { + record = &resp->tx_fetch_ind.records[i]; + peer_id = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_PEER_ID); + tid = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_TID); + max_num_msdus = le16_to_cpu(record->num_msdus); + max_num_bytes = le32_to_cpu(record->num_bytes); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", + i, peer_id, tid, max_num_msdus, max_num_bytes); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", + peer_id, tid); + continue; } + + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. 
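/* Illustrative sketch, not taken from the patch: the "okay to release
 * the lock and use txq because RCU read lock is held" comment above is
 * the standard RCU pattern -- look the object up under a spinlock, drop
 * the lock, and keep using the pointer until rcu_read_unlock(), which is
 * safe only because writers free objects via kfree_rcu(). All demo_
 * names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_peer {
        u16 peer_id;
        struct list_head list;
        struct rcu_head rcu;
};

static LIST_HEAD(demo_peers);
static DEFINE_SPINLOCK(demo_lock);

static void demo_use_peer(u16 peer_id)
{
        struct demo_peer *peer, *found = NULL;

        rcu_read_lock();

        spin_lock_bh(&demo_lock);
        list_for_each_entry_rcu(peer, &demo_peers, list) {
                if (peer->peer_id == peer_id) {
                        found = peer;
                        break;
                }
        }
        spin_unlock_bh(&demo_lock);

        /* found is still safe to use here: it cannot be freed before
         * rcu_read_unlock(), because removal below uses kfree_rcu()
         */
        if (found)
                pr_debug("peer %u still valid after unlock\n",
                         found->peer_id);

        rcu_read_unlock();
}

static void demo_remove_peer(struct demo_peer *peer)
{
        spin_lock_bh(&demo_lock);
        list_del_rcu(&peer->list);
        spin_unlock_bh(&demo_lock);

        kfree_rcu(peer, rcu);
}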
+ */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + num_msdus = 0; + num_bytes = 0; + + while (num_msdus < max_num_msdus && + num_bytes < max_num_bytes) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; + + num_msdus++; + num_bytes += ret; + } + + record->num_msdus = cpu_to_le16(num_msdus); + record->num_bytes = cpu_to_le32(num_bytes); + + ath10k_htt_tx_txq_recalc(hw, txq); + } + + rcu_read_unlock(); + + resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); + + ret = ath10k_htt_tx_fetch_resp(ar, + resp->tx_fetch_ind.token, + resp->tx_fetch_ind.fetch_seq_num, + resp->tx_fetch_ind.records, + num_records); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", + le32_to_cpu(resp->tx_fetch_ind.token), ret); + /* FIXME: request fw restart */ } - tasklet_schedule(&htt->rx_replenish_task); + ath10k_htt_tx_txq_sync(ar); } -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + size_t len; + int num_resp_ids; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); + return; + } + + num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); + len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); + return; + } + + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, + resp->tx_fetch_confirm.resp_ids, + num_resp_ids); +} + +static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + const struct htt_tx_mode_switch_record *record; + struct ieee80211_txq *txq; + struct ath10k_txq *artxq; + size_t len; + size_t num_records; + enum htt_tx_mode_switch_mode mode; + bool enable; + u16 info0; + u16 info1; + u16 threshold; + u16 peer_id; + u8 tid; + int i; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); + return; + } + + info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); + info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); + + enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); + num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); + mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); + threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", + info0, info1, enable, num_records, mode, threshold); + + len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); + return; + } + + switch (mode) { + case HTT_TX_MODE_SWITCH_PUSH: + case HTT_TX_MODE_SWITCH_PUSH_PULL: + break; + default: + ath10k_warn(ar, "received invalid 
tx_mode_switch_mode_ind mode %d, ignoring\n", + mode); + return; + } + + if (!enable) + return; + + ar->htt.tx_q_state.enabled = enable; + ar->htt.tx_q_state.mode = mode; + ar->htt.tx_q_state.num_push_allowed = threshold; + + rcu_read_lock(); + + for (i = 0; i < num_records; i++) { + record = &resp->tx_mode_switch_ind.records[i]; + info0 = le16_to_cpu(record->info0); + peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); + tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. + */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->htt.tx_lock); + artxq = (void *)txq->drv_priv; + artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); + spin_unlock_bh(&ar->htt.tx_lock); + } + + rcu_read_unlock(); + + ath10k_mac_tx_push_pending(ar); +} + +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +{ + bool release; + + release = ath10k_htt_t2h_msg_handler(ar, skb); + + /* Free the indication buffer */ + if (release) + dev_kfree_skb_any(skb); +} + +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (struct htt_resp *)skb->data; @@ -2069,8 +2321,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", resp->hdr.msg_type, ar->htt.t2h_msg_types_max); - dev_kfree_skb_any(skb); - return; + return true; } type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; @@ -2082,11 +2333,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IND: - spin_lock_bh(&htt->rx_ring.lock); - __skb_queue_tail(&htt->rx_compl_q, skb); - spin_unlock_bh(&htt->rx_ring.lock); - tasklet_schedule(&htt->txrx_compl_task); - return; + ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind); + break; case HTT_T2H_MSG_TYPE_PEER_MAP: { struct htt_peer_map_event ev = { .vdev_id = resp->peer_map.vdev_id, @@ -2107,28 +2355,31 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) struct htt_tx_done tx_done = {}; int status = __le32_to_cpu(resp->mgmt_tx_completion.status); - tx_done.msdu_id = - __le32_to_cpu(resp->mgmt_tx_completion.desc_id); + tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); switch (status) { case HTT_MGMT_TX_STATUS_OK: - tx_done.success = true; + tx_done.status = HTT_TX_COMPL_STATE_ACK; break; case HTT_MGMT_TX_STATUS_RETRY: - tx_done.no_ack = true; + tx_done.status = HTT_TX_COMPL_STATE_NOACK; break; case HTT_MGMT_TX_STATUS_DROP: - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; } - ath10k_txrx_tx_unref(htt, &tx_done); + status = ath10k_txrx_tx_unref(htt, &tx_done); + if (!status) { + spin_lock_bh(&htt->tx_lock); + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&htt->tx_lock); + } break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: - skb_queue_tail(&htt->tx_compl_q, skb); - tasklet_schedule(&htt->txrx_compl_task); - return; + 
ath10k_htt_rx_tx_compl_ind(htt->ar, skb); + break; case HTT_T2H_MSG_TYPE_SEC_IND: { struct ath10k *ar = htt->ar; struct htt_security_indication *ev = &resp->security_indication; @@ -2144,7 +2395,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); - ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); + atomic_inc(&htt->num_mpdus_ready); break; } case HTT_T2H_MSG_TYPE_TEST: @@ -2167,12 +2418,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_rx_delba(ar, resp); break; case HTT_T2H_MSG_TYPE_PKTLOG: { - struct ath10k_pktlog_hdr *hdr = - (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; - trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, - sizeof(*hdr) + - __le16_to_cpu(hdr->size)); + skb->len - + offsetof(struct htt_resp, + pktlog_msg.payload)); break; } case HTT_T2H_MSG_TYPE_RX_FLUSH: { @@ -2182,22 +2431,41 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { - spin_lock_bh(&htt->rx_ring.lock); __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); - spin_unlock_bh(&htt->rx_ring.lock); - tasklet_schedule(&htt->txrx_compl_task); - return; + return false; } case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: break; - case HTT_T2H_MSG_TYPE_CHAN_CHANGE: + case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { + u32 phymode = __le32_to_cpu(resp->chan_change.phymode); + u32 freq = __le32_to_cpu(resp->chan_change.freq); + + ar->tgt_oper_chan = + __ieee80211_get_channel(ar->hw->wiphy, freq); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt chan change freq %u phymode %s\n", + freq, ath10k_wmi_phymode_str(phymode)); break; + } case HTT_T2H_MSG_TYPE_AGGR_CONF: break; + case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { + struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); + + if (!tx_fetch_ind) { + ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); + break; + } + skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); + break; + } + case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: + ath10k_htt_rx_tx_fetch_confirm(ar, skb); + break; + case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: + ath10k_htt_rx_tx_mode_switch_ind(ar, skb); + break; case HTT_T2H_MSG_TYPE_EN_STATS: - case HTT_T2H_MSG_TYPE_TX_FETCH_IND: - case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: - case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND: default: ath10k_warn(ar, "htt event (%d) not handled\n", resp->hdr.msg_type); @@ -2205,34 +2473,116 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) skb->data, skb->len); break; }; + return true; +} +EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); - /* Free the indication buffer */ +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb) +{ + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); dev_kfree_skb_any(skb); } -EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); +EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); -static void ath10k_htt_txrx_compl_task(unsigned long ptr) +int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) { - struct ath10k_htt *htt = (struct ath10k_htt *)ptr; - struct ath10k *ar = htt->ar; - struct htt_resp *resp; + struct ath10k_htt *htt = &ar->htt; + struct htt_tx_done tx_done = {}; + struct sk_buff_head tx_ind_q; struct sk_buff *skb; + unsigned long flags; + int quota = 0, done, num_rx_msdus; + bool resched_napi = false; + + __skb_queue_head_init(&tx_ind_q); + + /* Since in-ord-ind can deliver more than 1 A-MSDU in single event, + * 
process it first to utilize full available quota. + */ + while (quota < budget) { + if (skb_queue_empty(&htt->rx_in_ord_compl_q)) + break; + + skb = __skb_dequeue(&htt->rx_in_ord_compl_q); + if (!skb) { + resched_napi = true; + goto exit; + } + + spin_lock_bh(&htt->rx_ring.lock); + num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb); + spin_unlock_bh(&htt->rx_ring.lock); + if (num_rx_msdus < 0) { + resched_napi = true; + goto exit; + } - while ((skb = skb_dequeue(&htt->tx_compl_q))) { - ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); + if (num_rx_msdus > 0) + quota += num_rx_msdus; + + if ((quota > ATH10K_NAPI_QUOTA_LIMIT) && + !skb_queue_empty(&htt->rx_in_ord_compl_q)) { + resched_napi = true; + goto exit; + } } - spin_lock_bh(&htt->rx_ring.lock); - while ((skb = __skb_dequeue(&htt->rx_compl_q))) { - resp = (struct htt_resp *)skb->data; - ath10k_htt_rx_handler(htt, &resp->rx_ind); - dev_kfree_skb_any(skb); + while (quota < budget) { + /* no more data to receive */ + if (!atomic_read(&htt->num_mpdus_ready)) + break; + + num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt); + if (num_rx_msdus < 0) { + resched_napi = true; + goto exit; + } + + quota += num_rx_msdus; + atomic_dec(&htt->num_mpdus_ready); + if ((quota > ATH10K_NAPI_QUOTA_LIMIT) && + atomic_read(&htt->num_mpdus_ready)) { + resched_napi = true; + goto exit; + } } - while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { - ath10k_htt_rx_in_ord_ind(ar, skb); + /* From NAPI documentation: + * The napi poll() function may also process TX completions, in which + * case if it processes the entire TX ring then it should count that + * work as the rest of the budget. + */ + if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) + quota = budget; + + /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. + * From kfifo_get() documentation: + * Note that with only one concurrent reader and one concurrent writer, + * you don't need extra locking to use these macro. + */ + while (kfifo_get(&htt->txdone_fifo, &tx_done)) + ath10k_txrx_tx_unref(htt, &tx_done); + + ath10k_mac_tx_push_pending(ar); + + spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); + skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); + spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); + + while ((skb = __skb_dequeue(&tx_ind_q))) { + ath10k_htt_rx_tx_fetch_ind(ar, skb); dev_kfree_skb_any(skb); } - spin_unlock_bh(&htt->rx_ring.lock); + +exit: + ath10k_htt_rx_msdu_buff_replenish(htt); + /* In case of rx failure or more data to read, report budget + * to reschedule NAPI poll + */ + done = resched_napi ? 
budget : quota; + + return done; } +EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 16823970dbfd..6579eb2b410c 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -22,53 +22,185 @@ #include "txrx.h" #include "debug.h" -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc) +static u8 ath10k_htt_tx_txq_calc_size(size_t count) { - if (limit_mgmt_desc) - htt->num_pending_mgmt_tx--; + int exp; + int factor; + + exp = 0; + factor = count >> 7; + + while (factor >= 64 && exp < 4) { + factor >>= 3; + exp++; + } + + if (exp == 4) + return 0xff; + + if (count > 0) + factor = max(1, factor); + + return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) | + SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR); +} + +static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_sta *arsta; + struct ath10k_vif *arvif = (void *)txq->vif->drv_priv; + unsigned long frame_cnt; + unsigned long byte_cnt; + int idx; + u32 bit; + u16 peer_id; + u8 tid; + u8 count; + + lockdep_assert_held(&ar->htt.tx_lock); + + if (!ar->htt.tx_q_state.enabled) + return; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + + if (txq->sta) { + arsta = (void *)txq->sta->drv_priv; + peer_id = arsta->peer_id; + } else { + peer_id = arvif->peer_id; + } + + tid = txq->tid; + bit = BIT(peer_id % 32); + idx = peer_id / 32; + + ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); + count = ath10k_htt_tx_txq_calc_size(byte_cnt); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n", + peer_id, tid); + return; + } + + ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count; + ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; + ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? 
bit : 0; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n", + peer_id, tid, count); +} + +static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) +{ + u32 seq; + size_t size; + + lockdep_assert_held(&ar->htt.tx_lock); + + if (!ar->htt.tx_q_state.enabled) + return; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + + seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); + seq++; + ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n", + seq); + + size = sizeof(*ar->htt.tx_q_state.vaddr); + dma_sync_single_for_device(ar->dev, + ar->htt.tx_q_state.paddr, + size, + DMA_TO_DEVICE); +} + +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_txq_sync(struct ath10k *ar) +{ + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) +{ + lockdep_assert_held(&htt->tx_lock); htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); } -static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, - bool limit_mgmt_desc) +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) { - spin_lock_bh(&htt->tx_lock); - __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); - spin_unlock_bh(&htt->tx_lock); + lockdep_assert_held(&htt->tx_lock); + + if (htt->num_pending_tx >= htt->max_num_pending_tx) + return -EBUSY; + + htt->num_pending_tx++; + if (htt->num_pending_tx == htt->max_num_pending_tx) + ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + + return 0; } -static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, - bool limit_mgmt_desc, bool is_probe_resp) +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp) { struct ath10k *ar = htt->ar; - int ret = 0; - spin_lock_bh(&htt->tx_lock); + lockdep_assert_held(&htt->tx_lock); - if (htt->num_pending_tx >= htt->max_num_pending_tx) { - ret = -EBUSY; - goto exit; - } + if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres) + return 0; - if (limit_mgmt_desc) { - if (is_probe_resp && (htt->num_pending_mgmt_tx > - ar->hw_params.max_probe_resp_desc_thres)) { - ret = -EBUSY; - goto exit; - } - htt->num_pending_mgmt_tx++; - } + if (is_presp && + ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) + return -EBUSY; - htt->num_pending_tx++; - if (htt->num_pending_tx == htt->max_num_pending_tx) - ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + htt->num_pending_mgmt_tx++; -exit: - spin_unlock_bh(&htt->tx_lock); - return ret; + return 0; +} + +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt) +{ + lockdep_assert_held(&htt->tx_lock); + + if (!htt->ar->hw_params.max_probe_resp_desc_thres) + return; + + htt->num_pending_mgmt_tx--; } int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) @@ -86,6 +218,27 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) return ret; } +struct sk_buff 
*ath10k_htt_tx_find_msdu_by_id(struct ath10k_htt *htt, + u16 msdu_id) +{ + struct ath10k *ar; + struct sk_buff *ret; + + if (!htt) + return NULL; + + ar = htt->ar; + + lockdep_assert_held(&htt->tx_lock); + + ret = (struct sk_buff *)idr_find(&htt->pending_tx, msdu_id); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx find msdu by msdu_id %s\n", + !ret ? "Failed" : "Success"); + + return ret; +} + void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { struct ath10k *ar = htt->ar; @@ -97,6 +250,87 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) idr_remove(&htt->pending_tx, msdu_id); } +static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) +{ + size_t size; + + if (!htt->frag_desc.vaddr) + return; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + + dma_free_coherent(htt->ar->dev, + size, + htt->frag_desc.vaddr, + htt->frag_desc.paddr); +} + +static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr) { + ath10k_err(ar, "failed to alloc fragment desc memory\n"); + return -ENOMEM; + } + + return 0; +} + +static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + return; + + size = sizeof(*htt->tx_q_state.vaddr); + + dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE); + kfree(htt->tx_q_state.vaddr); +} + +static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + int ret; + + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + return 0; + + htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; + htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS; + htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES; + + size = sizeof(*htt->tx_q_state.vaddr); + htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); + if (!htt->tx_q_state.vaddr) + return -ENOMEM; + + htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, + size, DMA_TO_DEVICE); + ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr); + if (ret) { + ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret); + kfree(htt->tx_q_state.vaddr); + return -EIO; + } + + return 0; +} + int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; @@ -111,36 +345,49 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr, - GFP_DMA); + GFP_KERNEL); if (!htt->txbuf.vaddr) { ath10k_err(ar, "failed to alloc tx buffer\n"); ret = -ENOMEM; goto free_idr_pending_tx; } - if (!ar->hw_params.continuous_frag_desc) - goto skip_frag_desc_alloc; - - size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); - htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, - &htt->frag_desc.paddr, - GFP_DMA); - if (!htt->frag_desc.vaddr) { - ath10k_warn(ar, "failed to alloc fragment desc memory\n"); - ret = -ENOMEM; + ret = ath10k_htt_tx_alloc_cont_frag_desc(htt); + if (ret) { + ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; } -skip_frag_desc_alloc: + ret 
= ath10k_htt_tx_alloc_txq(htt); + if (ret) { + ath10k_err(ar, "failed to alloc txq: %d\n", ret); + goto free_frag_desc; + } + + size = roundup_pow_of_two(htt->max_num_pending_tx); + ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL); + if (ret) { + ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret); + goto free_txq; + } + return 0; +free_txq: + ath10k_htt_tx_free_txq(htt); + +free_frag_desc: + ath10k_htt_tx_free_cont_frag_desc(htt); + free_txbuf: size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr); + free_idr_pending_tx: idr_destroy(&htt->pending_tx); + return ret; } @@ -152,8 +399,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); - tx_done.discard = 1; tx_done.msdu_id = msdu_id; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; ath10k_txrx_tx_unref(htt, &tx_done); @@ -174,12 +421,10 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) htt->txbuf.paddr); } - if (htt->frag_desc.vaddr) { - size = htt->max_num_pending_tx * - sizeof(struct htt_msdu_ext_desc); - dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, - htt->frag_desc.paddr); - } + ath10k_htt_tx_free_txq(htt); + ath10k_htt_tx_free_cont_frag_desc(htt); + WARN_ON(!kfifo_is_empty(&htt->txdone_fifo)); + kfifo_free(&htt->txdone_fifo); } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) @@ -263,12 +508,103 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) return 0; } +#ifdef CONFIG_ATH10K_SNOC +static inline +void ath10k_htt_fill_rx_ring_cfg(struct ath10k_htt *htt, + struct htt_rx_ring_setup_ring *ring) +{ + ring->fw_idx_shadow_reg_paddr_low = + __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); + ring->fw_idx_shadow_reg_paddr_high = 0; + ring->rx_ring_base_paddr_low = __cpu_to_le32(htt->rx_ring.base_paddr); + ring->rx_ring_base_paddr_high = upper_32_bits(htt->rx_ring.base_paddr) & + HTT_WCN3990_PADDR_MASK; +} + +static inline void ath10k_htt_fill_frags(struct htt_data_tx_desc_frag *frags, + struct sk_buff *msdu, + struct ath10k_skb_cb *skb_cb) +{ + frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = upper_32_bits(skb_cb->paddr) & + HTT_WCN3990_PADDR_MASK; + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + frags[1].tword_addr.paddr_lo = 0; + frags[1].tword_addr.paddr_hi = 0; + frags[1].tword_addr.len_16 = 0; +} + +static inline void ath10k_htt_fill_frag_desc(struct ath10k_htt_txbuf *txbuf, + dma_addr_t frags_paddr) +{ + txbuf->cmd_tx.frags_paddr_lo = __cpu_to_le32(frags_paddr); + txbuf->cmd_tx.frags_paddr_hi = upper_32_bits(frags_paddr) & + HTT_WCN3990_PADDR_MASK; +} + +static inline +void ath10k_htt_set_bank_base_addr(struct htt_frag_desc_bank_cfg *cfg, + dma_addr_t paddr) +{ + cfg->bank_base_addrs[0].low = __cpu_to_le32(paddr); + cfg->bank_base_addrs[0].high = upper_32_bits(paddr) & + HTT_WCN3990_PADDR_MASK; +} + +static inline +u32 ath10k_htt_get_paddr_hi(dma_addr_t paddr) +{ + return(upper_32_bits(paddr) & + HTT_WCN3990_PADDR_MASK); +} +#else +static inline void ath10k_htt_fill_frags(struct htt_data_tx_desc_frag *frags, + struct sk_buff *msdu, + struct ath10k_skb_cb *skb_cb) +{ + frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr); + frags[0].dword_addr.len = __cpu_to_le32(msdu->len); + frags[1].dword_addr.paddr = 0; + frags[1].dword_addr.len = 0; +} + +static inline void ath10k_htt_fill_frag_desc(struct ath10k_htt_txbuf *txbuf, + 
dma_addr_t frags_paddr) +{ + txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); +} + +static inline +void ath10k_htt_set_bank_base_addr(struct htt_frag_desc_bank_cfg *cfg, + dma_addr_t paddr) +{ + cfg->bank_base_addrs[0] = __cpu_to_le32(paddr); +} + +static inline +void ath10k_htt_fill_rx_ring_cfg(struct ath10k_htt *htt, + struct htt_rx_ring_setup_ring *ring) +{ + ring->fw_idx_shadow_reg_paddr = + __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); + ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); +} + +static inline +u32 ath10k_htt_get_paddr_hi(dma_addr_t paddr) +{ + return 0; +} +#endif + int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; + struct htt_frag_desc_bank_cfg *cfg; int ret, size; + u8 info; if (!ar->hw_params.continuous_frag_desc) return 0; @@ -286,14 +622,31 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) skb_put(skb, size); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; - cmd->frag_desc_bank_cfg.info = 0; - cmd->frag_desc_bank_cfg.num_banks = 1; - cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); - cmd->frag_desc_bank_cfg.bank_base_addrs[0] = - __cpu_to_le32(htt->frag_desc.paddr); - cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; - cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = - __cpu_to_le16(htt->max_num_pending_tx - 1); + + info = 0; + info |= SM(htt->tx_q_state.type, + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; + + cfg = &cmd->frag_desc_bank_cfg; + cfg->info = info; + cfg->num_banks = 1; + cfg->desc_size = sizeof(struct htt_msdu_ext_desc); + ath10k_htt_set_bank_base_addr(cfg, htt->frag_desc.paddr); + cfg->bank_id[0].bank_min_id = 0; + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - + 1); + + cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { @@ -360,9 +713,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); - ring->fw_idx_shadow_reg_paddr = - __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); - ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); + ath10k_htt_fill_rx_ring_cfg(htt, ring); ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); @@ -439,6 +790,86 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, return 0; } +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records) +{ + struct sk_buff *skb; + struct htt_cmd *cmd; + const u16 resp_id = 0; + int len = 0; + int ret; + + /* Response IDs are echo-ed back only for host driver convienence + * purposes. They aren't used for anything in the driver yet so use 0. 
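/* Sketch for illustration only: the MS()/SM() helpers used throughout
 * these hunks pack and unpack bitfields by pairing a field name with
 * _MASK/_LSB constants. The DEMO_ macros and field below mirror that
 * convention but are not the driver's actual definitions.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define DEMO_SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

#define DEMO_RECORD_INFO_PEER_ID_MASK   0x0fff
#define DEMO_RECORD_INFO_PEER_ID_LSB    0
#define DEMO_RECORD_INFO_TID_MASK       0xf000
#define DEMO_RECORD_INFO_TID_LSB        12

static void demo_bitfields(void)
{
        u16 info;
        u16 peer_id = 42;
        u8 tid = 5;

        /* pack: peer_id into bits 0..11, tid into bits 12..15 */
        info = DEMO_SM(peer_id, DEMO_RECORD_INFO_PEER_ID) |
               DEMO_SM(tid, DEMO_RECORD_INFO_TID);

        /* unpack again */
        peer_id = DEMO_MS(info, DEMO_RECORD_INFO_PEER_ID);
        tid = DEMO_MS(info, DEMO_RECORD_INFO_TID);

        pr_debug("info 0x%04x peer_id %u tid %u\n", info, peer_id, tid);
}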
+ */ + + len += sizeof(cmd->hdr); + len += sizeof(cmd->tx_fetch_resp); + len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records; + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP; + cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id); + cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num; + cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records); + cmd->tx_fetch_resp.token = token; + + memcpy(cmd->tx_fetch_resp.records, records, + sizeof(records[0]) * num_records); + + ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb); + if (ret) { + ath10k_warn(ar, "failed to submit htc command: %d\n", ret); + goto err_free_skb; + } + + return 0; + +err_free_skb: + dev_kfree_skb_any(skb); + + return ret; +} + +static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + struct ath10k_vif *arvif; + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + return ar->scan.vdev_id; + } else if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; + return arvif->vdev_id; + } else if (ar->monitor_started) { + return ar->monitor_vdev_id; + } else { + return 0; + } +} + +static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) + return HTT_DATA_TX_EXT_TID_MGMT; + else if (cb->flags & ATH10K_SKB_F_QOS) + return skb->priority % IEEE80211_QOS_CTL_TID_MASK; + else + return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; +} + int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; @@ -446,25 +877,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); - u8 vdev_id = skb_cb->vdev_id; + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; - bool limit_mgmt_desc = false; - bool is_probe_resp = false; - - if (ar->hw_params.max_probe_resp_desc_thres) { - limit_mgmt_desc = true; - - if (ieee80211_is_probe_resp(hdr->frame_control)) - is_probe_resp = true; - } - - res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); - - if (res) - goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); @@ -473,10 +890,17 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) - goto err_tx_dec; + goto err; msdu_id = res; + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; @@ -503,8 +927,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); - skb_cb->htt.txbuf = NULL; - res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; @@ -519,65 +941,55 @@ err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); 
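/* A small hypothetical sketch of the error-path style seen in the
 * functions above (err_free_skb, err_free_msdu_id, ...): resources are
 * released in reverse order of acquisition, and each failure jumps to
 * the label matching what has already been set up.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_setup(void **a, void **b)
{
        int ret;

        *a = kzalloc(64, GFP_KERNEL);
        if (!*a)
                return -ENOMEM;

        *b = kzalloc(64, GFP_KERNEL);
        if (!*b) {
                ret = -ENOMEM;
                goto err_free_a;        /* only 'a' exists at this point */
        }

        return 0;

err_free_a:
        kfree(*a);
        *a = NULL;

        return ret;
}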
spin_unlock_bh(&htt->tx_lock); -err_tx_dec: - ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); err: return res; } -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) +int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; + struct ath10k_htt_txbuf *txbuf; struct htt_data_tx_desc_frag *frags; - u8 vdev_id = skb_cb->vdev_id; - u8 tid = skb_cb->htt.tid; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; - u32 frags_paddr = 0; + u16 freq = 0; + dma_addr_t frags_paddr = 0; + u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; - bool limit_mgmt_desc = false; - bool is_probe_resp = false; - - if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) && - ar->hw_params.max_probe_resp_desc_thres) { - limit_mgmt_desc = true; - - if (ieee80211_is_probe_resp(hdr->frame_control)) - is_probe_resp = true; - } - - res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); - if (res) - goto err; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) - goto err_tx_dec; + goto err; msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); - skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id]; - skb_cb->htt.txbuf_paddr = htt->txbuf.paddr + - (sizeof(struct ath10k_htt_txbuf) * msdu_id); + txbuf = &htt->txbuf.vaddr[msdu_id]; + txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); - } else if (!skb_cb->htt.nohwcrypt && - skb_cb->txmode == ATH10K_HW_TXRX_RAW && + } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && + txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } @@ -590,7 +1002,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) goto err_free_msdu_id; } - switch (skb_cb->txmode) { + if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; @@ -604,22 +1019,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ext_desc = &htt->frag_desc.vaddr[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); - frags[0].tword_addr.paddr_hi = 0; + frags[0].tword_addr.paddr_hi = + ath10k_htt_get_paddr_hi(skb_cb->paddr); frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { - frags = skb_cb->htt.txbuf->frags; - frags[0].dword_addr.paddr = - __cpu_to_le32(skb_cb->paddr); - frags[0].dword_addr.len = __cpu_to_le32(msdu->len); - frags[1].dword_addr.paddr = 0; - frags[1].dword_addr.len = 0; - - frags_paddr = skb_cb->htt.txbuf_paddr; + frags = txbuf->frags; + ath10k_htt_fill_frags(frags, msdu, skb_cb); + 
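/* Illustrative sketch, not part of the patch: ath10k_htt_fill_frags()
 * above hides the difference between plain 32-bit fragment descriptors
 * and the WCN3990-style split of a wider dma_addr_t into a low word plus
 * a masked high word. DEMO_PADDR_HI_MASK is a stand-in for the real
 * HTT_WCN3990_PADDR_MASK value, whose width is assumed here.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_PADDR_HI_MASK 0x1f        /* assumed high-bit mask width */

static void demo_fill_frag(dma_addr_t paddr, u16 len,
                           __le32 *paddr_lo, __le32 *paddr_hi,
                           __le16 *len_16)
{
        *paddr_lo = cpu_to_le32(lower_32_bits(paddr));
        *paddr_hi = cpu_to_le32(upper_32_bits(paddr) & DEMO_PADDR_HI_MASK);
        *len_16 = cpu_to_le16(len);
}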
frags_paddr = txbuf_paddr; } - flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, @@ -646,17 +1057,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) * avoid extra memory allocations, compress data structures and thus * improve performance. */ - skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; - skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( - sizeof(skb_cb->htt.txbuf->cmd_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_tx) + - prefetch_len); - skb_cb->htt.txbuf->htc_hdr.flags = 0; + txbuf->htc_hdr.eid = htt->eid; + txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx) + + prefetch_len); + txbuf->htc_hdr.flags = 0; - if (skb_cb->htt.nohwcrypt) - flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; - - if (!skb_cb->is_protected) + if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); @@ -665,8 +1072,14 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; - if (ar->hw_params.continuous_frag_desc) + if (ar->hw_params.continuous_frag_desc) { ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; + if (QCA_REV_WCN3990(ar)) { + memset(ext_desc->tso_flag, 0, + sizeof(ext_desc->tso_flag)); + ext_desc->tso_flag[3] |= HTT_TX_CHECKSUM_ENABLE; + } + } } /* Prevent firmware from sending up tx inspection requests. There's @@ -675,20 +1088,29 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; - skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; - skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; - skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); - skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); - skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); - skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); - skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); - skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); + txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + txbuf->cmd_tx.flags0 = flags0; + txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); + txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); + txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); + + /* fill fragment descriptor */ + ath10k_htt_fill_frag_desc(txbuf, frags_paddr); + if (ath10k_mac_tx_frm_has_freq(ar)) { + txbuf->cmd_tx.offchan_tx.peerid = + __cpu_to_le16(HTT_INVALID_PEERID); + txbuf->cmd_tx.offchan_tx.freq = + __cpu_to_le16(freq); + } else { + txbuf->cmd_tx.peerid = + __cpu_to_le32(HTT_INVALID_PEERID); + } trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", - flags0, flags1, msdu->len, msdu_id, frags_paddr, - (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); + "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n", + flags0, flags1, msdu->len, msdu_id, &frags_paddr, + &skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); @@ -696,12 +1118,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, 
struct sk_buff *msdu) sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; - sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; - sg_items[0].paddr = skb_cb->htt.txbuf_paddr + - sizeof(skb_cb->htt.txbuf->frags); - sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_hdr) + - sizeof(skb_cb->htt.txbuf->cmd_tx); + sg_items[0].vaddr = &txbuf->htc_hdr; + sg_items[0].paddr = txbuf_paddr + + sizeof(txbuf->frags); + sg_items[0].len = sizeof(txbuf->htc_hdr) + + sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; @@ -720,11 +1142,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: - spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); -err_tx_dec: - ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); err: return res; } diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 12d6549e45a1..5b7faab1cf02 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015, 2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,7 +19,6 @@ #include "hw.h" const struct ath10k_hw_regs qca988x_regs = { - .rtc_state_cold_reset_mask = 0x00000400, .rtc_soc_base_address = 0x00004000, .rtc_wmac_base_address = 0x00005000, .soc_core_base_address = 0x00009000, @@ -46,7 +45,6 @@ const struct ath10k_hw_regs qca988x_regs = { }; const struct ath10k_hw_regs qca6174_regs = { - .rtc_state_cold_reset_mask = 0x00002000, .rtc_soc_base_address = 0x00000800, .rtc_wmac_base_address = 0x00001000, .soc_core_base_address = 0x0003a000, @@ -73,7 +71,6 @@ const struct ath10k_hw_regs qca6174_regs = { }; const struct ath10k_hw_regs qca99x0_regs = { - .rtc_state_cold_reset_mask = 0x00000400, .rtc_soc_base_address = 0x00080000, .rtc_wmac_base_address = 0x00000000, .soc_core_base_address = 0x00082000, @@ -88,7 +85,7 @@ const struct ath10k_hw_regs qca99x0_regs = { .ce7_base_address = 0x0004bc00, /* Note: qca99x0 supports upto 12 Copy Engines. Other than address of * CE0 and CE1 no other copy engine is directly referred in the code. - * It is not really neccessary to assign address for newly supported + * It is not really necessary to assign address for newly supported * CEs in this address table. * Copy Engine Address * CE8 0x0004c000 @@ -109,7 +106,368 @@ const struct ath10k_hw_regs qca99x0_regs = { .pcie_intr_clr_address = 0x00000010, }; +const struct ath10k_hw_regs qca4019_regs = { + .rtc_soc_base_address = 0x00080000, + .soc_core_base_address = 0x00082000, + .ce_wrapper_base_address = 0x0004d000, + .ce0_base_address = 0x0004a000, + .ce1_base_address = 0x0004a400, + .ce2_base_address = 0x0004a800, + .ce3_base_address = 0x0004ac00, + .ce4_base_address = 0x0004b000, + .ce5_base_address = 0x0004b400, + .ce6_base_address = 0x0004b800, + .ce7_base_address = 0x0004bc00, + /* qca4019 supports upto 12 copy engines. Since base address + * of ce8 to ce11 are not directly referred in the code, + * no need have them in separate members in this table. 
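/* Sketch for illustration (helper name is hypothetical): the per-copy-
 * engine register blocks sit at a fixed stride from CE0 -- 0x400 apart
 * in the qca99x0/qca4019 tables, 0x1000 apart in the wcn3990 table --
 * which is why CE8-CE11 need no explicit members; their addresses follow
 * from the stride.
 */
#include <linux/types.h>

static u32 demo_ce_base_address(u32 ce0_base, u32 stride, unsigned int ce_id)
{
        return ce0_base + ce_id * stride;
}

/* e.g. demo_ce_base_address(0x0004a000, 0x400, 8) == 0x0004c000 and
 * demo_ce_base_address(0x0004a000, 0x400, 11) == 0x0004cc00, matching
 * the CE8/CE11 addresses listed in the qca4019 comment.
 */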
+ * Copy Engine Address + * CE8 0x0004c000 + * CE9 0x0004c400 + * CE10 0x0004c800 + * CE11 0x0004cc00 + */ + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .soc_chip_id_address = 0x000000ec, + .fw_indicator_address = 0x0004f00c, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, + .pcie_intr_ce_mask_all = 0x000fff00, + .pcie_intr_clr_address = 0x00000010, +}; + +const struct ath10k_hw_regs wcn3990_regs = { + .rtc_soc_base_address = 0x00000000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00000000, + .ce_wrapper_base_address = 0x0024C000, + .soc_global_reset_address = 0x00000008, + .ce0_base_address = 0x00240000, + .ce1_base_address = 0x00241000, + .ce2_base_address = 0x00242000, + .ce3_base_address = 0x00243000, + .ce4_base_address = 0x00244000, + .ce5_base_address = 0x00245000, + .ce6_base_address = 0x00246000, + .ce7_base_address = 0x00247000, + .ce8_base_address = 0x00248000, + .ce9_base_address = 0x00249000, + .ce10_base_address = 0x0024A000, + .ce11_base_address = 0x0024B000, + .soc_chip_id_address = 0x000000f0, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, +}; + +static unsigned int +ath10k_set_ring_byte(unsigned int offset, + struct ath10k_hw_ce_regs_addr_map *addr_map) +{ + return (((0 | (offset)) << addr_map->lsb) & addr_map->mask); +} + +static unsigned int +ath10k_get_ring_byte(unsigned int offset, + struct ath10k_hw_ce_regs_addr_map *addr_map) +{ + return (((offset) & addr_map->mask) >> (addr_map->lsb)); +} + +struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = 0x00020000, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { + .msb = 0x00000012, + .lsb = 0x00000012, + .mask = 0x00040000, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { + .addr = 0x00000018, + .src_ring = &wcn3990_src_ring, + .dst_ring = &wcn3990_dst_ring, + .dmax = &wcn3990_dmax, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { + .mask = 0x00000001, +}; + +struct ath10k_hw_ce_host_ie wcn3990_host_ie = { + .copy_complete = &wcn3990_host_ie_cc, +}; + +struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { + .axi_err = 0x00000100, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000003E0, + .addr = 0x00000038, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { + .msb = 0x00000000, + .lsb = 0x00000010, + .mask = 0xffff0000, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = 
&ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_src_wm_low, + .wm_high = &wcn3990_src_wm_high, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { + .lsb = 0x00000010, + .mask = 0xffff0000, + .set = &ath10k_set_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_dst_wm_low, + .wm_high = &wcn3990_dst_wm_high, +}; + +static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { + .shift = 19, + .mask = 0x00080000, + .enable = 0x00000000, +}; + +struct ath10k_hw_ce_regs wcn3990_ce_regs = { + .sr_base_addr = 0x00000000, + .sr_size_addr = 0x00000008, + .dr_base_addr = 0x0000000c, + .dr_size_addr = 0x00000014, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .ddr_addr_for_rri_low = 0x00000004, + .ddr_addr_for_rri_high = 0x00000008, + .ce_rri_low = 0x0024C004, + .ce_rri_high = 0x0024C008, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &wcn3990_ctrl1, + .host_ie = &wcn3990_host_ie, + .wm_regs = &wcn3990_wm_reg, + .misc_regs = &wcn3990_misc_reg, + .wm_srcr = &wcn3990_wm_src_ring, + .wm_dstr = &wcn3990_wm_dst_ring, + .upd = &wcn3990_ctrl1_upd, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = 0x00010000, + .set = &ath10k_set_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = { + .msb = 0x00000011, + .lsb = 0x00000011, + .mask = 0x00020000, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_dmax = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = { + .addr = 0x00000010, + .hw_mask = 0x0007ffff, + .sw_mask = 0x0007ffff, + .hw_wr_mask = 0x00000000, + .sw_wr_mask = 0x0007ffff, + .reset_mask = 0xffffffff, + .reset = 0x00000080, + .src_ring = &qcax_src_ring, + .dst_ring = &qcax_dst_ring, + .dmax = &qcax_dmax, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = { + .msb = 0x00000003, + .lsb = 0x00000003, + .mask = 0x00000008, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = { + .msb = 0x00000000, + .mask = 0x00000001, + .status_reset = 0x00000000, + .status = &qcax_cmd_halt_status, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = 0x00000001, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_host_ie qcax_host_ie = { + .copy_complete_reset = 0x00000000, + .copy_complete = &qcax_host_ie_cc, +}; + +struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +struct ath10k_hw_ce_misc_regs qcax_misc_reg = { + .axi_err = 0x00000400, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + 
.dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000007E0, + .addr = 0x00000038, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = { + .msb = 0x0000001f, + .lsb = 0x00000010, + .mask = 0xffff0000, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &qcax_src_wm_low, + .wm_high = &qcax_src_wm_high, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = { + .lsb = 0x00000010, + .mask = 0xffff0000, + .set = &ath10k_set_ring_byte, +}; + +struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = 0x0000ffff, + .set = &ath10k_set_ring_byte, + .get = &ath10k_get_ring_byte, +}; + +struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &qcax_dst_wm_low, + .wm_high = &qcax_dst_wm_high, +}; + +struct ath10k_hw_ce_regs qcax_ce_regs = { + .sr_base_addr = 0x00000000, + .sr_size_addr = 0x00000004, + .dr_base_addr = 0x00000008, + .dr_size_addr = 0x0000000c, + .ce_cmd_addr = 0x00000018, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &qcax_ctrl1, + .cmd_halt = &qcax_cmd_halt, + .host_ie = &qcax_host_ie, + .wm_regs = &qcax_wm_reg, + .misc_regs = &qcax_misc_reg, + .wm_srcr = &qcax_wm_src_ring, + .wm_dstr = &qcax_wm_dst_ring, +}; + const struct ath10k_hw_values qca988x_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 3, .ce_count = 8, .msi_assign_ce_max = 7, @@ -119,6 +477,7 @@ const struct ath10k_hw_values qca988x_values = { }; const struct ath10k_hw_values qca6174_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 3, .ce_count = 8, .msi_assign_ce_max = 7, @@ -128,6 +487,7 @@ const struct ath10k_hw_values qca6174_values = { }; const struct ath10k_hw_values qca99x0_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 7, .ce_count = 12, .msi_assign_ce_max = 12, @@ -136,22 +496,143 @@ const struct ath10k_hw_values qca99x0_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_values qca9888_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, + .rtc_state_val_on = 3, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + +const struct ath10k_hw_values qca4019_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, + .ce_count = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + +const struct ath10k_hw_values wcn3990_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND, + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 12, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, + .default_listen_interval = 1, +}; + +struct fw_flag wcn3990_fw_flags = { + .flags = 0x82E, +}; + +struct ath10k_shadow_reg_value 
wcn3990_shadow_reg_value = { + .shadow_reg_value_0 = 0x00032000, + .shadow_reg_value_1 = 0x00032004, + .shadow_reg_value_2 = 0x00032008, + .shadow_reg_value_3 = 0x0003200C, + .shadow_reg_value_4 = 0x00032010, + .shadow_reg_value_5 = 0x00032014, + .shadow_reg_value_6 = 0x00032018, + .shadow_reg_value_7 = 0x0003201C, + .shadow_reg_value_8 = 0x00032020, + .shadow_reg_value_9 = 0x00032024, + .shadow_reg_value_10 = 0x00032028, + .shadow_reg_value_11 = 0x0003202C, + .shadow_reg_value_12 = 0x00032030, + .shadow_reg_value_13 = 0x00032034, + .shadow_reg_value_14 = 0x00032038, + .shadow_reg_value_15 = 0x0003203C, + .shadow_reg_value_16 = 0x00032040, + .shadow_reg_value_17 = 0x00032044, + .shadow_reg_value_18 = 0x00032048, + .shadow_reg_value_19 = 0x0003204C, + .shadow_reg_value_20 = 0x00032050, + .shadow_reg_value_21 = 0x00032054, + .shadow_reg_value_22 = 0x00032058, + .shadow_reg_value_23 = 0x0003205C +}; + +struct ath10k_shadow_reg_address wcn3990_shadow_reg_address = { + .shadow_reg_address_0 = 0x00030020, + .shadow_reg_address_1 = 0x00030024, + .shadow_reg_address_2 = 0x00030028, + .shadow_reg_address_3 = 0x0003002C, + .shadow_reg_address_4 = 0x00030030, + .shadow_reg_address_5 = 0x00030034, + .shadow_reg_address_6 = 0x00030038, + .shadow_reg_address_7 = 0x0003003C, + .shadow_reg_address_8 = 0x00030040, + .shadow_reg_address_9 = 0x00030044, + .shadow_reg_address_10 = 0x00030048, + .shadow_reg_address_11 = 0x0003004C, + .shadow_reg_address_12 = 0x00030050, + .shadow_reg_address_13 = 0x00030054, + .shadow_reg_address_14 = 0x00030058, + .shadow_reg_address_15 = 0x0003005C, + .shadow_reg_address_16 = 0x00030060, + .shadow_reg_address_17 = 0x00030064, + .shadow_reg_address_18 = 0x00030068, + .shadow_reg_address_19 = 0x0003006C, + .shadow_reg_address_20 = 0x00030070, + .shadow_reg_address_21 = 0x00030074, + .shadow_reg_address_22 = 0x00030078, + .shadow_reg_address_23 = 0x0003007C +}; + void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) { u32 cc_fix = 0; + u32 rcc_fix = 0; + enum ath10k_hw_cc_wraparound_type wraparound_type; survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; - if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) { - cc_fix = 0x7fffffff; - survey->filled &= ~SURVEY_INFO_TIME_BUSY; + wraparound_type = ar->hw_params.cc_wraparound_type; + + if (cc < cc_prev || rcc < rcc_prev) { + switch (wraparound_type) { + case ATH10K_HW_CC_WRAP_SHIFTED_ALL: + if (cc < cc_prev) { + cc_fix = 0x7fffffff; + survey->filled &= ~SURVEY_INFO_TIME_BUSY; + } + break; + case ATH10K_HW_CC_WRAP_SHIFTED_EACH: + if (cc < cc_prev) + cc_fix = 0x7fffffff; + + if (rcc < rcc_prev) + rcc_fix = 0x7fffffff; + break; + case ATH10K_HW_CC_WRAP_DISABLED: + break; + } } cc -= cc_prev - cc_fix; - rcc -= rcc_prev; + rcc -= rcc_prev - rcc_fix; survey->time = CCNT_TO_MSEC(ar, cc); survey->time_busy = CCNT_TO_MSEC(ar, rcc); } + +const struct ath10k_hw_ops qca988x_ops = { +}; + +static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd) +{ + return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1), + RX_MSDU_END_INFO1_L3_HDR_PAD); +} + +const struct ath10k_hw_ops qca99x0_ops = { + .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, +}; + +const struct ath10k_hw_ops wcn3990_ops = {0}; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 8ec5c579d7fa..811895bb2e19 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,6 +1,6 @@ /* * 
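The reworked ath10k_hw_fill_survey_time() above corrects for chips whose cycle counters restart from 0x7fffffff instead of 0 after an overflow (the SHIFTED_ALL and SHIFTED_EACH wraparound types). The following is a standalone sketch of that correction, not taken from the patch, just to spell out the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* On the affected chips an overflowing counter restarts from 0x7fffffff
 * rather than 0, so when the current sample is below the previous one the
 * previous sample is shifted down by that offset before subtracting; this
 * mirrors the driver's "cc -= cc_prev - cc_fix" step.
 */
static uint32_t cc_delta(uint32_t cur, uint32_t prev)
{
	uint32_t fix = 0;

	if (cur < prev)		/* counter wrapped since the last sample */
		fix = 0x7fffffff;

	return cur - (prev - fix);
}

int main(void)
{
	/* no wraparound: plain difference, prints 4000 */
	printf("%lu\n", (unsigned long)cc_delta(5000, 1000));

	/* wrapped: prev was near the top and the counter restarted at
	 * 0x7fffffff; roughly 0x2000 ticks really elapsed, prints 0x1fff
	 */
	printf("%#lx\n", (unsigned long)cc_delta(0x80001000, 0xfffff000));

	return 0;
}

For ATH10K_HW_CC_WRAP_SHIFTED_EACH the same fix-up is applied independently to the Rx clear count (rcc_fix), which is why the patch can keep SURVEY_INFO_TIME_BUSY valid in that case instead of clearing it as the SHIFTED_ALL path does.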
Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2013,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -26,7 +26,10 @@ #define QCA6164_2_1_DEVICE_ID (0x0041) #define QCA6174_2_1_DEVICE_ID (0x003e) #define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9888_2_0_DEVICE_ID (0x0056) +#define QCA9984_1_0_DEVICE_ID (0x0046) #define QCA9377_1_0_DEVICE_ID (0x0042) +#define QCA9887_1_0_DEVICE_ID (0x0050) /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 @@ -35,11 +38,16 @@ #define QCA988X_HW_2_0_VERSION 0x4100016c #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" -#define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9887 1.0 definitions */ +#define QCA9887_HW_1_0_VERSION 0x4100016d +#define QCA9887_HW_1_0_CHIP_ID_REV 0 +#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0" +#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234 + /* QCA6174 target BMI version signatures */ #define QCA6174_HW_1_0_VERSION 0x05000000 #define QCA6174_HW_1_1_VERSION 0x05000001 @@ -76,14 +84,10 @@ enum qca9377_chip_id_rev { }; #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" -#define QCA6174_HW_2_1_FW_FILE "firmware.bin" -#define QCA6174_HW_2_1_OTP_FILE "otp.bin" #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 #define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0" -#define QCA6174_HW_3_0_FW_FILE "firmware.bin" -#define QCA6174_HW_3_0_OTP_FILE "otp.bin" #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 @@ -94,18 +98,40 @@ enum qca9377_chip_id_rev { #define QCA99X0_HW_2_0_DEV_VERSION 0x01000000 #define QCA99X0_HW_2_0_CHIP_ID_REV 0x1 #define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0" -#define QCA99X0_HW_2_0_FW_FILE "firmware.bin" -#define QCA99X0_HW_2_0_OTP_FILE "otp.bin" #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9984 1.0 defines */ +#define QCA9984_HW_1_0_DEV_VERSION 0x1000000 +#define QCA9984_HW_DEV_TYPE 0xa +#define QCA9984_HW_1_0_CHIP_ID_REV 0x0 +#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" +#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA9888 2.0 defines */ +#define QCA9888_HW_2_0_DEV_VERSION 0x1000000 +#define QCA9888_HW_DEV_TYPE 0xc +#define QCA9888_HW_2_0_CHIP_ID_REV 0x0 +#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0" +#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin" +#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234 + /* QCA9377 1.0 definitions */ #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" -#define QCA9377_HW_1_0_FW_FILE "firmware.bin" -#define QCA9377_HW_1_0_OTP_FILE "otp.bin" #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 +/* QCA4019 1.0 definitions */ +#define QCA4019_HW_1_0_DEV_VERSION 0x01000000 +#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" +#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* WCN3990 1.0 definitions */ +#define WCN3990_HW_1_0_DEV_VERSION 
ATH10K_HW_WCN3990 +#define WCN3990_HW_1_0_FW_DIR "/etc/firmware" + #define ATH10K_FW_API2_FILE "firmware-2.bin" #define ATH10K_FW_API3_FILE "firmware-3.bin" @@ -126,8 +152,6 @@ enum qca9377_chip_id_rev { #define REG_DUMP_COUNT_QCA988X 60 -#define QCA988X_CAL_DATA_LEN 2116 - struct ath10k_fw_ie { __le32 id; __le32 len; @@ -199,14 +223,19 @@ enum ath10k_hw_rev { ATH10K_HW_QCA988X, ATH10K_HW_QCA6174, ATH10K_HW_QCA99X0, + ATH10K_HW_QCA9888, + ATH10K_HW_QCA9984, ATH10K_HW_QCA9377, + ATH10K_HW_QCA4019, + ATH10K_HW_QCA9887, + ATH10K_HW_WCN3990, }; struct ath10k_hw_regs { - u32 rtc_state_cold_reset_mask; u32 rtc_soc_base_address; u32 rtc_wmac_base_address; u32 soc_core_base_address; + u32 soc_global_reset_address; u32 ce_wrapper_base_address; u32 ce0_base_address; u32 ce1_base_address; @@ -216,6 +245,10 @@ struct ath10k_hw_regs { u32 ce5_base_address; u32 ce6_base_address; u32 ce7_base_address; + u32 ce8_base_address; + u32 ce9_base_address; + u32 ce10_base_address; + u32 ce11_base_address; u32 soc_reset_control_si0_rst_mask; u32 soc_reset_control_ce_rst_mask; u32 soc_chip_id_address; @@ -232,29 +265,142 @@ struct ath10k_hw_regs { extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; extern const struct ath10k_hw_regs qca99x0_regs; +extern const struct ath10k_hw_regs qca4019_regs; +extern const struct ath10k_hw_regs wcn3990_regs; + +struct ath10k_hw_ce_regs_addr_map { + u32 msb; + u32 lsb; + u32 mask; + unsigned int (*set)(unsigned int offset, + struct ath10k_hw_ce_regs_addr_map *addr_map); + unsigned int (*get)(unsigned int offset, + struct ath10k_hw_ce_regs_addr_map *addr_map); +}; + +struct ath10k_hw_ce_ctrl1 { + u32 addr; + u32 hw_mask; + u32 sw_mask; + u32 hw_wr_mask; + u32 sw_wr_mask; + u32 reset_mask; + u32 reset; + struct ath10k_hw_ce_regs_addr_map *src_ring; + struct ath10k_hw_ce_regs_addr_map *dst_ring; + struct ath10k_hw_ce_regs_addr_map *dmax; +}; + +struct ath10k_hw_ce_cmd_halt { + u32 status_reset; + u32 msb; + u32 mask; + struct ath10k_hw_ce_regs_addr_map *status; +}; + +struct ath10k_hw_ce_host_ie { + u32 copy_complete_reset; + struct ath10k_hw_ce_regs_addr_map *copy_complete; +}; + +struct ath10k_hw_ce_host_wm_regs { + u32 dstr_lmask; + u32 dstr_hmask; + u32 srcr_lmask; + u32 srcr_hmask; + u32 cc_mask; + u32 wm_mask; + u32 addr; +}; + +struct ath10k_hw_ce_misc_regs { + u32 axi_err; + u32 dstr_add_err; + u32 srcr_len_err; + u32 dstr_mlen_vio; + u32 dstr_overflow; + u32 srcr_overflow; + u32 err_mask; + u32 addr; +}; + +struct ath10k_hw_ce_dst_src_wm_regs { + u32 addr; + u32 low_rst; + u32 high_rst; + struct ath10k_hw_ce_regs_addr_map *wm_low; + struct ath10k_hw_ce_regs_addr_map *wm_high; +}; + +struct ath10k_hw_ce_ctrl1_upd { + u32 shift; + u32 mask; + u32 enable; +}; + +struct ath10k_hw_ce_regs { + u32 sr_base_addr; + u32 sr_size_addr; + u32 dr_base_addr; + u32 dr_size_addr; + u32 ce_cmd_addr; + u32 misc_ie_addr; + u32 sr_wr_index_addr; + u32 dst_wr_index_addr; + u32 current_srri_addr; + u32 current_drri_addr; + u32 ddr_addr_for_rri_low; + u32 ddr_addr_for_rri_high; + u32 ce_rri_low; + u32 ce_rri_high; + u32 host_ie_addr; + struct ath10k_hw_ce_host_wm_regs *wm_regs; + struct ath10k_hw_ce_misc_regs *misc_regs; + struct ath10k_hw_ce_ctrl1 *ctrl1_regs; + struct ath10k_hw_ce_cmd_halt *cmd_halt; + struct ath10k_hw_ce_host_ie *host_ie; + struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; + struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + struct ath10k_hw_ce_ctrl1_upd *upd; +}; + +extern struct ath10k_hw_ce_regs wcn3990_ce_regs; +extern struct 
ath10k_hw_ce_regs qcax_ce_regs; + +extern struct fw_flag wcn3990_fw_flags; struct ath10k_hw_values { + u32 pdev_suspend_option; u32 rtc_state_val_on; u8 ce_count; u8 msi_assign_ce_max; u8 num_target_ce_config_wlan; u16 ce_desc_meta_data_mask; u8 ce_desc_meta_data_lsb; + u8 default_listen_interval; }; extern const struct ath10k_hw_values qca988x_values; extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; +extern const struct ath10k_hw_values qca9888_values; +extern const struct ath10k_hw_values qca4019_values; +extern const struct ath10k_hw_values wcn3990_values; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) +#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887) #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) +#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888) +#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) +#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) +#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990) -/* Known pecularities: +/* Known peculiarities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap * - raw have FCS, nwifi doesn't * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher @@ -277,15 +423,6 @@ enum ath10k_mcast2ucast_mode { ATH10K_MCAST2UCAST_ENABLED = 1, }; -struct ath10k_pktlog_hdr { - __le16 flags; - __le16 missed_cnt; - __le16 log_type; - __le16 size; - __le32 timestamp; - u8 payload[0]; -} __packed; - enum ath10k_hw_rate_ofdm { ATH10K_HW_RATE_OFDM_48M = 0, ATH10K_HW_RATE_OFDM_24M, @@ -307,6 +444,111 @@ enum ath10k_hw_rate_cck { ATH10K_HW_RATE_CCK_SP_2M, }; +enum ath10k_hw_rate_rev2_cck { + ATH10K_HW_RATE_REV2_CCK_LP_1M = 1, + ATH10K_HW_RATE_REV2_CCK_LP_2M, + ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + ATH10K_HW_RATE_REV2_CCK_LP_11M, + ATH10K_HW_RATE_REV2_CCK_SP_2M, + ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + ATH10K_HW_RATE_REV2_CCK_SP_11M, +}; + +enum ath10k_hw_cc_wraparound_type { + ATH10K_HW_CC_WRAP_DISABLED = 0, + + /* This type is when the HW chip has a quirky Cycle Counter + * wraparound which resets to 0x7fffffff instead of 0. All + * other CC related counters (e.g. Rx Clear Count) are divided + * by 2 so they never wraparound themselves. + */ + ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1, + + /* Each hw counter wrapsaround independently. When the + * counter overflows the repestive counter is right shifted + * by 1, i.e reset to 0x7fffffff, and other counters will be + * running unaffected. In this type of wraparound, it should + * be possible to report accurate Rx busy time unlike the + * first type. + */ + ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2, +}; + +struct ath10k_hw_params { + u32 id; + u16 dev_id; + const char *name; + u32 patch_load_addr; + int uart_pin; + u32 otp_exe_param; + + /* Type of hw cycle counter wraparound logic, for more info + * refer enum ath10k_hw_cc_wraparound_type. + */ + enum ath10k_hw_cc_wraparound_type cc_wraparound_type; + + /* Some of chip expects fragment descriptor to be continuous + * memory for any TX operation. Set continuous_frag_desc flag + * for the hardware which have such requirement. + */ + bool continuous_frag_desc; + + /* CCK hardware rate table mapping for the newer chipsets + * like QCA99X0, QCA4019 got revised. 
The CCK h/w rate values + * are in a proper order with respect to the rate/preamble + */ + bool cck_rate_map_rev2; + + u32 channel_counters_freq_hz; + + /* Mgmt tx descriptors threshold for limiting probe response + * frames. + */ + u32 max_probe_resp_desc_thres; + + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 max_spatial_stream; + u32 cal_data_len; + + struct ath10k_hw_params_fw { + const char *dir; + const char *board; + size_t board_size; + size_t board_ext_size; + } fw; + + /* qca99x0 family chips deliver broadcast/multicast management + * frames encrypted and expect software do decryption. + */ + bool sw_decrypt_mcast_mgmt; + + const struct ath10k_hw_ops *hw_ops; + + /* Number of bytes used for alignment in rx_hdr_status of rx desc. */ + int decap_align_bytes; +}; + +struct htt_rx_desc; + +/* Defines needed for Rx descriptor abstraction */ +struct ath10k_hw_ops { + int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); +}; + +extern const struct ath10k_hw_ops qca988x_ops; +extern const struct ath10k_hw_ops qca99x0_ops; +extern const struct ath10k_hw_ops wcn3990_ops; + +static inline int +ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, + struct htt_rx_desc *rxd) +{ + if (hw->hw_ops->rx_desc_get_l3_pad_bytes) + return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd); + return 0; +} + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -348,14 +590,19 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_MAC_AGGR_DELIM 0 #define TARGET_10X_AST_SKID_LIMIT 128 #define TARGET_10X_NUM_STATIONS 128 +#define TARGET_10X_TX_STATS_NUM_STATIONS 118 #define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) #define TARGET_10X_NUM_OFFLOAD_PEERS 0 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_10X_NUM_PEER_KEYS 2 #define TARGET_10X_NUM_TIDS_MAX 256 #define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ (TARGET_10X_NUM_PEERS) * 2) +#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_TX_STATS_NUM_PEERS) * 2) #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 @@ -385,6 +632,14 @@ enum ath10k_hw_rate_cck { #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) #define TARGET_TLV_NUM_WOW_PATTERNS 22 +/* FW supports max 50 outstanding mgmt cmds */ +#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50) + +/* Target specific defines for WMI-HL-1.0 firmware */ +#define TARGET_HL_10_TLV_NUM_PEERS 14 +#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6 +#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2 +#define TARGET_HL_1_0_NUM_MSDU_DESC (3600) /* Diagnostic Window */ #define CE_DIAG_PIPE 7 @@ -400,15 +655,14 @@ enum ath10k_hw_rate_cck { #define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 #define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35 #define TARGET_10_4_NUM_OFFLOAD_PEERS 0 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 #define TARGET_10_4_NUM_PEER_KEYS 2 #define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) +#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500 #define TARGET_10_4_AST_SKID_LIMIT 32 -#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \ - BIT(2) | BIT(3)) -#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \ - BIT(2) | BIT(3)) /* 100 ms for 
video, best-effort, and background */ #define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 @@ -434,7 +688,6 @@ enum ath10k_hw_rate_cck { #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 #define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 -#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) #define TARGET_10_4_11AC_TX_MAX_FRAGS 2 #define TARGET_10_4_MAX_PEER_EXT_STATS 16 #define TARGET_10_4_SMART_ANT_CAP 0 @@ -468,7 +721,6 @@ enum ath10k_hw_rate_cck { /* as of IP3.7.1 */ #define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on -#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask #define RTC_STATE_V_LSB 0 #define RTC_STATE_V_MASK 0x00000007 #define RTC_STATE_ADDRESS 0x0000 @@ -531,7 +783,10 @@ enum ath10k_hw_rate_cck { #define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 #define WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define WLAN_GPIO_PIN0_CONFIG_LSB 11 #define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5 +#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060 #define WLAN_GPIO_PIN1_ADDRESS 0x0000002c #define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 #define WLAN_GPIO_PIN10_ADDRESS 0x00000050 @@ -544,6 +799,8 @@ enum ath10k_hw_rate_cck { #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 #define SI_CONFIG_OFFSET 0x00000000 +#define SI_CONFIG_ERR_INT_LSB 19 +#define SI_CONFIG_ERR_INT_MASK 0x00080000 #define SI_CONFIG_BIDIR_OD_DATA_LSB 18 #define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 #define SI_CONFIG_I2C_LSB 16 @@ -557,7 +814,9 @@ enum ath10k_hw_rate_cck { #define SI_CONFIG_DIVIDER_LSB 0 #define SI_CONFIG_DIVIDER_MASK 0x0000000f #define SI_CS_OFFSET 0x00000004 +#define SI_CS_DONE_ERR_LSB 10 #define SI_CS_DONE_ERR_MASK 0x00000400 +#define SI_CS_DONE_INT_LSB 9 #define SI_CS_DONE_INT_MASK 0x00000200 #define SI_CS_START_LSB 8 #define SI_CS_START_MASK 0x00000100 @@ -586,6 +845,7 @@ enum ath10k_hw_rate_cck { #define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address #define FW_IND_EVENT_PENDING 1 #define FW_IND_INITIALIZED 2 +#define FW_IND_HOST_READY 0x80000000 /* HOST_REG interrupt from firmware */ #define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask @@ -607,7 +867,10 @@ enum ath10k_hw_rate_cck { #define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS #define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS #define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB #define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK #define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK #define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS #define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS @@ -662,6 +925,108 @@ enum ath10k_hw_rate_cck { #define WINDOW_READ_ADDR_ADDRESS MISSING #define WINDOW_WRITE_ADDR_ADDRESS MISSING +#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5 +#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3 +#define QCA9887_1_0_SI_CLK_GPIO_PIN 17 +#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3 +#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010 + +#define QCA9887_EEPROM_SELECT_READ 0xa10000a0 +#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00 +#define QCA9887_EEPROM_ADDR_HI_LSB 8 +#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000 +#define QCA9887_EEPROM_ADDR_LO_LSB 16 + #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +struct ath10k_shadow_reg_value { + u32 shadow_reg_value_0; + u32 shadow_reg_value_1; + u32 shadow_reg_value_2; + u32 shadow_reg_value_3; + u32 shadow_reg_value_4; + u32 shadow_reg_value_5; 
+ u32 shadow_reg_value_6; + u32 shadow_reg_value_7; + u32 shadow_reg_value_8; + u32 shadow_reg_value_9; + u32 shadow_reg_value_10; + u32 shadow_reg_value_11; + u32 shadow_reg_value_12; + u32 shadow_reg_value_13; + u32 shadow_reg_value_14; + u32 shadow_reg_value_15; + u32 shadow_reg_value_16; + u32 shadow_reg_value_17; + u32 shadow_reg_value_18; + u32 shadow_reg_value_19; + u32 shadow_reg_value_20; + u32 shadow_reg_value_21; + u32 shadow_reg_value_22; + u32 shadow_reg_value_23; +}; + +struct ath10k_shadow_reg_address { + u32 shadow_reg_address_0; + u32 shadow_reg_address_1; + u32 shadow_reg_address_2; + u32 shadow_reg_address_3; + u32 shadow_reg_address_4; + u32 shadow_reg_address_5; + u32 shadow_reg_address_6; + u32 shadow_reg_address_7; + u32 shadow_reg_address_8; + u32 shadow_reg_address_9; + u32 shadow_reg_address_10; + u32 shadow_reg_address_11; + u32 shadow_reg_address_12; + u32 shadow_reg_address_13; + u32 shadow_reg_address_14; + u32 shadow_reg_address_15; + u32 shadow_reg_address_16; + u32 shadow_reg_address_17; + u32 shadow_reg_address_18; + u32 shadow_reg_address_19; + u32 shadow_reg_address_20; + u32 shadow_reg_address_21; + u32 shadow_reg_address_22; + u32 shadow_reg_address_23; +}; + +extern struct ath10k_shadow_reg_value wcn3990_shadow_reg_value; +extern struct ath10k_shadow_reg_address wcn3990_shadow_reg_address; + +#define ATH10K_PKTLOG_HDR_SIZE_16 0x8000 + +enum { + ATH10k_PKTLOG_FLG_FRM_TYPE_LOCAL_S = 0, + ATH10K_PKTLOG_FLG_FRM_TYPE_REMOTE_S, + ATH10K_PKTLOG_FLG_FRM_TYPE_CLONE_S, + ATH10K_PKTLOG_FLG_FRM_TYPE_CBF_S, + ATH10K_PKTLOG_FLG_FRM_TYPE_UNKNOWN_S +}; + +enum ath10k_pktlog_type { + ATH10K_PKTLOG_TYPE_TX_CTRL = 1, + ATH10K_PKTLOG_TYPE_TX_STAT, + ATH10K_PKTLOG_TYPE_TX_MSDU_ID, + ATH10K_PKTLOG_TYPE_TX_FRM_HDR, + ATH10K_PKTLOG_TYPE_RX_STAT, + ATH10K_PKTLOG_TYPE_RC_FIND, + ATH10K_PKTLOG_TYPE_RC_UPDATE, + ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR, + ATH10K_PKTLOG_TYPE_DBG_PRINT, + ATH10K_PKTLOG_TYPE_SW_EVENT, + ATH10K_PKTLOG_TYPE_MAX, +}; + +struct ath10k_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; + __le16 size; + __le32 timestamp; + u8 payload[0]; +} __packed; + #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 7993ca956ede..60cc2b6b8be1 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc. 
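Before the mac.c changes: the hw.h hunks above add several *_LSB values next to existing *_MASK definitions (WLAN_GPIO_PIN0_CONFIG, SI_CONFIG_ERR_INT, SI_CS_DONE_ERR, and so on), and the new qca99x0 rx-descriptor op extracts RX_MSDU_END_INFO1_L3_HDR_PAD through an MS() helper. The convention is plain mask-and-shift; a small illustration follows (the helper names here are made up, only SI_CONFIG_ERR_INT_MASK/_LSB come from the diff):

#include <stdio.h>

#define SI_CONFIG_ERR_INT_LSB	19
#define SI_CONFIG_ERR_INT_MASK	0x00080000

/* read a field out of a register value */
static unsigned int field_get(unsigned int reg, unsigned int mask,
			      unsigned int lsb)
{
	return (reg & mask) >> lsb;
}

/* prepare a field value for OR-ing into a register */
static unsigned int field_prep(unsigned int val, unsigned int mask,
			       unsigned int lsb)
{
	return (val << lsb) & mask;
}

int main(void)
{
	unsigned int reg = 0;

	reg |= field_prep(1, SI_CONFIG_ERR_INT_MASK, SI_CONFIG_ERR_INT_LSB);
	printf("reg = %#x, err_int = %u\n", reg,
	       field_get(reg, SI_CONFIG_ERR_INT_MASK, SI_CONFIG_ERR_INT_LSB));
	/* prints: reg = 0x80000, err_int = 1 */
	return 0;
}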
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = { { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, }; +static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) @@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = { #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) +#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@ -90,7 +119,7 @@ static u8 ath10k_mac_bitrate_to_rate(int bitrate) } u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, - u8 hw_rate) + u8 hw_rate, bool cck) { const struct ieee80211_rate *rate; int i; @@ -98,6 +127,9 @@ u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, for (i = 0; i < sband->n_bitrates; i++) { rate = &sband->bitrates[i]; + if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) + continue; + if (rate->hw_value == hw_rate) return i; else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && @@ -154,6 +186,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) return 1; } +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) +{ + enum wmi_host_platform_type platform_type; + int ret; + + if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) + platform_type = WMI_HOST_PLATFORM_LOW_PERF; + else + platform_type = WMI_HOST_PLATFORM_HIGH_PERF; + + ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); + + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); + return ret; + } + + return 0; +} + /**********/ /* Crypto */ /**********/ @@ -247,7 +299,8 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && - arvif->vif->type != NL80211_IFTYPE_ADHOC)) + arvif->vif->type != NL80211_IFTYPE_ADHOC && + arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) return -EINVAL; spin_lock_bh(&ar->data_lock); @@ -445,10 +498,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(peer, &ar->peers, list) { - 
if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN)) + if (ether_addr_equal(peer->addr, arvif->vif->addr)) continue; - if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN)) + if (ether_addr_equal(peer->addr, arvif->bssid)) continue; if (peer->keys[key->keyidx] == key) @@ -478,7 +531,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) enum wmi_phy_mode phymode = MODE_UNKNOWN; switch (chandef->chan->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) @@ -501,7 +554,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) break; } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: phymode = MODE_11A; @@ -614,10 +667,45 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, *def = &conf->def; } -static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, +static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); + if (ret) + return ret; + + ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) + return ret; + + if (QCA_REV_WCN3990(ar)) { + time_left = wait_for_completion_timeout(&ar->peer_delete_done, + 50 * HZ); + + if (time_left == 0) { + ath10k_warn(ar, "Timeout in receiving peer delete response\n"); + return -ETIMEDOUT; + } + } + + ar->num_peers--; + + return 0; +} + +static int ath10k_peer_create(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 vdev_id, + const u8 *addr, enum wmi_peer_type peer_type) { struct ath10k_vif *arvif; + struct ath10k_peer *peer; int num_peers = 0; int ret; @@ -646,6 +734,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, return ret; } + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, vdev_id, addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", + addr, vdev_id); + ath10k_peer_delete(ar, vdev_id, addr); + return -ENOENT; + } + + peer->vif = vif; + peer->sta = sta; + + spin_unlock_bh(&ar->data_lock); + ar->num_peers++; return 0; @@ -705,28 +809,11 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } -static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) -{ - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); - if (ret) - return ret; - - ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); - if (ret) - return ret; - - ar->num_peers--; - - return 0; -} - static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; + int peer_id; + int i; lockdep_assert_held(&ar->conf_mutex); @@ -738,6 +825,22 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); + for_each_set_bit(peer_id, peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS) { + ar->peer_map[peer_id] = NULL; + } + + /* Double check that peer is properly un-referenced from + * the peer_map + */ + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + if (ar->peer_map[i] == peer) { + ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", + peer->addr, peer, i); + 
ar->peer_map[i] = NULL; + } + } + list_del(&peer->list); kfree(peer); ar->num_peers--; @@ -748,6 +851,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) static void ath10k_peer_cleanup_all(struct ath10k *ar) { struct ath10k_peer *peer, *tmp; + int i; lockdep_assert_held(&ar->conf_mutex); @@ -756,6 +860,10 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar) list_del(&peer->list); kfree(peer); } + + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) + ar->peer_map[i] = NULL; + spin_unlock_bh(&ar->data_lock); ar->num_peers = 0; @@ -886,6 +994,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { @@ -935,6 +1044,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ar->monitor_vdev_id, ret); reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) @@ -1271,6 +1381,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { @@ -1306,7 +1417,12 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); + /* Clear arp and ns offload cache */ + memset(&arvif->arp_offload, 0, sizeof(arvif->arp_offload)); + memset(&arvif->ns_offload, 0, sizeof(arvif->ns_offload)); + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); arg.vdev_id = arvif->vdev_id; arg.dtim_period = arvif->dtim_period; @@ -1384,10 +1500,7 @@ static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, const u8 *p2p_ie; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_AP) - return 0; - - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) return 0; mgmt = (void *)bcn->data; @@ -1758,7 +1871,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, - ar->fw_features)) { + ar->running_fw->fw_file.fw_features)) { ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", arvif->vdev_id); enable_ps = false; @@ -1994,7 +2107,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = aid; - arg->peer_flags |= WMI_PEER_AUTH; + arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; arg->peer_caps = vif->bss_conf.assoc_capability; @@ -2002,6 +2115,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { struct ieee80211_bss_conf *info = &vif->bss_conf; @@ -2036,12 +2150,18 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ if (rsnie || wpaie) { ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; } if (wpaie) { ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; + } + + if (sta->mfp && + test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + arg->peer_flags |= ar->wmi.peer_flags->pmf; } } @@ -2055,7 +2175,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; - enum ieee80211_band band; + enum nl80211_band band; u32 ratemask; u8 rate; int i; @@ -2115,7 +2235,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; int i, n; @@ -2138,7 +2258,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; - arg->peer_flags |= WMI_PEER_HT; + arg->peer_flags |= ar->wmi.peer_flags->ht; arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ht_cap->ampdu_factor)) - 1; @@ -2149,10 +2269,10 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_rate_caps |= WMI_RC_HT_FLAG; if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) - arg->peer_flags |= WMI_PEER_LDPC; + arg->peer_flags |= ar->wmi.peer_flags->ldbc; if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { - arg->peer_flags |= WMI_PEER_40MHZ; + arg->peer_flags |= ar->wmi.peer_flags->bw40; arg->peer_rate_caps |= WMI_RC_CW40_FLAG; } @@ -2166,7 +2286,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; - arg->peer_flags |= WMI_PEER_STBC; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { @@ -2174,7 +2294,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_RC_RX_STBC_FLAG_S; arg->peer_rate_caps |= stbc; - arg->peer_flags |= WMI_PEER_STBC; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) @@ -2339,7 +2459,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u16 *vht_mcs_mask; u8 ampdu_factor; @@ -2355,10 +2475,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; - arg->peer_flags |= WMI_PEER_VHT; + arg->peer_flags |= ar->wmi.peer_flags->vht; - if (def.chan->band == IEEE80211_BAND_2GHZ) - arg->peer_flags |= WMI_PEER_VHT_2G; + if (def.chan->band == NL80211_BAND_2GHZ) + arg->peer_flags |= ar->wmi.peer_flags->vht_2g; arg->peer_vht_caps = vht_cap->cap; @@ -2375,7 +2495,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, ampdu_factor)) - 1); if (sta->bandwidth == IEEE80211_STA_RX_BW_80) - arg->peer_flags |= WMI_PEER_80MHZ; + arg->peer_flags |= ar->wmi.peer_flags->bw80; arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); @@ -2400,32 +2520,33 
@@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; if (sta->wme && sta->uapsd_queues) { - arg->peer_flags |= WMI_PEER_APSD; + arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; } break; case WMI_VDEV_TYPE_STA: if (vif->bss_conf.qos) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; case WMI_VDEV_TYPE_IBSS: if (sta->wme) - arg->peer_flags |= WMI_PEER_QOS; + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; default: break; } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", - sta->addr, !!(arg->peer_flags & WMI_PEER_QOS)); + sta->addr, !!(arg->peer_flags & + arvif->ar->wmi.peer_flags->qos)); } static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) { - return sta->supp_rates[IEEE80211_BAND_2GHZ] >> + return sta->supp_rates[NL80211_BAND_2GHZ] >> ATH10K_MAC_FIRST_OFDM_RATE_IDX; } @@ -2436,7 +2557,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, { struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; @@ -2449,7 +2570,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: if (sta->vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { if (sta->bandwidth == IEEE80211_STA_RX_BW_40) @@ -2469,7 +2590,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: /* * Check VHT first. 
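A recurring change in the mac.c hunks above is replacing hard-coded WMI_PEER_* bits (QOS, APSD, HT, VHT, STBC, and the 4-way/2-way key handshake flags) with lookups through ar->wmi.peer_flags->... . The indirection lets each WMI backend supply its own numeric values for the same logical peer flags instead of assuming one encoding. A rough, self-contained sketch of the idea follows; the struct layout and bit values below are hypothetical, not the driver's:

/* one map per firmware ABI; the common association code only ever
 * refers to the logical flag names
 */
struct peer_flags_map {
	unsigned int qos;
	unsigned int ht;
	unsigned int vht;
	unsigned int stbc;
};

static const struct peer_flags_map abi_a_flags = {
	.qos  = 1u << 1,
	.ht   = 1u << 4,
	.vht  = 1u << 13,
	.stbc = 1u << 9,
};

/* same logical flags, different bit positions */
static const struct peer_flags_map abi_b_flags = {
	.qos  = 1u << 0,
	.ht   = 1u << 2,
	.vht  = 1u << 3,
	.stbc = 1u << 5,
};

static unsigned int build_peer_flags(const struct peer_flags_map *map,
				     int wme, int ht_sup, int vht_sup)
{
	unsigned int flags = 0;

	if (wme)
		flags |= map->qos;
	if (ht_sup)
		flags |= map->ht;
	if (vht_sup)
		flags |= map->vht;

	return flags;
}

int main(void)
{
	unsigned int a = build_peer_flags(&abi_a_flags, 1, 1, 0);
	unsigned int b = build_peer_flags(&abi_b_flags, 1, 1, 0);

	/* same logical flags (qos|ht), different encodings: 0x12 vs 0x5 */
	return a == b;	/* 0: the encodings differ */
}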
*/ @@ -2513,7 +2634,7 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar, memset(arg, 0, sizeof(*arg)); ath10k_peer_assoc_h_basic(ar, vif, sta, arg); - ath10k_peer_assoc_h_crypto(ar, vif, arg); + ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); ath10k_peer_assoc_h_rates(ar, vif, sta, arg); ath10k_peer_assoc_h_ht(ar, vif, sta, arg); ath10k_peer_assoc_h_vht(ar, vif, sta, arg); @@ -2725,16 +2846,18 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) - ath10k_warn(ar, "faield to down vdev %i: %d\n", + ath10k_warn(ar, "failed to down vdev %i: %d\n", arvif->vdev_id, ret); arvif->def_wep_key_idx = -1; - ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); - if (ret) { - ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", - arvif->vdev_id, ret); - return; + if (!QCA_REV_WCN3990(ar)) { + ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); + if (ret) { + ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", + arvif->vdev_id, ret); + return; + } } arvif->is_up = false; @@ -2847,7 +2970,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) { struct ieee80211_hw *hw = ar->hw; struct ieee80211_supported_band **bands; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *channel; struct wmi_scan_chan_list_arg arg = {0}; struct wmi_channel_arg *ch; @@ -2859,7 +2982,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); bands = hw->wiphy->bands; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -2878,7 +3001,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) return -ENOMEM; ch = arg.channels; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -2888,7 +3011,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (channel->flags & IEEE80211_CHAN_DISABLED) continue; - ch->allow_ht = true; + ch->allow_ht = true; /* FIXME: when should we really allow VHT? */ ch->allow_vht = true; @@ -2923,7 +3046,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) /* FIXME: why use only legacy modes, why not any * HT/VHT modes? Would that even make any * difference? 
*/ - if (channel->band == IEEE80211_BAND_2GHZ) + if (channel->band == NL80211_BAND_2GHZ) ch->mode = MODE_11G; else ch->mode = MODE_11A; @@ -2978,7 +3101,7 @@ static void ath10k_regd_update(struct ath10k *ar) regpair = ar->ath_common.regulatory.regpair; - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { nl_dfs_reg = ar->dfs_detector->region; wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); } else { @@ -3007,7 +3130,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", request->dfs_region); result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, @@ -3027,6 +3150,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, /* TX handlers */ /***************/ +enum ath10k_mac_tx_path { + ATH10K_MAC_TX_HTT, + ATH10K_MAC_TX_HTT_MGMT, + ATH10K_MAC_TX_WMI_MGMT, + ATH10K_MAC_TX_UNKNOWN, +}; + void ath10k_mac_tx_lock(struct ath10k *ar, int reason) { lockdep_assert_held(&ar->htt.tx_lock); @@ -3153,35 +3283,11 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, spin_unlock_bh(&ar->htt.tx_lock); } -static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) -{ - if (ieee80211_is_mgmt(hdr->frame_control)) - return HTT_DATA_TX_EXT_TID_MGMT; - - if (!ieee80211_is_data_qos(hdr->frame_control)) - return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - - if (!is_unicast_ether_addr(ieee80211_get_DA(hdr))) - return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - - return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; -} - -static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif) -{ - if (vif) - return ath10k_vif_to_arvif(vif)->vdev_id; - - if (ar->monitor_started) - return ar->monitor_vdev_id; - - ath10k_warn(ar, "failed to resolve vdev id\n"); - return 0; -} - static enum ath10k_hw_txrx_mode -ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff *skb) +ath10k_mac_tx_h_get_txmode(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; @@ -3210,7 +3316,10 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, */ if (ar->htt.target_version_major < 3 && (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && - !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features)) + !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->running_fw->fw_file.fw_features) && + !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR, + ar->running_fw->fw_file.fw_features)) return ATH10K_HW_TXRX_MGMT; /* Workaround: @@ -3231,14 +3340,22 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, } static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, - struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct sk_buff *skb) +{ + const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const struct ieee80211_hdr *hdr = (void *)skb->data; const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_INJECTED; + + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + if ((info->flags & mask) == mask) return false; + if (vif) return 
!ath10k_vif_to_arvif(vif)->nohwcrypt; + return true; } @@ -3265,7 +3382,7 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) */ hdr = (void *)skb->data; if (ieee80211_is_qos_nullfunc(hdr->frame_control)) - cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + cb->flags &= ~ATH10K_SKB_F_QOS; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } @@ -3305,8 +3422,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); /* This is case only for P2P_GO */ - if (arvif->vdev_type != WMI_VDEV_TYPE_AP || - arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) return; if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { @@ -3321,7 +3437,29 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, } } -static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) +static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_txq *txq, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + cb->flags = 0; + if (!ath10k_tx_h_use_hwcrypto(vif, skb)) + cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; + + if (ieee80211_is_mgmt(hdr->frame_control)) + cb->flags |= ATH10K_SKB_F_MGMT; + + if (ieee80211_is_data_qos(hdr->frame_control)) + cb->flags |= ATH10K_SKB_F_QOS; + + cb->vif = vif; + cb->txq = txq; +} + +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) { /* FIXME: Not really sure since when the behaviour changed. At some * point new firmware stopped requiring creation of peer entries for @@ -3329,8 +3467,9 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) * tx credit replenishment and reliability). Assuming it's at least 3.4 * because that's when the `freq` was introduced to TX_FRM HTT command. 
*/ - return !(ar->htt.target_version_major >= 3 && - ar->htt.target_version_minor >= 4); + return (ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4 && + ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); } static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) @@ -3348,26 +3487,52 @@ static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) return 0; } -static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb) +static enum ath10k_mac_tx_path +ath10k_mac_tx_h_get_txpath(struct ath10k *ar, + struct sk_buff *skb, + enum ath10k_hw_txrx_mode txmode) { - struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); - struct ath10k_htt *htt = &ar->htt; - int ret = 0; - - switch (cb->txmode) { + switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: case ATH10K_HW_TXRX_ETHERNET: - ret = ath10k_htt_tx(htt, skb); - break; + return ATH10K_MAC_TX_HTT; case ATH10K_HW_TXRX_MGMT: if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, - ar->fw_features)) - ret = ath10k_mac_tx_wmi_mgmt(ar, skb); + ar->running_fw->fw_file.fw_features) || + test_bit(WMI_SERVICE_MGMT_TX_WMI, + ar->wmi.svc_map)) + return ATH10K_MAC_TX_WMI_MGMT; else if (ar->htt.target_version_major >= 3) - ret = ath10k_htt_tx(htt, skb); + return ATH10K_MAC_TX_HTT; else - ret = ath10k_htt_mgmt_tx(htt, skb); + return ATH10K_MAC_TX_HTT_MGMT; + } + + return ATH10K_MAC_TX_UNKNOWN; +} + +static int ath10k_mac_tx_submit(struct ath10k *ar, + enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, + struct sk_buff *skb) +{ + struct ath10k_htt *htt = &ar->htt; + int ret = -EINVAL; + + switch (txpath) { + case ATH10K_MAC_TX_HTT: + ret = ath10k_htt_tx(htt, txmode, skb); + break; + case ATH10K_MAC_TX_HTT_MGMT: + ret = ath10k_htt_mgmt_tx(htt, skb); + break; + case ATH10K_MAC_TX_WMI_MGMT: + ret = ath10k_mac_tx_wmi_mgmt(ar, skb); + break; + case ATH10K_MAC_TX_UNKNOWN: + WARN_ON_ONCE(1); + ret = -EINVAL; break; } @@ -3376,6 +3541,65 @@ static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb) ret); ieee80211_free_txskb(ar->hw, skb); } + + return ret; +} + +/* This function consumes the sk_buff regardless of return value as far as + * caller is concerned so no freeing is necessary afterwards. 
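The new submission path above reserves resources before handing a frame down and unwinds them in reverse order when a later step fails: ath10k_htt_tx_inc_pending() first, then the management-frame quota for HTT management frames, then the actual submit, with matching *_dec_pending() calls on each error path. A reduced, single-threaded sketch of that shape follows; the names and limits are made up, and the driver does this accounting under ar->htt.tx_lock:

#include <errno.h>
#include <stdbool.h>

static int pending, pending_mgmt;	/* stand-ins for the HTT counters */
static const int max_pending = 1024, max_pending_mgmt = 64;

static int reserve(int *cnt, int limit)
{
	if (*cnt >= limit)
		return -EBUSY;
	(*cnt)++;
	return 0;
}

static int hw_submit_stub(void)
{
	return 0;	/* pretend the hardware accepted the frame */
}

static int submit_frame(bool is_mgmt)
{
	int ret;

	ret = reserve(&pending, max_pending);
	if (ret)
		return ret;

	if (is_mgmt) {
		ret = reserve(&pending_mgmt, max_pending_mgmt);
		if (ret)
			goto err_unreserve;
	}

	ret = hw_submit_stub();
	if (ret)
		goto err_unreserve_mgmt;

	return 0;

err_unreserve_mgmt:
	if (is_mgmt)
		pending_mgmt--;
err_unreserve:
	pending--;
	return ret;
}

int main(void)
{
	return submit_frame(true);
}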
+ */ +static int ath10k_mac_tx(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, + struct sk_buff *skb) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int ret; + + skb_orphan(skb); + /* We should disable CCK RATE due to P2P */ + if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + + switch (txmode) { + case ATH10K_HW_TXRX_MGMT: + case ATH10K_HW_TXRX_NATIVE_WIFI: + ath10k_tx_h_nwifi(hw, skb); + ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); + ath10k_tx_h_seq_no(vif, skb); + break; + case ATH10K_HW_TXRX_ETHERNET: + ath10k_tx_h_8023(skb); + break; + case ATH10K_HW_TXRX_RAW: + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + WARN_ON_ONCE(1); + ieee80211_free_txskb(hw, skb); + return -ENOTSUPP; + } + } + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + if (!ath10k_mac_tx_frm_has_freq(ar)) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", + skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return 0; + } + } + + ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); + if (ret) { + ath10k_warn(ar, "failed to submit frame: %d\n", ret); + return ret; + } + + return 0; } void ath10k_offchan_tx_purge(struct ath10k *ar) @@ -3395,7 +3619,12 @@ void ath10k_offchan_tx_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); struct ath10k_peer *peer; + struct ath10k_vif *arvif; + enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; struct ieee80211_hdr *hdr; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; struct sk_buff *skb; const u8 *peer_addr; int vdev_id; @@ -3417,14 +3646,14 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", skb); hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); - vdev_id = ATH10K_SKB_CB(skb)->vdev_id; spin_lock_bh(&ar->data_lock); + vdev_id = ar->scan.vdev_id; peer = ath10k_peer_find(ar, vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); @@ -3434,7 +3663,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) peer_addr, vdev_id); if (!peer) { - ret = ath10k_peer_create(ar, vdev_id, peer_addr, + ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, + peer_addr, WMI_PEER_TYPE_DEFAULT); if (ret) ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", @@ -3447,12 +3677,33 @@ void ath10k_offchan_tx_work(struct work_struct *work) ar->offchan_tx_skb = skb; spin_unlock_bh(&ar->data_lock); - ath10k_mac_tx(ar, skb); + /* It's safe to access vif and sta - conf_mutex guarantees that + * sta_state() and remove_interface() are locked exclusively + * out wrt to this offchannel worker. 
+ */ + arvif = ath10k_get_arvif(ar, vdev_id); + if (arvif) { + vif = arvif->vif; + sta = ieee80211_find_sta(vif, peer_addr); + } else { + vif = NULL; + sta = NULL; + } + + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", + ret); + /* not serious */ + } time_left = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); if (time_left == 0) - ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", + ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", skb); if (!peer && tmp_peer_created) { @@ -3483,6 +3734,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); struct sk_buff *skb; + dma_addr_t paddr; int ret; for (;;) { @@ -3490,13 +3742,222 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) if (!skb) break; - ret = ath10k_wmi_mgmt_tx(ar, skb); + if (QCA_REV_WCN3990(ar)) { + paddr = dma_map_single(ar->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (!paddr) + continue; + ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", + ret); + dma_unmap_single(ar->dev, paddr, skb->len, + DMA_FROM_DEVICE); + ieee80211_free_txskb(ar->hw, skb); + } + } else { + ret = ath10k_wmi_mgmt_tx(ar, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", + ret); + ieee80211_free_txskb(ar->hw, skb); + } + } + } +} + +static void ath10k_mac_txq_init(struct ieee80211_txq *txq) +{ + struct ath10k_txq *artxq; + + if (!txq) + return; + + artxq = (void *)txq->drv_priv; + INIT_LIST_HEAD(&artxq->list); +} + +static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) +{ + struct ath10k_txq *artxq; + struct ath10k_skb_cb *cb; + struct sk_buff *msdu; + int msdu_id; + + if (!txq) + return; + + artxq = (void *)txq->drv_priv; + spin_lock_bh(&ar->txqs_lock); + if (!list_empty(&artxq->list)) + list_del_init(&artxq->list); + spin_unlock_bh(&ar->txqs_lock); + + spin_lock_bh(&ar->htt.tx_lock); + idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { + cb = ATH10K_SKB_CB(msdu); + if (cb->txq == txq) + cb->txq = NULL; + } + spin_unlock_bh(&ar->htt.tx_lock); +} + +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid) +{ + struct ath10k_peer *peer; + + lockdep_assert_held(&ar->data_lock); + + peer = ar->peer_map[peer_id]; + if (!peer) + return NULL; + + if (peer->sta) + return peer->sta->txq[tid]; + else if (peer->vif) + return peer->vif->txq; + else + return NULL; +} + +static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + /* No need to get locks */ + + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) + return true; + + if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) + return true; + + if (artxq->num_fw_queued < artxq->num_push_allowed) + return true; + + return false; +} + +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ieee80211_vif *vif = txq->vif; + struct ieee80211_sta *sta = txq->sta; + enum ath10k_hw_txrx_mode txmode; + enum 
ath10k_mac_tx_path txpath; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + size_t skb_len; + bool is_mgmt, is_presp; + int ret; + + spin_lock_bh(&ar->htt.tx_lock); + ret = ath10k_htt_tx_inc_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + if (ret) + return ret; + + skb = ieee80211_tx_dequeue(hw, txq); + if (!skb) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + return -ENOENT; + } + + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); + + skb_len = skb->len; + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); + + if (is_mgmt) { + hdr = (struct ieee80211_hdr *)skb->data; + is_presp = ieee80211_is_probe_resp(hdr->frame_control); + + spin_lock_bh(&ar->htt.tx_lock); + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); + if (ret) { - ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", - ret); - ieee80211_free_txskb(ar->hw, skb); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + return ret; + } + spin_unlock_bh(&ar->htt.tx_lock); + } + + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to push frame: %d\n", ret); + + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + if (is_mgmt) + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + return ret; + } + + spin_lock_bh(&ar->htt.tx_lock); + artxq->num_fw_queued++; + spin_unlock_bh(&ar->htt.tx_lock); + + return skb_len; +} + +void ath10k_mac_tx_push_pending(struct ath10k *ar) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_txq *txq; + struct ath10k_txq *artxq; + struct ath10k_txq *last; + int ret; + int max; + + if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; + + spin_lock_bh(&ar->txqs_lock); + rcu_read_lock(); + + last = list_last_entry(&ar->txqs, struct ath10k_txq, list); + while (!list_empty(&ar->txqs)) { + artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); + txq = container_of((void *)artxq, struct ieee80211_txq, + drv_priv); + + /* Prevent aggressive sta/tid taking over tx queue */ + max = 16; + ret = 0; + while (ath10k_mac_tx_can_push(hw, txq) && max--) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; } + + list_del_init(&artxq->list); + if (ret != -ENOENT) + list_add_tail(&artxq->list, &ar->txqs); + + ath10k_htt_tx_txq_update(hw, txq); + + if (artxq == last || (ret < 0 && ret != -ENOENT)) + break; } + + rcu_read_unlock(); + spin_unlock_bh(&ar->txqs_lock); } /************/ @@ -3522,9 +3983,10 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_STARTING: ar->scan.state = ATH10K_SCAN_IDLE; ar->scan_channel = NULL; + ar->scan.roc_freq = 0; ath10k_offchan_tx_purge(ar); cancel_delayed_work(&ar->scan.timeout); - complete_all(&ar->scan.completed); + complete(&ar->scan.completed); break; } } @@ -3553,7 +4015,7 @@ static int ath10k_scan_stop(struct ath10k *ar) goto out; } - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); + ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); if (ret == 0) { ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); ret = -ETIMEDOUT; @@ -3633,7 +4095,7 @@ static int ath10k_start_scan(struct ath10k *ar, if (ret) return ret; - ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); + ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); if (ret == 0) { ret = 
ath10k_scan_stop(ar); if (ret) @@ -3660,67 +4122,100 @@ static int ath10k_start_scan(struct ath10k *ar, /* mac80211 callbacks */ /**********************/ -static void ath10k_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +static void ath10k_mac_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - __le16 fc = hdr->frame_control; + struct ieee80211_txq *txq = NULL; + struct ieee80211_hdr *hdr = (void *)skb->data; + enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; + bool is_htt; + bool is_mgmt; + bool is_presp; + int ret; - /* We should disable CCK RATE due to P2P */ - if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); - ATH10K_SKB_CB(skb)->htt.is_offchan = false; - ATH10K_SKB_CB(skb)->htt.freq = 0; - ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); - ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb); - ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); - ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb); - ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc); + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + is_htt = (txpath == ATH10K_MAC_TX_HTT || + txpath == ATH10K_MAC_TX_HTT_MGMT); + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); - switch (ATH10K_SKB_CB(skb)->txmode) { - case ATH10K_HW_TXRX_MGMT: - case ATH10K_HW_TXRX_NATIVE_WIFI: - ath10k_tx_h_nwifi(hw, skb); - ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); - ath10k_tx_h_seq_no(vif, skb); - break; - case ATH10K_HW_TXRX_ETHERNET: - ath10k_tx_h_8023(skb); - break; - case ATH10K_HW_TXRX_RAW: - if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { - WARN_ON_ONCE(1); - ieee80211_free_txskb(hw, skb); + if (is_htt) { + spin_lock_bh(&ar->htt.tx_lock); + is_presp = ieee80211_is_probe_resp(hdr->frame_control); + + ret = ath10k_htt_tx_inc_pending(htt); + if (ret) { + ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", + ret); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); + return; + } + + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", + ret); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); return; } + spin_unlock_bh(&ar->htt.tx_lock); } - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { - spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; - ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; - spin_unlock_bh(&ar->data_lock); + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit frame: %d\n", ret); + if (is_htt) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + if (is_mgmt) + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + } + return; + } +} - if (ath10k_mac_need_offchan_tx_work(ar)) { - ATH10K_SKB_CB(skb)->htt.freq = 0; - ATH10K_SKB_CB(skb)->htt.is_offchan = true; +static void 
ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ieee80211_txq *f_txq; + struct ath10k_txq *f_artxq; + int ret = 0; + int max = 16; - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", - skb); + spin_lock_bh(&ar->txqs_lock); + if (list_empty(&artxq->list)) + list_add_tail(&artxq->list, &ar->txqs); - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; - } + f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); + f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); + list_del_init(&f_artxq->list); + + while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { + ret = ath10k_mac_tx_push_txq(hw, f_txq); + if (ret) + break; } + if (ret != -ENOENT) + list_add_tail(&f_artxq->list, &ar->txqs); + spin_unlock_bh(&ar->txqs_lock); - ath10k_mac_tx(ar, skb); + ath10k_htt_tx_txq_update(hw, f_txq); + ath10k_htt_tx_txq_update(hw, txq); } /* Must not be called with conf_mutex held as workers can use that also. */ @@ -3860,6 +4355,9 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } + if (ar->cfg_tx_chainmask <= 1) + vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); @@ -3879,7 +4377,8 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; - ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; + ht_cap.cap |= + WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; @@ -3896,7 +4395,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ht_cap.cap |= smps; } - if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) + if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { @@ -3941,14 +4440,11 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) vht_cap = ath10k_create_vht_cap(ar); if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->ht_cap = ht_cap; - - /* Enable the VHT support at 2.4 GHz */ - band->vht_cap = vht_cap; } if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->ht_cap = ht_cap; band->vht_cap = vht_cap; } @@ -4006,12 +4502,12 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) static int ath10k_start(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; - u32 burst_enable; + u32 param; int ret = 0; /* * This makes sense only when restarting hw. It is harmless to call - * uncoditionally. This is necessary to make sure no HTT/WMI tx + * unconditionally. This is necessary to make sure no HTT/WMI tx * commands will be submitted while restarting. 
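 *
 * For context (the helper body is not shown in this diff):
 * ath10k_drain_tx() is expected to flush the deferred offchannel and
 * WMI-mgmt tx work before the restart proceeds, roughly along these
 * lines (a sketch, not the exact implementation):
 *
 *	static void ath10k_drain_tx(struct ath10k *ar)
 *	{
 *		ath10k_offchan_tx_purge(ar);
 *		cancel_work_sync(&ar->offchan_tx_work);
 *		cancel_work_sync(&ar->wmi_mgmt_tx_work);
 *	}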
*/ ath10k_drain_tx(ar); @@ -4023,7 +4519,8 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->state = ATH10K_STATE_ON; break; case ATH10K_STATE_RESTARTING: - ath10k_halt(ar); + if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ath10k_halt(ar); ar->state = ATH10K_STATE_RESTARTED; break; case ATH10K_STATE_ON: @@ -4043,24 +4540,34 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err_off; } - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); if (ret) { ath10k_err(ar, "Could not init core: %d\n", ret); goto err_power_down; } - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); + param = ar->wmi.pdev_param->pmf_qos; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); goto err_core_stop; } - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); + param = ar->wmi.pdev_param->dynamic_bw; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); goto err_core_stop; } + param = ar->wmi.pdev_param->idle_ps_config; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret) { + ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); + goto err_core_stop; + } + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { ret = ath10k_wmi_adaptive_qcs(ar, true); if (ret) { @@ -4071,8 +4578,8 @@ static int ath10k_start(struct ieee80211_hw *hw) } if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { - burst_enable = ar->wmi.pdev_param->burst_enable; - ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0); + param = ar->wmi.pdev_param->burst_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); if (ret) { ath10k_warn(ar, "failed to disable burst: %d\n", ret); goto err_core_stop; @@ -4090,8 +4597,8 @@ static int ath10k_start(struct ieee80211_hw *hw) * this problem. 
*/ - ret = ath10k_wmi_pdev_set_param(ar, - ar->wmi.pdev_param->arp_ac_override, 0); + param = ar->wmi.pdev_param->arp_ac_override; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); if (ret) { ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", ret); @@ -4099,7 +4606,7 @@ static int ath10k_start(struct ieee80211_hw *hw) } if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, - ar->fw_features)) { + ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, WMI_CCA_DETECT_LEVEL_AUTO, WMI_CCA_DETECT_MARGIN_AUTO); @@ -4108,10 +4615,11 @@ static int ath10k_start(struct ieee80211_hw *hw) ret); goto err_core_stop; } + ar->sifs_burst_enabled = false; } - ret = ath10k_wmi_pdev_set_param(ar, - ar->wmi.pdev_param->ani_enable, 1); + param = ar->wmi.pdev_param->ani_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable ani by default: %d\n", ret); @@ -4120,6 +4628,31 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->ani_enabled = true; + if (ath10k_peer_stats_enabled(ar)) { + param = ar->wmi.pdev_param->peer_stats_update_period; + ret = ath10k_wmi_pdev_set_param(ar, param, + PEER_DEFAULT_STATS_UPDATE_PERIOD); + if (ret) { + ath10k_warn(ar, + "failed to set peer stats period : %d\n", + ret); + goto err_core_stop; + } + } + + param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar); @@ -4322,6 +4855,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_peer *peer; enum wmi_sta_powersave_param param; int ret = 0; u32 value; @@ -4334,6 +4868,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); memset(arvif, 0, sizeof(*arvif)); + ath10k_mac_txq_init(vif->txq); arvif->ar = ar; arvif->vif = vif; @@ -4368,24 +4903,30 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, bit, ar->free_vdev_map); arvif->vdev_id = bit; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + arvif->vdev_subtype = + ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: arvif->vdev_type = WMI_VDEV_TYPE_STA; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); break; case NL80211_IFTYPE_ADHOC: arvif->vdev_type = WMI_VDEV_TYPE_IBSS; break; case NL80211_IFTYPE_MESH_POINT: - if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_MESH_11S); + } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); goto err; @@ -4396,7 +4937,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, 
arvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_GO); break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; @@ -4467,6 +5009,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } + if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar)) { + ret = ath10k_wmi_csa_offload(ar, arvif->vdev_id, true); + if (ret) { + ath10k_err(ar, "CSA offload failed for vdev %i: %d\n", + arvif->vdev_id, ret); + goto err_vdev_delete; + } + } + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); spin_lock_bh(&ar->data_lock); list_add(&arvif->list, &ar->arvifs); @@ -4513,13 +5064,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (arvif->vdev_type == WMI_VDEV_TYPE_AP || arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { - ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr, - WMI_PEER_TYPE_DEFAULT); + ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, + vif->addr, WMI_PEER_TYPE_DEFAULT); if (ret) { ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ret = -ENOENT; + goto err_peer_delete; + } + + arvif->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + } else { + arvif->peer_id = HTT_INVALID_PEERID; } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { @@ -4598,7 +5167,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, err_peer_delete: if (arvif->vdev_type == WMI_VDEV_TYPE_AP || arvif->vdev_type == WMI_VDEV_TYPE_IBSS) - ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); + ath10k_peer_delete(ar, arvif->vdev_id, vif->addr); err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); @@ -4632,7 +5201,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_peer *peer; + unsigned long time_left; int ret; + int i; cancel_work_sync(&arvif->ap_csa_work); cancel_delayed_work_sync(&arvif->connection_loss_work); @@ -4655,8 +5227,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, if (arvif->vdev_type == WMI_VDEV_TYPE_AP || arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { - ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, - vif->addr); + ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, + vif->addr); if (ret) ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", arvif->vdev_id, ret); @@ -4664,6 +5236,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, kfree(arvif->u.ap.noa_data); } + if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar)) + ath10k_wmi_csa_offload(ar, arvif->vdev_id, false); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", arvif->vdev_id); @@ -4672,6 +5247,16 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", arvif->vdev_id, ret); + if (QCA_REV_WCN3990(ar)) { + time_left = wait_for_completion_timeout( + &ar->vdev_delete_done, + ATH10K_VDEV_DELETE_TIMEOUT_HZ); + if (time_left == 0) { + ath10k_warn(ar, "Timeout in receiving vdev delete resp\n"); + return; + } + } + /* Some firmware revisions don't 
notify host about self-peer removal * until after associated vdev is deleted. */ @@ -4688,7 +5273,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); } + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->vif == vif) { + ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", + vif->addr, arvif->vdev_id); + peer->vif = NULL; + } + } + spin_unlock_bh(&ar->data_lock); + ath10k_peer_cleanup(ar, arvif->vdev_id); + ath10k_mac_txq_unref(ar, vif->txq); if (vif->type == NL80211_IFTYPE_MONITOR) { ar->monitor_arvif = NULL; @@ -4697,13 +5297,39 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + spin_lock_bh(&ar->htt.tx_lock); ath10k_mac_vif_tx_unlock_all(arvif); spin_unlock_bh(&ar->htt.tx_lock); + ath10k_mac_txq_unref(ar, vif->txq); + mutex_unlock(&ar->conf_mutex); } +static int ath10k_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, bool p2p) +{ + struct ath10k *ar = hw->priv; + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "change_interface new: %d (%d), old: %d (%d)\n", new_type, + p2p, vif->type, vif->p2p); + + if (new_type != vif->type || vif->p2p != p2p) { + ath10k_remove_interface(hw, vif); + vif->type = new_type; + vif->p2p = p2p; + ret = ath10k_add_interface(hw, vif); + } + return ret; +} + /* * FIXME: Has to be verified. */ @@ -4732,7 +5358,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, ret = ath10k_monitor_recalc(ar); if (ret) - ath10k_warn(ar, "failed to recalc montior: %d\n", ret); + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); mutex_unlock(&ar->conf_mutex); } @@ -4924,7 +5550,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; struct wmi_start_scan_arg arg; - int ret = 0; + const u8 *ptr; + int ret = 0, ie_skip_len = 0; int i; mutex_lock(&ar->conf_mutex); @@ -4956,8 +5583,16 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.scan_id = ATH10K_SCAN_ID; if (req->ie_len) { - arg.ie_len = req->ie_len; - memcpy(arg.ie, req->ie, arg.ie_len); + if (QCA_REV_WCN3990(ar)) { + ptr = req->ie; + while (ptr[0] == WLAN_EID_SUPP_RATES || + ptr[0] == WLAN_EID_EXT_SUPP_RATES) { + ie_skip_len = ptr[1] + 2; + ptr += ie_skip_len; + } + } + arg.ie_len = req->ie_len - ie_skip_len; + memcpy(arg.ie, req->ie + ie_skip_len, arg.ie_len); } if (req->n_ssids) { @@ -4966,6 +5601,11 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.ssids[i].len = req->ssids[i].ssid_len; arg.ssids[i].ssid = req->ssids[i].ssid; } + if (QCA_REV_WCN3990(ar)) { + arg.scan_ctrl_flags &= + ~(WMI_SCAN_ADD_BCAST_PROBE_REQ | + WMI_SCAN_CHAN_STAT_EVENT); + } } else { arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } @@ -5048,6 +5688,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, arvif->vdev_id, ret); } +static void ath10k_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + mutex_lock(&ar->conf_mutex); + memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(&arvif->gtk_rekey_data.kck, data->kck, 
NL80211_KCK_LEN); + arvif->gtk_rekey_data.replay_ctr = + be64_to_cpup((__be64 *)data->replay_ctr); + arvif->gtk_rekey_data.valid = true; + mutex_unlock(&ar->conf_mutex); +} + static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -5228,7 +5884,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) struct ath10k_sta *arsta; struct ieee80211_sta *sta; struct cfg80211_chan_def def; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; u32 changed, bw, nss, smps; @@ -5402,13 +6058,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_peer *peer; int ret = 0; + int i; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_init(sta->txq[i]); } /* cancel must be done outside the mutex to avoid deadlock */ @@ -5416,6 +6077,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NOTEXIST)) cancel_work_sync(&arsta->update_wk); + if (vif->type == NL80211_IFTYPE_STATION && new_state > ar->sta_state) + ar->sta_state = new_state; + mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && @@ -5443,8 +6107,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, if (sta->tdls) peer_type = WMI_PEER_TYPE_TDLS; - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr, - peer_type); + ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, + sta->addr, peer_type); if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); @@ -5452,6 +6116,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, goto exit; } + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath10k_mac_dec_num_stations(arvif, sta); + ret = -ENOENT; + goto exit; + } + + arsta->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + if (!sta->tdls) goto exit; @@ -5504,8 +6186,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, * Existing station deletion. */ ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d peer delete %pM (sta gone)\n", - arvif->vdev_id, sta->addr); + "mac vdev %d peer delete %pM sta %pK (sta gone)\n", + arvif->vdev_id, sta->addr, sta); if (sta->tdls) { ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, @@ -5524,6 +6206,31 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_mac_dec_num_stations(arvif, sta); + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->sta == sta) { + ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", + sta->addr, peer, i, arvif->vdev_id); + peer->sta = NULL; + + /* Clean up the peer object as well since we + * must have failed to do this above. 
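+	 * ("Above" refers to the normal peer teardown earlier in this
+	 * function: had it succeeded, ar->peer_map[i] would no longer
+	 * reference this sta. Dropping the stale entry here keeps
+	 * peer_map, the peer list and ar->num_peers in sync.)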
+ */ + list_del(&peer->list); + ar->peer_map[i] = NULL; + kfree(peer); + ar->num_peers--; + } + } + spin_unlock_bh(&ar->data_lock); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_unref(ar, sta->txq[i]); + if (!sta->tdls) goto exit; @@ -5770,7 +6477,7 @@ exit: return ret; } -#define ATH10K_ROC_TIMEOUT_HZ (2*HZ) +#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) static int ath10k_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -5822,7 +6529,13 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, arg.dwell_time_passive = scan_time_msec; arg.max_scan_time = scan_time_msec; arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + if (QCA_REV_WCN3990(ar)) { + arg.scan_ctrl_flags &= ~(WMI_SCAN_FILTER_PROBE_REQ | + WMI_SCAN_CHAN_STAT_EVENT | + WMI_SCAN_ADD_BCAST_PROBE_REQ); + } else { + arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + } arg.burst_duration_ms = duration; ret = ath10k_start_scan(ar, &arg); @@ -5834,7 +6547,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, goto exit; } - ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); if (ret == 0) { ath10k_warn(ar, "failed to switch to channel for roc scan\n"); @@ -5986,6 +6699,39 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } +static void +ath10k_mac_update_bss_chan_survey(struct ath10k *ar, + struct ieee80211_channel *channel) +{ + int ret; + enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || + (ar->rx_channel != channel)) + return; + + if (ar->scan.state != ATH10K_SCAN_IDLE) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); + return; + } + + reinit_completion(&ar->bss_survey_done); + + ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); + if (ret) { + ath10k_warn(ar, "failed to send pdev bss chan info request\n"); + return; + } + + ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); + if (!ret) { + ath10k_warn(ar, "bss channel survey timed out\n"); + return; + } +} + static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { @@ -5996,20 +6742,23 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, mutex_lock(&ar->conf_mutex); - sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) - sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (!sband || idx >= sband->n_channels) { ret = -ENOENT; goto exit; } + if (!QCA_REV_WCN3990(ar)) + ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); + spin_lock_bh(&ar->data_lock); memcpy(survey, ar_survey, sizeof(*survey)); spin_unlock_bh(&ar->data_lock); @@ -6026,7 +6775,7 @@ exit: static bool ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; @@ -6045,7 +6794,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, static bool ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { @@ -6094,7 +6843,7 @@ 
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, static int ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, u8 *rate, u8 *nss) { @@ -6195,7 +6944,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, static bool ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, - enum ieee80211_band band, + enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; @@ -6247,7 +6996,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath10k *ar = arvif->ar; - enum ieee80211_band band; + enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; u8 rate; @@ -6408,6 +7157,32 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return 0; } +static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 tsf) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf; + int ret; + + /* Workaround: + * + * Given tsf argument is entire TSF value, but firmware accepts + * only TSF offset to current TSF. + * + * get_tsf function is used to get offset value, however since + * ath10k_get_tsf is not implemented properly, it will return 0 always. + * Luckily all the caller functions to set_tsf, as of now, also rely on + * get_tsf function to get entire tsf value such get_tsf() + tsf_delta, + * final tsf offset value to firmware will be arithmetically correct. + */ + tsf_offset = tsf - ath10k_get_tsf(hw, vif); + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, tsf_offset); + if (ret && ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to set tsf offset: %d\n", ret); +} + static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) @@ -6526,12 +7301,25 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, if (WARN_ON(!arvif->is_up)) continue; + if (QCA_REV_WCN3990(ar)) { + /* In the case of wcn3990 WLAN module we send + * vdev restart only, no need to send vdev down. + */ - ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); - if (ret) { - ath10k_warn(ar, "failed to down vdev %d: %d\n", - arvif->vdev_id, ret); - continue; + ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); + if (ret) { + ath10k_warn(ar, + "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } else { + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) { + ath10k_warn(ar, "failed to down vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } } } @@ -6562,11 +7350,17 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", ret); - ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); - if (ret) { - ath10k_warn(ar, "failed to restart vdev %d: %d\n", - arvif->vdev_id, ret); - continue; + if (!QCA_REV_WCN3990(ar)) { + /* In case of other than wcn3990 WLAN module we + * send vdev down and vdev restart to the firmware. 
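+			 * (The WCN3990 case was already handled in the loop
+			 * above, where the vdev down step is skipped and only
+			 * ath10k_vdev_restart() is issued, so this branch
+			 * covers the remaining chipsets.)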
+ */ + + ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); + if (ret) { + ath10k_warn(ar, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } } ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, @@ -6588,7 +7382,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac chanctx add freq %hu width %d ptr %p\n", + "mac chanctx add freq %hu width %d ptr %pK\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); @@ -6612,7 +7406,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac chanctx remove freq %hu width %d ptr %p\n", + "mac chanctx remove freq %hu width %d ptr %pK\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); @@ -6677,7 +7471,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac chanctx change freq %hu width %d ptr %p changed %x\n", + "mac chanctx change freq %hu width %d ptr %pK changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); /* This shouldn't really happen because channel switching should use @@ -6735,7 +7529,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac chanctx assign ptr %p vdev_id %i\n", + "mac chanctx assign ptr %pK vdev_id %i\n", ctx, arvif->vdev_id); if (WARN_ON(arvif->is_started)) { @@ -6803,12 +7597,13 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac chanctx unassign ptr %p vdev_id %i\n", + "mac chanctx unassign ptr %pK vdev_id %i\n", ctx, arvif->vdev_id); WARN_ON(!arvif->is_started); - - if (vif->type == NL80211_IFTYPE_MONITOR) { + if (vif->type == NL80211_IFTYPE_MONITOR || + (vif->type == NL80211_IFTYPE_STATION && + ar->sta_state < IEEE80211_STA_ASSOC)) { WARN_ON(!arvif->is_up); ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); @@ -6824,6 +7619,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to stop vdev %i: %d\n", arvif->vdev_id, ret); + ar->sta_state = IEEE80211_STA_NOTEXIST; arvif->is_started = false; mutex_unlock(&ar->conf_mutex); @@ -6849,16 +7645,19 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, } static const struct ieee80211_ops ath10k_ops = { - .tx = ath10k_tx, + .tx = ath10k_mac_op_tx, + .wake_tx_queue = ath10k_mac_op_wake_tx_queue, .start = ath10k_start, .stop = ath10k_stop, .config = ath10k_config, .add_interface = ath10k_add_interface, + .change_interface = ath10k_change_interface, .remove_interface = ath10k_remove_interface, .configure_filter = ath10k_configure_filter, .bss_info_changed = ath10k_bss_info_changed, .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, + .set_rekey_data = ath10k_set_rekey_data, .set_key = ath10k_set_key, .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, @@ -6876,6 +7675,7 @@ static const struct ieee80211_ops ath10k_ops = { .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, + .set_tsf = ath10k_set_tsf, .ampdu_action = ath10k_ampdu_action, .get_et_sset_count = ath10k_debug_get_et_sset_count, .get_et_stats = ath10k_debug_get_et_stats, @@ -6892,14 +7692,16 @@ static const struct ieee80211_ops ath10k_ops = { #ifdef CONFIG_PM .suspend = ath10k_wow_op_suspend, 
.resume = ath10k_wow_op_resume, + .set_wakeup = ath10k_wow_op_set_wakeup, #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, + .sta_statistics = ath10k_sta_statistics, #endif }; #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -6908,7 +7710,7 @@ static const struct ieee80211_ops ath10k_ops = { } #define CHAN5G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -6964,54 +7766,69 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { struct ath10k *ath10k_mac_create(size_t priv_size) { struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct ath10k *ar; - hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops); - if (!hw) + ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); + if (!ops) return NULL; + hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); + if (!hw) { + kfree(ops); + return NULL; + } + ar = hw->priv; ar->hw = hw; + ar->ops = ops; return ar; } void ath10k_mac_destroy(struct ath10k *ar) { + struct ieee80211_ops *ops = ar->ops; + ieee80211_free_hw(ar->hw); + kfree(ops); } static const struct ieee80211_iface_limit ath10k_if_limits[] = { { - .max = 8, - .types = BIT(NL80211_IFTYPE_STATION) - | BIT(NL80211_IFTYPE_P2P_CLIENT) + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { - .max = 3, - .types = BIT(NL80211_IFTYPE_P2P_GO) + .max = 3, + .types = BIT(NL80211_IFTYPE_P2P_GO) }, { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, { - .max = 7, - .types = BIT(NL80211_IFTYPE_AP) + .max = 7, + .types = BIT(NL80211_IFTYPE_AP) #ifdef CONFIG_MAC80211_MESH - | BIT(NL80211_IFTYPE_MESH_POINT) + | BIT(NL80211_IFTYPE_MESH_POINT) #endif }, }; static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { { - .max = 8, - .types = BIT(NL80211_IFTYPE_AP) + .max = 8, + .types = BIT(NL80211_IFTYPE_AP) #ifdef CONFIG_MAC80211_MESH - | BIT(NL80211_IFTYPE_MESH_POINT) + | BIT(NL80211_IFTYPE_MESH_POINT) #endif }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION) + }, }; static const struct ieee80211_iface_combination ath10k_if_comb[] = { @@ -7133,6 +7950,85 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { }, }; +static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), + }, +}; + +static const struct ieee80211_iface_limit ath10k_wcn3990_qcs_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_P2P_GO), + }, +}; + +static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit_ibss[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC), + }, +}; + +static struct ieee80211_iface_combination ath10k_wcn3990_qcs_if_comb[] = { + { + .limits = ath10k_wcn3990_if_limit, + .num_different_channels = 1, 
+ .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit), +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, + { + .limits = ath10k_wcn3990_qcs_if_limit, + .num_different_channels = 2, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_wcn3990_qcs_if_limit), + }, + { + .limits = ath10k_wcn3990_if_limit_ibss, + .num_different_channels = 1, + .max_interfaces = 2, + .n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit_ibss), +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, +}; + static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { { .max = 1, @@ -7224,13 +8120,19 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + } + + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { @@ -7242,12 +8144,12 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); band->channels = channels; band->n_bitrates = ath10k_a_rates_size; band->bitrates = ath10k_a_rates; - ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } ath10k_mac_setup_ht_vht_cap(ar); @@ -7260,12 +8162,16 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; - if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); + if (QCA_REV_WCN3990(ar)) + ar->hw->wiphy->interface_modes &= + ~BIT(NL80211_IFTYPE_P2P_DEVICE); + ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); @@ -7300,6 +8206,7 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->vif_data_size = sizeof(struct ath10k_vif); ar->hw->sta_data_size = sizeof(struct ath10k_sta); + ar->hw->txq_data_size = sizeof(struct ath10k_txq); ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; @@ -7324,7 +8231,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_AP_SCAN; ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; @@ -7348,7 +8256,7 @@ int ath10k_mac_register(struct ath10k *ar) */ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1; - switch 
(ar->wmi.op_version) { + switch (ar->running_fw->fw_file.wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->hw->wiphy->iface_combinations = ath10k_if_comb; ar->hw->wiphy->n_iface_combinations = @@ -7356,6 +8264,14 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); break; case ATH10K_FW_WMI_OP_VERSION_TLV: + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + if (QCA_REV_WCN3990(ar)) { + ar->hw->wiphy->iface_combinations = + ath10k_wcn3990_qcs_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_wcn3990_qcs_if_comb); + break; + } if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { ar->hw->wiphy->iface_combinations = ath10k_tlv_qcs_if_comb; @@ -7366,7 +8282,6 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_tlv_if_comb); } - ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: @@ -7390,7 +8305,7 @@ int ath10k_mac_register(struct ath10k *ar) if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) ar->hw->netdev_features = NETIF_F_HW_CSUM; - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { /* Init ath dfs pattern detector */ ar->ath_common.debug_mask = ATH_DBG_DFS; ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, @@ -7400,6 +8315,15 @@ int ath10k_mac_register(struct ath10k *ar) ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); } + /* Current wake_tx_queue implementation imposes a significant + * performance penalty in some setups. The tx scheduling code needs + * more work anyway so disable the wake_tx_queue unless firmware + * supports the pull-push mechanism. 
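+	 *
+	 * Clearing the op at runtime is only safe because
+	 * ath10k_mac_create() now hands mac80211 a kmemdup()'d per-device
+	 * copy of ath10k_ops (see the change earlier in this file), so
+	 * other ath10k instances keep their wake_tx_queue callback.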
+ */ + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + ar->ops->wake_tx_queue = NULL; + ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { @@ -7429,12 +8353,12 @@ err_unregister: ieee80211_unregister_hw(ar->hw); err_dfs_detector_exit: - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ar->dfs_detector->exit(ar->dfs_detector); err_free: - kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); - kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); return ret; @@ -7442,13 +8366,14 @@ err_free: void ath10k_mac_unregister(struct ath10k *ar) { + ath10k_wow_deinit(ar); ieee80211_unregister_hw(ar->hw); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ar->dfs_detector->exit(ar->dfs_detector); - kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); - kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); } diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index e3cefe4c7cfd..1bd29ecfcdcc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -66,7 +66,7 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, enum wmi_tlv_tx_pause_action action); u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, - u8 hw_rate); + u8 hw_rate, bool cck); u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, u32 bitrate); @@ -74,6 +74,14 @@ void ath10k_mac_tx_lock(struct ath10k *ar, int reason); void ath10k_mac_tx_unlock(struct ath10k *ar, int reason); void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); +void ath10k_mac_tx_push_pending(struct ath10k *ar); +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid); +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 907fd60c4241..ca77861c4320 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -33,12 +33,6 @@ #include "ce.h" #include "pci.h" -enum ath10k_pci_irq_mode { - ATH10K_PCI_IRQ_AUTO = 0, - ATH10K_PCI_IRQ_LEGACY = 1, - ATH10K_PCI_IRQ_MSI = 2, -}; - enum ath10k_pci_reset_mode { ATH10K_PCI_RESET_AUTO = 0, ATH10K_PCI_RESET_WARM_ONLY = 1, @@ -62,7 +56,10 @@ static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */ { PCI_VDEVICE(ATHEROS, 
QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ + { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */ {0} }; @@ -87,14 +84,19 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, + + { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, + + { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); static int ath10k_pci_safe_chip_reset(struct ath10k *ar); -static int ath10k_pci_wait_for_target_init(struct ath10k *ar); static int ath10k_pci_init_irq(struct ath10k *ar); static int ath10k_pci_deinit_irq(struct ath10k *ar); static int ath10k_pci_request_irq(struct ath10k *ar); @@ -107,6 +109,7 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static struct ce_attr host_ce_config_wlan[] = { @@ -186,6 +189,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, + .recv_cb = ath10k_pci_pktlog_rx_cb, }, /* CE9 target autonomous qcache memcpy */ @@ -485,6 +489,9 @@ static int ath10k_pci_force_wake(struct ath10k *ar) unsigned long flags; int ret = 0; + if (ar_pci->pci_ps) + return ret; + spin_lock_irqsave(&ar_pci->ps_lock, flags); if (!ar_pci->ps_awake) { @@ -615,7 +622,7 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar) spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } -void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; @@ -637,7 +644,7 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) ath10k_pci_sleep(ar); } -u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 val; @@ -662,6 +669,20 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) return val; } +inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + ar_pci->opaque_ctx.bus_ops->write32(ar, offset, value); +} + +inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ar_pci->opaque_ctx.bus_ops->read32(ar, offset); +} + u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) { return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); @@ -682,7 +703,7 @@ void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); } -static bool ath10k_pci_irq_pending(struct ath10k *ar) +bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -695,7 +716,7 @@ static bool ath10k_pci_irq_pending(struct ath10k *ar) return false; } -static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) +void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) { /* 
IMPORTANT: INTR_CLR register has to be set after * INTR_ENABLE is set to 0, otherwise interrupt can not be @@ -711,7 +732,7 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) PCIE_INTR_ENABLE_ADDRESS); } -static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) +void ath10k_pci_enable_legacy_irq(struct ath10k *ar) { ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, @@ -727,10 +748,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (ar_pci->num_msi_intrs > 1) - return "msi-x"; - - if (ar_pci->num_msi_intrs == 1) + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) return "msi"; return "legacy"; @@ -762,9 +780,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -788,10 +806,11 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); - while (num--) { + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); + + while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); if (ret) { if (ret == -ENOSPC) @@ -801,10 +820,11 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) ATH10K_PCI_RX_POST_RETRY_MS); break; } + num--; } } -static void ath10k_pci_rx_post(struct ath10k *ar) +void ath10k_pci_rx_post(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; @@ -813,7 +833,7 @@ static void ath10k_pci_rx_post(struct ath10k *ar) ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); } -static void ath10k_pci_rx_replenish_retry(unsigned long ptr) +void ath10k_pci_rx_replenish_retry(unsigned long ptr) { struct ath10k *ar = (void *)ptr; @@ -826,15 +846,21 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) & 0x7ff) << 21; break; + case ATH10K_HW_QCA9888: case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA4019: val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); break; + default: + break; } val |= 0x100000 | (addr & 0xfffff); @@ -851,10 +877,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; - u32 buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; - unsigned int id; - unsigned int flags; + u32 *buf; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; @@ -862,7 +886,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ce_diag = ar_pci->ce_diag; @@ -872,9 +896,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; + 
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, - orig_nbytes, + alloc_nbytes, &ce_data_base, GFP_ATOMIC); @@ -882,15 +907,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ret = -ENOMEM; goto done; } - memset(data_buf, 0, orig_nbytes); + memset(data_buf, 0, alloc_nbytes); - remaining_bytes = orig_nbytes; + remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); + ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; @@ -921,9 +946,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, + (void **)&buf, + &completed_nbytes) + != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -937,28 +963,31 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; } - if (buf != ce_data) { + if (*buf != ce_data) { ret = -EIO; goto done; } remaining_bytes -= nbytes; + + if (ret) { + ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", + address, ret); + break; + } + memcpy(data, data_buf, nbytes); + address += nbytes; - ce_data += nbytes; + data += nbytes; } done: - if (ret == 0) - memcpy(data, data_buf, orig_nbytes); - else - ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", - address, ret); if (data_buf) - dma_free_coherent(ar->dev, orig_nbytes, data_buf, + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return ret; } @@ -1002,22 +1031,20 @@ static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) -static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, - const void *data, int nbytes) +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; - u32 buf; + u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; - unsigned int id; - unsigned int flags; struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; u32 ce_data; /* Host buffer address in CE space */ dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ce_diag = ar_pci->ce_diag; @@ -1059,7 +1086,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) 
address */ - ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); + ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; @@ -1084,9 +1111,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, + (void **)&buf, + &completed_nbytes) + != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -1100,7 +1128,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, goto done; } - if (buf != address) { + if (*buf != address) { ret = -EIO; goto done; } @@ -1120,7 +1148,7 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return ret; } @@ -1162,15 +1190,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, struct sk_buff *skb; struct sk_buff_head list; void *transfer_context; - u32 ce_data; unsigned int nbytes, max_nbytes; - unsigned int transfer_id; - unsigned int flags; __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, - &ce_data, &nbytes, &transfer_id, - &flags) == 0) { + &nbytes) == 0) { skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, @@ -1199,6 +1223,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, ath10k_pci_rx_post_pipe(pipe_info); } +static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes, nentries; + int orig_len; + + /* No need to aquire ce_lock for CE5, since this is the only place CE5 + * is processed other than init and deinit. Before releasing CE5 + * buffers, interrupts are disabled. Thus CE5 access is serialized. + */ + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + continue; + } + + dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + nentries = skb_queue_len(&list); + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); + + orig_len = skb->len; + callback(ar, skb); + skb_push(skb, orig_len - skb->len); + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + + /*let device gain the buffer again*/ + dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + } + ath10k_ce_rx_update_write_idx(ce_pipe, nentries); +} + /* Called by lower (CE) layer when data is received from the Target. 
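 *
 * Note the split in rx handling introduced above: the generic
 * ath10k_pci_process_rx_cb() unmaps each completed skb and re-posts
 * fresh buffers via ath10k_pci_rx_post_pipe(), whereas the new
 * HTT-only ath10k_pci_process_htt_rx_cb() recycles the same skbs in
 * place (dma_sync plus skb_push/skb_trim) and only advances the CE
 * write index, avoiding buffer allocation in the hot rx path.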
*/ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) { @@ -1215,6 +1296,15 @@ static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } +/* Called by lower (CE) layer when data is received from the Target. + * Only 10.4 firmware uses separate CE to transfer pktlog data. + */ +static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_pci_process_rx_cb(ce_state, + ath10k_htt_rx_pktlog_completion_handler); +} + /* Called by lower (CE) layer when a send to HTT Target completes. */ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { @@ -1246,11 +1336,11 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) */ ath10k_ce_per_engine_service(ce_state->ar, 4); - ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); + ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); } -static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, - struct ath10k_hif_sg_item *items, int n_items) +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; @@ -1261,7 +1351,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, unsigned int write_index; int err, i = 0; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -1275,8 +1365,8 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, for (i = 0; i < n_items - 1; i++) { ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); @@ -1293,8 +1383,8 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, /* `i` is equal to `n_items -1` after for() */ ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); @@ -1307,24 +1397,24 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return err; } -static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, - size_t buf_len) +int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) { return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); } -static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -1392,8 +1482,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) queue_work(ar->workqueue, &ar->restart_work); } -static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, - int force) +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int 
force) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); @@ -1418,22 +1508,15 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, ath10k_ce_per_engine_service(ar, pipe); } -static void ath10k_pci_kill_tasklet(struct ath10k *ar) +static void ath10k_pci_rx_retry_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - - tasklet_kill(&ar_pci->intr_tq); - tasklet_kill(&ar_pci->msi_fw_err); - - for (i = 0; i < CE_COUNT; i++) - tasklet_kill(&ar_pci->pipe_info[i].intr); del_timer_sync(&ar_pci->rx_post_retry); } -static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe) +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; @@ -1477,8 +1560,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, return 0; } -static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, - u8 *ul_pipe, u8 *dl_pipe) +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); @@ -1487,12 +1570,13 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, ul_pipe, dl_pipe); } -static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) +void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { u32 val; switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1502,10 +1586,15 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: + case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI. */ break; + default: + break; } } @@ -1515,6 +1604,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1524,10 +1614,15 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: + case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI. 
*/ break; + default: + break; } } @@ -1541,10 +1636,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar) static void ath10k_pci_irq_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - synchronize_irq(ar_pci->pdev->irq + i); + synchronize_irq(ar_pci->pdev->irq); } static void ath10k_pci_irq_enable(struct ath10k *ar) @@ -1604,14 +1697,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; - struct ath10k_pci *ar_pci; struct ath10k_ce_pipe *ce_pipe; struct ath10k_ce_ring *ce_ring; struct sk_buff *skb; int i; ar = pci_pipe->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); ce_pipe = pci_pipe->ce_hdl; ce_ring = ce_pipe->src_ring; @@ -1654,7 +1745,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) } } -static void ath10k_pci_ce_deinit(struct ath10k *ar) +void ath10k_pci_ce_deinit(struct ath10k *ar) { int i; @@ -1662,9 +1753,9 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) ath10k_ce_deinit_pipe(ar, i); } -static void ath10k_pci_flush(struct ath10k *ar) +void ath10k_pci_flush(struct ath10k *ar) { - ath10k_pci_kill_tasklet(ar); + ath10k_pci_rx_retry_sync(ar); ath10k_pci_buffer_cleanup(ar); } @@ -1691,15 +1782,17 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); spin_lock_irqsave(&ar_pci->ps_lock, flags); WARN_ON(ar_pci->ps_wake_refcount > 0); spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } -static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, - void *req, u32 req_len, - void *resp, u32 *resp_len) +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; @@ -1742,7 +1835,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, DMA_FROM_DEVICE); ret = dma_mapping_error(ar->dev, resp_paddr); if (ret) { - ret = EIO; + ret = -EIO; goto err_req; } @@ -1805,13 +1898,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct bmi_xfer *xfer; - u32 ce_data; unsigned int nbytes; - unsigned int transfer_id; - unsigned int flags; - if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, - &nbytes, &transfer_id, &flags)) + if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, + &nbytes)) return; if (WARN_ON_ONCE(!xfer)) @@ -1868,6 +1958,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: + case QCA9888_2_0_DEVICE_ID: + case QCA9984_1_0_DEVICE_ID: + case QCA9887_1_0_DEVICE_ID: return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: @@ -1893,7 +1986,14 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) return 1; } -static int ath10k_pci_init_config(struct ath10k *ar) +static int ath10k_bus_get_num_banks(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ar_pci->opaque_ctx.bus_ops->get_num_banks(ar); +} + +int ath10k_pci_init_config(struct ath10k *ar) { u32 interconnect_targ_addr; u32 pcie_state_targ_addr = 0; @@ -2004,7 +2104,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) /* first bank is switched to IRAM */ ealloc_value |= 
((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & HI_EARLY_ALLOC_MAGIC_MASK); - ealloc_value |= ((ath10k_pci_get_num_banks(ar) << + ealloc_value |= ((ath10k_bus_get_num_banks(ar) << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & HI_EARLY_ALLOC_IRAM_BANKS_MASK); @@ -2057,7 +2157,7 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar) target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); } -static int ath10k_pci_alloc_pipes(struct ath10k *ar) +int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe; @@ -2065,7 +2165,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar) for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; - pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->ce_hdl = &ar_pci->opaque_ctx.ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -2088,7 +2188,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar) return 0; } -static void ath10k_pci_free_pipes(struct ath10k *ar) +void ath10k_pci_free_pipes(struct ath10k *ar) { int i; @@ -2096,7 +2196,7 @@ static void ath10k_pci_free_pipes(struct ath10k *ar) ath10k_ce_free_pipe(ar, i); } -static int ath10k_pci_init_pipes(struct ath10k *ar) +int ath10k_pci_init_pipes(struct ath10k *ar) { int i, ret; @@ -2127,6 +2227,14 @@ static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); } +static bool ath10k_pci_has_device_gone(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + return (val == 0xffffffff); +} + /* this function effectively clears target memory controller assert line */ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) { @@ -2222,16 +2330,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) return 0; } +static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) +{ + ath10k_pci_irq_disable(ar); + return ath10k_pci_qca99x0_chip_reset(ar); +} + static int ath10k_pci_safe_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { - return ath10k_pci_warm_reset(ar); - } else if (QCA_REV_99X0(ar)) { - ath10k_pci_irq_disable(ar); - return ath10k_pci_qca99x0_chip_reset(ar); - } else { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (!ar_pci->pci_soft_reset) return -ENOTSUPP; - } + + return ar_pci->pci_soft_reset(ar); } static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) @@ -2366,16 +2478,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) static int ath10k_pci_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar)) - return ath10k_pci_qca988x_chip_reset(ar); - else if (QCA_REV_6174(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_9377(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_99X0(ar)) - return ath10k_pci_qca99x0_chip_reset(ar); - else + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (WARN_ON(!ar_pci->pci_hard_reset)) return -ENOTSUPP; + + return ar_pci->pci_hard_reset(ar); } static int ath10k_pci_hif_power_up(struct ath10k *ar) @@ -2429,6 +2537,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce; } + napi_enable(&ar->napi); return 0; @@ -2439,7 +2548,7 @@ err_sleep: return ret; } -static void ath10k_pci_hif_power_down(struct ath10k *ar) +void ath10k_pci_hif_power_down(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); @@ -2469,12 +2578,10 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) u32 val; int ret = 0; - if 
(ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up target: %d\n", ret); - return ret; - } + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; } /* Suspend/Resume resets the PCI configuration space, so we have to @@ -2490,6 +2597,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) } #endif +static bool ath10k_pci_validate_cal(void *data, size_t size) +{ + __le16 *cal_words = data; + u16 checksum = 0; + size_t i; + + if (size % 2 != 0) + return false; + + for (i = 0; i < size / 2; i++) + checksum ^= le16_to_cpu(cal_words[i]); + + return checksum == 0xffff; +} + +static void ath10k_pci_enable_eeprom(struct ath10k *ar) +{ + /* Enable SI clock */ + ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); + + /* Configure GPIOs for I2C operation */ + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, + SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, + GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, + SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + + QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, + 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); + + /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ + ath10k_pci_write32(ar, + SI_BASE_ADDRESS + SI_CONFIG_OFFSET, + SM(1, SI_CONFIG_ERR_INT) | + SM(1, SI_CONFIG_BIDIR_OD_DATA) | + SM(1, SI_CONFIG_I2C) | + SM(1, SI_CONFIG_POS_SAMPLE) | + SM(1, SI_CONFIG_INACTIVE_DATA) | + SM(1, SI_CONFIG_INACTIVE_CLK) | + SM(8, SI_CONFIG_DIVIDER)); +} + +static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) +{ + u32 reg; + int wait_limit; + + /* set device select byte and for the read operation */ + reg = QCA9887_EEPROM_SELECT_READ | + SM(addr, QCA9887_EEPROM_ADDR_LO) | + SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); + + /* write transmit data, transfer length, and START bit */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, + SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | + SM(4, SI_CS_TX_CNT)); + + /* wait max 1 sec */ + wait_limit = 100000; + + /* wait for SI_CS_DONE_INT */ + do { + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); + if (MS(reg, SI_CS_DONE_INT)) + break; + + wait_limit--; + udelay(10); + } while (wait_limit > 0); + + if (!MS(reg, SI_CS_DONE_INT)) { + ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", + addr); + return -ETIMEDOUT; + } + + /* clear SI_CS_DONE_INT */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); + + if (MS(reg, SI_CS_DONE_ERR)) { + ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); + return -EIO; + } + + /* extract receive data */ + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); + *out = reg; + + return 0; +} + +static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, + size_t *data_len) +{ + u8 *caldata = NULL; + size_t calsize, i; + int ret; + + if (!QCA_REV_9887(ar)) + return -EOPNOTSUPP; + + calsize = ar->hw_params.cal_data_len; + caldata = kmalloc(calsize, GFP_KERNEL); + if (!caldata) + return -ENOMEM; + + ath10k_pci_enable_eeprom(ar); + + for (i = 0; i < calsize; i++) { + ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); + if (ret) + goto err_free; + } + + if (!ath10k_pci_validate_cal(caldata, calsize)) + goto 
err_free; + + *data = caldata; + *data_len = calsize; + + return 0; + +err_free: + kfree(caldata); + + return -EINVAL; +} + static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, .diag_read = ath10k_pci_hif_diag_read, @@ -2509,67 +2754,9 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, #endif + .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; -static void ath10k_pci_ce_tasklet(unsigned long ptr) -{ - struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; - struct ath10k_pci *ar_pci = pipe->ar_pci; - - ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); -} - -static void ath10k_msi_err_tasklet(unsigned long data) -{ - struct ath10k *ar = (struct ath10k *)data; - - if (!ath10k_pci_has_fw_crashed(ar)) { - ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); - return; - } - - ath10k_pci_irq_disable(ar); - ath10k_pci_fw_crashed_clear(ar); - ath10k_pci_fw_crashed_dump(ar); -} - -/* - * Handler for a per-engine interrupt on a PARTICULAR CE. - * This is used in cases where each CE has a private MSI interrupt. - */ -static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) -{ - struct ath10k *ar = arg; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; - - if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { - ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, - ce_id); - return IRQ_HANDLED; - } - - /* - * NOTE: We are able to derive ce_id from irq because we - * use a one-to-one mapping for CE's 0..5. - * CE's 6 & 7 do not use interrupts at all. - * - * This mapping must be kept in sync with the mapping - * used by firmware. - */ - tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); - return IRQ_HANDLED; -} - -static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) -{ - struct ath10k *ar = arg; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - tasklet_schedule(&ar_pci->msi_fw_err); - return IRQ_HANDLED; -} - /* * Top-level interrupt handler for all PCI interrupts from a Target. 
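The calibration blob fetched from the QCA9887 EEPROM above is accepted only when ath10k_pci_validate_cal() sees the XOR of all 16-bit little-endian words equal 0xffff. A self-contained sketch of that check, assuming nothing about the driver beyond what the hunk shows:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool cal_data_valid(const uint8_t *data, size_t size)
{
	uint16_t checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size; i += 2) {
		uint16_t word = data[i] | (data[i + 1] << 8);	/* little endian */
		checksum ^= word;
	}

	return checksum == 0xffff;
}

int main(void)
{
	/* Two words: 0x1234 and its complement 0xedcb XOR to 0xffff. */
	uint8_t good[] = { 0x34, 0x12, 0xcb, 0xed };
	uint8_t bad[]  = { 0x34, 0x12, 0x00, 0x00 };

	printf("good: %d, bad: %d\n", cal_data_valid(good, sizeof(good)),
	       cal_data_valid(bad, sizeof(bad)));
	return 0;
}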
* When a block of MSI interrupts is allocated, this top-level handler @@ -2581,77 +2768,63 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - if (ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake device up on irq: %d\n", - ret); - return IRQ_NONE; - } - } - - if (ar_pci->num_msi_intrs == 0) { - if (!ath10k_pci_irq_pending(ar)) - return IRQ_NONE; + if (ath10k_pci_has_device_gone(ar)) + return IRQ_NONE; - ath10k_pci_disable_and_clear_legacy_irq(ar); + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); + return IRQ_NONE; } - tasklet_schedule(&ar_pci->intr_tq); + if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) && + !ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_legacy_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + napi_schedule(&ar->napi); return IRQ_HANDLED; } -static void ath10k_pci_tasklet(unsigned long data) +static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) { - struct ath10k *ar = (struct ath10k *)data; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done = 0; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); if (ath10k_pci_has_fw_crashed(ar)) { - ath10k_pci_irq_disable(ar); ath10k_pci_fw_crashed_clear(ar); ath10k_pci_fw_crashed_dump(ar); - return; + napi_complete(ctx); + return done; } ath10k_ce_per_engine_service_any(ar); - /* Re-enable legacy irq that was disabled in the irq handler */ - if (ar_pci->num_msi_intrs == 0) - ath10k_pci_enable_legacy_irq(ar); -} - -static int ath10k_pci_request_irq_msix(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret, i; - - ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, - ath10k_pci_msi_fw_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret) { - ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", - ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); - return ret; - } - - for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { - ret = request_irq(ar_pci->pdev->irq + i, - ath10k_pci_per_engine_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret) { - ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", - ar_pci->pdev->irq + i, ret); - - for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) - free_irq(ar_pci->pdev->irq + i, ar); - - free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); - return ret; + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { + napi_complete(ctx); + /* In case of MSI, it is possible that interrupts are received + * while NAPI poll is inprogress. So pending interrupts that are + * received after processing all copy engine pipes by NAPI poll + * will not be handled again. This is causing failure to + * complete boot sequence in x86 platform. So before enabling + * interrupts safer to check for pending interrupts for + * immediate servicing. 
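The per-CE tasklets removed in this area give way to a single budgeted NAPI poll. The shape of that poll, processing at most 'budget' items and re-checking for work that raced in before re-enabling interrupts, can be modelled in plain C as below; this is a userspace illustration with invented names, not the kernel NAPI API.

#include <stdbool.h>
#include <stdio.h>

static int pending;		/* work items the "device" has queued */
static bool irq_enabled;

static int poll(int budget)
{
	int done = 0;

	while (done < budget && pending > 0) {
		pending--;		/* process one item */
		done++;
	}

	if (done < budget) {
		if (pending > 0)	/* work raced in: poll again later */
			return done;
		irq_enabled = true;	/* truly idle: let interrupts fire */
	}
	return done;
}

int main(void)
{
	pending = 10;
	irq_enabled = false;

	printf("first poll handled %d items\n", poll(8));	/* 8, irq stays off */
	printf("second poll handled %d items\n", poll(8));	/* 2, irq re-enabled */
	printf("irq_enabled = %d\n", irq_enabled);
	return 0;
}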
+ */ + if (CE_INTERRUPT_SUMMARY(ar, ar_opaque)) { + napi_reschedule(ctx); + goto out; } + ath10k_pci_enable_legacy_irq(ar); + ath10k_pci_irq_msi_fw_unmask(ar); } - return 0; +out: + return done; } static int ath10k_pci_request_irq_msi(struct ath10k *ar) @@ -2692,41 +2865,27 @@ static int ath10k_pci_request_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - switch (ar_pci->num_msi_intrs) { - case 0: + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_LEGACY: return ath10k_pci_request_irq_legacy(ar); - case 1: + case ATH10K_PCI_IRQ_MSI: return ath10k_pci_request_irq_msi(ar); default: - return ath10k_pci_request_irq_msix(ar); + return -EINVAL; } } static void ath10k_pci_free_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - /* There's at least one interrupt irregardless whether its legacy INTR - * or MSI or MSI-X */ - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - free_irq(ar_pci->pdev->irq + i, ar); + free_irq(ar_pci->pdev->irq, ar); } -static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) +void ath10k_pci_init_napi(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - - tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); - tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, - (unsigned long)ar); - - for (i = 0; i < CE_COUNT; i++) { - ar_pci->pipe_info[i].ar_pci = ar_pci; - tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, - (unsigned long)&ar_pci->pipe_info[i]); - } + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll, + ATH10K_NAPI_BUDGET); } static int ath10k_pci_init_irq(struct ath10k *ar) @@ -2734,26 +2893,15 @@ static int ath10k_pci_init_irq(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - ath10k_pci_init_irq_tasklets(ar); + ath10k_pci_init_napi(ar); if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) ath10k_info(ar, "limiting irq mode to: %d\n", ath10k_pci_irq_mode); - /* Try MSI-X */ - if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { - ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1; - ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, - ar_pci->num_msi_intrs); - if (ret > 0) - return 0; - - /* fall-through */ - } - /* Try MSI */ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { - ar_pci->num_msi_intrs = 1; + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; ret = pci_enable_msi(ar_pci->pdev); if (ret == 0) return 0; @@ -2769,7 +2917,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar) * This write might get lost if target has NOT written BAR. * For now, fix the race by repeating the write in below * synchronization checking. 
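With the MSI-X range path gone, ath10k_pci_init_irq() above only chooses between MSI and the legacy line interrupt, honouring the module parameter. A rough standalone model of that selection, where enable_msi() is a stand-in rather than a real PCI call:

#include <stdbool.h>
#include <stdio.h>

enum irq_mode { IRQ_AUTO, IRQ_LEGACY, IRQ_MSI };

static bool enable_msi(void)
{
	return false;	/* pretend the platform rejected MSI */
}

static enum irq_mode init_irq(enum irq_mode requested)
{
	if (requested != IRQ_LEGACY && enable_msi())
		return IRQ_MSI;

	/* Fall back to the shared legacy interrupt line. */
	return IRQ_LEGACY;
}

int main(void)
{
	enum irq_mode mode = init_irq(IRQ_AUTO);

	printf("operating irq mode: %s\n",
	       mode == IRQ_MSI ? "msi" : "legacy");
	return 0;
}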
*/ - ar_pci->num_msi_intrs = 0; + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); @@ -2787,8 +2935,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - switch (ar_pci->num_msi_intrs) { - case 0: + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_LEGACY: ath10k_pci_deinit_irq_legacy(ar); break; default: @@ -2799,7 +2947,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) return 0; } -static int ath10k_pci_wait_for_target_init(struct ath10k *ar) +int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long timeout; @@ -2826,7 +2974,7 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (val & FW_IND_INITIALIZED) break; - if (ar_pci->num_msi_intrs == 0) + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) /* Fix potential race by repeating CORE_BASE writes */ ath10k_pci_enable_legacy_irq(ar); @@ -2937,7 +3085,7 @@ static int ath10k_pci_claim(struct ath10k *ar) goto err_master; } - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem); return 0; err_master: @@ -2980,6 +3128,44 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) return false; } +int ath10k_pci_setup_resource(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + spin_lock_init(&ar_pci->opaque_ctx.ce_lock); + spin_lock_init(&ar_pci->ps_lock); + + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, + (unsigned long)ar); + + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) + ath10k_pci_override_ce_config(ar); + + ret = ath10k_pci_alloc_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + return ret; + } + + return 0; +} + +void ath10k_pci_release_resource(struct ath10k *ar) +{ + ath10k_pci_rx_retry_sync(ar); + netif_napi_del(&ar->napi); + ath10k_pci_ce_deinit(ar); + ath10k_pci_free_pipes(ar); +} + +static const struct ath10k_bus_ops ath10k_pci_bus_ops = { + .read32 = ath10k_bus_pci_read32, + .write32 = ath10k_bus_pci_write32, + .get_num_banks = ath10k_pci_get_num_banks, +}; + static int ath10k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { @@ -2989,24 +3175,52 @@ static int ath10k_pci_probe(struct pci_dev *pdev, enum ath10k_hw_rev hw_rev; u32 chip_id; bool pci_ps; + int (*pci_soft_reset)(struct ath10k *ar); + int (*pci_hard_reset)(struct ath10k *ar); switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; + break; + case QCA9887_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9887; + pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; pci_ps = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + break; + case QCA9984_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9984; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = 
ath10k_pci_qca99x0_chip_reset; + break; + case QCA9888_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9888; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; break; case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; + pci_soft_reset = NULL; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; default: WARN_ON(1); @@ -3030,55 +3244,50 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; + ar_pci->opaque_ctx.bus_ops = &ath10k_pci_bus_ops; + ar_pci->pci_soft_reset = pci_soft_reset; + ar_pci->pci_hard_reset = pci_hard_reset; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; ar->id.subsystem_vendor = pdev->subsystem_vendor; ar->id.subsystem_device = pdev->subsystem_device; - spin_lock_init(&ar_pci->ce_lock); spin_lock_init(&ar_pci->ps_lock); - setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, (unsigned long)ar); - ret = ath10k_pci_claim(ar); + ret = ath10k_pci_setup_resource(ar); if (ret) { - ath10k_err(ar, "failed to claim device: %d\n", ret); + ath10k_err(ar, "failed to setup resource: %d\n", ret); goto err_core_destroy; } - if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) - ath10k_pci_override_ce_config(ar); + ret = ath10k_pci_claim(ar); + if (ret) { + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_free_pipes; + } - ret = ath10k_pci_alloc_pipes(ar); + ret = ath10k_pci_force_wake(ar); if (ret) { - ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", - ret); + ath10k_warn(ar, "failed to wake up device : %d\n", ret); goto err_sleep; } ath10k_pci_ce_deinit(ar); ath10k_pci_irq_disable(ar); - if (ar_pci->pci_ps == 0) { - ret = ath10k_pci_force_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake up device : %d\n", ret); - goto err_free_pipes; - } - } - ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); - goto err_free_pipes; + goto err_sleep; } - ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", - ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, + ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", + ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, ath10k_pci_irq_mode, ath10k_pci_reset_mode); ret = ath10k_pci_request_irq(ar); @@ -3115,18 +3324,18 @@ static int ath10k_pci_probe(struct pci_dev *pdev, err_free_irq: ath10k_pci_free_irq(ar); - ath10k_pci_kill_tasklet(ar); + ath10k_pci_rx_retry_sync(ar); err_deinit_irq: ath10k_pci_deinit_irq(ar); -err_free_pipes: - ath10k_pci_free_pipes(ar); - err_sleep: ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); +err_free_pipes: + ath10k_pci_free_pipes(ar); + err_core_destroy: ath10k_core_destroy(ar); @@ -3150,10 +3359,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); - ath10k_pci_kill_tasklet(ar); ath10k_pci_deinit_irq(ar); - ath10k_pci_ce_deinit(ar); - ath10k_pci_free_pipes(ar); + ath10k_pci_release_resource(ar); ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); @@ -3177,6 +3384,10 @@ static int __init ath10k_pci_init(void) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", ret); + ret = ath10k_ahb_init(); + if (ret) + printk(KERN_ERR "ahb init failed: %d\n", ret); + return ret; } module_init(ath10k_pci_init); @@ -3184,16 +3395,16 @@ module_init(ath10k_pci_init); static void __exit 
ath10k_pci_exit(void) { pci_unregister_driver(&ath10k_pci_driver); + ath10k_ahb_exit(); } module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); @@ -3201,6 +3412,11 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); +/* QCA9887 1.0 firmware files */ +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); + /* QCA6174 2.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index f91bf333cb75..22730c700af3 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -22,6 +22,7 @@ #include "hw.h" #include "ce.h" +#include "ahb.h" /* * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite @@ -147,9 +148,6 @@ struct ath10k_pci_pipe { /* protects compl_free and num_send_allowed */ spinlock_t pipe_lock; - - struct ath10k_pci *ar_pci; - struct tasklet_struct intr; }; struct ath10k_pci_supp_chip { @@ -157,32 +155,28 @@ struct ath10k_pci_supp_chip { u32 rev_id; }; +enum ath10k_pci_irq_mode { + ATH10K_PCI_IRQ_AUTO = 0, + ATH10K_PCI_IRQ_LEGACY = 1, + ATH10K_PCI_IRQ_MSI = 2, +}; + struct ath10k_pci { + struct bus_opaque opaque_ctx; struct pci_dev *pdev; struct device *dev; struct ath10k *ar; void __iomem *mem; size_t mem_len; - /* - * Number of MSI interrupts granted, 0 --> using legacy PCI line - * interrupts. - */ - int num_msi_intrs; - - struct tasklet_struct intr_tq; - struct tasklet_struct msi_fw_err; + /* Operating interrupt mode */ + enum ath10k_pci_irq_mode oper_irq_mode; struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; - /* FIXME: document what this really protects */ - spinlock_t ce_lock; - - /* Map CE id to ce_state */ - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; struct timer_list rx_post_retry; /* Due to HW quirks it is recommended to disable ASPM during device @@ -225,6 +219,18 @@ struct ath10k_pci { * on MMIO read/write. */ bool pci_ps; + + /* Chip specific pci reset routine used to do a safe reset */ + int (*pci_soft_reset)(struct ath10k *ar); + + /* Chip specific pci full reset function */ + int (*pci_hard_reset)(struct ath10k *ar); + + /* Keep this entry in the last, memory for struct ath10k_ahb is + * allocated (ahb support enabled case) in the continuation of + * this struct. 
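The trailing ahb[] member declared just below relies on the bus-specific context living in the same allocation as struct ath10k_pci, directly after it. A self-contained sketch of that layout, using the C99 flexible-array form in place of the GNU ahb[0] idiom and invented structure names:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct ahb_priv {		/* stand-in for struct ath10k_ahb */
	void *mem;
	int irq;
};

struct common_ctx {		/* stand-in for struct ath10k_pci */
	int id;
	/* Must stay last: the AHB part is carved out of the same allocation. */
	struct ahb_priv ahb[];
};

int main(void)
{
	struct common_ctx *ctx;

	/* One allocation covers the common part plus the trailing AHB part. */
	ctx = calloc(1, sizeof(*ctx) + sizeof(struct ahb_priv));
	if (!ctx)
		return 1;

	ctx->ahb[0].irq = 42;
	printf("ahb part starts at offset %zu, irq %d\n",
	       offsetof(struct common_ctx, ahb), ctx->ahb[0].irq);

	free(ctx);
	return 0;
}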
+ */ + struct ath10k_ahb ahb[0]; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -253,6 +259,40 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items); +int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes); +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len, + void *resp, u32 *resp_len); +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, + u8 *dl_pipe); +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force); +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe); +void ath10k_pci_hif_power_down(struct ath10k *ar); +int ath10k_pci_alloc_pipes(struct ath10k *ar); +void ath10k_pci_free_pipes(struct ath10k *ar); +void ath10k_pci_free_pipes(struct ath10k *ar); +void ath10k_pci_rx_replenish_retry(unsigned long ptr); +void ath10k_pci_ce_deinit(struct ath10k *ar); +void ath10k_pci_init_napi(struct ath10k *ar); +int ath10k_pci_init_pipes(struct ath10k *ar); +int ath10k_pci_init_config(struct ath10k *ar); +void ath10k_pci_rx_post(struct ath10k *ar); +void ath10k_pci_flush(struct ath10k *ar); +void ath10k_pci_enable_legacy_irq(struct ath10k *ar); +bool ath10k_pci_irq_pending(struct ath10k *ar); +void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar); +void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar); +int ath10k_pci_wait_for_target_init(struct ath10k *ar); +int ath10k_pci_setup_resource(struct ath10k *ar); +void ath10k_pci_release_resource(struct ath10k *ar); + /* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too * frequently. To avoid this put SoC to sleep after a very conservative grace * period. Adjust with great care. diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c new file mode 100644 index 000000000000..dd310d5a7028 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -0,0 +1,902 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <soc/qcom/subsystem_notif.h> +#include <soc/qcom/subsystem_restart.h> +#include <soc/qcom/service-notifier.h> +#include <soc/qcom/msm_qmi_interface.h> +#include <soc/qcom/icnss.h> +#include <soc/qcom/service-locator.h> +#include "core.h" +#include "qmi.h" +#include "snoc.h" +#include "wcn3990_qmi_service_v01.h" + +static DECLARE_WAIT_QUEUE_HEAD(ath10k_fw_ready_wait_event); + +static int +ath10k_snoc_service_notifier_notify(struct notifier_block *nb, + unsigned long notification, void *data) +{ + struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, + service_notifier_nb); + enum pd_subsys_state *state = data; + struct ath10k *ar = ar_snoc->ar; + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + int ret; + + switch (notification) { + case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service down, data: 0x%pK\n", + data); + + if (!state || *state != ROOT_PD_SHUTDOWN) { + atomic_set(&ar_snoc->fw_crashed, 1); + atomic_set(&qmi_cfg->fw_ready, 0); + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD went down %d\n", + atomic_read(&ar_snoc->fw_crashed)); + break; + case SERVREG_NOTIF_SERVICE_STATE_UP_V01: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service up\n"); + ret = wait_event_timeout(ath10k_fw_ready_wait_event, + (atomic_read(&qmi_cfg->fw_ready) && + atomic_read(&qmi_cfg->server_connected)), + msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT)); + if (ret) { + queue_work(ar->workqueue, &ar->restart_work); + } else { + ath10k_err(ar, "restart failed, fw_ready timed out\n"); + return NOTIFY_OK; + } + break; + default: + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Service state Unknown, notification: 0x%lx\n", + notification); + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int ath10k_snoc_get_service_location_notify(struct notifier_block *nb, + unsigned long opcode, + void *data) +{ + struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, + get_service_nb); + struct ath10k *ar = ar_snoc->ar; + struct pd_qmi_client_data *pd = data; + int curr_state; + int ret; + int i; + struct ath10k_service_notifier_context *notifier; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Get service notify opcode: %lu\n", + opcode); + + if (opcode != LOCATOR_UP) + return NOTIFY_DONE; + + if (!pd->total_domains) { + ath10k_err(ar, "Did not find any domains\n"); + ret = -ENOENT; + goto out; + } + + notifier = kcalloc(pd->total_domains, + sizeof(struct ath10k_service_notifier_context), + GFP_KERNEL); + if (!notifier) { + ret = -ENOMEM; + goto out; + } + + ar_snoc->service_notifier_nb.notifier_call = + ath10k_snoc_service_notifier_notify; + + for (i = 0; i < pd->total_domains; i++) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%d: domain_name: %s, instance_id: %d\n", i, + pd->domain_list[i].name, + pd->domain_list[i].instance_id); + + notifier[i].handle = + service_notif_register_notifier( + pd->domain_list[i].name, + pd->domain_list[i].instance_id, + &ar_snoc->service_notifier_nb, + &curr_state); + notifier[i].instance_id = pd->domain_list[i].instance_id; + strlcpy(notifier[i].name, pd->domain_list[i].name, + QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1); + + if (IS_ERR(notifier[i].handle)) { + ath10k_err(ar, "%d: Unable to register notifier for %s(0x%x)\n", + i, pd->domain_list->name, + pd->domain_list->instance_id); + ret = PTR_ERR(notifier[i].handle); + goto free_handle; + } + } + + ar_snoc->service_notifier = notifier; + ar_snoc->total_domains = pd->total_domains; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD restart enabled\n"); + + return NOTIFY_OK; + +free_handle: + for (i = 0; 
i < pd->total_domains; i++) { + if (notifier[i].handle) { + service_notif_unregister_notifier( + notifier[i].handle, + &ar_snoc->service_notifier_nb); + } + } + kfree(notifier); + +out: + ath10k_err(ar, "PD restart not enabled: %d\n", ret); + + return NOTIFY_OK; +} + +int ath10k_snoc_pd_restart_enable(struct ath10k *ar) +{ + int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Get service location\n"); + + ar_snoc->get_service_nb.notifier_call = + ath10k_snoc_get_service_location_notify; + ret = get_service_location(ATH10K_SERVICE_LOCATION_CLIENT_NAME, + ATH10K_WLAN_SERVICE_NAME, + &ar_snoc->get_service_nb); + if (ret) { + ath10k_err(ar, "Get service location failed: %d\n", ret); + goto out; + } + + return 0; +out: + ath10k_err(ar, "PD restart not enabled: %d\n", ret); + return ret; +} + +int ath10k_snoc_pdr_unregister_notifier(struct ath10k *ar) +{ + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ar_snoc->total_domains; i++) { + if (ar_snoc->service_notifier[i].handle) + service_notif_unregister_notifier( + ar_snoc->service_notifier[i].handle, + &ar_snoc->service_notifier_nb); + } + + kfree(ar_snoc->service_notifier); + + ar_snoc->service_notifier = NULL; + + return 0; +} + +static int ath10k_snoc_modem_notifier_nb(struct notifier_block *nb, + unsigned long code, + void *data) +{ + struct notif_data *notif = data; + struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, + modem_ssr_nb); + struct ath10k *ar = ar_snoc->ar; + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + if (code != SUBSYS_BEFORE_SHUTDOWN) + return NOTIFY_OK; + + if (notif->crashed) { + atomic_set(&ar_snoc->fw_crashed, 1); + atomic_set(&qmi_cfg->fw_ready, 0); + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Modem went down %d\n", + atomic_read(&ar_snoc->fw_crashed)); + + return NOTIFY_OK; +} + +int ath10k_snoc_modem_ssr_register_notifier(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ar_snoc->modem_ssr_nb.notifier_call = ath10k_snoc_modem_notifier_nb; + + ar_snoc->modem_notify_handler = + subsys_notif_register_notifier("modem", &ar_snoc->modem_ssr_nb); + + if (IS_ERR(ar_snoc->modem_notify_handler)) { + ret = PTR_ERR(ar_snoc->modem_notify_handler); + ath10k_err(ar, "Modem register notifier failed: %d\n", ret); + } + + return ret; +} + +int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + subsys_notif_unregister_notifier(ar_snoc->modem_notify_handler, + &ar_snoc->modem_ssr_nb); + ar_snoc->modem_notify_handler = NULL; + + return 0; +} + +static char * +ath10k_snoc_driver_event_to_str(enum ath10k_snoc_driver_event_type type) +{ + switch (type) { + case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE: + return "SERVER_ARRIVE"; + case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT: + return "SERVER_EXIT"; + case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND: + return "FW_READY"; + case ATH10K_SNOC_DRIVER_EVENT_MAX: + return "EVENT_MAX"; + } + + return "UNKNOWN"; +}; + +static int +ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type, + u32 flags, void *data) +{ + int ret = 0; + int i = 0; + unsigned long irq_flags; + struct ath10k *ar = (struct ath10k *)data; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Posting event: %s type: %d\n", + ath10k_snoc_driver_event_to_str(type), type); + + if (type >= 
ATH10K_SNOC_DRIVER_EVENT_MAX) { + ath10k_err(ar, "Invalid Event type: %d, can't post", type); + return -EINVAL; + } + + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); + + for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++) { + if (atomic_read(&qmi_cfg->qmi_ev_list[i].event_handled)) { + qmi_cfg->qmi_ev_list[i].type = type; + qmi_cfg->qmi_ev_list[i].data = data; + init_completion(&qmi_cfg->qmi_ev_list[i].complete); + qmi_cfg->qmi_ev_list[i].ret = + ATH10K_SNOC_EVENT_PENDING; + qmi_cfg->qmi_ev_list[i].sync = + !!(flags & ATH10K_SNOC_EVENT_SYNC); + atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 0); + list_add_tail(&qmi_cfg->qmi_ev_list[i].list, + &qmi_cfg->event_list); + break; + } + } + + if (i >= ATH10K_SNOC_DRIVER_EVENT_MAX) + i = ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE; + + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + + queue_work(qmi_cfg->event_wq, &qmi_cfg->event_work); + + if (!(flags & ATH10K_SNOC_EVENT_SYNC)) + goto out; + + if (flags & ATH10K_SNOC_EVENT_UNINTERRUPTIBLE) + wait_for_completion(&qmi_cfg->qmi_ev_list[i].complete); + else + ret = wait_for_completion_interruptible( + &qmi_cfg->qmi_ev_list[i].complete); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Completed event: %s(%d)\n", + ath10k_snoc_driver_event_to_str(type), type); + + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); + if (ret == -ERESTARTSYS && + qmi_cfg->qmi_ev_list[i].ret == ATH10K_SNOC_EVENT_PENDING) { + qmi_cfg->qmi_ev_list[i].sync = false; + atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + ret = -EINTR; + goto out; + } + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + +out: + return ret; +} + +static int +ath10k_snoc_wlan_mode_send_sync_msg(struct ath10k *ar, + enum wlfw_driver_mode_enum_v01 mode) +{ + int ret; + struct wlfw_wlan_mode_req_msg_v01 req; + struct wlfw_wlan_mode_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + if (!qmi_cfg || !qmi_cfg->wlfw_clnt) + return -ENODEV; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Sending Mode request, mode: %d\n", mode); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.mode = mode; + req.hw_debug_valid = 1; + req.hw_debug = 0; + + req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01; + req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01; + resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei; + + ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt, + &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + WLFW_TIMEOUT_MS); + if (ret < 0) { + ath10k_err(ar, "Send mode req failed, mode: %d ret: %d\n", + mode, ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "QMI mode request rejected:"); + ath10k_err(ar, "mode:%d result:%d error:%d\n", + mode, resp.resp.result, resp.resp.error); + ret = resp.resp.result; + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "wlan Mode request send success, mode: %d\n", mode); + return 0; +} + +static int +ath10k_snoc_wlan_cfg_send_sync_msg(struct ath10k *ar, + struct wlfw_wlan_cfg_req_msg_v01 *data) +{ + int ret; + struct wlfw_wlan_cfg_req_msg_v01 req; + struct wlfw_wlan_cfg_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + struct ath10k_snoc *ar_snoc = 
ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + if (!qmi_cfg || !qmi_cfg->wlfw_clnt) + return -ENODEV; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Sending config request\n"); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + memcpy(&req, data, sizeof(req)); + + req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01; + req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01; + resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei; + + ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt, + &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + WLFW_TIMEOUT_MS); + if (ret < 0) { + ath10k_err(ar, "Send config req failed %d\n", ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "QMI config request rejected:"); + ath10k_err(ar, "result:%d error:%d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "wlan config request success..\n"); + return 0; +} + +int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar, + struct ath10k_wlan_enable_cfg *config, + enum ath10k_driver_mode mode, + const char *host_version) +{ + struct wlfw_wlan_cfg_req_msg_v01 req; + u32 i; + int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + unsigned long time_left; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Mode: %d, config: %p, host_version: %s\n", + mode, config, host_version); + + memset(&req, 0, sizeof(req)); + if (!config || !host_version) { + ath10k_err(ar, "WLAN_EN Config Invalid:%p: host_version:%p\n", + config, host_version); + ret = -EINVAL; + return ret; + } + + time_left = wait_event_timeout( + ath10k_fw_ready_wait_event, + (atomic_read(&qmi_cfg->fw_ready) && + atomic_read(&qmi_cfg->server_connected)), + msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT)); + if (time_left == 0) { + ath10k_err(ar, "Wait for FW ready and server connect timed out\n"); + return -ETIMEDOUT; + } + + req.host_version_valid = 1; + strlcpy(req.host_version, host_version, + QMI_WLFW_MAX_STR_LEN_V01 + 1); + + req.tgt_cfg_valid = 1; + if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01) + req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; + else + req.tgt_cfg_len = config->num_ce_tgt_cfg; + for (i = 0; i < req.tgt_cfg_len; i++) { + req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num; + req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir; + req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries; + req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max; + req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags; + } + + req.svc_cfg_valid = 1; + if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01) + req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01; + else + req.svc_cfg_len = config->num_ce_svc_pipe_cfg; + for (i = 0; i < req.svc_cfg_len; i++) { + req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id; + req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir; + req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num; + } + + req.shadow_reg_valid = 1; + if (config->num_shadow_reg_cfg > + QMI_WLFW_MAX_NUM_SHADOW_REG_V01) + req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01; + else + req.shadow_reg_len = config->num_shadow_reg_cfg; + + memcpy(req.shadow_reg, config->shadow_reg_cfg, + sizeof(struct wlfw_shadow_reg_cfg_s_v01) * 
req.shadow_reg_len); + + ret = ath10k_snoc_wlan_cfg_send_sync_msg(ar, &req); + if (ret) { + ath10k_err(ar, "WLAN config send failed\n"); + return ret; + } + + ret = ath10k_snoc_wlan_mode_send_sync_msg(ar, mode); + if (ret) { + ath10k_err(ar, "WLAN mode send failed\n"); + return ret; + } + + return 0; +} + +int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar) +{ + return ath10k_snoc_wlan_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01); +} + +static int ath10k_snoc_ind_register_send_sync_msg(struct ath10k *ar) +{ + int ret; + struct wlfw_ind_register_req_msg_v01 req; + struct wlfw_ind_register_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Sending indication register message,\n"); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.client_id_valid = 1; + req.client_id = WLFW_CLIENT_ID; + req.fw_ready_enable_valid = 1; + req.fw_ready_enable = 1; + req.msa_ready_enable_valid = 1; + req.msa_ready_enable = 1; + + req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01; + req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01; + resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei; + + ret = qmi_send_req_wait(qmi_cfg->wlfw_clnt, + &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + WLFW_TIMEOUT_MS); + if (ret < 0) { + ath10k_err(ar, "Send indication register req failed %d\n", ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "QMI indication register request rejected:"); + ath10k_err(ar, "resut:%d error:%d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + return ret; + } + + return 0; +} + +static void ath10k_snoc_qmi_wlfw_clnt_notify_work(struct work_struct *work) +{ + int ret; + struct ath10k_snoc_qmi_config *qmi_cfg = + container_of(work, struct ath10k_snoc_qmi_config, + qmi_recv_msg_work); + struct ath10k_snoc *ar_snoc = + container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Receiving Event in work queue context\n"); + + do { + } while ((ret = qmi_recv_msg(qmi_cfg->wlfw_clnt)) == 0); + + if (ret != -ENOMSG) + ath10k_err(ar, "Error receiving message: %d\n", ret); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Receiving Event completed\n"); +} + +static void +ath10k_snoc_qmi_wlfw_clnt_notify(struct qmi_handle *handle, + enum qmi_event_type event, + void *notify_priv) +{ + struct ath10k_snoc_qmi_config *qmi_cfg = + (struct ath10k_snoc_qmi_config *)notify_priv; + struct ath10k_snoc *ar_snoc = + container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI client notify: %d\n", event); + + if (!qmi_cfg || !qmi_cfg->wlfw_clnt) + return; + + switch (event) { + case QMI_RECV_MSG: + schedule_work(&qmi_cfg->qmi_recv_msg_work); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Unknown Event: %d\n", event); + break; + } +} + +static void +ath10k_snoc_qmi_wlfw_clnt_ind(struct qmi_handle *handle, + unsigned int msg_id, void *msg, + unsigned int msg_len, void *ind_cb_priv) +{ + struct ath10k_snoc_qmi_config *qmi_cfg = + (struct ath10k_snoc_qmi_config *)ind_cb_priv; + struct ath10k_snoc *ar_snoc = + container_of(qmi_cfg, struct 
ath10k_snoc, qmi_cfg); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Received Ind 0x%x, msg_len: %d\n", msg_id, msg_len); + switch (msg_id) { + case QMI_WLFW_FW_READY_IND_V01: + ath10k_snoc_driver_event_post( + ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND, 0, ar); + break; + case QMI_WLFW_MSA_READY_IND_V01: + qmi_cfg->msa_ready = true; + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Received MSA Ready, ind = 0x%x\n", msg_id); + break; + default: + ath10k_err(ar, "Invalid msg_id 0x%x\n", msg_id); + break; + } +} + +static int ath10k_snoc_driver_event_server_arrive(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + if (!qmi_cfg) + return -ENODEV; + + qmi_cfg->wlfw_clnt = qmi_handle_create( + ath10k_snoc_qmi_wlfw_clnt_notify, qmi_cfg); + if (!qmi_cfg->wlfw_clnt) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "QMI client handle create failed\n"); + return -ENOMEM; + } + + ret = qmi_connect_to_service(qmi_cfg->wlfw_clnt, + WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01); + if (ret < 0) { + ath10k_err(ar, "QMI WLAN Service not found : %d\n", ret); + goto err_qmi_config; + } + + ret = qmi_register_ind_cb(qmi_cfg->wlfw_clnt, + ath10k_snoc_qmi_wlfw_clnt_ind, qmi_cfg); + if (ret < 0) { + ath10k_err(ar, "Failed to register indication callback: %d\n", + ret); + goto err_qmi_config; + } + + ret = ath10k_snoc_ind_register_send_sync_msg(ar); + if (ret) { + ath10k_err(ar, "Failed to config qmi ind register\n"); + goto err_qmi_config; + } + + atomic_set(&qmi_cfg->server_connected, 1); + wake_up_all(&ath10k_fw_ready_wait_event); + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "QMI Server Arrive Configuration Success\n"); + return 0; + +err_qmi_config: + qmi_handle_destroy(qmi_cfg->wlfw_clnt); + qmi_cfg->wlfw_clnt = NULL; + return ret; +} + +static int ath10k_snoc_driver_event_server_exit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI Server Exit event received\n"); + atomic_set(&qmi_cfg->fw_ready, 0); + qmi_cfg->msa_ready = false; + atomic_set(&qmi_cfg->server_connected, 0); + qmi_handle_destroy(qmi_cfg->wlfw_clnt); + return 0; +} + +static int ath10k_snoc_driver_event_fw_ready_ind(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "FW Ready event received.\n"); + atomic_set(&qmi_cfg->fw_ready, 1); + wake_up_all(&ath10k_fw_ready_wait_event); + + return 0; +} + +static void ath10k_snoc_driver_event_work(struct work_struct *work) +{ + int ret; + unsigned long irq_flags; + struct ath10k_snoc_qmi_driver_event *event; + struct ath10k_snoc_qmi_config *qmi_cfg = + container_of(work, struct ath10k_snoc_qmi_config, event_work); + struct ath10k_snoc *ar_snoc = + container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg); + struct ath10k *ar = ar_snoc->ar; + + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); + + while (!list_empty(&qmi_cfg->event_list)) { + event = list_first_entry(&qmi_cfg->event_list, + struct ath10k_snoc_qmi_driver_event, + list); + list_del(&event->list); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Processing event: %s%s(%d)\n", + ath10k_snoc_driver_event_to_str(event->type), + event->sync ? 
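The QMI notifier callbacks above never do the firmware work directly: they post an event into a list under a spinlock, and a workqueue drains it in ath10k_snoc_driver_event_work(). A much simplified, single-threaded model of that post/drain split, with the locking, completions and workqueue left out and all names invented:

#include <stdio.h>

enum drv_event { EV_SERVER_ARRIVE, EV_SERVER_EXIT, EV_FW_READY, EV_MAX };

#define QUEUE_LEN 8

static enum drv_event queue[QUEUE_LEN];
static int head, tail;		/* both only grow; the difference is the fill level */

static int post_event(enum drv_event ev)
{
	if (ev >= EV_MAX || tail - head >= QUEUE_LEN)
		return -1;		/* invalid event or queue full */
	queue[tail++ % QUEUE_LEN] = ev;
	return 0;
}

static void drain_events(void)	/* plays the role of the work function */
{
	static const char * const names[] = {
		"SERVER_ARRIVE", "SERVER_EXIT", "FW_READY"
	};

	while (head < tail)
		printf("processing event %s\n", names[queue[head++ % QUEUE_LEN]]);
}

int main(void)
{
	post_event(EV_SERVER_ARRIVE);	/* e.g. from the QMI service notifier */
	post_event(EV_FW_READY);	/* e.g. from the FW ready indication */
	drain_events();			/* runs later, in workqueue context */
	return 0;
}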
"-sync" : "", event->type); + + switch (event->type) { + case ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE: + ret = ath10k_snoc_driver_event_server_arrive(ar); + break; + case ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT: + ret = ath10k_snoc_driver_event_server_exit(ar); + break; + case ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND: + ret = ath10k_snoc_driver_event_fw_ready_ind(ar); + break; + default: + ath10k_err(ar, "Invalid Event type: %d", event->type); + kfree(event); + continue; + } + + atomic_set(&event->event_handled, 1); + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Event Processed: %s%s(%d), ret: %d\n", + ath10k_snoc_driver_event_to_str(event->type), + event->sync ? "-sync" : "", event->type, ret); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); + if (event->sync) { + event->ret = ret; + complete(&event->complete); + continue; + } + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); + } + + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); +} + +static int +ath10k_snoc_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this, + unsigned long code, + void *_cmd) +{ + int ret = 0; + struct ath10k_snoc_qmi_config *qmi_cfg = + container_of(this, struct ath10k_snoc_qmi_config, wlfw_clnt_nb); + struct ath10k_snoc *ar_snoc = + container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Event Notify: code: %ld", code); + + switch (code) { + case QMI_SERVER_ARRIVE: + ret = ath10k_snoc_driver_event_post( + ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE, 0, ar); + break; + case QMI_SERVER_EXIT: + ret = ath10k_snoc_driver_event_post( + ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT, 0, ar); + break; + default: + ath10k_err(ar, "Invalid code: %ld", code); + break; + } + + return ret; +} + +int ath10k_snoc_start_qmi_service(struct ath10k *ar) +{ + int ret; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + qmi_cfg->event_wq = alloc_workqueue("ath10k_snoc_driver_event", + WQ_UNBOUND, 1); + if (!qmi_cfg->event_wq) { + ath10k_err(ar, "Workqueue creation failed\n"); + return -EFAULT; + } + + spin_lock_init(&qmi_cfg->event_lock); + atomic_set(&qmi_cfg->fw_ready, 0); + atomic_set(&qmi_cfg->server_connected, 0); + + INIT_WORK(&qmi_cfg->event_work, ath10k_snoc_driver_event_work); + INIT_WORK(&qmi_cfg->qmi_recv_msg_work, + ath10k_snoc_qmi_wlfw_clnt_notify_work); + INIT_LIST_HEAD(&qmi_cfg->event_list); + + for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++) + atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1); + + qmi_cfg->wlfw_clnt_nb.notifier_call = + ath10k_snoc_qmi_wlfw_clnt_svc_event_notify; + ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01, + &qmi_cfg->wlfw_clnt_nb); + if (ret < 0) { + ath10k_err(ar, "Notifier register failed: %d\n", ret); + ret = -EFAULT; + goto out_destroy_wq; + } + + if (!icnss_is_fw_ready()) { + ath10k_err(ar, "failed to get fw ready indication\n"); + ret = -EFAULT; + goto err_fw_ready; + } + + atomic_set(&qmi_cfg->fw_ready, 1); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI service started successfully\n"); + return 0; + +err_fw_ready: + qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01, + &qmi_cfg->wlfw_clnt_nb); +out_destroy_wq: + destroy_workqueue(qmi_cfg->event_wq); + return ret; +} + +void ath10k_snoc_stop_qmi_service(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + 
struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Removing QMI service..\n"); + + wake_up_all(&ath10k_fw_ready_wait_event); + cancel_work_sync(&qmi_cfg->event_work); + cancel_work_sync(&qmi_cfg->qmi_recv_msg_work); + qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01, + &qmi_cfg->wlfw_clnt_nb); + destroy_workqueue(qmi_cfg->event_wq); + qmi_handle_destroy(qmi_cfg->wlfw_clnt); + qmi_cfg = NULL; +} diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h new file mode 100644 index 000000000000..29ad5acdf414 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -0,0 +1,156 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _QMI_H_ +#define _QMI_H_ + +#define ATH10K_SNOC_EVENT_PENDING 2989 +#define ATH10K_SNOC_EVENT_SYNC BIT(0) +#define ATH10K_SNOC_EVENT_UNINTERRUPTIBLE BIT(1) +#define ATH10K_SNOC_WLAN_FW_READY_TIMEOUT 8000 + +#define WLFW_SERVICE_INS_ID_V01 0 +#define WLFW_CLIENT_ID 0x41544851 +#define WLFW_TIMEOUT_MS 20000 + +enum ath10k_snoc_driver_event_type { + ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE, + ATH10K_SNOC_DRIVER_EVENT_SERVER_EXIT, + ATH10K_SNOC_DRIVER_EVENT_FW_READY_IND, + ATH10K_SNOC_DRIVER_EVENT_MAX, +}; + +/* enum ath10k_driver_mode: ath10k driver mode + * @ATH10K_MISSION: mission mode + * @ATH10K_FTM: ftm mode + * @ATH10K_EPPING: epping mode + * @ATH10K_OFF: off mode + */ +enum ath10k_driver_mode { + ATH10K_MISSION, + ATH10K_FTM, + ATH10K_EPPING, + ATH10K_OFF +}; + +/* struct ath10k_ce_tgt_pipe_cfg: target pipe configuration + * @pipe_num: pipe number + * @pipe_dir: pipe direction + * @nentries: entries in pipe + * @nbytes_max: pipe max size + * @flags: pipe flags + * @reserved: reserved + */ +struct ath10k_ce_tgt_pipe_cfg { + u32 pipe_num; + u32 pipe_dir; + u32 nentries; + u32 nbytes_max; + u32 flags; + u32 reserved; +}; + +/* struct ath10k_ce_svc_pipe_cfg: service pipe configuration + * @service_id: target version + * @pipe_dir: pipe direction + * @pipe_num: pipe number + */ +struct ath10k_ce_svc_pipe_cfg { + u32 service_id; + u32 pipe_dir; + u32 pipe_num; +}; + +/* struct ath10k_shadow_reg_cfg: shadow register configuration + * @ce_id: copy engine id + * @reg_offset: offset to copy engine + */ +struct ath10k_shadow_reg_cfg { + u16 ce_id; + u16 reg_offset; +}; + +/* struct ath10k_wlan_enable_cfg: wlan enable configuration + * @num_ce_tgt_cfg: no of ce target configuration + * @ce_tgt_cfg: target ce configuration + * @num_ce_svc_pipe_cfg: no of ce service configuration + * @ce_svc_cfg: ce service configuration + * @num_shadow_reg_cfg: no of shadow registers + * @shadow_reg_cfg: shadow register configuration + */ +struct ath10k_wlan_enable_cfg { + u32 num_ce_tgt_cfg; + struct ath10k_ce_tgt_pipe_cfg *ce_tgt_cfg; + u32 num_ce_svc_pipe_cfg; + struct ath10k_ce_svc_pipe_cfg *ce_svc_cfg; + u32 num_shadow_reg_cfg; + struct ath10k_shadow_reg_cfg *shadow_reg_cfg; +}; + +/* struct ath10k_snoc_qmi_driver_event: qmi driver event + * event_handled: event 
handled by event work handler + * sync: event synced + * ret: event received return value + * list: list to queue qmi event for process + * type: driver event type + * complete: completion for event handle complete + * data: encapsulate driver data for event handler callback + */ +struct ath10k_snoc_qmi_driver_event { + atomic_t event_handled; + bool sync; + int ret; + struct list_head list; + enum ath10k_snoc_driver_event_type type; + struct completion complete; + void *data; +}; + +/* struct ath10k_snoc_qmi_config: qmi service configuration + * fw_ready: wlan firmware ready for wlan operation + * msa_ready: wlan firmware msa memory ready for board data download + * server_connected: qmi server connected + * event_work: QMI event work + * event_list: QMI event list + * qmi_recv_msg_work: QMI message receive work + * event_wq: QMI event work queue + * wlfw_clnt_nb: WLAN firmware indication callback + * wlfw_clnt: QMI notifier handler for wlan firmware + * qmi_ev_list: QMI event list + * event_lock: spinlock for qmi event work queue + */ +struct ath10k_snoc_qmi_config { + atomic_t fw_ready; + bool msa_ready; + atomic_t server_connected; + struct work_struct event_work; + struct list_head event_list; + struct work_struct qmi_recv_msg_work; + struct workqueue_struct *event_wq; + struct notifier_block wlfw_clnt_nb; + struct qmi_handle *wlfw_clnt; + struct ath10k_snoc_qmi_driver_event + qmi_ev_list[ATH10K_SNOC_DRIVER_EVENT_MAX]; + spinlock_t event_lock; /* spinlock for qmi event work queue */ +}; + +int ath10k_snoc_pd_restart_enable(struct ath10k *ar); +int ath10k_snoc_modem_ssr_register_notifier(struct ath10k *ar); +int ath10k_snoc_modem_ssr_unregister_notifier(struct ath10k *ar); +int ath10k_snoc_pdr_unregister_notifier(struct ath10k *ar); +int ath10k_snoc_start_qmi_service(struct ath10k *ar); +void ath10k_snoc_stop_qmi_service(struct ath10k *ar); +int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar, + struct ath10k_wlan_enable_cfg *config, + enum ath10k_driver_mode mode, + const char *host_version); +int ath10k_snoc_qmi_wlan_disable(struct ath10k *ar); +#endif diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index ca8d16884af1..5499bd2712e4 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -205,12 +205,25 @@ struct rx_attention { * descriptor. 
*/ +#ifndef CONFIG_ATH10K_SNOC struct rx_frag_info { u8 ring0_more_count; u8 ring1_more_count; u8 ring2_more_count; u8 ring3_more_count; } __packed; +#else +struct rx_frag_info { + u8 ring0_more_count; + u8 ring1_more_count; + u8 ring2_more_count; + u8 ring3_more_count; + u8 ring4_more_count; + u8 ring5_more_count; + u8 ring6_more_count; + u8 ring7_more_count; +} __packed; +#endif /* * ring0_more_count @@ -239,6 +252,9 @@ enum htt_rx_mpdu_encrypt_type { HTT_RX_MPDU_ENCRYPT_WAPI = 5, HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, HTT_RX_MPDU_ENCRYPT_NONE = 7, + HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8, + HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9, + HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10, }; #define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff @@ -465,11 +481,23 @@ struct rx_msdu_start_qca99x0 { __le32 info2; /* %RX_MSDU_START_INFO2_ */ } __packed; +#ifdef CONFIG_ATH10K_SNOC +struct rx_msdu_start_wcn3990 { + __le32 info3; +} __packed; +#else +struct rx_msdu_start_wcn3990 { +} __packed; +#endif + struct rx_msdu_start { struct rx_msdu_start_common common; union { struct rx_msdu_start_qca99x0 qca99x0; } __packed; + union { + struct rx_msdu_start_wcn3990 wcn3990; + } __packed; } __packed; /* @@ -589,11 +617,21 @@ struct rx_msdu_end_qca99x0 { __le32 info2; } __packed; +struct rx_msdu_end_wcn3990 { + __le32 rule_indication_0; + __le32 rule_indication_1; + __le32 rule_indication_2; + __le32 rule_indication_3; +} __packed; + struct rx_msdu_end { struct rx_msdu_end_common common; union { struct rx_msdu_end_qca99x0 qca99x0; } __packed; +#ifdef CONFIG_ATH10K_SNOC + struct rx_msdu_end_wcn3990 wcn3990; +#endif } __packed; /* @@ -656,26 +694,6 @@ struct rx_msdu_end { * Reserved: HW should fill with zero. FW should ignore. */ -#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 -#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 - -#define RX_PPDU_START_SIG_RATE_OFDM_48 0 -#define RX_PPDU_START_SIG_RATE_OFDM_24 1 -#define RX_PPDU_START_SIG_RATE_OFDM_12 2 -#define RX_PPDU_START_SIG_RATE_OFDM_6 3 -#define RX_PPDU_START_SIG_RATE_OFDM_54 4 -#define RX_PPDU_START_SIG_RATE_OFDM_36 5 -#define RX_PPDU_START_SIG_RATE_OFDM_18 6 -#define RX_PPDU_START_SIG_RATE_OFDM_9 7 - -#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 -#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 -#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 -#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 -#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 -#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 -#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 - #define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 #define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 @@ -711,25 +729,6 @@ struct rx_msdu_end { /* No idea what this flag means. It seems to be always set in rate. 
*/ #define RX_PPDU_START_RATE_FLAG BIT(3) -enum rx_ppdu_start_rate { - RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M, - RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M, - RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M, - RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M, - RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M, - RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M, - RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M, - RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M, - - RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M, - RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M, - RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M, - RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M, - RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M, - RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M, - RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M, -}; - struct rx_ppdu_start { struct { u8 pri20_mhz; @@ -992,9 +991,51 @@ struct rx_ppdu_end_qca6174 { struct rx_pkt_end { __le32 info0; /* %RX_PKT_END_INFO0_ */ +#ifndef CONFIG_ATH10K_SNOC __le32 phy_timestamp_1; __le32 phy_timestamp_2; - __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ +#else + __le64 phy_timestamp_1; + __le64 phy_timestamp_2; +#endif +} __packed; + +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30 +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14) +#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29) + +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2 +#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030 +#define RX_LOCATION_INFO1_PKT_BW_LSB 4 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27 +#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0) +#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1) +#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7) +#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29) +#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30) +#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31) + +struct rx_location_info { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ +#ifdef CONFIG_ATH10K_SNOC + __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */ +#endif } __packed; enum rx_phy_ppdu_end_info0 { @@ -1067,6 +1108,7 @@ struct rx_phy_ppdu_end { struct rx_ppdu_end_qca99x0 { struct 
rx_pkt_end rx_pkt_end; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ struct rx_phy_ppdu_end rx_phy_ppdu_end; __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ @@ -1074,12 +1116,44 @@ struct rx_ppdu_end_qca99x0 { __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; +struct rx_ppdu_end_qca9984 { + struct rx_pkt_end rx_pkt_end; + struct rx_location_info rx_location_info; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +struct rx_timing_offset { + __le32 timing_offset; +}; + +struct rx_ppdu_end_wcn3990 { + __le32 reserved_info_0; + __le32 reserved_info_1; + __le32 rx_antenna_info; + __le32 rx_coex_info; + __le32 rx_mpdu_cnt_info; + __le32 rx_bb_length; + __le64 phy_timestamp_tx; + struct rx_pkt_end rx_pkt_end; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + struct rx_timing_offset rx_timing_offset; + struct rx_location_info rx_location_info; +} __packed; + struct rx_ppdu_end { struct rx_ppdu_end_common common; union { struct rx_ppdu_end_qca988x qca988x; struct rx_ppdu_end_qca6174 qca6174; struct rx_ppdu_end_qca99x0 qca99x0; + struct rx_ppdu_end_qca9984 qca9984; +#ifdef CONFIG_ATH10K_SNOC + struct rx_ppdu_end_wcn3990 wcn3990; +#endif } __packed; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c new file mode 100644 index 000000000000..638088ec45c9 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -0,0 +1,1821 @@ +/* Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/bitops.h> +#include <linux/suspend.h> +#include "core.h" +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "ce.h" +#include "snoc.h" +#include "qmi.h" +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> + +#define WCN3990_MAX_IRQ 12 +#define WCN3990_WAKE_IRQ_CE 2 + +const char *ce_name[WCN3990_MAX_IRQ] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; + +#define ATH10K_SNOC_TARGET_WAIT 3000 +#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3 +#define SNOC_HIF_POWER_DOWN_DELAY 30 +#define ATH10K_MAX_PROP_SIZE 32 + +static void ath10k_snoc_buffer_cleanup(struct ath10k *ar); +static int ath10k_snoc_request_irq(struct ath10k *ar); +static void ath10k_snoc_free_irq(struct ath10k *ar); +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); + +static struct ce_attr host_ce_config_wlan[] = { + /* CE0: host->target HTC control streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 64, + .recv_cb = ath10k_snoc_htc_rx_cb, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htt_tx_cb, + }, + + /* CE5: target->host HTT (ipa_uc->target ) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + .recv_cb = ath10k_snoc_htt_rx_cb, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = 2048, + .dest_nentries = 2, + }, + + /* CE8: Target to uMC */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE10: target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE11: target -> host PKTLOG */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, +}; + +static struct ce_pipe_config target_ce_config_wlan[] = { + /* CE0: 
host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(64), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host HTT (HIF->HTT) */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(1024), + .nbytes_max = __cpu_to_le32(64), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(4), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 Target to uMC */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(0), + .reserved = __cpu_to_le32(0), + }, + + /* CE9 target->host HTT */ + { + .pipenum = __cpu_to_le32(9), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE10 target->host HTT */ + { + .pipenum = __cpu_to_le32(10), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE11 target autonomous qcache memcpy */ + { + .pipenum = __cpu_to_le32(11), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, +}; + +static struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + 
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_IPA_MSG), + __cpu_to_le32(PIPEDIR_OUT),/* IPA service */ + __cpu_to_le32(5), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(9), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(10), + }, + { /* in = DL = target -> host pktlog */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(11), + }, + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +#define WCN3990_SRC_WR_INDEX_OFFSET 0x3C +#define WCN3990_DST_WR_INDEX_OFFSET 0x40 + +static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { + { 0, WCN3990_SRC_WR_INDEX_OFFSET}, + { 3, WCN3990_SRC_WR_INDEX_OFFSET}, + { 4, WCN3990_SRC_WR_INDEX_OFFSET}, + { 5, WCN3990_SRC_WR_INDEX_OFFSET}, + { 7, WCN3990_SRC_WR_INDEX_OFFSET}, + { 1, WCN3990_DST_WR_INDEX_OFFSET}, + { 2, WCN3990_DST_WR_INDEX_OFFSET}, + { 7, WCN3990_DST_WR_INDEX_OFFSET}, + { 8, WCN3990_DST_WR_INDEX_OFFSET}, + { 9, WCN3990_DST_WR_INDEX_OFFSET}, + { 10, WCN3990_DST_WR_INDEX_OFFSET}, + { 11, WCN3990_DST_WR_INDEX_OFFSET}, +}; + +static bool ath10k_snoc_has_fw_crashed(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + return atomic_read(&ar_snoc->fw_crashed); +} + +static void ath10k_snoc_fw_crashed_clear(struct 
ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + atomic_set(&ar_snoc->fw_crashed, 0); +} + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + if (!ar_snoc) + return; + + iowrite32(value, ar_snoc->mem + offset); +} + +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + u32 val; + + if (!ar_snoc) + return -EINVAL; + + val = ioread32(ar_snoc->mem + offset); + + return val; +} + +static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map snoc rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_RXCB(skb)->paddr = paddr; + + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); + ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); + if (ret) { + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); + while (num--) { + ret = __ath10k_snoc_rx_post_buf(pipe); + if (ret) { + if (ret == -ENOSPC) + break; + ath10k_warn(ar, "failed to post rx buf: %d\n", ret); + mod_timer(&ar_snoc->rx_post_retry, jiffies + + ATH10K_SNOC_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_snoc_rx_post(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); +} + +static void ath10k_snoc_rx_replenish_retry(unsigned long ptr) +{ + struct ath10k *ar = (void *)ptr; + ath10k_snoc_rx_post(ar); +} + +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff_head list; + struct sk_buff *skb; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) + ath10k_htc_tx_completion_handler(ar, skb); +} + +static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id]; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &nbytes) == 0) { + 
skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n", + ce_state->id, skb->len); + + callback(ar, skb); + } + + ath10k_snoc_rx_post_pipe(pipe_info); +} + +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff *skb; + + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} + +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver); +} + +static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *snoc_pipe; + struct ath10k_ce_pipe *ce_pipe; + int err, i = 0; + + if (!ar_snoc) + return -EINVAL; + + if (atomic_read(&ar_snoc->fw_crashed)) + return -ESHUTDOWN; + + snoc_pipe = &ar_snoc->pipe_info[pipe_id]; + ce_pipe = snoc_pipe->ce_hdl; + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); + + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + if (ath10k_snoc_has_fw_crashed(ar)) + return -EINVAL; + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); + if (err) + goto err; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; + + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); + return 0; + +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); + + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); + return err; +} + +static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n"); + + return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl); +} + +static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 
pipe, + int force) +{ + int resources; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n"); + + if (!force) { + resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe); + + if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} + +static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n"); + + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + return 0; +} + +static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n"); + + (void)ath10k_snoc_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, dl_pipe); +} + +static void ath10k_snoc_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); +} + +static void ath10k_snoc_irq_enable(struct ath10k *ar) +{ + ath10k_ce_enable_interrupts(ar); +} + +static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k *ar; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; + + ar = snoc_pipe->hif_ce_state; + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k *ar; + struct ath10k_snoc *ar_snoc; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; + + ar = snoc_pipe->hif_ce_state; + ar_snoc = ath10k_snoc_priv(ar); + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + ath10k_htc_tx_completion_handler(ar, skb); + } +} + +static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int pipe_num; + + del_timer_sync(&ar_snoc->rx_post_retry); + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + struct ath10k_snoc_pipe *pipe_info; + + pipe_info = &ar_snoc->pipe_info[pipe_num]; + ath10k_snoc_rx_pipe_cleanup(pipe_info); + ath10k_snoc_tx_pipe_cleanup(pipe_info); + } +} + +static void ath10k_snoc_flush(struct ath10k *ar) +{ + ath10k_snoc_buffer_cleanup(ar); +} 
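+/* ath10k_snoc_hif_stop() below tears down the datapath: if the firmware + * has crashed the CE IRQs are freed outright (hif_start re-requests them), + * otherwise CE interrupts are only disabled; pending buffers are then + * flushed and NAPI is synchronized and disabled. + */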
+ +static void ath10k_snoc_hif_stop(struct ath10k *ar) +{ + if (!ar) + return; + if (ath10k_snoc_has_fw_crashed(ar) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { + ath10k_snoc_free_irq(ar); + } else { + ath10k_snoc_irq_disable(ar); + } + + ath10k_snoc_flush(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); +} + +static int ath10k_snoc_alloc_pipes(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe; + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + pipe = &ar_snoc->pipe_info[i]; + pipe->ce_hdl = &ar_snoc->opaque_ctx.ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; + } + + pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); + } + + return 0; +} + +static void ath10k_snoc_free_pipes(struct ath10k *ar) +{ + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} + +static void ath10k_snoc_release_resource(struct ath10k *ar) +{ + netif_napi_del(&ar->napi); + ath10k_snoc_free_pipes(ar); +} + +static int ath10k_snoc_init_pipes(struct ath10k *ar) +{ + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static void ath10k_snoc_hif_power_down(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); + msleep(SNOC_HIF_POWER_DOWN_DELAY); + + if (!atomic_read(&ar_snoc->pm_ops_inprogress)) + ath10k_snoc_qmi_wlan_disable(ar); + + ce_remove_rri_on_ddr(ar); +} + +int ath10k_snoc_get_ce_id(struct ath10k *ar, int irq) +{ + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (ar_snoc->ce_irqs[i].irq_line == irq) + return i; + } + ath10k_err(ar, "No matching CE id for irq %d\n", irq); + + return -EINVAL; +} + +static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ce_id = ath10k_snoc_get_ce_id(ar, irq); + + if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) { + ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); + return IRQ_HANDLED; + } + + ath10k_snoc_irq_disable(ar); + napi_schedule(&ar->napi); + + return IRQ_HANDLED; +} + +static int ath10k_snoc_request_irq(struct ath10k *ar) +{ + int ret, id; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int irqflags = IRQF_TRIGGER_RISING; + + for (id = 0; id < CE_COUNT_MAX; id++) { + ret = request_irq(ar_snoc->ce_irqs[id].irq_line, + ath10k_snoc_per_engine_handler, + irqflags, ce_name[id], ar); + if (ret) { + ath10k_err(ar, + "%s: cannot register CE %d irq handler, ret = %d", + __func__, id, ret); + atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 0); + return ret; + } else { + atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 1); + } + } + + return 0; +} + +static void ath10k_snoc_free_irq(struct ath10k *ar) +{ + int id; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (id = 0; id < CE_COUNT_MAX; id++) { + if (atomic_read(&ar_snoc->ce_irqs[id].irq_req_stat)) { + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); + atomic_set(&ar_snoc->ce_irqs[id].irq_req_stat, 0); + } + 
} +} + +static int ath10k_snoc_get_soc_info(struct ath10k *ar) +{ + struct resource *res; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + + pdev = ar_snoc->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase"); + if (!res) { + ath10k_err(ar, "Memory base not found in DT\n"); + return -EINVAL; + } + + ar_snoc->mem_pa = res->start; + ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa, + resource_size(res)); + if (!ar_snoc->mem) { + ath10k_err(ar, "Memory base ioremap failed: phy addr: %pa\n", + &ar_snoc->mem_pa); + return -EINVAL; + } + + ar_snoc->target_info.soc_version = ATH10K_HW_WCN3990; + ar_snoc->target_info.target_version = ATH10K_HW_WCN3990; + ar_snoc->target_info.target_revision = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%s: mem = %pS mem_pa = %pad soc ver=%x tgt ver=%x\n", + __func__, ar_snoc->mem, &ar_snoc->mem_pa, + ar_snoc->target_info.soc_version, + ar_snoc->target_info.target_version); + + return 0; +} + +static int ath10k_snoc_wlan_enable(struct ath10k *ar) +{ + struct ath10k_wlan_enable_cfg cfg; + enum ath10k_driver_mode mode; + int pipe_num; + struct ath10k_ce_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX]; + + for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) { + tgt_cfg[pipe_num].pipe_num = + target_ce_config_wlan[pipe_num].pipenum; + tgt_cfg[pipe_num].pipe_dir = + target_ce_config_wlan[pipe_num].pipedir; + tgt_cfg[pipe_num].nentries = + target_ce_config_wlan[pipe_num].nentries; + tgt_cfg[pipe_num].nbytes_max = + target_ce_config_wlan[pipe_num].nbytes_max; + tgt_cfg[pipe_num].flags = + target_ce_config_wlan[pipe_num].flags; + tgt_cfg[pipe_num].reserved = 0; + } + + cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) / + sizeof(struct ath10k_ce_tgt_pipe_cfg); + cfg.ce_tgt_cfg = (struct ath10k_ce_tgt_pipe_cfg *) + &tgt_cfg; + cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) / + sizeof(struct ath10k_ce_svc_pipe_cfg); + cfg.ce_svc_cfg = (struct ath10k_ce_svc_pipe_cfg *) + &target_service_to_ce_map_wlan; + cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) / + sizeof(struct ath10k_shadow_reg_cfg); + cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *) + &target_shadow_reg_cfg_map; + + mode = ar->testmode.utf_monitor ? 
ATH10K_FTM : ATH10K_MISSION; + return ath10k_snoc_qmi_wlan_enable(ar, &cfg, mode, + "5.1.0.26N"); +} + +static int ath10k_snoc_bus_configure(struct ath10k *ar) +{ + int ret; + + ret = ath10k_snoc_wlan_enable(ar); + if (ret < 0) { + ath10k_err(ar, "%s: ath10k_snoc_bus_configure error = %d", + __func__, ret); + return ret; + } + + ce_config_rri_on_ddr(ar); + + return 0; +} + +static int ath10k_snoc_hif_start(struct ath10k *ar) +{ + if (ath10k_snoc_has_fw_crashed(ar)) { + ath10k_snoc_request_irq(ar); + ath10k_snoc_fw_crashed_clear(ar); + } + ath10k_snoc_irq_enable(ar); + ath10k_snoc_rx_post(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + return 0; +} + +static int ath10k_snoc_claim(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_snoc_get_soc_info(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot snoc_mem 0x%p\n", ar_snoc->mem); + + return 0; +} + +static int ath10k_snoc_hif_power_up(struct ath10k *ar) +{ + int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", + __func__, ar->state); + + if (atomic_read(&ar_snoc->pm_ops_inprogress)) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%s: WLAN OFF CMD Reset on PM Resume\n", __func__); + ath10k_snoc_qmi_wlan_disable(ar); + atomic_set(&ar_snoc->pm_ops_inprogress, 0); + } + + if ((ar->state == ATH10K_STATE_ON) || + (ar->state == ATH10K_STATE_RESTARTING) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { + ret = ath10k_snoc_bus_configure(ar); + if (ret) { + ath10k_err(ar, "failed to configure bus: %d\n", ret); + return ret; + } + } + ret = ath10k_snoc_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err_sleep; + } + + napi_enable(&ar->napi); + return 0; + +err_sleep: + return ret; +} + +static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done = 0; + + if (ath10k_snoc_has_fw_crashed(ar)) { + napi_complete(ctx); + return done; + } + ath10k_ce_per_engine_service_any(ar); + + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { + napi_complete(ctx); + ath10k_snoc_irq_enable(ar); + } + + return done; +} + +static int ath10k_snoc_resource_init(struct ath10k *ar) +{ + int i, ret = 0; + struct resource *res; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < CE_COUNT; i++) { + res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i); + if (!res) { + ath10k_err(ar, "Fail to get IRQ-%d\n", i); + ret = -ENODEV; + goto out; + } else { + ar_snoc->ce_irqs[i].irq_line = res->start; + } + } + +out: + return ret; +} + +static +int ath10k_snoc_pm_notifier(struct notifier_block *nb, + unsigned long pm_event, void *data) +{ + struct ath10k_snoc *ar_snoc = + container_of(nb, struct ath10k_snoc, pm_notifier); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%s: PM Event: %lu\n", __func__, pm_event); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + case PM_RESTORE_PREPARE: + case PM_POST_RESTORE: + atomic_set(&ar_snoc->pm_ops_inprogress, 1); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_vreg_info *vreg_info) +{ + int ret = 0; + char prop_name[ATH10K_MAX_PROP_SIZE]; + struct regulator *reg; + const __be32 *prop; + int len = 0; + int i; + + reg = 
devm_regulator_get_optional(dev, vreg_info->name); + if (PTR_ERR(reg) == -EPROBE_DEFER) { + ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n", + vreg_info->name); + ret = PTR_ERR(reg); + goto out; + } + + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + + if (vreg_info->required) { + ath10k_err(ar, "Regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto out; + } else { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Optional regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto done; + } + } + + vreg_info->reg = reg; + + snprintf(prop_name, ATH10K_MAX_PROP_SIZE, + "qcom,%s-config", vreg_info->name); + + prop = of_get_property(dev->of_node, prop_name, &len); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n", + prop_name, len); + + if (!prop || len < (2 * sizeof(__be32))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name, + prop ? "invalid format" : "doesn't exist"); + goto done; + } + + for (i = 0; (i * sizeof(__be32)) < len; i++) { + switch (i) { + case 0: + vreg_info->min_v = be32_to_cpup(&prop[0]); + break; + case 1: + vreg_info->max_v = be32_to_cpup(&prop[1]); + break; + case 2: + vreg_info->load_ua = be32_to_cpup(&prop[2]); + break; + case 3: + vreg_info->settle_delay = be32_to_cpup(&prop[3]); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n", + prop_name, i); + break; + } + } + +done: + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v, + vreg_info->load_ua, vreg_info->settle_delay); + + return 0; + +out: + return ret; +} + +static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_clk_info *clk_info) +{ + struct clk *handle; + int ret = 0; + + handle = devm_clk_get(dev, clk_info->name); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + if (clk_info->required) { + ath10k_err(ar, "Clock %s isn't available: %d\n", + clk_info->name, ret); + goto out; + } else { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n", + clk_info->name, + ret); + ret = 0; + goto out; + } + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n", + clk_info->name, clk_info->freq); + + clk_info->handle = handle; +out: + return ret; +} + +static int ath10k_wcn3990_vreg_on(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_wcn3990_vreg_info *vreg_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, + vreg_info->max_v); + if (ret) { + ath10k_err(ar, + "vreg %s, set failed:min:%u,max:%u,ret: %d\n", + vreg_info->name, vreg_info->min_v, + vreg_info->max_v, ret); + break; + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, + vreg_info->load_ua); + if (ret < 0) { + ath10k_err(ar, + "Reg %s, can't set load:%u,ret: %d\n", + vreg_info->name, + vreg_info->load_ua, ret); + break; + } + } + + ret = regulator_enable(vreg_info->reg); + if (ret) { + ath10k_err(ar, "Regulator %s, can't enable: %d\n", + vreg_info->name, ret); + break; + } + + if (vreg_info->settle_delay) + udelay(vreg_info->settle_delay); + } + + if (!ret) + return 0; + + for (; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + regulator_disable(vreg_info->reg); + 
regulator_set_load(vreg_info->reg, 0); + regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + } + + return ret; +} + +static int ath10k_wcn3990_vreg_off(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_wcn3990_vreg_info *vreg_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n", + vreg_info->name); + + ret = regulator_disable(vreg_info->reg); + if (ret) + ath10k_err(ar, "Regulator %s, can't disable: %d\n", + vreg_info->name, ret); + + ret = regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + ath10k_err(ar, "Regulator %s, can't set load: %d\n", + vreg_info->name, ret); + + ret = regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_v); + if (ret) + ath10k_err(ar, "Regulator %s, can't set voltage: %d\n", + vreg_info->name, ret); + } + + return ret; +} + +static int ath10k_wcn3990_clk_init(struct ath10k *ar) +{ + struct ath10k_wcn3990_clk_info *clk_info; + int i; + int ret = 0; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n", + clk_info->name); + + if (clk_info->freq) { + ret = clk_set_rate(clk_info->handle, clk_info->freq); + + if (ret) { + ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n", + clk_info->name, clk_info->freq, + ret); + break; + } + } + + ret = clk_prepare_enable(clk_info->handle); + if (ret) { + ath10k_err(ar, "Clock %s, can't enable: %d\n", + clk_info->name, ret); + break; + } + } + + if (ret == 0) + return 0; + + for (; i >= 0; i--) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + clk_disable_unprepare(clk_info->handle); + } + + return ret; +} + +static int ath10k_wcn3990_clk_deinit(struct ath10k *ar) +{ + struct ath10k_wcn3990_clk_info *clk_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n", + clk_info->name); + + clk_disable_unprepare(clk_info->handle); + } + + return 0; +} + +static int ath10k_hw_power_on(struct ath10k *ar) +{ + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n"); + + ret = ath10k_wcn3990_vreg_on(ar); + if (ret) + goto out; + + ret = ath10k_wcn3990_clk_init(ar); + if (ret) + goto vreg_off; + + return ret; + +vreg_off: + ath10k_wcn3990_vreg_off(ar); +out: + return ret; +} + +static int ath10k_hw_power_off(struct ath10k *ar) +{ + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n"); + + ath10k_wcn3990_clk_deinit(ar); + + ret = ath10k_wcn3990_vreg_off(ar); + + return ret; +} + +static int ath10k_snoc_hif_suspend(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret = 0; + + if (!ar_snoc) + return -EINVAL; + + if (!device_may_wakeup(ar->dev)) + return -EINVAL; + + ret = enable_irq_wake(ar_snoc->ce_irqs[WCN3990_WAKE_IRQ_CE].irq_line); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "HIF Suspend: Failed to enable wakeup IRQ\n"); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HIF Suspended\n"); + return ret; +} + +static int ath10k_snoc_hif_resume(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret = 0; + + 
if (!ar_snoc) + return -EINVAL; + + if (!device_may_wakeup(ar->dev)) + return -EINVAL; + + ret = disable_irq_wake(ar_snoc->ce_irqs[WCN3990_WAKE_IRQ_CE].irq_line); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "HIF Resume: Failed to disable wakeup IRQ\n"); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HIF Resumed\n"); + return ret; +} + +static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { + .tx_sg = ath10k_snoc_hif_tx_sg, + .start = ath10k_snoc_hif_start, + .stop = ath10k_snoc_hif_stop, + .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe, + .get_default_pipe = ath10k_snoc_hif_get_default_pipe, + .send_complete_check = ath10k_snoc_hif_send_complete_check, + .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, + .power_up = ath10k_snoc_hif_power_up, + .power_down = ath10k_snoc_hif_power_down, + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, + .suspend = ath10k_snoc_hif_suspend, + .resume = ath10k_snoc_hif_resume, +}; + +static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + +static int ath10k_snoc_probe(struct platform_device *pdev) +{ + int ret; + struct ath10k *ar; + struct ath10k_snoc *ar_snoc; + enum ath10k_hw_rev hw_rev; + struct device *dev; + u32 chip_id; + u32 i; + + dev = &pdev->dev; + hw_rev = ATH10K_HW_WCN3990; + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(37)); + ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC, + hw_rev, &ath10k_snoc_hif_ops); + if (!ar) { + dev_err(dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ar_snoc = ath10k_snoc_priv(ar); + if (!ar_snoc) + return -EINVAL; + ar_snoc->dev = pdev; + platform_set_drvdata(pdev, ar); + ar_snoc->ar = ar; + + ret = ath10k_snoc_start_qmi_service(ar); + if (ret) { + ath10k_err(ar, "failed to start QMI service: %d\n", ret); + goto err_core_destroy; + } + + spin_lock_init(&ar_snoc->opaque_ctx.ce_lock); + ar_snoc->opaque_ctx.bus_ops = &ath10k_snoc_bus_ops; + ath10k_snoc_resource_init(ar); + + ar->target_version = ATH10K_HW_WCN3990; + ar->hw->wiphy->hw_version = ATH10K_HW_WCN3990; + setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, + (unsigned long)ar); + + memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg)); + for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) { + ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]); + if (ret) + goto err_core_destroy; + } + + memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg)); + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]); + if (ret) + goto err_core_destroy; + } + + ret = ath10k_hw_power_on(ar); + if (ret) { + ath10k_err(ar, "failed to power on device: %d\n", ret); + goto err_stop_qmi_service; + } + + ret = ath10k_snoc_claim(ar); + if (ret) { + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_hw_power_off; + } + + ret = ath10k_snoc_bus_configure(ar); + if (ret) { + ath10k_err(ar, "failed to configure bus: %d\n", ret); + goto err_hw_power_off; + } + + ret = ath10k_snoc_alloc_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + goto err_hw_power_off; + } + + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, + ATH10K_NAPI_BUDGET); + + ret = ath10k_snoc_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_free_pipes; + } + + chip_id = ar_snoc->target_info.soc_version; + /* chip id needs to be retrieved from platform driver */ + ret = 
ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_irq; + } + + ath10k_snoc_modem_ssr_register_notifier(ar); + ath10k_snoc_pd_restart_enable(ar); + + ar_snoc->pm_notifier.notifier_call = ath10k_snoc_pm_notifier; + register_pm_notifier(&ar_snoc->pm_notifier); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 probed\n", __func__); + + return 0; + +err_free_irq: + ath10k_snoc_free_irq(ar); + +err_free_pipes: + ath10k_snoc_free_pipes(ar); + +err_hw_power_off: + ath10k_hw_power_off(ar); + +err_stop_qmi_service: + ath10k_snoc_stop_qmi_service(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + + return ret; +} + +static int ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + if (!ar) + return -EINVAL; + + if (!ar_snoc) + return -EINVAL; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__); + + unregister_pm_notifier(&ar_snoc->pm_notifier); + ath10k_core_unregister(ar); + ath10k_snoc_pdr_unregister_notifier(ar); + ath10k_snoc_modem_ssr_unregister_notifier(ar); + ath10k_snoc_free_irq(ar); + ath10k_snoc_release_resource(ar); + ath10k_snoc_free_pipes(ar); + ath10k_snoc_stop_qmi_service(ar); + ath10k_hw_power_off(ar); + ath10k_core_destroy(ar); + + return 0; +} + +static const struct of_device_id ath10k_snoc_dt_match[] = { + {.compatible = "qcom,wcn3990-wifi"}, + {} +}; +MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match); + +static struct platform_driver ath10k_snoc_driver = { + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, + .driver = { + .name = "ath10k_snoc", + .owner = THIS_MODULE, + .of_match_table = ath10k_snoc_dt_match, + }, +}; + +static int __init ath10k_snoc_init(void) +{ + int ret; + + ret = platform_driver_register(&ath10k_snoc_driver); + if (ret) + pr_err("failed to register ath10k snoc driver: %d\n", + ret); + + return ret; +} +module_init(ath10k_snoc_init); + +static void __exit ath10k_snoc_exit(void) +{ + platform_driver_unregister(&ath10k_snoc_driver); +} +module_exit(ath10k_snoc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices"); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h new file mode 100644 index 000000000000..a02cb2ad928e --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -0,0 +1,214 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SNOC_H_ +#define _SNOC_H_ + +#include "hw.h" +#include "ce.h" +#include "pci.h" +#include "qmi.h" +#include <linux/kernel.h> +#include <soc/qcom/service-locator.h> +#define ATH10K_SNOC_RX_POST_RETRY_MS 50 +#define CE_POLL_PIPE 4 +#define ATH10K_SERVICE_LOCATION_CLIENT_NAME "ATH10K-WLAN" +#define ATH10K_WLAN_SERVICE_NAME "wlan/fw" + +/* struct snoc_state: SNOC target state + * @pipe_cfg_addr: pipe configuration address + * @svc_to_pipe_map: pipe services + */ +struct snoc_state { + u32 pipe_cfg_addr; + u32 svc_to_pipe_map; +}; + +/* struct ath10k_snoc_pipe: SNOC pipe configuration + * @ath10k_ce_pipe: pipe handle + * @pipe_num: pipe number + * @hif_ce_state: pointer to ce state + * @buf_sz: buffer size + * @pipe_lock: pipe lock + * @ar_snoc: snoc private structure + */ + +struct ath10k_snoc_pipe { + struct ath10k_ce_pipe *ce_hdl; + u8 pipe_num; + struct ath10k *hif_ce_state; + size_t buf_sz; + /* protect ce info */ + spinlock_t pipe_lock; + struct ath10k_snoc *ar_snoc; +}; + +/* struct ath10k_snoc_supp_chip: supported chip set + * @dev_id: device id + * @rev_id: revison id + */ +struct ath10k_snoc_supp_chip { + u32 dev_id; + u32 rev_id; +}; + +/* struct ath10k_snoc_info: SNOC info struct + * @v_addr: base virtual address + * @p_addr: base physical address + * @chip_id: chip id + * @chip_family: chip family + * @board_id: board id + * @soc_id: soc id + * @fw_version: fw version + */ +struct ath10k_snoc_info { + void __iomem *v_addr; + phys_addr_t p_addr; + u32 chip_id; + u32 chip_family; + u32 board_id; + u32 soc_id; + u32 fw_version; +}; + +/* struct ath10k_target_info: SNOC target info + * @target_version: target version + * @target_type: target type + * @target_revision: target revision + * @soc_version: target soc version + */ +struct ath10k_target_info { + u32 target_version; + u32 target_type; + u32 target_revision; + u32 soc_version; +}; + +/* struct ath10k_service_notifier_context: service notification context + * @handle: notifier handle + * @instance_id: domain instance id + * @name: domain name + */ +struct ath10k_service_notifier_context { + void *handle; + u32 instance_id; + char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1]; +}; + +/* struct ath10k_snoc_ce_irq: copy engine irq struct + * @irq_req_stat: irq request status + * @irq_line: irq line + */ +struct ath10k_snoc_ce_irq { + atomic_t irq_req_stat; + u32 irq_line; +}; + +struct ath10k_wcn3990_vreg_info { + struct regulator *reg; + const char *name; + u32 min_v; + u32 max_v; + u32 load_ua; + unsigned long settle_delay; + bool required; +}; + +struct ath10k_wcn3990_clk_info { + struct clk *handle; + const char *name; + u32 freq; + bool required; +}; + +static struct ath10k_wcn3990_vreg_info vreg_cfg[] = { + {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false}, + {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false}, + {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false}, + {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false}, +}; + +#define ATH10K_WCN3990_VREG_INFO_SIZE ARRAY_SIZE(vreg_cfg) + +static struct ath10k_wcn3990_clk_info clk_cfg[] = { + {NULL, "cxo_ref_clk_pin", 0, false}, +}; + +#define ATH10K_WCN3990_CLK_INFO_SIZE ARRAY_SIZE(clk_cfg) + +/* struct ath10k_snoc: SNOC info struct + * @dev: device structure + * @ar:ath10k base structure + * @mem: mem base virtual address + * @mem_pa: mem base physical address + * @target_info: snoc target info + * @mem_len: mempry map length + * @pipe_info: pipe info struct + * @ce_irqs: copy engine irq list + * @ce_lock: protect ce structures + * @ce_states: maps ce id to 
ce state + * @rx_post_retry: rx buffer post processing timer + * @vaddr_rri_on_ddr: virtual address for RRI + * @is_driver_probed: flag to indicate driver state + * @modem_ssr_nb: notifier callback for modem notification + * @modem_notify_handler: modem notification handler + * @service_notifier: notifier context for service notification + * @service_notifier_nb: notifier callback for service notification + * @total_domains: no of service domains + * @get_service_nb: notifier callback for service discovery + * @fw_crashed: fw state flag + */ +struct ath10k_snoc { + struct bus_opaque opaque_ctx; + struct platform_device *dev; + struct ath10k *ar; + void __iomem *mem; + dma_addr_t mem_pa; + struct ath10k_target_info target_info; + size_t mem_len; + struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX]; + struct timer_list rx_post_retry; + struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX]; + u32 *vaddr_rri_on_ddr; + bool is_driver_probed; + struct notifier_block modem_ssr_nb; + struct notifier_block pm_notifier; + void *modem_notify_handler; + struct ath10k_service_notifier_context *service_notifier; + struct notifier_block service_notifier_nb; + int total_domains; + struct notifier_block get_service_nb; + atomic_t fw_crashed; + atomic_t pm_ops_inprogress; + struct ath10k_snoc_qmi_config qmi_cfg; + struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE]; + struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE]; +}; + +struct ath10k_event_pd_down_data { + bool crashed; + bool fw_rejuvenate; +}; + +static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) +{ + return (struct ath10k_snoc *)ar->drv_priv; +} + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); +void ath10k_snoc_soc_write32(struct ath10k *ar, u32 addr, u32 val); +void ath10k_snoc_reg_write32(struct ath10k *ar, u32 addr, u32 val); +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); +u32 ath10k_snoc_soc_read32(struct ath10k *ar, u32 addr); +u32 ath10k_snoc_reg_read32(struct ath10k *ar, u32 addr); + +#endif /* _SNOC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index a0e7eebc206a..2ffc1fe4923b 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -101,9 +101,9 @@ int ath10k_spectral_process_fft(struct ath10k *ar, break; case 80: /* TODO: As experiments with an analogue sender and various - * configuaritions (fft-sizes of 64/128/256 and 20/40/80 Mhz) + * configurations (fft-sizes of 64/128/256 and 20/40/80 Mhz) * show, the particular configuration of 80 MHz/64 bins does - * not match with the other smaples at all. Until the reason + * not match with the other samples at all. Until the reason * for that is found, don't report these samples. 
*/ if (bin_len == 64) diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c index 3ca3fae408a7..adf4592374b4 100644 --- a/drivers/net/wireless/ath/ath10k/swap.c +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -135,26 +135,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len) } int ath10k_swap_code_seg_configure(struct ath10k *ar, - enum ath10k_swap_code_seg_bin_type type) + const struct ath10k_fw_file *fw_file) { int ret; struct ath10k_swap_code_seg_info *seg_info = NULL; - switch (type) { - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW: - if (!ar->swap.firmware_swap_code_seg_info) - return 0; - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); - seg_info = ar->swap.firmware_swap_code_seg_info; - break; - default: - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP: - case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF: - ath10k_warn(ar, "ignoring unknown code swap binary type %d\n", - type); + if (!fw_file->firmware_swap_code_seg_info) return 0; - } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); + + seg_info = fw_file->firmware_swap_code_seg_info; ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, &seg_info->seg_hw_info, @@ -168,32 +159,41 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar, return 0; } -void ath10k_swap_code_seg_release(struct ath10k *ar) +void ath10k_swap_code_seg_release(struct ath10k *ar, + struct ath10k_fw_file *fw_file) { - ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info); - ar->swap.firmware_codeswap_data = NULL; - ar->swap.firmware_codeswap_len = 0; - ar->swap.firmware_swap_code_seg_info = NULL; + ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info); + + /* FIXME: these two assignments look to bein wrong place! Shouldn't + * they be in ath10k_core_free_firmware_files() like the rest? 
+ */ + fw_file->codeswap_data = NULL; + fw_file->codeswap_len = 0; + + fw_file->firmware_swap_code_seg_info = NULL; } -int ath10k_swap_code_seg_init(struct ath10k *ar) +int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file) { int ret; struct ath10k_swap_code_seg_info *seg_info; + const void *codeswap_data; + size_t codeswap_len; + + codeswap_data = fw_file->codeswap_data; + codeswap_len = fw_file->codeswap_len; - if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data) + if (!codeswap_len || !codeswap_data) return 0; - seg_info = ath10k_swap_code_seg_alloc(ar, - ar->swap.firmware_codeswap_len); + seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len); if (!seg_info) { ath10k_err(ar, "failed to allocate fw code swap segment\n"); return -ENOMEM; } ret = ath10k_swap_code_seg_fill(ar, seg_info, - ar->swap.firmware_codeswap_data, - ar->swap.firmware_codeswap_len); + codeswap_data, codeswap_len); if (ret) { ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", @@ -202,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar) return ret; } - ar->swap.firmware_swap_code_seg_info = seg_info; + fw_file->firmware_swap_code_seg_info = seg_info; return 0; } diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h index 5c89952dd20f..f5dc0476493e 100644 --- a/drivers/net/wireless/ath/ath10k/swap.h +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -23,6 +23,8 @@ /* Currently only one swap segment is supported */ #define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1 +struct ath10k_fw_file; + struct ath10k_swap_code_seg_tlv { __le32 address; __le32 length; @@ -39,12 +41,6 @@ union ath10k_swap_code_seg_item { struct ath10k_swap_code_seg_tail tail; } __packed; -enum ath10k_swap_code_seg_bin_type { - ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF, -}; - struct ath10k_swap_code_seg_hw_info { /* Swap binary image size */ __le32 swap_size; @@ -65,8 +61,10 @@ struct ath10k_swap_code_seg_info { }; int ath10k_swap_code_seg_configure(struct ath10k *ar, - enum ath10k_swap_code_seg_bin_type type); -void ath10k_swap_code_seg_release(struct ath10k *ar); -int ath10k_swap_code_seg_init(struct ath10k *ar); + const struct ath10k_fw_file *fw_file); +void ath10k_swap_code_seg_release(struct ath10k *ar, + struct ath10k_fw_file *fw_file); +int ath10k_swap_code_seg_init(struct ath10k *ar, + struct ath10k_fw_file *fw_file); #endif diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 05a421bc322a..a47cab44d9c8 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -405,7 +405,7 @@ Fw Mode/SubMode Mask * 1. target firmware would check magic number and if it's a match, firmware * would consider the bits[0:15] are valid and base on that to calculate * the end of DRAM. Early allocation would be located at that area and - * may be reclaimed when necesary + * may be reclaimed when necessary * 2. if no magic number is found, early allocation would happen at "_end" * symbol of ROM which is located before the app-data and might NOT be * re-claimable. 
If this is adopted, link script should keep this in @@ -438,7 +438,7 @@ Fw Mode/SubMode Mask ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED)) #define HI_DEV_LPL_TYPE_GET(_devix) \ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \ - (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2))) + (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2))) #define HOST_INTEREST_SMPS_IS_ALLOWED() \ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK)) @@ -447,6 +447,9 @@ Fw Mode/SubMode Mask #define QCA988X_BOARD_DATA_SZ 7168 #define QCA988X_BOARD_EXT_DATA_SZ 0 +#define QCA9887_BOARD_DATA_SZ 7168 +#define QCA9887_BOARD_EXT_DATA_SZ 0 + #define QCA6174_BOARD_DATA_SZ 8192 #define QCA6174_BOARD_EXT_DATA_SZ 0 @@ -456,4 +459,7 @@ Fw Mode/SubMode Mask #define QCA99X0_BOARD_DATA_SZ 12288 #define QCA99X0_BOARD_EXT_DATA_SZ 0 +#define QCA4019_BOARD_DATA_SZ 12064 +#define QCA4019_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 1d5a2fdcbf56..1a067a4ece4d 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -23,6 +23,7 @@ #include "wmi.h" #include "hif.h" #include "hw.h" +#include "core.h" #include "testmode_i.h" @@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) int ret; ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode event wmi cmd_id %d skb %p skb->len %d\n", + "testmode event wmi cmd_id %d skb %pK skb->len %d\n", cmd_id, skb, skb->len); ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len); @@ -136,130 +137,18 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) return ret; } - return cfg80211_testmode_reply(skb); -} - -static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar) -{ - size_t len, magic_len, ie_len; - struct ath10k_fw_ie *hdr; - char filename[100]; - __le32 *version; - const u8 *data; - int ie_id, ret; - - snprintf(filename, sizeof(filename), "%s/%s", - ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE); - - /* load utf firmware image */ - ret = request_firmware(&ar->testmode.utf, filename, ar->dev); + ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION, + ar->normal_mode_fw.fw_file.wmi_op_version); if (ret) { - ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", - filename, ret); + kfree_skb(skb); return ret; } - data = ar->testmode.utf->data; - len = ar->testmode.utf->size; - - /* FIXME: call release_firmware() in error cases */ - - /* magic also includes the null byte, check that as well */ - magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; - - if (len < magic_len) { - ath10k_err(ar, "utf firmware file is too small to contain magic\n"); - ret = -EINVAL; - goto err; - } - - if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { - ath10k_err(ar, "invalid firmware magic\n"); - ret = -EINVAL; - goto err; - } - - /* jump over the padding */ - magic_len = ALIGN(magic_len, 4); - - len -= magic_len; - data += magic_len; - - /* loop elements */ - while (len > sizeof(struct ath10k_fw_ie)) { - hdr = (struct ath10k_fw_ie *)data; - - ie_id = le32_to_cpu(hdr->id); - ie_len = le32_to_cpu(hdr->len); - - len -= sizeof(*hdr); - data += sizeof(*hdr); - - if (len < ie_len) { - ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", - ie_id, len, ie_len); - ret = -EINVAL; - goto err; - } - - switch (ie_id) { - case ATH10K_FW_IE_FW_VERSION: - if (ie_len > sizeof(ar->testmode.utf_version) - 1) - break; - - memcpy(ar->testmode.utf_version, 
data, ie_len); - ar->testmode.utf_version[ie_len] = '\0'; - - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode found fw utf version %s\n", - ar->testmode.utf_version); - break; - case ATH10K_FW_IE_TIMESTAMP: - /* ignore timestamp, but don't warn about it either */ - break; - case ATH10K_FW_IE_FW_IMAGE: - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode found fw image ie (%zd B)\n", - ie_len); - - ar->testmode.utf_firmware_data = data; - ar->testmode.utf_firmware_len = ie_len; - break; - case ATH10K_FW_IE_WMI_OP_VERSION: - if (ie_len != sizeof(u32)) - break; - version = (__le32 *)data; - ar->testmode.op_version = le32_to_cpup(version); - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n", - ar->testmode.op_version); - break; - default: - ath10k_warn(ar, "Unknown testmode FW IE: %u\n", - le32_to_cpu(hdr->id)); - break; - } - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - - len -= ie_len; - data += ie_len; - } - - if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) { - ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n"); - ret = -EINVAL; - goto err; - } - - return 0; - -err: - release_firmware(ar->testmode.utf); - - return ret; + return cfg80211_testmode_reply(skb); } -static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) +static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, + struct ath10k_fw_file *fw_file) { char filename[100]; int ret; @@ -268,7 +157,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); /* load utf firmware image */ - ret = request_firmware(&ar->testmode.utf, filename, ar->dev); + ret = request_firmware(&fw_file->firmware, filename, ar->dev); if (ret) { ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", filename, ret); @@ -281,24 +170,34 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar) * correct WMI interface. */ - ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1; - ar->testmode.utf_firmware_data = ar->testmode.utf->data; - ar->testmode.utf_firmware_len = ar->testmode.utf->size; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; + fw_file->firmware_data = fw_file->firmware->data; + fw_file->firmware_len = fw_file->firmware->size; return 0; } static int ath10k_tm_fetch_firmware(struct ath10k *ar) { + struct ath10k_fw_components *utf_mode_fw; + struct ath10k_fw_file *fw_file; int ret; - ret = ath10k_tm_fetch_utf_firmware_api_2(ar); + if (!ar->is_bmi) { + fw_file = &ar->testmode.utf_mode_fw.fw_file; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + return 0; + } + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE, + &ar->testmode.utf_mode_fw.fw_file); if (ret == 0) { ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2"); - return 0; + goto out; } - ret = ath10k_tm_fetch_utf_firmware_api_1(ar); + ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file); if (ret) { ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret); return ret; @@ -306,6 +205,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1"); +out: + utf_mode_fw = &ar->testmode.utf_mode_fw; + + /* Use the same board data file as the normal firmware uses (but + * it's still "owned" by normal_mode_fw so we shouldn't free it. 
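+ * The UTF code swap segment, by contrast, is owned by utf_mode_fw and is
+ * released again in the UTF stop and error paths.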
+ */ + utf_mode_fw->board_data = ar->normal_mode_fw.board_data; + utf_mode_fw->board_len = ar->normal_mode_fw.board_len; + + if (!utf_mode_fw->fw_file.otp_data) { + ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware"); + utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data; + utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len; + } + return 0; } @@ -329,7 +243,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) goto err; } - if (WARN_ON(ar->testmode.utf != NULL)) { + if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) { /* utf image is already downloaded, it shouldn't be */ ret = -EEXIST; goto err; @@ -341,30 +255,34 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) goto err; } + if (ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) { + ret = ath10k_swap_code_seg_init(ar, + &ar->testmode.utf_mode_fw.fw_file); + if (ret) { + ath10k_warn(ar, + "failed to init utf code swap segment: %d\n", + ret); + goto err_release_utf_mode_fw; + } + } + spin_lock_bh(&ar->data_lock); ar->testmode.utf_monitor = true; spin_unlock_bh(&ar->data_lock); - BUILD_BUG_ON(sizeof(ar->fw_features) != - sizeof(ar->testmode.orig_fw_features)); - - memcpy(ar->testmode.orig_fw_features, ar->fw_features, - sizeof(ar->fw_features)); - ar->testmode.orig_wmi_op_version = ar->wmi.op_version; - memset(ar->fw_features, 0, sizeof(ar->fw_features)); - - ar->wmi.op_version = ar->testmode.op_version; ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n", - ar->wmi.op_version); + ar->testmode.utf_mode_fw.fw_file.wmi_op_version); ret = ath10k_hif_power_up(ar); if (ret) { ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; - goto err_fw_features; + goto err_release_utf_mode_fw; } - ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF, + &ar->testmode.utf_mode_fw); if (ret) { ath10k_err(ar, "failed to start core (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; @@ -373,8 +291,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ar->state = ATH10K_STATE_UTF; - if (strlen(ar->testmode.utf_version) > 0) - ver = ar->testmode.utf_version; + if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0) + ver = ar->testmode.utf_mode_fw.fw_file.fw_version; else ver = "API 1"; @@ -387,14 +305,14 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) err_power_down: ath10k_hif_power_down(ar); -err_fw_features: - /* return the original firmware features */ - memcpy(ar->fw_features, ar->testmode.orig_fw_features, - sizeof(ar->fw_features)); - ar->wmi.op_version = ar->testmode.orig_wmi_op_version; +err_release_utf_mode_fw: + if (ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) + ath10k_swap_code_seg_release(ar, + &ar->testmode.utf_mode_fw.fw_file); - release_firmware(ar->testmode.utf); - ar->testmode.utf = NULL; + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; err: mutex_unlock(&ar->conf_mutex); @@ -415,13 +333,13 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) spin_unlock_bh(&ar->data_lock); - /* return the original firmware features */ - memcpy(ar->fw_features, ar->testmode.orig_fw_features, - sizeof(ar->fw_features)); - ar->wmi.op_version = ar->testmode.orig_wmi_op_version; + if 
(ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) + ath10k_swap_code_seg_release(ar, + &ar->testmode.utf_mode_fw.fw_file); - release_firmware(ar->testmode.utf); - ar->testmode.utf = NULL; + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; ar->state = ATH10K_STATE_OFF; } @@ -479,7 +397,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]); ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode cmd wmi cmd_id %d buf %p buf_len %d\n", + "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n", cmd_id, buf, buf_len); ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h index ba81bf66ce85..191a8f34c8ea 100644 --- a/drivers/net/wireless/ath/ath10k/testmode_i.h +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -33,6 +33,7 @@ enum ath10k_tm_attr { ATH10K_TM_ATTR_WMI_CMDID = 3, ATH10K_TM_ATTR_VERSION_MAJOR = 4, ATH10K_TM_ATTR_VERSION_MINOR = 5, + ATH10K_TM_ATTR_WMI_OP_VERSION = 6, /* keep last */ __ATH10K_TM_ATTR_AFTER_LAST, diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index 60fe562e3041..0a47269be289 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -187,12 +187,12 @@ int ath10k_thermal_register(struct ath10k *ar) /* Do not register hwmon device when temperature reading is not * supported by firmware */ - if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4) + if (!(ar->wmi.ops->gen_pdev_get_temperature)) return 0; /* Avoid linking error on devm_hwmon_device_register_with_groups, I * guess linux/hwmon.h is missing proper stubs. 
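+ * Note: IS_REACHABLE() evaluates to false when hwmon is built as a module
+ * while this code is built in, so the call below is compiled out in that
+ * case instead of failing to link.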
*/ - if (!config_enabled(CONFIG_HWMON)) + if (!IS_REACHABLE(CONFIG_HWMON)) return 0; hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev, diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h index b610ea5caae8..3abb97f63b1e 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.h +++ b/drivers/net/wireless/ath/ath10k/thermal.h @@ -20,7 +20,7 @@ #define ATH10K_QUIET_PERIOD_MIN 25 #define ATH10K_QUIET_START_OFFSET 10 #define ATH10K_HWMON_NAME_LEN 15 -#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ) +#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ) #define ATH10K_THERMAL_THROTTLE_MAX 100 struct ath10k_thermal { @@ -36,7 +36,7 @@ struct ath10k_thermal { int temperature; }; -#ifdef CONFIG_THERMAL +#if IS_REACHABLE(CONFIG_THERMAL) int ath10k_thermal_register(struct ath10k *ar); void ath10k_thermal_unregister(struct ath10k *ar); void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature); diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 0194bebbdbf7..5b974bb76e6c 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -246,6 +246,7 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), @@ -253,14 +254,16 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "%s %s len %zu", + "%s %s %d len %zu", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->buf_len ) ); @@ -273,6 +276,7 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(u16, buf_len) __dynamic_array(u8, pktlog, buf_len) ), @@ -280,14 +284,16 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(pktlog), buf, buf_len); ), TP_printk( - "%s %s size %hu", + "%s %s %d size %hu", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->buf_len ) ); @@ -436,6 +442,7 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(u16, len) __dynamic_array(u8, rxdesc, len) ), @@ -443,14 +450,16 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->len = len; memcpy(__get_dynamic_array(rxdesc), data, len); ), TP_printk( - "%s %s rxdesc len %d", + "%s %s %d rxdesc len %d", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->len ) ); diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index f9d0f8372e3f..beeb6be06939 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -23,7 +23,12 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) { - if (!ATH10K_SKB_CB(skb)->htt.is_offchan) + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (likely(!(info->flags & 
IEEE80211_TX_CTL_TX_OFFCHAN))) + return; + + if (ath10k_mac_tx_frm_has_freq(ar)) return; /* If the original wait_for_completion() timed out before @@ -39,32 +44,30 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) complete(&ar->offchan_tx_completed); ar->offchan_tx_skb = NULL; /* just for sanity */ - ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb); out: spin_unlock_bh(&ar->data_lock); } -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done) +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_tx_info *info; + struct ieee80211_txq *txq; struct ath10k_skb_cb *skb_cb; + struct ath10k_txq *artxq; struct sk_buff *msdu; - struct ieee80211_hdr *hdr; - __le16 fc; - bool limit_mgmt_desc = false; ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", - tx_done->msdu_id, !!tx_done->discard, - !!tx_done->no_ack, !!tx_done->success); + "htt tx completion msdu_id %u status %d\n", + tx_done->msdu_id, tx_done->status); if (tx_done->msdu_id >= htt->max_num_pending_tx) { ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", tx_done->msdu_id); - return; + return -EINVAL; } spin_lock_bh(&htt->tx_lock); @@ -73,23 +76,23 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", tx_done->msdu_id); spin_unlock_bh(&htt->tx_lock); - return; + return -ENOENT; } - hdr = (struct ieee80211_hdr *)msdu->data; - fc = hdr->frame_control; + skb_cb = ATH10K_SKB_CB(msdu); + txq = skb_cb->txq; - if (unlikely(ieee80211_is_mgmt(fc)) && - ar->hw_params.max_probe_resp_desc_thres) - limit_mgmt_desc = true; + if (txq) { + artxq = (void *)txq->drv_priv; + artxq->num_fw_queued--; + } ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); - __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); + ath10k_htt_tx_dec_pending(htt); if (htt->num_pending_tx == 0) wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); - skb_cb = ATH10K_SKB_CB(msdu); dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); ath10k_report_offchan_tx(htt->ar, msdu); @@ -100,22 +103,25 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); - if (tx_done->discard) { + if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) { ieee80211_free_txskb(htt->ar->hw, msdu); - return; + return 0; } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - if (tx_done->no_ack) + if (tx_done->status == HTT_TX_COMPL_STATE_NOACK) info->flags &= ~IEEE80211_TX_STAT_ACK; - if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK)) + if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) && + (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ + + return 0; } struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, @@ -128,7 +134,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, list_for_each_entry(peer, &ar->peers, list) { if (peer->vdev_id != vdev_id) continue; - if (memcmp(peer->addr, addr, ETH_ALEN)) + if (!ether_addr_equal(peer->addr, addr)) continue; return peer; @@ -164,7 +170,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, (mapped == expect_mapped || 
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); - }), 3*HZ); + }), 3 * HZ); if (time_left == 0) return -ETIMEDOUT; @@ -188,6 +194,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer map event with idx out of bounds: %hu\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); if (!peer) { @@ -204,6 +217,8 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); + WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer)); + ar->peer_map[ev->peer_id] = peer; set_bit(ev->peer_id, peer->peer_ids); exit: spin_unlock_bh(&ar->data_lock); @@ -215,6 +230,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer unmap event with idx out of bounds: %hu\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, ev->peer_id); if (!peer) { @@ -226,6 +248,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", peer->vdev_id, peer->addr, ev->peer_id); + ar->peer_map[ev->peer_id] = NULL; clear_bit(ev->peer_id, peer->peer_ids); if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index a90e09f5c7f2..e7ea1ae1c438 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -19,8 +19,8 @@ #include "htt.h" -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done); +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done); struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, const u8 *addr); diff --git a/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c new file mode 100644 index 000000000000..7d6cf8b23814 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.c @@ -0,0 +1,2091 @@ + /* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/qmi_encdec.h> + +#include <soc/qcom/msm_qmi_interface.h> + +#include "wcn3990_qmi_service_v01.h" + +static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nentries), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nbytes_max), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + flags), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + service_id), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + offset), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01, + addr), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_memory_region_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + region_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + size), + }, + { + .data_type = 
QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + secure_flag), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_family), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_rf_board_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_board_info_s_v01, + board_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_soc_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_soc_info_s_v01, + soc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_fw_version_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_version), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_build_timestamp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_update_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + 
initiate_cal_update_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_mem_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_mem_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cold_boot_cal_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cold_boot_cal_done_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_ind_register_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + 
.data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_ind_register_resp_msg_v01, + fw_status_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_ind_register_resp_msg_v01, + fw_status), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_driver_mode_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + 
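+/* Illustrative sketch only, not part of this file's API: a caller such as
+ * the SNOC QMI layer would pair the tables above with a struct msg_desc and
+ * the msm_qmi_interface helpers to encode a request and decode the reply.
+ * The message-id, max-length, mode-enum and timeout macros, and the client
+ * handle qmi_hdl, are assumed to come from the matching
+ * wcn3990_qmi_service_v01.h / qmi.h definitions, roughly as follows:
+ *
+ *	struct msg_desc req_desc = {
+ *		.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01,
+ *		.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ *		.ei_array = wlfw_wlan_mode_req_msg_v01_ei,
+ *	};
+ *	struct msg_desc resp_desc = {
+ *		.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01,
+ *		.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN,
+ *		.ei_array = wlfw_wlan_mode_resp_msg_v01_ei,
+ *	};
+ *	struct wlfw_wlan_mode_req_msg_v01 req = { .mode = QMI_WLFW_MISSION_V01 };
+ *	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
+ *
+ *	ret = qmi_send_req_wait(qmi_hdl, &req_desc, &req, sizeof(req),
+ *				&resp_desc, &resp, sizeof(resp),
+ *				WLFW_TIMEOUT_MS);
+ *
+ * The QMI_EOTI entry that terminates each table is what tells the
+ * encoder/decoder where the element list ends.
+ */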
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_CE_V01, + .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg), + .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SVC_V01, + .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg), + .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01, + .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg), + .ei_array = wlfw_shadow_reg_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01, + .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2), + .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = 
sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info), + .ei_array = wlfw_rf_chip_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_board_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info), + .ei_array = wlfw_rf_board_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_soc_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info), + .ei_array = wlfw_soc_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_version_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_fw_version_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_version_info), + .ei_array = wlfw_fw_version_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct 
wlfw_bdf_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_bdf_download_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_report_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data_len), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = QMI_WLFW_MAX_NUM_CAL_V01, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = 
offsetof(struct wlfw_cal_report_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct wlfw_initiate_cal_download_ind_msg_v01, + cal_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct 
wlfw_cal_download_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct wlfw_initiate_cal_update_ind_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_initiate_cal_update_ind_msg_v01, + total_size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_update_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = 
VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + msa_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01, + .elem_size = sizeof(struct wlfw_memory_region_info_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info), + .ei_array = wlfw_memory_region_info_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ini_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ini_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_ini_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + 
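		/*
		 * Every elem_info table in this file is terminated by a
		 * QMI_EOTI sentinel entry like this one; the QMI
		 * encode/decode core stops walking the array when it
		 * reaches it.
		 */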
.data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + data_len), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_athdiag_read_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_athdiag_read_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_athdiag_read_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_athdiag_read_resp_msg_v01, + data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct wlfw_athdiag_write_req_msg_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_athdiag_write_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct wlfw_athdiag_write_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct wlfw_athdiag_write_req_msg_v01, + data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_athdiag_write_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_vbatt_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = 
sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_vbatt_req_msg_v01, + voltage_uv), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_vbatt_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01, + .elem_size = sizeof(uint8_t), + .is_array = STATIC_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_host_cap_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + 
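		/*
		 * (respond_mem pairs with the REQUEST_MEM indication above:
		 * the host answers the firmware's memory request with the
		 * physical address and size of a region it has set aside.)
		 */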
}, +}; + +struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_rejuvenate_ack_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_dynamic_feature_mask_req_msg_v01, + mask_valid), + }, + { + .data_type = 
QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_req_msg_v01, + mask), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + diff --git a/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h new file mode 100644 index 000000000000..21513b8f6200 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wcn3990_qmi_service_v01.h @@ -0,0 +1,619 @@ + /* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef WLAN_FIRMWARE_SERVICE_V01_H +#define WLAN_FIRMWARE_SERVICE_V01_H + +#define WLFW_SERVICE_ID_V01 0x45 +#define WLFW_SERVICE_VERS_V01 0x01 + +#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 +#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 +#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A +#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B +#define QMI_WLFW_CAP_REQ_V01 0x0024 +#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 +#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 +#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 +#define QMI_WLFW_INI_RESP_V01 0x002F +#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 +#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 +#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 +#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 +#define QMI_WLFW_MSA_READY_IND_V01 0x002B +#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 +#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 +#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 +#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038 +#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLFW_REJUVENATE_IND_V01 0x0039 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B +#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022 +#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036 +#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C +#define QMI_WLFW_FW_READY_IND_V01 0x0021 +#define QMI_WLFW_MSA_READY_RESP_V01 0x002E +#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029 +#define QMI_WLFW_INI_REQ_V01 0x002F +#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025 +#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A +#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D +#define QMI_WLFW_MSA_READY_REQ_V01 0x002E +#define QMI_WLFW_CAP_RESP_V01 0x0024 +#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A +#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 +#define QMI_WLFW_VBATT_REQ_V01 0x0032 +#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 +#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 +#define QMI_WLFW_VBATT_RESP_V01 0x0032 +#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D +#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027 +#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030 +#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023 +#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 + +#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2 +#define QMI_WLFW_MAX_NUM_CAL_V01 5 +#define QMI_WLFW_MAX_DATA_SIZE_V01 6144 +#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_CE_V01 12 +#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 +#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 +#define QMI_WLFW_MAX_STR_LEN_V01 16 +#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 +#define QMI_WLFW_MAC_ADDR_SIZE_V01 6 +#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36 +#define QMI_WLFW_MAX_NUM_SVC_V01 24 + +enum wlfw_driver_mode_enum_v01 { + WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_MISSION_V01 = 0, + QMI_WLFW_FTM_V01 = 1, + QMI_WLFW_EPPING_V01 = 2, + QMI_WLFW_WALTEST_V01 = 3, + QMI_WLFW_OFF_V01 = 4, + QMI_WLFW_CCPM_V01 = 5, + QMI_WLFW_QVIT_V01 = 6, + QMI_WLFW_CALIBRATION_V01 = 7, + WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum wlfw_cal_temp_id_enum_v01 { + WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0, + QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1, + QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2, + QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3, + QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4, + WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum wlfw_pipedir_enum_v01 { + WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_PIPEDIR_NONE_V01 = 0, + 
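	/*
	 * The direction values appear to follow the ath10k copy engine
	 * convention (IN = target-to-host, OUT = host-to-target); the
	 * firmware consumes them through the tgt_cfg/svc_cfg arrays of
	 * the WLAN_CFG request.
	 */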
QMI_WLFW_PIPEDIR_IN_V01 = 1, + QMI_WLFW_PIPEDIR_OUT_V01 = 2, + QMI_WLFW_PIPEDIR_INOUT_V01 = 3, + WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((uint32_t)0x00) +#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((uint32_t)0x01) +#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((uint32_t)0x02) +#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((uint32_t)0x04) +#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((uint32_t)0x08) +#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((uint32_t)0x10) + +#define QMI_WLFW_ALREADY_REGISTERED_V01 ((uint64_t)0x01ULL) +#define QMI_WLFW_FW_READY_V01 ((uint64_t)0x02ULL) +#define QMI_WLFW_MSA_READY_V01 ((uint64_t)0x04ULL) +#define QMI_WLFW_FW_MEM_READY_V01 ((uint64_t)0x08ULL) + +#define QMI_WLFW_FW_REJUVENATE_V01 ((uint64_t)0x01ULL) + +struct wlfw_ce_tgt_pipe_cfg_s_v01 { + u32 pipe_num; + enum wlfw_pipedir_enum_v01 pipe_dir; + u32 nentries; + u32 nbytes_max; + u32 flags; +}; + +struct wlfw_ce_svc_pipe_cfg_s_v01 { + u32 service_id; + enum wlfw_pipedir_enum_v01 pipe_dir; + u32 pipe_num; +}; + +struct wlfw_shadow_reg_cfg_s_v01 { + u16 id; + u16 offset; +}; + +struct wlfw_shadow_reg_v2_cfg_s_v01 { + u32 addr; +}; + +struct wlfw_memory_region_info_s_v01 { + u64 region_addr; + u32 size; + u8 secure_flag; +}; + +struct wlfw_rf_chip_info_s_v01 { + u32 chip_id; + u32 chip_family; +}; + +struct wlfw_rf_board_info_s_v01 { + u32 board_id; +}; + +struct wlfw_soc_info_s_v01 { + u32 soc_id; +}; + +struct wlfw_fw_version_info_s_v01 { + u32 fw_version; + char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1]; +}; + +struct wlfw_ind_register_req_msg_v01 { + u8 fw_ready_enable_valid; + u8 fw_ready_enable; + u8 initiate_cal_download_enable_valid; + u8 initiate_cal_download_enable; + u8 initiate_cal_update_enable_valid; + u8 initiate_cal_update_enable; + u8 msa_ready_enable_valid; + u8 msa_ready_enable; + u8 pin_connect_result_enable_valid; + u8 pin_connect_result_enable; + u8 client_id_valid; + u32 client_id; + u8 request_mem_enable_valid; + u8 request_mem_enable; + u8 fw_mem_ready_enable_valid; + u8 fw_mem_ready_enable; + u8 cold_boot_cal_done_enable_valid; + u8 cold_boot_cal_done_enable; + u8 rejuvenate_enable_valid; + u32 rejuvenate_enable; +}; + +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46 +extern struct elem_info wlfw_ind_register_req_msg_v01_ei[]; + +struct wlfw_ind_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 fw_status_valid; + u64 fw_status; +}; + +#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[]; + +struct wlfw_fw_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[]; + +struct wlfw_msa_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[]; + +struct wlfw_pin_connect_result_ind_msg_v01 { + u8 pwr_pin_result_valid; + u32 pwr_pin_result; + u8 phy_io_pin_result_valid; + u32 phy_io_pin_result; + u8 rf_pin_result_valid; + u32 rf_pin_result; +}; + +#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21 +extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[]; + +struct wlfw_wlan_mode_req_msg_v01 { + enum wlfw_driver_mode_enum_v01 mode; + u8 hw_debug_valid; + u8 hw_debug; +}; + +#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[]; + +struct wlfw_wlan_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + 
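Every optional element in these wlfw messages is paired with a *_valid flag, and the elem_info tables in wcn3990_qmi_service_v01.c emit (or accept) the matching optional TLV only when that flag is non-zero. Below is a minimal usage sketch of that pattern, assuming the downstream QMI encode/decode core and the usual kernel headers; the helper name, the enabled indications and the client id are illustrative only, not taken from the patch.

#include <linux/types.h>
#include <linux/string.h>

#include "wcn3990_qmi_service_v01.h"

/* Hypothetical helper: enable a few wlfw indications before encoding and
 * sending QMI_WLFW_IND_REGISTER_REQ_V01.  Members left at zero keep their
 * *_valid flag clear, so the encoder simply skips those optional TLVs.
 */
static void example_fill_ind_register_req(struct wlfw_ind_register_req_msg_v01 *req,
					  u32 client_id)
{
	memset(req, 0, sizeof(*req));

	req->fw_ready_enable_valid = 1;
	req->fw_ready_enable = 1;

	req->msa_ready_enable_valid = 1;
	req->msa_ready_enable = 1;

	req->request_mem_enable_valid = 1;
	req->request_mem_enable = 1;

	/* Emitted as an optional TLV only because client_id_valid is set. */
	req->client_id_valid = 1;
	req->client_id = client_id;
}

The same convention applies on the response side: the decoder sets the *_valid flag when the optional TLV was present in the firmware's reply, so callers check it before trusting the paired member.
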
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[]; + +struct wlfw_wlan_cfg_req_msg_v01 { + u8 host_version_valid; + char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1]; + u8 tgt_cfg_valid; + u32 tgt_cfg_len; + struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01]; + u8 svc_cfg_valid; + u32 svc_cfg_len; + struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01]; + u8 shadow_reg_valid; + u32 shadow_reg_len; + struct wlfw_shadow_reg_cfg_s_v01 + shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01]; + u8 shadow_reg_v2_valid; + u32 shadow_reg_v2_len; + struct wlfw_shadow_reg_v2_cfg_s_v01 + shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01]; +}; + +#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803 +extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[]; + +struct wlfw_wlan_cfg_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[]; + +struct wlfw_cap_req_msg_v01 { + char placeholder; +}; + +#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cap_req_msg_v01_ei[]; + +struct wlfw_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 chip_info_valid; + struct wlfw_rf_chip_info_s_v01 chip_info; + u8 board_info_valid; + struct wlfw_rf_board_info_s_v01 board_info; + u8 soc_info_valid; + struct wlfw_soc_info_s_v01 soc_info; + u8 fw_version_info_valid; + struct wlfw_fw_version_info_s_v01 fw_version_info; + u8 fw_build_id_valid; + char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1]; +}; + +#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 203 +extern struct elem_info wlfw_cap_resp_msg_v01_ei[]; + +struct wlfw_bdf_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178 +extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[]; + +struct wlfw_bdf_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[]; + +struct wlfw_cal_report_req_msg_v01 { + u32 meta_data_len; + enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01]; +}; + +#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24 +extern struct elem_info wlfw_cal_report_req_msg_v01_ei[]; + +struct wlfw_cal_report_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_download_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; +}; + +#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[]; + +struct wlfw_cal_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178 +extern struct elem_info wlfw_cal_download_req_msg_v01_ei[]; + +struct wlfw_cal_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern 
struct elem_info wlfw_cal_download_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_update_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 total_size; +}; + +#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14 +extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[]; + +struct wlfw_cal_update_req_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 seg_id; +}; + +#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14 +extern struct elem_info wlfw_cal_update_req_msg_v01_ei[]; + +struct wlfw_cal_update_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181 +extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[]; + +struct wlfw_msa_info_req_msg_v01 { + u64 msa_addr; + u32 size; +}; + +#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_msa_info_req_msg_v01_ei[]; + +struct wlfw_msa_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u32 mem_region_info_len; + struct wlfw_memory_region_info_s_v01 + mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01]; +}; + +#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37 +extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[]; + +struct wlfw_msa_ready_req_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[]; + +struct wlfw_msa_ready_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[]; + +struct wlfw_ini_req_msg_v01 { + u8 enablefwlog_valid; + u8 enablefwlog; +}; + +#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct elem_info wlfw_ini_req_msg_v01_ei[]; + +struct wlfw_ini_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_ini_resp_msg_v01_ei[]; + +struct wlfw_athdiag_read_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; +}; + +#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21 +extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[]; + +struct wlfw_athdiag_read_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; +}; + +#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156 +extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[]; + +struct wlfw_athdiag_write_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; +}; + +#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163 +extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[]; + +struct wlfw_athdiag_write_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[]; + +struct wlfw_vbatt_req_msg_v01 { + u64 voltage_uv; +}; + +#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_vbatt_req_msg_v01_ei[]; + +struct wlfw_vbatt_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[]; + +struct wlfw_mac_addr_req_msg_v01 { + u8 mac_addr_valid; + u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01]; +}; + +#define 
WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9 +extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[]; + +struct wlfw_mac_addr_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[]; + +struct wlfw_host_cap_req_msg_v01 { + u8 daemon_support_valid; + u8 daemon_support; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct elem_info wlfw_host_cap_req_msg_v01_ei[]; + +struct wlfw_host_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[]; + +struct wlfw_request_mem_ind_msg_v01 { + u32 size; +}; + +#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[]; + +struct wlfw_respond_mem_req_msg_v01 { + u64 addr; + u32 size; +}; + +#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[]; + +struct wlfw_respond_mem_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[]; + +struct wlfw_fw_mem_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[]; + +struct wlfw_cold_boot_cal_done_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[]; + +struct wlfw_rejuvenate_ind_msg_v01 { + u8 cause_for_rejuvenation_valid; + u8 cause_for_rejuvenation; + u8 requesting_sub_system_valid; + u8 requesting_sub_system; + u8 line_number_valid; + u16 line_number; + u8 function_name_valid; + char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1]; +}; + +#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144 +extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_req_msg_v01 { + char placeholder; +}; + +#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_req_msg_v01 { + u8 mask_valid; + u64 mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 prev_mask_valid; + u64 prev_mask; + u8 curr_mask_valid; + u64 curr_mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29 +extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[]; + +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index cfed5808bc4e..468ad47f0298 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -29,8 +29,12 @@ struct wmi_ops { struct wmi_scan_ev_arg *arg); int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg); + int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg); int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb, struct wmi_ch_info_ev_arg *arg); + int (*pull_peer_delete_resp)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_peer_delete_resp_ev_arg *arg); int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb, struct wmi_vdev_start_ev_arg *arg); int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb, @@ -51,6 +55,8 @@ struct wmi_ops { struct wmi_roam_ev_arg *arg); int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb, struct wmi_wow_ev_arg *arg); + int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_echo_ev_arg *arg); enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); @@ -123,7 +129,10 @@ struct wmi_ops { enum wmi_force_fw_hang_type type, u32 delay_ms); struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); - struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable, + struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, + struct sk_buff *skb, + dma_addr_t paddr); + struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar); @@ -156,6 +165,10 @@ struct wmi_ops { u32 num_ac); struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar, const struct wmi_sta_keepalive_arg *arg); + struct sk_buff *(*gen_set_arp_ns_offload)(struct ath10k *ar, + struct ath10k_vif *arvif); + struct sk_buff *(*gen_gtk_offload)(struct ath10k *ar, + struct ath10k_vif *arvif); struct sk_buff *(*gen_wow_enable)(struct ath10k *ar); struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id, enum wmi_wow_wakeup_event event, @@ -186,6 +199,20 @@ struct wmi_ops { u8 enable, u32 detect_level, u32 detect_margin); + struct sk_buff *(*gen_set_pdev_mac_addr)(struct ath10k *ar, u32 pdev_id, + u8 *mac_addr); + + struct sk_buff *(*ext_resource_config)(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap); + int (*get_vdev_subtype)(struct ath10k *ar, + enum wmi_vdev_subtype subtype); + struct sk_buff *(*gen_pdev_bss_chan_info_req) + (struct ath10k *ar, + enum wmi_bss_survey_req_type type); + struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value); + struct sk_buff *(*gen_csa_offload)(struct ath10k *ar, + u32 vdev_id, bool enable); }; int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); @@ -222,6 +249,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, } static inline int +ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_tx_compl) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg); +} + +static inline int ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) { @@ -232,6 +269,16 @@ ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, } static inline int +ath10k_wmi_pull_peer_delete_resp(struct ath10k *ar, 
struct sk_buff *skb, + struct wmi_peer_delete_resp_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_peer_delete_resp) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_peer_delete_resp(ar, skb, arg); +} + +static inline int ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb, struct wmi_ch_info_ev_arg *arg) { @@ -341,6 +388,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_wow_event(ar, skb, arg); } +static inline int +ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_echo_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_echo_ev(ar, skb, arg); +} + static inline enum wmi_txbf_conf ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) { @@ -351,6 +408,28 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) } static inline int +ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +{ + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_mgmt_tx_send) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_send_cmdid); + if (ret) + return ret; + + return 0; +} + +static inline int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); @@ -364,7 +443,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) if (IS_ERR(skb)) return PTR_ERR(skb); - ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid); + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_cmdid); if (ret) return ret; @@ -930,7 +1010,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar, } static inline int -ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level) +ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) { struct sk_buff *skb; @@ -1145,6 +1225,40 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar, } static inline int +ath10k_wmi_set_arp_ns_offload(struct ath10k *ar, struct ath10k_vif *arvif) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_set_arp_ns_offload) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_set_arp_ns_offload(ar, arvif); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->set_arp_ns_offload_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_gtk_offload) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_gtk_offload(ar, arvif); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->gtk_offload_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int ath10k_wmi_wow_enable(struct ath10k *ar) { struct sk_buff *skb; @@ -1333,4 +1447,105 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); } +static inline int +ath10k_wmi_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->ext_resource_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->ext_resource_config(ar, type, + fw_feature_bitmap); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->ext_resource_cfg_cmdid); +} + +static inline int +ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) +{ + if 
(!ar->wmi.ops->get_vdev_subtype) + return -EOPNOTSUPP; + + return ar->wmi.ops->get_vdev_subtype(ar, subtype); +} + +static inline int +ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar, + enum wmi_bss_survey_req_type type) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct sk_buff *skb; + + if (!wmi->ops->gen_pdev_bss_chan_info_req) + return -EOPNOTSUPP; + + skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + wmi->cmd->pdev_bss_chan_info_request_cmdid); +} + +static inline int +ath10k_wmi_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_csa_offload) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_csa_offload(ar, vdev_id, enable); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->csa_offload_enable_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_echo(struct ath10k *ar, u32 value) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct sk_buff *skb; + + if (!wmi->ops->gen_echo) + return -EOPNOTSUPP; + + skb = wmi->ops->gen_echo(ar, value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid); +} + +static inline int +ath10k_gen_set_base_mac_addr(struct ath10k *ar, u8 *mac) +{ + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_set_pdev_mac_addr) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_set_pdev_mac_addr(ar, 0, mac); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_base_macaddr_cmdid); + if (ret) + return ret; + + return 0; +} + #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index c27fff39ddae..4608b696cf92 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -468,6 +468,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_VDEV_STOPPED_EVENTID: ath10k_wmi_event_vdev_stopped(ar, skb); break; + case WMI_TLV_VDEV_DELETE_RESP_EVENTID: + ath10k_wmi_event_vdev_delete_resp(ar, skb); + break; case WMI_TLV_PEER_STA_KICKOUT_EVENTID: ath10k_wmi_event_peer_sta_kickout(ar, skb); break; @@ -552,8 +555,14 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_TX_PAUSE_EVENTID: ath10k_wmi_tlv_event_tx_pause(ar, skb); break; + case WMI_TLV_PEER_DELETE_RESP_EVENTID: + ath10k_wmi_tlv_event_peer_delete_resp(ar, skb); + break; + case WMI_TLV_MGMT_TX_COMPLETION_EVENTID: + ath10k_wmi_tlv_event_mgmt_tx_compl(ar, skb); + break; default: - ath10k_warn(ar, "Unknown eventid: %d\n", id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id); break; } @@ -593,6 +602,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, return 0; } +static int ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_mgmt_tx_compl_ev *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL]; + + arg->desc_id = ev->desc_id; + arg->status = ev->status; + arg->pdev_id = ev->pdev_id; + + kfree(tb); + return 0; +} + static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) @@ -642,6 +676,34 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, return 0; } +static int ath10k_wmi_tlv_op_pull_peer_delete_ev( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_peer_delete_resp_ev_arg *arg) +{ + const void **tb; + const struct wmi_peer_delete_resp_ev_arg *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->vdev_id = ev->vdev_id; + arg->peer_addr = ev->peer_addr; + + kfree(tb); + return 0; +} + static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_ch_info_ev_arg *arg) @@ -669,6 +731,13 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, arg->noise_floor = ev->noise_floor; arg->rx_clear_count = ev->rx_clear_count; arg->cycle_count = ev->cycle_count; + arg->chan_tx_pwr_range = ev->chan_tx_pwr_range; + arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + arg->rx_frame_count = ev->rx_frame_count; + arg->my_bss_rx_cycle_count = ev->my_bss_rx_cycle_count; + arg->rx_11b_mode_data_duration = ev->rx_11b_mode_data_duration; + arg->tx_frame_cnt = ev->tx_frame_cnt; + arg->mac_clk_mhz = ev->mac_clk_mhz; kfree(tb); return 0; @@ -882,6 +951,11 @@ static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar, arg->buf_len = __le32_to_cpu(ev->buf_len); arg->phyerrs = phyerrs; + if (QCA_REV_WCN3990(ar)) { + arg->phy_err_mask0 = __le32_to_cpu(ev->rs_phy_err_mask0); + arg->phy_err_mask1 = __le32_to_cpu(ev->rs_phy_err_mask1); + } + kfree(tb); return 0; } @@ -937,7 +1011,12 @@ static int 
ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]; reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]; - svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32]; + if (QCA_REV_WCN3990(ar)) { + svc_bmap = (__le32 *)(skb->data + + WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET); + } else { + svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32]; + } mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT]; if (!ev || !reg || !svc_bmap || !mem_reqs) { @@ -1229,6 +1308,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + const void **tb; + const struct wmi_echo_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->value = ev->value; + + kfree(tb); + return 0; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt) { @@ -1379,7 +1485,18 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); - cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); + if (QCA_REV_WCN3990(ar)) { + cfg->num_peers = __cpu_to_le32(TARGET_HL_10_TLV_NUM_PEERS); + cfg->ast_skid_limit = + __cpu_to_le32(TARGET_HL_10_TLV_AST_SKID_LIMIT); + cfg->num_wds_entries = + __cpu_to_le32(TARGET_HL_10_TLV_NUM_WDS_ENTRIES); + } else { + cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); + cfg->ast_skid_limit = __cpu_to_le32(0x10); + cfg->num_wds_entries = __cpu_to_le32(0x20); + } + if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); @@ -1391,7 +1508,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_peer_keys = __cpu_to_le32(2); cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); - cfg->ast_skid_limit = __cpu_to_le32(0x10); cfg->tx_chain_mask = __cpu_to_le32(0x7); cfg->rx_chain_mask = __cpu_to_le32(0x7); cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64); @@ -1407,7 +1523,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_mcast_table_elems = __cpu_to_le32(0); cfg->mcast2ucast_mode = __cpu_to_le32(0); cfg->tx_dbg_log_size = __cpu_to_le32(0x400); - cfg->num_wds_entries = __cpu_to_le32(0x20); cfg->dma_burst_size = __cpu_to_le32(0); cfg->mac_aggr_delim = __cpu_to_le32(0); cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0); @@ -1428,7 +1543,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_ocb_vdevs = __cpu_to_le32(0); cfg->num_ocb_channels = __cpu_to_le32(0); cfg->num_ocb_schedules = __cpu_to_le32(0); - cfg->host_capab = __cpu_to_le32(0); + cfg->host_capab = + __cpu_to_le32(WMI_TLV_TX_MSDU_ID_NEW_PARTITION_SUPPORT); ath10k_wmi_put_host_mem_chunks(ar, chunks); @@ -1482,10 +1598,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, cmd->ie_len = __cpu_to_le32(arg->ie_len); cmd->num_probes = __cpu_to_le32(3); - /* FIXME: There are some scan flag inconsistencies across firmwares, - * e.g. WMI-TLV inverts the logic behind the following flag. 
- */ - cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); + if (!QCA_REV_WCN3990(ar)) { + cmd->common.scan_ctrl_flags ^= + __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); + } ptr += sizeof(*tlv); ptr += sizeof(*cmd); @@ -2406,6 +2522,30 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, return skb; } +static int +ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, + dma_addr_t paddr) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + int ret; + + pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC); + if (!pkt_addr) + return -ENOMEM; + + pkt_addr->vaddr = skb; + pkt_addr->paddr = paddr; + + spin_lock_bh(&wmi->mgmt_tx_lock); + ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0, + wmi->mgmt_max_num_pending_tx, GFP_ATOMIC); + spin_unlock_bh(&wmi->mgmt_tx_lock); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id %d\n", ret); + return ret; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) { @@ -2428,6 +2568,84 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) } static struct sk_buff * +ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct wmi_tlv_mgmt_tx_cmd *cmd; + struct ieee80211_hdr *hdr; + struct ath10k_vif *arvif; + u32 buf_len = msdu->len; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int desc_id, len; + u32 vdev_id; + void *ptr; + u16 fc; + + hdr = (struct ieee80211_hdr *)msdu->data; + fc = le16_to_cpu(hdr->frame_control); + + if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; + vdev_id = arvif->vdev_id; + } else { + return ERR_PTR(-EINVAL); + } + + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) + return ERR_PTR(-EINVAL); + + len = sizeof(*cmd) + buf_len; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + len += IEEE80211_CCMP_MIC_LEN; + buf_len += IEEE80211_CCMP_MIC_LEN; + } + + len += sizeof(*tlv); + len = round_up(len, 4); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr); + if (desc_id < 0) + goto msdu_id_alloc_fail; + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); + tlv->len = __cpu_to_le16(sizeof(cmd->hdr)); + cmd = (void *)tlv->value; + cmd->hdr.vdev_id = vdev_id; + cmd->hdr.desc_id = desc_id; + cmd->hdr.chanfreq = 0; + cmd->hdr.buf_len = __cpu_to_le32(buf_len); + cmd->hdr.frame_len = __cpu_to_le32(msdu->len); + cmd->hdr.paddr = __cpu_to_le64(paddr); + cmd->data_len = buf_len; + cmd->data_tag = 0x11; + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(sizeof(msdu->len)); + memcpy(cmd->buf, msdu->data, buf_len); + + return skb; + +msdu_id_alloc_fail: + dev_kfree_skb(skb); + return ERR_PTR(desc_id); +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms) @@ -2452,7 +2670,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, } static struct sk_buff * -ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, +ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) { struct wmi_tlv_dbglog_cmd *cmd; struct wmi_tlv 
*tlv; @@ -2861,6 +3079,165 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar, } static struct sk_buff * +ath10k_wmi_tlv_op_gen_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable) +{ + struct wmi_csa_offload_enable_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd) + sizeof(*tlv); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->vdev_id = __cpu_to_le32(vdev_id); + if (enable) + cmd->csa_offload_enable |= + __cpu_to_le32(WMI_CSA_OFFLOAD_ENABLE); + else + cmd->csa_offload_enable |= + __cpu_to_le32(WMI_CSA_OFFLOAD_DISABLE); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi CSA offload for vdev: %d\n", vdev_id); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif) +{ + struct wmi_tlv_gtk_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + struct wmi_gtk_rekey_data *rekey_data = &arvif->gtk_rekey_data; + int len; + + len = sizeof(*cmd) + sizeof(*tlv); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + if (rekey_data->enable_offload) { + cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); + cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_ENABLE_OPCODE); + memcpy(cmd->kek, rekey_data->kek, NL80211_KEK_LEN); + memcpy(cmd->kck, rekey_data->kck, NL80211_KCK_LEN); + cmd->replay_ctr = __cpu_to_le64(rekey_data->replay_ctr); + } else { + cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); + cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_DISABLE_OPCODE); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi GTK offload for vdev: %d\n", arvif->vdev_id); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, + struct ath10k_vif *arvif) +{ + struct wmi_tlv_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + void *ptr; + int i; + struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload; + struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload; + struct wmi_ns_offload *ns_tuple; + struct wmi_arp_offload *arp_tuple; + + len = sizeof(*cmd) + sizeof(*tlv) + + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * + (sizeof(struct wmi_ns_offload) + sizeof(*tlv)) + + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * + (sizeof(struct wmi_arp_offload) + sizeof(*tlv)); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (struct wmi_tlv_arp_ns_offload_cmd *)tlv->value; + cmd->flags = __cpu_to_le32(0); + cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); + + ptr += (sizeof(*tlv) + sizeof(*cmd)); + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * + (sizeof(struct wmi_ns_offload) + sizeof(*tlv))); + ptr += sizeof(*tlv); + tlv = ptr; + + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE); + tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload)); + ns_tuple = (struct wmi_ns_offload *)tlv->value; + if (ns->enable_offload) { + ns_tuple->flags |= + 
__cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); + if (ns->info.target_addr_valid.s6_addr[i]) { + memcpy(&ns_tuple->target_ipaddr[0], + &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + memcpy(&ns_tuple->solicitation_ipaddr, + &ns->info.self_addr[i], sizeof(struct in6_addr)); + if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY) + ns_tuple->flags |= + __cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST); + } else { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + } + ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload)); + tlv = ptr; + } + + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * + (sizeof(struct wmi_arp_offload) + sizeof(*tlv))); + ptr += sizeof(*tlv); + tlv = ptr; + + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE); + tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload)); + arp_tuple = (struct wmi_arp_offload *)tlv->value; + if (arp->enable_offload && (i == 0)) { + arp_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); + memcpy(&arp_tuple->target_ipaddr, + &arp->params.ipv4_addr, + sizeof(arp_tuple->target_ipaddr)); + } else { + arp_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + } + ptr += (sizeof(*tlv) + sizeof(struct wmi_arp_offload)); + tlv = ptr; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set arp ns offload\n"); + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) { struct wmi_tlv_wow_enable_cmd *cmd; @@ -2879,6 +3256,8 @@ ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) cmd = (void *)tlv->value; cmd->enable = __cpu_to_le32(1); + if (QCA_REV_WCN3990(ar)) + cmd->pause_iface_config = __cpu_to_le32(1); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n"); return skb; @@ -3065,6 +3444,33 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id, } static struct sk_buff * +ath10k_wmi_tlv_op_gen_set_base_mac_addr(struct ath10k *ar, u32 pdev_id, + u8 *mac_addr) +{ + struct wmi_tlv_mac_addr_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (struct wmi_tlv *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->pdev_id = __cpu_to_le32(pdev_id); + ether_addr_copy(cmd->mac_addr.addr, mac_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set_base_mac addr pdev_id %d mac addr %pM\n", + cmd->pdev_id, cmd->mac_addr.addr); + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable) { struct wmi_tlv_adaptive_qcs *cmd; @@ -3092,6 +3498,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable) return skb; } +static struct sk_buff * +ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value) +{ + struct wmi_echo_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->value = cpu_to_le32(value); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value); + return skb; +} + 
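/*
 * Editor's note (illustrative sketch, not part of the patch): the echo
 * command/event pair added above is used elsewhere in this series as a
 * WMI "barrier" -- the host sends an echo carrying a magic cookie
 * (ATH10K_WMI_BARRIER_ECHO_ID) and waits for the firmware to echo it
 * back via ath10k_wmi_event_echo(), which completes &ar->wmi.barrier.
 * The standalone userspace sketch below only models that handshake with
 * a condition variable; every name in it (DEMO_BARRIER_ECHO_ID,
 * demo_echo_event, demo_firmware) is invented for illustration and does
 * not exist in the driver.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BARRIER_ECHO_ID 0x0BA991E9u /* arbitrary cookie, mirrors the patch */

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;
static bool demo_barrier_done;

/* Models ath10k_wmi_event_echo(): complete the barrier on cookie match. */
static void demo_echo_event(uint32_t value)
{
	if (value != DEMO_BARRIER_ECHO_ID)
		return;

	pthread_mutex_lock(&demo_lock);
	demo_barrier_done = true;
	pthread_cond_signal(&demo_cond);
	pthread_mutex_unlock(&demo_lock);
}

/* Models the "firmware": it simply echoes the received value back. */
static void *demo_firmware(void *arg)
{
	uint32_t value = *(uint32_t *)arg;

	demo_echo_event(value);
	return NULL;
}

int main(void)
{
	uint32_t cookie = DEMO_BARRIER_ECHO_ID;
	pthread_t fw;

	/* "Send" the echo command, then block until the echo event returns. */
	pthread_create(&fw, NULL, demo_firmware, &cookie);

	pthread_mutex_lock(&demo_lock);
	while (!demo_barrier_done)
		pthread_cond_wait(&demo_cond, &demo_lock);
	pthread_mutex_unlock(&demo_lock);

	pthread_join(fw, NULL);
	printf("echo barrier completed\n");
	return 0;
}
/* Build with: cc -pthread demo.c -- illustration only, not driver code. */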
/****************/ /* TLV mappings */ /****************/ @@ -3138,6 +3572,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID, .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID, .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID, + .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD, .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID, .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID, .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID, @@ -3420,6 +3855,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; static const struct wmi_ops wmi_tlv_ops = { @@ -3428,7 +3864,9 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, + .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev, .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, + .pull_peer_delete_resp = ath10k_wmi_tlv_op_pull_peer_delete_ev, .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev, @@ -3439,6 +3877,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, + .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev, .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, @@ -3470,7 +3909,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, - /* .gen_mgmt_tx = not implemented; HTT is used */ + .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, @@ -3485,6 +3924,9 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, + .gen_set_arp_ns_offload = ath10k_wmi_tlv_op_gen_set_arp_ns_offload, + .gen_gtk_offload = ath10k_wmi_op_gen_gtk_offload, + .gen_csa_offload = ath10k_wmi_tlv_op_gen_csa_offload, .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable, .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event, .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, @@ -3494,9 +3936,29 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_tlv_op_gen_echo, + .gen_set_pdev_mac_addr = ath10k_wmi_tlv_op_gen_set_base_mac_addr, +}; + +static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { + .auth = WMI_TLV_PEER_AUTH, + .qos = WMI_TLV_PEER_QOS, + .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY, + .apsd = WMI_TLV_PEER_APSD, + .ht = WMI_TLV_PEER_HT, + .bw40 = WMI_TLV_PEER_40MHZ, + .stbc = WMI_TLV_PEER_STBC, + .ldbc = WMI_TLV_PEER_LDPC, + 
.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS, + .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX, + .vht = WMI_TLV_PEER_VHT, + .bw80 = WMI_TLV_PEER_80MHZ, + .pmf = WMI_TLV_PEER_PMF, }; -/************/ /* TLV init */ /************/ @@ -3506,4 +3968,5 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; ar->wmi.ops = &wmi_tlv_ops; + ar->wmi.peer_flags = &wmi_tlv_peer_flags_map; } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index f5031f3965c5..0d8543d20968 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +22,7 @@ #define WMI_TLV_CMD_UNSUPPORTED 0 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 +#define WMI_TX_DL_FRM_LEN 64 enum wmi_tlv_grp_id { WMI_TLV_GRP_START = 0x3, @@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id { WMI_TLV_PRB_REQ_FILTER_RX_CMDID, WMI_TLV_MGMT_TX_CMDID, WMI_TLV_PRB_TMPL_CMDID, + WMI_TLV_MGMT_TX_SEND_CMD, WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG), WMI_TLV_ADDBA_SEND_CMDID, WMI_TLV_ADDBA_STATUS_CMDID, @@ -306,16 +308,21 @@ enum wmi_tlv_event_id { WMI_TLV_VDEV_STOPPED_EVENTID, WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID, WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID, + WMI_TLV_VDEV_TSF_REPORT_EVENTID, + WMI_TLV_VDEV_DELETE_RESP_EVENTID, WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER), WMI_TLV_PEER_INFO_EVENTID, WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID, WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID, WMI_TLV_PEER_STATE_EVENTID, + WMI_TLV_PEER_ASSOC_CONF_EVENTID, + WMI_TLV_PEER_DELETE_RESP_EVENTID, WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT), WMI_TLV_HOST_SWBA_EVENTID, WMI_TLV_TBTTOFFSET_UPDATE_EVENTID, WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID, WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID, + WMI_TLV_MGMT_TX_COMPLETION_EVENTID, WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG), WMI_TLV_TX_ADDBA_COMPLETE_EVENTID, WMI_TLV_BA_RSP_SSN_EVENTID, @@ -527,6 +534,24 @@ enum wmi_tlv_vdev_param { WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE, }; +enum wmi_tlv_peer_flags { + WMI_TLV_PEER_AUTH = 0x00000001, + WMI_TLV_PEER_QOS = 0x00000002, + WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_TLV_PEER_APSD = 0x00000800, + WMI_TLV_PEER_HT = 0x00001000, + WMI_TLV_PEER_40MHZ = 0x00002000, + WMI_TLV_PEER_STBC = 0x00008000, + WMI_TLV_PEER_LDPC = 0x00010000, + WMI_TLV_PEER_DYN_MIMOPS = 0x00020000, + WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000, + WMI_TLV_PEER_SPATIAL_MUX = 0x00200000, + WMI_TLV_PEER_VHT = 0x02000000, + WMI_TLV_PEER_80MHZ = 0x04000000, + WMI_TLV_PEER_PMF = 0x08000000, +}; + enum wmi_tlv_tag { WMI_TLV_TAG_LAST_RESERVED = 15, @@ -871,6 +896,11 @@ enum wmi_tlv_tag { WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT, WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM, WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR, + WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET = 176, + + WMI_TLV_TAG_STRUCT_MGMT_TX_CMD = 0x1A6, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL, + WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT = 0x1C3, WMI_TLV_TAG_MAX }; @@ -946,12 +976,56 @@ enum 
wmi_tlv_service { WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, WMI_TLV_SERVICE_MDNS_OFFLOAD, WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD, + WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, + WMI_TLV_SERVICE_OCB, + WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD, + WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT, + WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD, + WMI_TLV_SERVICE_MGMT_TX_HTT, + WMI_TLV_SERVICE_MGMT_TX_WMI, + WMI_TLV_SERVICE_EXT_MSG, + WMI_TLV_SERVICE_MAWC, + WMI_TLV_SERVICE_PEER_ASSOC_CONF, + WMI_TLV_SERVICE_EGAP, + WMI_TLV_SERVICE_STA_PMF_OFFLOAD, + WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY, + WMI_TLV_SERVICE_ENHANCED_PROXY_STA, + WMI_TLV_SERVICE_ATF, + WMI_TLV_SERVICE_COEX_GPIO, + WMI_TLV_SERVICE_AUX_SPECTRAL_INTF, + WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, + WMI_TLV_SERVICE_ENTERPRISE_MESH, + WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_TLV_SERVICE_BPF_OFFLOAD, + WMI_TLV_SERVICE_SYNC_DELETE_CMDS, + WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES, + WMI_TLV_SERVICE_NAN_DATA, + WMI_TLV_SERVICE_NAN_RTT, + WMI_TLV_SERVICE_11AX, + WMI_TLV_SERVICE_DEPRECATED_REPLACE, + WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER, + WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_TLV_SERVICE_MESH_11S, + WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT, + WMI_TLV_SERVICE_VDEV_RX_FILTER, + WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT, + WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET, + WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET, + WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER, + WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT, + WMI_TLV_SERVICE_WLAN_STATS_REPORT, + WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT, + WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD, }; #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - BIT((svc_id)%(sizeof(u32)))) + __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ + BIT((svc_id) % (sizeof(u32)))) #define SVCMAP(x, y, len) \ do { \ @@ -1102,6 +1176,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len) WMI_SERVICE_MDNS_OFFLOAD, len); SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD, WMI_SERVICE_SAP_AUTH_OFFLOAD, len); + SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI, + WMI_SERVICE_MGMT_TX_WMI, len); } #undef SVCMAP @@ -1112,6 +1188,17 @@ struct wmi_tlv { u8 value[0]; } __packed; +struct ath10k_mgmt_tx_pkt_addr { + void *vaddr; + dma_addr_t paddr; +}; + +struct wmi_tlv_mgmt_tx_compl_ev { + __le32 desc_id; + __le32 status; + __le32 pdev_id; +}; + #define WMI_TLV_MGMT_RX_NUM_RSSI 4 struct wmi_tlv_mgmt_rx_ev { @@ -1164,6 +1251,14 @@ struct wmi_tlv_svc_rdy_ev { __le32 max_num_scan_chans; __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */ struct wmi_tlv_hw_bd_info hw_bd_info[5]; +#ifdef CONFIG_ATH10K_SNOC + __le32 max_supported_macs; + __le32 wmi_fw_sub_feat_caps; + __le32 num_dbs_hw_modes; + __le32 txrx_chainmask; + __le32 default_dbs_hw_mode_index; + __le32 num_msdu_desc; +#endif } __packed; struct wmi_tlv_rdy_ev { @@ -1172,6 +1267,8 @@ struct wmi_tlv_rdy_ev { __le32 status; } __packed; +#define WMI_TLV_TX_MSDU_ID_NEW_PARTITION_SUPPORT BIT(10) + struct wmi_tlv_resource_config { __le32 num_vdevs; __le32 num_peers; @@ -1322,6 +1419,10 @@ struct wmi_tlv_phyerr_ev { __le32 tsf_l32; __le32 tsf_u32; __le32 buf_len; + __le32 pdev_id; + __le32 rs_phy_err_mask0; + __le32 rs_phy_err_mask1; + __le32 rs_phy_err_mask2; } __packed; enum wmi_tlv_dbglog_param { @@ -1477,6 +1578,21 @@ struct 
wmi_tlv_wow_add_del_event_cmd { struct wmi_tlv_wow_enable_cmd { __le32 enable; + __le32 pause_iface_config; +} __packed; + +struct wmi_tlv_arp_ns_offload_cmd { + __le32 flags; + __le32 vdev_id; + __le32 num_ns_ext_tuples; +} __packed; + +struct wmi_tlv_gtk_offload_cmd { + __le32 vdev_id; + __le32 flags; + u8 kek[NL80211_KEK_LEN]; + u8 kck[NL80211_KCK_LEN]; + __le64 replay_ctr; } __packed; struct wmi_tlv_wow_host_wakeup_ind { @@ -1629,4 +1745,24 @@ struct wmi_tlv_tx_pause_ev { void ath10k_wmi_tlv_attach(struct ath10k *ar); +struct wmi_tlv_mgmt_tx_hdr { + __le32 vdev_id; + __le32 desc_id; + __le32 chanfreq; + __le64 paddr; + __le32 frame_len; + __le32 buf_len; +} __packed; + +struct wmi_tlv_mgmt_tx_cmd { + struct wmi_tlv_mgmt_tx_hdr hdr; + __le16 data_len; + __le16 data_tag; + u8 buf[0]; +} __packed; + +struct wmi_tlv_mac_addr_cmd { + __le32 pdev_id; + struct wmi_mac_addr mac_addr; +} __packed; #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 4d8cdbfc9d42..dd6fbe971ada 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -29,6 +29,9 @@ #include "p2p.h" #include "hw.h" +#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 +#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) + /* MAIN WMI cmd track */ static struct wmi_cmd_map wmi_cmd_map = { .init_cmdid = WMI_INIT_CMDID, @@ -521,7 +524,8 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, - .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = + WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, }; /* 10.4 WMI cmd track */ @@ -705,6 +709,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, .pdev_bss_chan_info_request_cmdid = WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID, }; /* MAIN WMI VDEV param map */ @@ -780,6 +785,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -855,6 +861,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -929,6 +936,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT, }; static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { @@ -1004,6 +1012,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { @@ -1098,6 +1107,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = 
WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { @@ -1193,6 +1203,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { @@ -1288,6 +1299,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; /* firmware 10.2 specific mappings */ @@ -1544,6 +1556,62 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, +}; + +static const struct wmi_peer_flags_map wmi_peer_flags_map = { + .auth = WMI_PEER_AUTH, + .qos = WMI_PEER_QOS, + .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY, + .apsd = WMI_PEER_APSD, + .ht = WMI_PEER_HT, + .bw40 = WMI_PEER_40MHZ, + .stbc = WMI_PEER_STBC, + .ldbc = WMI_PEER_LDPC, + .dyn_mimops = WMI_PEER_DYN_MIMOPS, + .static_mimops = WMI_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_PEER_SPATIAL_MUX, + .vht = WMI_PEER_VHT, + .bw80 = WMI_PEER_80MHZ, + .vht_2g = WMI_PEER_VHT_2G, + .pmf = WMI_PEER_PMF, +}; + +static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { + .auth = WMI_10X_PEER_AUTH, + .qos = WMI_10X_PEER_QOS, + .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10X_PEER_APSD, + .ht = WMI_10X_PEER_HT, + .bw40 = WMI_10X_PEER_40MHZ, + .stbc = WMI_10X_PEER_STBC, + .ldbc = WMI_10X_PEER_LDPC, + .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS, + .static_mimops = WMI_10X_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10X_PEER_SPATIAL_MUX, + .vht = WMI_10X_PEER_VHT, + .bw80 = WMI_10X_PEER_80MHZ, +}; + +static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { + .auth = WMI_10_2_PEER_AUTH, + .qos = WMI_10_2_PEER_QOS, + .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10_2_PEER_APSD, + .ht = WMI_10_2_PEER_HT, + .bw40 = WMI_10_2_PEER_40MHZ, + .stbc = WMI_10_2_PEER_STBC, + .ldbc = WMI_10_2_PEER_LDPC, + .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS, + .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX, + .vht = WMI_10_2_PEER_VHT, + .bw80 = WMI_10_2_PEER_80MHZ, + .vht_2g = WMI_10_2_PEER_VHT_2G, + .pmf = WMI_10_2_PEER_PMF, }; void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, @@ -1573,6 +1641,7 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, ch->max_power = arg->max_power; ch->reg_power = arg->max_reg_power; ch->antenna_max = arg->max_antenna_gain; + ch->max_tx_power = arg->max_power; /* mode & flags share storage */ ch->mode = arg->mode; @@ -1660,6 +1729,8 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) struct ath10k *ar = arvif->ar; struct ath10k_skb_cb *cb; struct sk_buff *bcn; + bool dtim_zero; + bool deliver_cab; int ret; spin_lock_bh(&ar->data_lock); @@ -1679,12 +1750,14 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) arvif->beacon_state = ATH10K_BEACON_SENDING; spin_unlock_bh(&ar->data_lock); + dtim_zero = !!(cb->flags & 
ATH10K_SKB_F_DTIM_ZERO); + deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB); ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar, arvif->vdev_id, bcn->data, bcn->len, cb->paddr, - cb->bcn.dtim_zero, - cb->bcn.deliver_cab); + dtim_zero, + deliver_cab); spin_lock_bh(&ar->data_lock); @@ -1726,6 +1799,9 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) { int ret = -EOPNOTSUPP; + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + might_sleep(); if (cmd_id == WMI_CMD_UNSUPPORTED) { @@ -1744,7 +1820,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) ret = -ESHUTDOWN; (ret != -EAGAIN); - }), 3*HZ); + }), 3 * HZ); if (ret) dev_kfree_skb_any(skb); @@ -1761,16 +1837,26 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) static struct sk_buff * ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_vif *arvif; struct wmi_mgmt_tx_cmd *cmd; struct ieee80211_hdr *hdr; struct sk_buff *skb; int len; + u32 vdev_id; u32 buf_len = msdu->len; u16 fc; hdr = (struct ieee80211_hdr *)msdu->data; fc = le16_to_cpu(hdr->frame_control); + if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; + vdev_id = arvif->vdev_id; + } else { + vdev_id = 0; + } + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) return ERR_PTR(-EINVAL); @@ -1792,7 +1878,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) cmd = (struct wmi_mgmt_tx_cmd *)skb->data; - cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id); + cmd->hdr.vdev_id = __cpu_to_le32(vdev_id); cmd->hdr.tx_rate = 0; cmd->hdr.tx_power = 0; cmd->hdr.buf_len = __cpu_to_le32(buf_len); @@ -1800,7 +1886,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr)); memcpy(cmd->buf, msdu->data, msdu->len); - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n", msdu, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); trace_ath10k_tx_hdr(ar, skb->data, skb->len); @@ -2038,34 +2124,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) return 0; } -static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) -{ - enum ieee80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = IEEE80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = IEEE80211_BAND_2GHZ; - } - - return band; -} - /* If keys are configured, HW decrypts all frames * with protected bit set. Mark such frames as decrypted. 
*/ @@ -2106,10 +2164,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_event_v1 *ev_v1; struct wmi_mgmt_rx_event_v2 *ev_v2; struct wmi_mgmt_rx_hdr_v1 *ev_hdr; + struct wmi_mgmt_rx_ext_info *ext_info; size_t pull_len; u32 msdu_len; + u32 len; - if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, + ar->running_fw->fw_file.fw_features)) { ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; ev_hdr = &ev_v2->hdr.v1; pull_len = sizeof(*ev_v2); @@ -2134,6 +2195,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, if (skb->len < msdu_len) return -EPROTO; + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } /* the WMI buffer might've ended up being padded to 4 bytes due to HTC * trailer with credit update. Trim the excess garbage. */ @@ -2150,6 +2217,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, struct wmi_10_4_mgmt_rx_hdr *ev_hdr; size_t pull_len; u32 msdu_len; + struct wmi_mgmt_rx_ext_info *ext_info; + u32 len; ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; ev_hdr = &ev->hdr; @@ -2170,12 +2239,113 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, if (skb->len < msdu_len) return -EPROTO; + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } + /* Make sure bytes added for padding are removed. */ skb_trim(skb, msdu_len); return 0; } +static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar, + struct ieee80211_hdr *hdr) +{ + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + + /* FW delivers WEP Shared Auth frame with Protected Bit set and + * encrypted payload. However in case of PMF it delivers decrypted + * frames with Protected Bit set. + */ + if (ieee80211_is_auth(hdr->frame_control)) + return false; + + /* qca99x0 based FW delivers broadcast or multicast management frames + * (ex: group privacy action frames in mesh) as encrypted payload. 
+ */ + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) && + ar->hw_params.sw_decrypt_mcast_mgmt) + return false; + + return true; +} + +int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar, + struct sk_buff *skb) +{ + int ret; + struct wmi_peer_delete_resp_ev_arg arg = {}; + + ret = ath10k_wmi_pull_peer_delete_resp(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse peer delete resp: %d\n", ret); + dev_kfree_skb(skb); + return ret; + } + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_PEER_DELETE_RESP_EVENTID\n"); + complete(&ar->peer_delete_done); + + return 0; +} + +static int wmi_tlv_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, + u32 status) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + struct ath10k_wmi *wmi = &ar->wmi; + struct ieee80211_tx_info *info; + struct sk_buff *msdu; + int ret = 0; + + spin_lock_bh(&wmi->mgmt_tx_lock); + pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id); + if (!pkt_addr) { + ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n", + desc_id); + ret = -ENOENT; + goto tx_comp_process_done; + } + + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_FROM_DEVICE); + info = IEEE80211_SKB_CB(msdu); + if (!status) + info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags |= status; + ieee80211_tx_status_irqsafe(ar->hw, msdu); + ret = 0; + +tx_comp_process_done: + idr_remove(&wmi->mgmt_pending_tx, desc_id); + spin_unlock_bh(&wmi->mgmt_tx_lock); + + return ret; +} + +int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) +{ + int ret; + struct wmi_tlv_mgmt_tx_compl_ev_arg arg; + + ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret); + return ret; + } + + wmi_tlv_process_mgmt_tx_comp(ar, arg.desc_id, arg.status); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_MGMT_TX_COMPLETION_EVENTID\n"); + + return 0; +} + int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_ev_arg arg = {}; @@ -2210,22 +2380,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx status %08x\n", rx_status); - if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) { - dev_kfree_skb(skb); - return 0; - } - - if (rx_status & WMI_RX_STATUS_ERR_CRC) { + if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) || + (rx_status & (WMI_RX_STATUS_ERR_DECRYPT | + WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { dev_kfree_skb(skb); return 0; } @@ -2233,14 +2390,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) if (rx_status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; + if (rx_status & WMI_RX_STATUS_EXT_INFO) { + status->mactime = + __le64_to_cpu(arg.ext_info.rx_mac_timestamp); + status->flag |= RX_FLAG_MACTIME_END; + } /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to * MODE_11B. This means phy_mode is not a reliable source for the band * of mgmt rx. */ if (channel >= 1 && channel <= 14) { - status->band = IEEE80211_BAND_2GHZ; + status->band = NL80211_BAND_2GHZ; } else if (channel >= 36 && channel <= 165) { - status->band = IEEE80211_BAND_5GHZ; + status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to * mac80211 has been changed. 
@@ -2250,7 +2412,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } - if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) + if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ) ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); sband = &ar->mac.sbands[status->band]; @@ -2262,13 +2424,15 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); + /* Firmware is guaranteed to report all essential management frames via + * WMI while it can deliver some extra via HTT. Since there can be + * duplicates split the reporting wrt monitor/sniffing. + */ + status->flag |= RX_FLAG_SKIP_MONITOR; + ath10k_wmi_handle_wep_reauth(ar, skb, status); - /* FW delivers WEP Shared Auth frame with Protected Bit set and - * encrypted payload. However in case of PMF it delivers decrypted - * frames with Protected Bit set. */ - if (ieee80211_has_protected(hdr->frame_control) && - !ieee80211_is_auth(hdr->frame_control)) { + if (ath10k_wmi_rx_is_decrypted(ar, hdr)) { status->flag |= RX_FLAG_DECRYPTED; if (!ieee80211_is_action(hdr->frame_control) && @@ -2285,7 +2449,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_mac_handle_beacon(ar, skb); ath10k_dbg(ar, ATH10K_DBG_MGMT, - "event mgmt rx skb %p len %d ftype %02x stype %02x\n", + "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -2304,7 +2468,7 @@ static int freq_to_idx(struct ath10k *ar, int freq) struct ieee80211_supported_band *sband; int band, ch, idx = 0; - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = ar->hw->wiphy->bands[band]; if (!sband) continue; @@ -2360,6 +2524,31 @@ static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar, return 0; } +static void wlan_fill_survey_result(struct ath10k *ar, + struct survey_info *survey, + struct wmi_ch_info_ev_arg arg) +{ + u64 clock_freq; + + if (!arg.mac_clk_mhz || !survey) + return; + + clock_freq = arg.mac_clk_mhz * 1000; + + memset(survey, 0, sizeof(*survey)); + + survey->noise = __le32_to_cpu(arg.noise_floor); + survey->time = __le32_to_cpu(arg.cycle_count) / clock_freq; + survey->time_busy = __le32_to_cpu(arg.rx_clear_count) / clock_freq; + survey->time_tx = __le32_to_cpu(arg.rx_clear_count) / clock_freq; + + survey->filled = SURVEY_INFO_NOISE_DBM; + ar->ch_info_can_report_survey = true; + + survey->filled |= (SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_TX); +} + void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ch_info_ev_arg arg = {}; @@ -2404,6 +2593,12 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) goto exit; } + if (QCA_REV_WCN3990(ar)) { + survey = &ar->survey[idx]; + wlan_fill_survey_result(ar, survey, arg); + goto exit; + } + if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { if (ar->ch_info_can_report_survey) { survey = &ar->survey[idx]; @@ -2434,7 +2629,21 @@ exit: void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); + struct wmi_echo_ev_arg arg = {}; + int ret; + + ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse echo: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event echo value 0x%08x\n", + 
le32_to_cpu(arg.value)); + + if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID) + complete(&ar->wmi.barrier); } int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) @@ -2565,6 +2774,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); } +static void +ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); +} + static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) @@ -2815,11 +3034,17 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, /* fw doesn't implement vdev stats */ for (i = 0; i < num_peer_stats; i++) { - const struct wmi_10_2_4_peer_stats *src; + const struct wmi_10_2_4_ext_peer_stats *src; struct ath10k_fw_stats_peer *dst; + int stats_len; + + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); + else + stats_len = sizeof(struct wmi_10_2_4_peer_stats); src = (void *)skb->data; - if (!skb_pull(skb, sizeof(*src))) + if (!skb_pull(skb, stats_len)) return -EPROTO; dst = kzalloc(sizeof(*dst), GFP_ATOMIC); @@ -2829,6 +3054,9 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_pull_peer_stats(&src->common.old, dst); dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); + + if (ath10k_peer_stats_enabled(ar)) + dst->rx_duration = __le32_to_cpu(src->rx_duration); /* FIXME: expose 10.2 specific values */ list_add_tail(&dst->list, &stats->peers); @@ -2846,6 +3074,8 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, u32 num_pdev_ext_stats; u32 num_vdev_stats; u32 num_peer_stats; + u32 num_bcnflt_stats; + u32 stats_id; int i; if (!skb_pull(skb, sizeof(*ev))) @@ -2855,6 +3085,8 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); + stats_id = __le32_to_cpu(ev->stats_id); for (i = 0; i < num_pdev_stats; i++) { const struct wmi_10_4_pdev_stats *src; @@ -2905,15 +3137,46 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, if (!dst) continue; - ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); - dst->peer_rssi = __le32_to_cpu(src->peer_rssi); - dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); - dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); - /* FIXME: expose 10.4 specific values */ - + ath10k_wmi_10_4_pull_peer_stats(src, dst); list_add_tail(&dst->list, &stats->peers); } + for (i = 0; i < num_bcnflt_stats; i++) { + const struct wmi_10_4_bss_bcn_filter_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing it is + * required to parse following sub-structures properly. 
+ */ + } + + if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0) + return 0; + + stats->extended = true; + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_4_peer_extd_stats *src; + struct ath10k_fw_extd_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->rx_duration = __le32_to_cpu(src->rx_duration); + list_add_tail(&dst->list, &stats->peers_extd); + } + return 0; } @@ -2979,6 +3242,12 @@ void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) complete(&ar->vdev_setup_done); } +void ath10k_wmi_event_vdev_delete_resp(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n"); + complete(&ar->vdev_delete_done); +} + static int ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_peer_kick_ev_arg *arg) @@ -3135,10 +3404,10 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); if (tim->dtim_count == 0) { - ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true; + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO; if (__le32_to_cpu(tim_info->tim_mcast) == 1) - ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB; } ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", @@ -3150,7 +3419,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, struct sk_buff *bcn, const struct wmi_p2p_noa_info *noa) { - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (!arvif->vif->p2p) return; ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); @@ -3210,6 +3479,50 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data; + u32 map; + size_t i; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; + + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; + + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. 
+ */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + i++; + } + + return 0; +} + static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_swba_ev_arg *arg) @@ -3332,6 +3645,12 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) continue; } + /* mac80211 would have already asked us to stop beaconing and + * bring the vdev down, so continue in that case + */ + if (!arvif->is_up) + continue; + /* There are no completions for beacons so wait for next SWBA * before telling mac80211 to decrement CSA counter * @@ -3381,7 +3700,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ath10k_warn(ar, "failed to map beacon: %d\n", ret); dev_kfree_skb_any(bcn); - ret = -EIO; goto skip; } @@ -3558,7 +3876,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar, phyerr->tsf_timestamp, tsf, buf_len); /* Skip event if DFS disabled */ - if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) + if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) return; ATH10K_DFS_STAT_INC(ar, pulses_total); @@ -3831,19 +4149,20 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) left_len -= buf_len; - switch (phy_err_code) { - case PHY_ERROR_RADAR: + if ((phy_err_code == PHY_ERROR_RADAR) || + (hdr_arg.phy_err_mask0 & + WMI_PHY_ERROR_MASK0_RADAR)) { ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); - break; - case PHY_ERROR_SPECTRAL_SCAN: + } else if ((phy_err_code == + WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN) || + (hdr_arg.phy_err_mask0 & + WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN)) { ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); - break; - case PHY_ERROR_FALSE_RADAR_EXT: + } else if ((phy_err_code == PHY_ERROR_FALSE_RADAR_EXT) || + (hdr_arg.phy_err_mask0 & + WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) { ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); - break; - default: - break; } phyerr = phyerr + phyerr_arg.hdr_len + buf_len; @@ -3969,6 +4288,7 @@ void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) return; } + ar->wow.wakeup_reason = ev.wake_reason; ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n", wow_reason(ev.wake_reason)); } @@ -4278,34 +4598,58 @@ void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); } -static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, - u32 num_units, u32 unit_len) +static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) { dma_addr_t paddr; - u32 pool_size; + u32 pool_size = 0; int idx = ar->wmi.num_mem_chunks; + void *vaddr = NULL; - pool_size = num_units * round_up(unit_len, 4); + if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks)) + return -ENOMEM; - if (!pool_size) - return -EINVAL; + while (!vaddr && num_units) { + pool_size = num_units * round_up(unit_len, 4); + if (!pool_size) + return -EINVAL; - ar->wmi.mem_chunks[idx].vaddr 
= dma_alloc_coherent(ar->dev, - pool_size, - &paddr, - GFP_KERNEL); - if (!ar->wmi.mem_chunks[idx].vaddr) { - ath10k_warn(ar, "failed to allocate memory chunk\n"); - return -ENOMEM; + vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN); + if (!vaddr) + num_units /= 2; } - memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); + if (!num_units) + return -ENOMEM; + + paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE); + if (dma_mapping_error(ar->dev, paddr)) { + kfree(vaddr); + return -ENOMEM; + } + ar->wmi.mem_chunks[idx].vaddr = vaddr; ar->wmi.mem_chunks[idx].paddr = paddr; ar->wmi.mem_chunks[idx].len = pool_size; ar->wmi.mem_chunks[idx].req_id = req_id; ar->wmi.num_mem_chunks++; + return num_units; +} + +static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) +{ + int ret; + + while (num_units) { + ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len); + if (ret < 0) + return ret; + + num_units -= ret; + } + return 0; } @@ -4470,10 +4814,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", arg.service_map, arg.service_map_len); - /* only manually set fw features when not using FW IE format */ - if (ar->fw_api == 1 && ar->fw_version_build > 636) - set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); - if (ar->num_rf_chains > ar->max_spatial_stream) { ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, ar->max_spatial_stream); @@ -4503,10 +4843,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) } if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC + + ar->max_num_vdevs; + else + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + + ar->max_num_vdevs; + ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + - TARGET_10_4_NUM_VDEVS; - ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + - TARGET_10_4_NUM_VDEVS; + ar->max_num_vdevs; ar->num_tids = ar->num_active_peers * 2; ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; } @@ -4620,6 +4966,50 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + struct wmi_echo_event *ev = (void *)skb->data; + + arg->value = ev->value; + + return 0; +} + +void +ath10k_generate_mac_addr_auto(struct ath10k *ar, struct wmi_rdy_ev_arg *arg) +{ + unsigned int soc_serial_num; + u8 bdata_mac_addr[ETH_ALEN]; + u8 udef_mac_addr[] = {0x00, 0x0A, 0xF5, 0x00, 0x00, 0x00}; + + soc_serial_num = socinfo_get_serial_number(); + if (!soc_serial_num) + return; + + if (arg->mac_addr) { + ether_addr_copy(ar->base_mac_addr, arg->mac_addr); + ether_addr_copy(bdata_mac_addr, arg->mac_addr); + soc_serial_num &= 0x00ffffff; + bdata_mac_addr[3] = (soc_serial_num >> 16) & 0xff; + bdata_mac_addr[4] = (soc_serial_num >> 8) & 0xff; + bdata_mac_addr[5] = soc_serial_num & 0xff; + ether_addr_copy(ar->mac_addr, bdata_mac_addr); + } else { + /* If mac address not encoded in wlan board data, + * Auto-generate mac address using device serial + * number and user defined mac address 'udef_mac_addr'. 
+ */ + udef_mac_addr[3] = (soc_serial_num >> 16) & 0xff; + udef_mac_addr[4] = (soc_serial_num >> 8) & 0xff; + udef_mac_addr[5] = soc_serial_num & 0xff; + ether_addr_copy(ar->base_mac_addr, udef_mac_addr); + udef_mac_addr[2] = (soc_serial_num >> 24) & 0xff; + ether_addr_copy(ar->mac_addr, udef_mac_addr); + } +} + int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) { struct wmi_rdy_ev_arg arg = {}; @@ -4638,7 +5028,11 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) arg.mac_addr, __le32_to_cpu(arg.status)); - ether_addr_copy(ar->mac_addr, arg.mac_addr); + if (QCA_REV_WCN3990(ar)) + ath10k_generate_mac_addr_auto(ar, &arg); + else + ether_addr_copy(ar->mac_addr, arg.mac_addr); + complete(&ar->wmi.unified_ready); return 0; } @@ -4655,6 +5049,58 @@ static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) return 0; } +static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_pdev_bss_chan_info_event *ev; + struct survey_info *survey; + u64 busy, total, tx, rx, rx_bss; + u32 freq, noise_floor; + u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz; + int idx; + + ev = (struct wmi_pdev_bss_chan_info_event *)skb->data; + if (WARN_ON(skb->len < sizeof(*ev))) + return -EPROTO; + + freq = __le32_to_cpu(ev->freq); + noise_floor = __le32_to_cpu(ev->noise_floor); + busy = __le64_to_cpu(ev->cycle_busy); + total = __le64_to_cpu(ev->cycle_total); + tx = __le64_to_cpu(ev->cycle_tx); + rx = __le64_to_cpu(ev->cycle_rx); + rx_bss = __le64_to_cpu(ev->cycle_rx_bss); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", + freq, noise_floor, busy, total, tx, rx, rx_bss); + + spin_lock_bh(&ar->data_lock); + idx = freq_to_idx(ar, freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", + freq, idx); + goto exit; + } + + survey = &ar->survey[idx]; + + survey->noise = noise_floor; + survey->time = div_u64(total, cc_freq_hz); + survey->time_busy = div_u64(busy, cc_freq_hz); + survey->time_rx = div_u64(rx_bss, cc_freq_hz); + survey->time_tx = div_u64(tx, cc_freq_hz); + survey->filled |= (SURVEY_INFO_NOISE_DBM | + SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX); +exit: + spin_unlock_bh(&ar->data_lock); + complete(&ar->bss_survey_done); + return 0; +} + static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -4900,6 +5346,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_10_2_event_id id; + bool consumed; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); @@ -4909,6 +5356,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. 
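/*
 * Sketch of the serial-number based MAC derivation that
 * ath10k_generate_mac_addr_auto() above performs for WCN3990: keep the
 * board-data address if present, otherwise start from the user-defined base
 * 00:0A:F5:00:00:00, then stamp the low 24 bits of the SoC serial number into
 * the last three octets.  Simplified (the driver additionally folds serial
 * bits into octet 2 of the auto-generated ar->mac_addr); the serial value
 * used here is a made-up constant, the real one comes from socinfo.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void derive_mac(const uint8_t *bdata_mac, uint32_t serial,
                       uint8_t out[ETH_ALEN])
{
        static const uint8_t udef_base[ETH_ALEN] = {
                0x00, 0x0A, 0xF5, 0x00, 0x00, 0x00
        };

        if (bdata_mac)
                memcpy(out, bdata_mac, ETH_ALEN);       /* keep board-data OUI */
        else
                memcpy(out, udef_base, ETH_ALEN);       /* fall back to base */

        serial &= 0x00ffffff;
        out[3] = (serial >> 16) & 0xff;
        out[4] = (serial >> 8) & 0xff;
        out[5] = serial & 0xff;
}

int main(void)
{
        uint8_t mac[ETH_ALEN];

        derive_mac(NULL, 0x0012abcd, mac);      /* illustrative serial number */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}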
+ */ + if (consumed && id != WMI_10_2_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } + switch (id) { case WMI_10_2_MGMT_RX_EVENTID: ath10k_wmi_event_mgmt_rx(ar, skb); @@ -4998,6 +5457,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_2_PDEV_TEMPERATURE_EVENTID: ath10k_wmi_event_temperature(ar, skb); break; + case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID: + ath10k_wmi_event_pdev_bss_chan_info(ar, skb); + break; case WMI_10_2_RTT_KEEPALIVE_EVENTID: case WMI_10_2_GPIO_INPUT_EVENTID: case WMI_10_2_PEER_RATECODE_LIST_EVENTID: @@ -5021,6 +5483,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_10_4_event_id id; + bool consumed; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); @@ -5030,6 +5493,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_10_4_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } + switch (id) { case WMI_10_4_MGMT_RX_EVENTID: ath10k_wmi_event_mgmt_rx(ar, skb); @@ -5059,6 +5534,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_PEER_STA_KICKOUT_EVENTID: ath10k_wmi_event_peer_sta_kickout(ar, skb); break; + case WMI_10_4_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; case WMI_10_4_HOST_SWBA_EVENTID: ath10k_wmi_event_host_swba(ar, skb); break; @@ -5075,12 +5553,20 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_stopped(ar, skb); break; case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: + case WMI_10_4_PEER_RATECODE_LIST_EVENTID: + case WMI_10_4_WDS_PEER_EVENTID: ath10k_dbg(ar, ATH10K_DBG_WMI, "received event id %d not implemented\n", id); break; case WMI_10_4_UPDATE_STATS_EVENTID: ath10k_wmi_event_update_stats(ar, skb); break; + case WMI_10_4_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_event_temperature(ar, skb); + break; + case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID: + ath10k_wmi_event_pdev_bss_chan_info(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -5399,9 +5885,16 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) u32 len, val, features; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); - config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); - config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + + if (ath10k_peer_stats_enabled(ar)) { + config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); + } else { + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + } + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); @@ -5451,8 +5944,17 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) cmd = (struct wmi_init_cmd_10_2 *)buf->data; features = WMI_10_2_RX_BATCH_MODE; - if 
(test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + + if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) && + test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) features |= WMI_10_2_COEX_GPIO; + + if (ath10k_peer_stats_enabled(ar)) + features |= WMI_10_2_PEER_STATS; + + if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) + features |= WMI_10_2_BSS_CHAN_INFO; + cmd->resource_config.feature_mask = __cpu_to_le32(features); memcpy(&cmd->resource_config.common, &config, sizeof(config)); @@ -5479,8 +5981,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); - config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK); - config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK); + config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask); + config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask); config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); @@ -5511,7 +6013,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); config.gtk_offload_max_vdev = __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); - config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC); + config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); config.max_peer_ext_stats = __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); @@ -5546,15 +6048,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) { - if (arg->ie_len && !arg->ie) - return -EINVAL; - if (arg->n_channels && !arg->channels) - return -EINVAL; - if (arg->n_ssids && !arg->ssids) - return -EINVAL; - if (arg->n_bssids && !arg->bssids) - return -EINVAL; - if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) return -EINVAL; if (arg->n_channels > ARRAY_SIZE(arg->channels)) @@ -5671,9 +6164,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs, bssids->num_bssid = __cpu_to_le32(arg->n_bssids); for (i = 0; i < arg->n_bssids; i++) - memcpy(&bssids->bssid_list[i], - arg->bssids[i].bssid, - ETH_ALEN); + ether_addr_copy(bssids->bssid_list[i].addr, + arg->bssids[i].bssid); ptr += sizeof(*bssids); ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; @@ -5766,6 +6258,8 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHANNEL | WMI_SCAN_EVENT_DEQUEUED; + if (QCA_REV_WCN3990(ar)) + arg->scan_ctrl_flags = ar->fw_flags->flags; arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; arg->n_bssids = 1; arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; @@ -6348,6 +6842,16 @@ ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, cmd->info0 = __cpu_to_le32(info0); } +static void +ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg); + cmd->peer_bw_rxnss_override = 0; +} + static int ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg) { @@ -6437,6 +6941,31 @@ ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar, } static struct sk_buff * 
+ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd); + struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? "reassociate" : "new"); + return skb; +} + +static struct sk_buff * ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) { struct sk_buff *skb; @@ -6449,6 +6978,26 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) return skb; } +static struct sk_buff * +ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar, + enum wmi_bss_survey_req_type type) +{ + struct wmi_pdev_chan_info_req_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data; + cmd->type = __cpu_to_le32(type); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev bss info request type %d\n", type); + + return skb; +} + /* This function assumes the beacon is already DMA mapped */ static struct sk_buff * ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, @@ -6556,7 +7105,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar, } static struct sk_buff * -ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, +ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) { struct wmi_dbglog_cfg_cmd *cmd; @@ -6594,6 +7143,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, } static struct sk_buff * +ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, + u32 log_level) +{ + struct wmi_10_4_dbglog_cfg_cmd *cmd; + struct sk_buff *skb; + u32 cfg; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data; + + if (module_enable) { + cfg = SM(log_level, + ATH10K_DBGLOG_CFG_LOG_LVL); + } else { + /* set back defaults, all modules with WARN level */ + cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, + ATH10K_DBGLOG_CFG_LOG_LVL); + module_enable = ~0; + } + + cmd->module_enable = __cpu_to_le64(module_enable); + cmd->module_valid = __cpu_to_le64(~0); + cmd->config_enable = __cpu_to_le32(cfg); + cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n", + __le64_to_cpu(cmd->module_enable), + __le64_to_cpu(cmd->module_valid), + __le32_to_cpu(cmd->config_enable), + __le32_to_cpu(cmd->config_valid)); + return skb; +} + +static struct sk_buff * ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) { struct wmi_pdev_pktlog_enable_cmd *cmd; @@ -7027,6 +7614,9 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, "Peer TX rate", peer->peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Peer RX rate", peer->peer_rx_rate); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RX duration", peer->rx_duration); + len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } @@ -7252,6 +7842,135 @@ unlock: buf[len] = 0; } +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + 
case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_LEGACY_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -ENOTSUPP; + } + return -ENOTSUPP; +} + +static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_2_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -ENOTSUPP; + } + return -ENOTSUPP; +} + +static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; + } + return -ENOTSUPP; +} + +static struct sk_buff * +ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct wmi_ext_resource_config_10_4_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; + cmd->host_platform_config = __cpu_to_le32(type); + cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi ext resource config host type %d firmware feature bitmap %08x\n", + type, fw_feature_bitmap); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) +{ + struct wmi_echo_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_echo_cmd *)skb->data; + cmd->value = cpu_to_le32(value); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi echo value 0x%08x\n", value); + return skb; +} + +int +ath10k_wmi_barrier(struct ath10k *ar) +{ + int ret; + int time_left; + + spin_lock_bh(&ar->data_lock); + reinit_completion(&ar->wmi.barrier); + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID); + if (ret) { + ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->wmi.barrier, + ATH10K_WMI_BARRIER_TIMEOUT_HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + static const struct wmi_ops wmi_ops = { .rx = ath10k_wmi_op_rx, .map_svc = wmi_main_svc_map, @@ -7268,6 +7987,7 @@ static const struct wmi_ops wmi_ops = { .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 
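/*
 * Userspace analogue of the echo-based ath10k_wmi_barrier() added above:
 * reset a completion, "send" an echo with a reserved cookie and block until
 * the echo event signals completion or a timeout hits.  A pthread condition
 * variable stands in for the kernel completion; the 3 s timeout and all
 * names are illustrative, not the driver implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t barrier_done = PTHREAD_COND_INITIALIZER;
static pthread_t echo_thread;
static int echo_seen;

/* would run from the WMI event path once the firmware echoes the cookie */
static void *echo_event(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        echo_seen = 1;
        pthread_cond_signal(&barrier_done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

static int wmi_barrier(void)
{
        struct timespec ts;
        int ret = 0;

        pthread_mutex_lock(&lock);
        echo_seen = 0;                          /* reinit_completion() */
        pthread_mutex_unlock(&lock);

        /* "submit the echo command"; the reply arrives asynchronously */
        pthread_create(&echo_thread, NULL, echo_event, NULL);

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 3;                         /* barrier timeout */

        pthread_mutex_lock(&lock);
        while (!echo_seen && ret != ETIMEDOUT)
                ret = pthread_cond_timedwait(&barrier_done, &lock, &ts);
        pthread_mutex_unlock(&lock);

        return echo_seen ? 0 : -ETIMEDOUT;
}

int main(void)
{
        printf("barrier returned %d\n", wmi_barrier());
        pthread_join(echo_thread, NULL);
        return 0;
}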
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, @@ -7311,6 +8031,8 @@ static const struct wmi_ops wmi_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_op_gen_echo, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7340,6 +8062,7 @@ static const struct wmi_ops wmi_10_1_ops = { .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, @@ -7378,6 +8101,8 @@ static const struct wmi_ops wmi_10_1_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_op_gen_echo, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7397,6 +8122,7 @@ static const struct wmi_ops wmi_10_2_ops = { .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, + .gen_echo = ath10k_wmi_op_gen_echo, .pull_scan = ath10k_wmi_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, @@ -7408,6 +8134,7 @@ static const struct wmi_ops wmi_10_2_ops = { .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, @@ -7446,6 +8173,7 @@ static const struct wmi_ops wmi_10_2_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, /* .gen_pdev_enable_adaptive_cca not implemented */ }; @@ -7455,23 +8183,26 @@ static const struct wmi_ops wmi_10_2_4_ops = { .gen_init = ath10k_wmi_10_2_op_gen_init, .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, + .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, /* shared with 10.1 */ .map_svc = wmi_10x_svc_map, .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, + .gen_echo = ath10k_wmi_op_gen_echo, .pull_scan = ath10k_wmi_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, - .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev, .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = 
ath10k_wmi_op_pull_echo_ev, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, @@ -7512,6 +8243,7 @@ static const struct wmi_ops wmi_10_2_4_ops = { .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, .gen_pdev_enable_adaptive_cca = ath10k_wmi_op_gen_pdev_enable_adaptive_cca, + .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7533,6 +8265,7 @@ static const struct wmi_ops wmi_10_4_ops = { .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, @@ -7556,6 +8289,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc, .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, @@ -7564,7 +8298,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, - .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, @@ -7573,44 +8307,54 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, + .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, /* shared with 10.2 */ - .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, + .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype, + .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, + .gen_echo = ath10k_wmi_op_gen_echo, }; int ath10k_wmi_attach(struct ath10k *ar) { - switch (ar->wmi.op_version) { + switch (ar->running_fw->fw_file.wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_10_4: ar->wmi.ops = &wmi_10_4_ops; ar->wmi.cmd = &wmi_10_4_cmd_map; ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_2_4: ar->wmi.cmd = &wmi_10_2_4_cmd_map; ar->wmi.ops = &wmi_10_2_4_ops; ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_2: ar->wmi.cmd = &wmi_10_2_cmd_map; ar->wmi.ops = &wmi_10_2_ops; ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_10_1: ar->wmi.cmd = &wmi_10x_cmd_map; ar->wmi.ops = &wmi_10_1_ops; ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = 
&wmi_10x_pdev_param_map; + ar->wmi.peer_flags = &wmi_10x_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->wmi.cmd = &wmi_cmd_map; ar->wmi.ops = &wmi_ops; ar->wmi.vdev_param = &wmi_vdev_param_map; ar->wmi.pdev_param = &wmi_pdev_param_map; + ar->wmi.peer_flags = &wmi_peer_flags_map; break; case ATH10K_FW_WMI_OP_VERSION_TLV: ath10k_wmi_tlv_attach(ar); @@ -7618,15 +8362,21 @@ int ath10k_wmi_attach(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: ath10k_err(ar, "unsupported WMI op version: %d\n", - ar->wmi.op_version); + ar->running_fw->fw_file.wmi_op_version); return -EINVAL; } init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); + init_completion(&ar->wmi.barrier); INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + if (QCA_REV_WCN3990(ar)) { + spin_lock_init(&ar->wmi.mgmt_tx_lock); + idr_init(&ar->wmi.mgmt_pending_tx); + } + return 0; } @@ -7636,17 +8386,42 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar) /* free the host memory chunks requested by firmware */ for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - dma_free_coherent(ar->dev, - ar->wmi.mem_chunks[i].len, - ar->wmi.mem_chunks[i].vaddr, - ar->wmi.mem_chunks[i].paddr); + dma_unmap_single(ar->dev, + ar->wmi.mem_chunks[i].paddr, + ar->wmi.mem_chunks[i].len, + DMA_TO_DEVICE); + kfree(ar->wmi.mem_chunks[i].vaddr); } ar->wmi.num_mem_chunks = 0; } +static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, + void *ctx) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr; + struct ath10k *ar = ctx; + struct sk_buff *msdu; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "force cleanup mgmt msdu_id %hu\n", msdu_id); + + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_FROM_DEVICE); + ieee80211_free_txskb(ar->hw, msdu); + + return 0; +} + void ath10k_wmi_detach(struct ath10k *ar) { + if (QCA_REV_WCN3990(ar)) { + idr_for_each(&ar->wmi.mgmt_pending_tx, + ath10k_wmi_mgmt_tx_clean_up_pending, ar); + idr_destroy(&ar->wmi.mgmt_pending_tx); + } + cancel_work_sync(&ar->svc_rdy_work); if (ar->svc_rdy_skb) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 66148a82ad25..5f8d7e353d1a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,6 +20,9 @@ #include <linux/types.h> #include <net/mac80211.h> +#include <linux/ipv6.h> +#include <net/ipv6.h> +#include <linux/in.h> /* * This file specifies the WMI interface for the Unified Software @@ -55,7 +58,7 @@ * type. * * 6. Comment each parameter part of the WMI command/event structure by - * using the 2 stars at the begining of C comment instead of one star to + * using the 2 stars at the beginning of C comment instead of one star to * enable HTML document generation using Doxygen. 
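/*
 * Sketch of the WCN3990 pending-mgmt-frame cleanup that ath10k_wmi_detach()
 * now performs above: walk every outstanding descriptor id, undo its DMA
 * mapping, free the frame, then tear down the lookup structure.  A fixed-size
 * table stands in for the kernel idr and free() for
 * dma_unmap_single()/ieee80211_free_txskb(); all names are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PENDING 8

struct pending_pkt { void *buf; size_t len; };

static struct pending_pkt *pending[MAX_PENDING];

static void cleanup_pending(int msdu_id, struct pending_pkt *pkt)
{
        printf("force cleanup mgmt msdu_id %d\n", msdu_id);
        free(pkt->buf);         /* driver: unmap the DMA buffer, free the skb */
        free(pkt);
}

static void detach(void)
{
        int i;

        for (i = 0; i < MAX_PENDING; i++) {     /* idr_for_each() equivalent */
                if (!pending[i])
                        continue;
                cleanup_pending(i, pending[i]);
                pending[i] = NULL;              /* idr_destroy() equivalent */
        }
}

int main(void)
{
        pending[3] = calloc(1, sizeof(*pending[3]));
        pending[3]->buf = malloc(64);
        pending[3]->len = 64;
        detach();
        return 0;
}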
* */ @@ -175,6 +178,16 @@ enum wmi_service { WMI_SERVICE_AUX_SPECTRAL_INTF, WMI_SERVICE_AUX_CHAN_LOAD_INTF, WMI_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_MESH_11S, + WMI_SERVICE_MESH_NON_11S, + WMI_SERVICE_PEER_STATS, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_MGMT_TX_WMI, /* keep last */ WMI_SERVICE_MAX, @@ -206,6 +219,12 @@ enum wmi_10x_service { WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, WMI_10X_SERVICE_ATF, WMI_10X_SERVICE_COEX_GPIO, + WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10X_SERVICE_MESH, + WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10X_SERVICE_PEER_STATS, }; enum wmi_main_service { @@ -286,6 +305,15 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10_4_SERVICE_MESH_NON_11S, + WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_10_4_SERVICE_PEER_STATS, + WMI_10_4_SERVICE_MESH_11S, + WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_10_4_SERVICE_TX_MODE_DYNAMIC, }; static inline char *wmi_service_name(int service_id) @@ -375,6 +403,15 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF); SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); + SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); + SVCSTR(WMI_SERVICE_MESH_11S); + SVCSTR(WMI_SERVICE_MESH_NON_11S); + SVCSTR(WMI_SERVICE_PEER_STATS); + SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); + SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); + SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); default: return NULL; } @@ -384,8 +421,8 @@ static inline char *wmi_service_name(int service_id) #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - BIT((svc_id)%(sizeof(u32)))) + __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ + BIT((svc_id) % (sizeof(u32)))) #define SVCMAP(x, y, len) \ do { \ @@ -442,6 +479,18 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_ATF, len); SVCMAP(WMI_10X_SERVICE_COEX_GPIO, WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10X_SERVICE_MESH, + WMI_SERVICE_MESH_11S, len); + SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -600,6 +649,24 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S, + WMI_SERVICE_MESH_NON_11S, len); + SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, len); + 
SVCMAP(WMI_10_4_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10_4_SERVICE_MESH_11S, + WMI_SERVICE_MESH_11S, len); + SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_ONLY, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_PUSH_PULL, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_TX_MODE_DYNAMIC, len); } #undef SVCMAP @@ -657,6 +724,7 @@ struct wmi_cmd_map { u32 bcn_filter_rx_cmdid; u32 prb_req_filter_rx_cmdid; u32 mgmt_tx_cmdid; + u32 mgmt_tx_send_cmdid; u32 prb_tmpl_cmdid; u32 addba_clear_resp_cmdid; u32 addba_send_cmdid; @@ -773,6 +841,7 @@ struct wmi_cmd_map { u32 set_cca_params_cmdid; u32 pdev_bss_chan_info_request_cmdid; u32 pdev_enable_adaptive_cca_cmdid; + u32 ext_resource_cfg_cmdid; }; /* @@ -1265,7 +1334,7 @@ enum wmi_10x_event_id { WMI_10X_PDEV_TPC_CONFIG_EVENTID, WMI_10X_GPIO_INPUT_EVENTID, - WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1, + WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1, }; enum wmi_10_2_cmd_id { @@ -1385,6 +1454,7 @@ enum wmi_10_2_cmd_id { WMI_10_2_MU_CAL_START_CMDID, WMI_10_2_SET_LTEU_CONFIG_CMDID, WMI_10_2_SET_CCA_PARAMS, + WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, }; @@ -1428,6 +1498,8 @@ enum wmi_10_2_event_id { WMI_10_2_WDS_PEER_EVENTID, WMI_10_2_PEER_STA_PS_STATECHG_EVENTID, WMI_10_2_PDEV_TEMPERATURE_EVENTID, + WMI_10_2_MU_REPORT_EVENTID, + WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID, WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, }; @@ -1576,6 +1648,9 @@ enum wmi_10_4_cmd_id { WMI_10_4_MU_CAL_START_CMDID, WMI_10_4_SET_CCA_PARAMS_CMDID, WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_4_EXT_RESOURCE_CFG_CMDID, + WMI_10_4_VDEV_SET_IE_CMDID, + WMI_10_4_SET_LTEU_CONFIG_CMDID, WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1638,6 +1713,7 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_TEMPERATURE_EVENTID, WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, + WMI_10_4_MU_REPORT_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -1732,6 +1808,7 @@ struct wmi_channel { __le32 reginfo1; struct { u8 antenna_max; + u8 max_tx_power; } __packed; } __packed; } __packed; @@ -1771,7 +1848,6 @@ enum wmi_channel_change_cause { #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) #define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ -#define WMI_10_4_MAX_SPATIAL_STREAM 4 /* HT Capabilities*/ #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ @@ -1995,8 +2071,8 @@ struct wmi_10x_service_ready_event { struct wlan_host_mem_req mem_reqs[0]; } __packed; -#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) -#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) +#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) +#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ) struct wmi_ready_event { __le32 sw_version; @@ -2016,7 +2092,7 @@ struct wmi_resource_config { * In offload mode target supports features like WOW, chatter and * other protocol offloads. In order to support them some * functionalities like reorder buffering, PN checking need to be - * done in target. This determines maximum number of peers suported + * done in target. This determines maximum number of peers supported * by target in offload mode */ __le32 num_offload_peers; @@ -2197,7 +2273,7 @@ struct wmi_resource_config { * Max. number of Tx fragments per MSDU * This parameter controls the max number of Tx fragments per MSDU. 
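/*
 * The SVCMAP() blocks above translate firmware-ABI service bits into the
 * driver's abstract WMI_SERVICE_* numbering, so the rest of ath10k can test
 * one service id regardless of which firmware branch is running.  Simplified
 * standalone illustration (plain 32-bit words, made-up service ids); the
 * in-tree macro does the same job with the driver's own bitmap helpers.
 */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_WORD 32

/* firmware-side bitmap: little-endian u32 words in the real event */
static int fw_svc_enabled(const uint32_t *bmap, unsigned int id)
{
        return (bmap[id / BITS_PER_WORD] >> (id % BITS_PER_WORD)) & 1;
}

static void abs_svc_set(unsigned long *bmap, unsigned int id)
{
        bmap[id / BITS_PER_WORD] |= 1UL << (id % BITS_PER_WORD);
}

static int abs_svc_enabled(const unsigned long *bmap, unsigned int id)
{
        return (bmap[id / BITS_PER_WORD] >> (id % BITS_PER_WORD)) & 1;
}

/* translate one firmware-ABI bit into the abstract numbering */
#define SVCMAP(in, fw_id, out, abs_id)                  \
        do {                                            \
                if (fw_svc_enabled(in, fw_id))          \
                        abs_svc_set(out, abs_id);       \
        } while (0)

enum { FW_PEER_STATS = 5, FW_MESH = 9 };                /* made-up fw ids */
enum { ABS_PEER_STATS = 40, ABS_MESH_11S = 41 };        /* made-up abstract ids */

int main(void)
{
        uint32_t fw_map[2] = { 1u << FW_PEER_STATS, 0 };
        unsigned long svc_map[2] = { 0, 0 };

        SVCMAP(fw_map, FW_PEER_STATS, svc_map, ABS_PEER_STATS);
        SVCMAP(fw_map, FW_MESH, svc_map, ABS_MESH_11S);

        printf("peer stats %d, mesh %d\n",
               abs_svc_enabled(svc_map, ABS_PEER_STATS),
               abs_svc_enabled(svc_map, ABS_MESH_11S));
        return 0;
}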
* This is sent by the target as part of the WMI_SERVICE_READY event - * and is overriden by the OS shim as required. + * and is overridden by the OS shim as required. */ __le32 max_frag_entries; } __packed; @@ -2379,7 +2455,7 @@ struct wmi_resource_config_10x { * Max. number of Tx fragments per MSDU * This parameter controls the max number of Tx fragments per MSDU. * This is sent by the target as part of the WMI_SERVICE_READY event - * and is overriden by the OS shim as required. + * and is overridden by the OS shim as required. */ __le32 max_frag_entries; } __packed; @@ -2388,6 +2464,8 @@ enum wmi_10_2_feature_mask { WMI_10_2_RX_BATCH_MODE = BIT(0), WMI_10_2_ATF_CONFIG = BIT(1), WMI_10_2_COEX_GPIO = BIT(3), + WMI_10_2_BSS_CHAN_INFO = BIT(6), + WMI_10_2_PEER_STATS = BIT(7), }; struct wmi_resource_config_10_2 { @@ -2613,13 +2691,43 @@ struct wmi_resource_config_10_4 { */ __le32 iphdr_pad_config; - /* qwrap configuration + /* qwrap configuration (bits 15-0) * 1 - This is qwrap configuration * 0 - This is not qwrap + * + * Bits 31-16 is alloc_frag_desc_for_data_pkt (1 enables, 0 disables) + * In order to get ack-RSSI reporting and to specify the tx-rate for + * individual frames, this option must be enabled. This uses an extra + * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it. */ __le32 qwrap_config; } __packed; +/** + * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags + * @WMI_10_4_LTEU_SUPPORT: LTEU config + * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config + * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan + * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan + * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats + * @WMI_10_4_PEER_STATS: Per station stats + */ +enum wmi_10_4_feature_mask { + WMI_10_4_LTEU_SUPPORT = BIT(0), + WMI_10_4_COEX_GPIO_SUPPORT = BIT(1), + WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2), + WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3), + WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4), + WMI_10_4_PEER_STATS = BIT(5), +}; + +struct wmi_ext_resource_config_10_4_cmd { + /* contains enum wmi_host_platform_type */ + __le32 host_platform_config; + /* see enum wmi_10_4_feature_mask */ + __le32 fw_feature_bitmap; +}; + /* strucutre describing host memory chunk. 
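/*
 * The new WMI_10_4_EXT_RESOURCE_CFG_CMDID body (wmi_ext_resource_config_10_4_cmd
 * above) is just two little-endian words: the host platform type and the
 * wmi_10_4_feature_mask bitmap.  Sketch of packing such a body; put_le32() is
 * written out by hand here instead of using the kernel's __cpu_to_le32(), and
 * the buffer handling is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define FEAT_BSS_CHANNEL_INFO_64        (1u << 4)       /* WMI_10_4_BSS_CHANNEL_INFO_64 */
#define FEAT_PEER_STATS                 (1u << 5)       /* WMI_10_4_PEER_STATS */

enum host_platform_type { HOST_PLATFORM_HIGH_PERF, HOST_PLATFORM_LOW_PERF };

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

static size_t pack_ext_resource_cfg(uint8_t *buf, enum host_platform_type type,
                                    uint32_t fw_feature_bitmap)
{
        put_le32(buf, type);
        put_le32(buf + 4, fw_feature_bitmap);
        return 8;
}

int main(void)
{
        uint8_t cmd[8];
        size_t len, i;

        len = pack_ext_resource_cfg(cmd, HOST_PLATFORM_HIGH_PERF,
                                    FEAT_PEER_STATS | FEAT_BSS_CHANNEL_INFO_64);
        for (i = 0; i < len; i++)
                printf("%02x ", cmd[i]);
        printf("\n");
        return 0;
}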
*/ struct host_memory_chunk { /* id of the request that is passed up in service ready */ @@ -2641,7 +2749,7 @@ struct wmi_init_cmd { struct wmi_host_mem_chunks mem_chunks; } __packed; -/* _10x stucture is from 10.X FW API */ +/* _10x structure is from 10.X FW API */ struct wmi_init_cmd_10x { struct wmi_resource_config_10x resource_config; struct wmi_host_mem_chunks mem_chunks; @@ -2779,6 +2887,66 @@ struct wmi_start_scan_common { __le32 scan_ctrl_flags; } __packed; +/* ARP-NS offload data structure */ +#define WMI_NS_ARP_OFFLOAD 2 +#define WMI_ARP_NS_OFF_FLAGS_VALID BIT(0) +#define WMI_IPV4_ARP_REPLY_OFFLOAD 0 +#define WMI_ARP_NS_OFFLOAD_DISABLE 0 +#define WMI_ARP_NS_OFFLOAD_ENABLE 1 +#define WMI_NSOFF_IPV6_ANYCAST BIT(3) + +struct wmi_ns_offload_info { + struct in6_addr src_addr; + struct in6_addr self_addr[TARGET_NUM_STATIONS]; + struct in6_addr target_addr[TARGET_NUM_STATIONS]; + struct wmi_mac_addr self_macaddr; + u8 src_ipv6_addr_valid; + struct in6_addr target_addr_valid; + struct in6_addr target_ipv6_ac; + u8 slot_idx; +} __packed; + +struct wmi_ns_arp_offload_req { + u8 offload_type; + u8 enable_offload; + __le32 num_ns_offload_count; + union { + struct in_addr ipv4_addr; + struct in6_addr ipv6_addr; + } params; + struct wmi_ns_offload_info info; + struct wmi_mac_addr bssid; +} __packed; + +struct wmi_ns_offload { + __le32 flags; + struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD]; + struct in6_addr solicitation_ipaddr; + struct in6_addr remote_ipaddr; + struct wmi_mac_addr target_mac; +} __packed; + +struct wmi_arp_offload { + __le32 flags; + struct in_addr target_ipaddr; + struct in_addr remote_ipaddr; + struct wmi_mac_addr target_mac; +} __packed; + +/* GTK offload data structure */ +#define WMI_GTK_OFFLOAD_ENABLE_OPCODE BIT(24) +#define WMI_GTK_OFFLOAD_DISABLE_OPCODE BIT(25) +#define WMI_GTK_OFFLOAD_ENABLE 1 +#define WMI_GTK_OFFLOAD_DISABLE 0 + +struct wmi_gtk_rekey_data { + bool valid; + bool enable_offload; + u8 kck[NL80211_KCK_LEN]; + u8 kek[NL80211_KEK_LEN]; + __le64 replay_ctr; +} __packed; + struct wmi_start_scan_tlvs { /* TLV parameters. These includes channel list, ssid list, bssid list, * extra ies. @@ -2855,6 +3023,8 @@ struct wmi_start_scan_arg { /* Different FW scan engine may choose to bail out on errors. * Allow the driver to have influence over that. 
*/ #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 +/** add DS content in probe req frame */ +#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800 /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 @@ -2990,11 +3160,17 @@ struct wmi_10_4_mgmt_rx_event { u8 buf[0]; } __packed; +struct wmi_mgmt_rx_ext_info { + __le64 rx_mac_timestamp; +} __packed __aligned(4); + #define WMI_RX_STATUS_OK 0x00 #define WMI_RX_STATUS_ERR_CRC 0x01 #define WMI_RX_STATUS_ERR_DECRYPT 0x08 #define WMI_RX_STATUS_ERR_MIC 0x10 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 +/* Extension data at the end of mgmt frame */ +#define WMI_RX_STATUS_EXT_INFO 0x40 #define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26 #define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24 @@ -3003,6 +3179,10 @@ struct wmi_10_4_mgmt_rx_event { #define PHY_ERROR_10_4_RADAR_MASK 0x4 #define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000 +#define WMI_PHY_ERROR_MASK0_RADAR BIT(2) +#define WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT BIT(24) +#define WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN BIT(26) + enum phy_err_type { PHY_ERROR_UNKNOWN, PHY_ERROR_SPECTRAL_SCAN, @@ -3343,6 +3523,7 @@ struct wmi_pdev_param_map { u32 wapi_mbssid_offset; u32 arp_srcaddr; u32 arp_dstaddr; + u32 enable_btcoex; }; #define WMI_PDEV_PARAM_UNSUPPORTED 0 @@ -3650,6 +3831,15 @@ enum wmi_10_4_pdev_param { WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, WMI_10_4_PDEV_PARAM_ARP_SRCADDR, WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB, + WMI_10_4_PDEV_PARAM_RX_BATCHMODE, + WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, + WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, + WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE, + WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; struct wmi_pdev_set_param_cmd { @@ -3848,7 +4038,7 @@ struct wmi_pdev_stats_tx { /* illegal rate phy errors */ __le32 illgl_rate_phy_err; - /* wal pdev continous xretry */ + /* wal pdev continuous xretry */ __le32 pdev_cont_xretry; /* wal pdev continous xretry */ @@ -4019,6 +4209,13 @@ enum wmi_stats_id { WMI_STAT_VDEV_RATE = BIT(5), }; +enum wmi_10_4_stats_id { + WMI_10_4_STAT_PEER = BIT(0), + WMI_10_4_STAT_AP = BIT(1), + WMI_10_4_STAT_INST = BIT(2), + WMI_10_4_STAT_PEER_EXTD = BIT(3), +}; + struct wlan_inst_rssi_args { __le16 cfg_retry_count; __le16 retry_count; @@ -4096,10 +4293,10 @@ struct wmi_10_2_stats_event { */ struct wmi_pdev_stats_base { __le32 chan_nf; - __le32 tx_frame_count; - __le32 rx_frame_count; - __le32 rx_clear_count; - __le32 cycle_count; + __le32 tx_frame_count; /* Cycles spent transmitting frames */ + __le32 rx_frame_count; /* Cycles spent receiving frames */ + __le32 rx_clear_count; /* Total channel busy time, evidently */ + __le32 cycle_count; /* Total on-channel time */ __le32 phy_err_count; __le32 chan_tx_pwr; } __packed; @@ -4192,7 +4389,13 @@ struct wmi_10_2_peer_stats { struct wmi_10_2_4_peer_stats { struct wmi_10_2_peer_stats common; - __le32 unknown_value; /* FIXME: what is this word? 
*/ + __le32 peer_rssi_changed; +} __packed; + +struct wmi_10_2_4_ext_peer_stats { + struct wmi_10_2_peer_stats common; + __le32 peer_rssi_changed; + __le32 rx_duration; } __packed; struct wmi_10_4_peer_stats { @@ -4212,6 +4415,27 @@ struct wmi_10_4_peer_stats { __le32 peer_rssi_changed; } __packed; +struct wmi_10_4_peer_extd_stats { + struct wmi_mac_addr peer_macaddr; + __le32 inactive_time; + __le32 peer_chain_rssi; + __le32 rx_duration; + __le32 reserved[10]; +} __packed; + +struct wmi_10_4_bss_bcn_stats { + __le32 vdev_id; + __le32 bss_bcns_dropped; + __le32 bss_bcn_delivered; +} __packed; + +struct wmi_10_4_bss_bcn_filter_stats { + __le32 bcns_dropped; + __le32 bcns_delivered; + __le32 active_filters; + struct wmi_10_4_bss_bcn_stats bss_stats; +} __packed; + struct wmi_10_2_pdev_ext_stats { __le32 rx_rssi_comb; __le32 rx_rssi[4]; @@ -4235,10 +4459,40 @@ enum wmi_vdev_type { }; enum wmi_vdev_subtype { - WMI_VDEV_SUBTYPE_NONE = 0, - WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, - WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, - WMI_VDEV_SUBTYPE_P2P_GO = 3, + WMI_VDEV_SUBTYPE_NONE, + WMI_VDEV_SUBTYPE_P2P_DEVICE, + WMI_VDEV_SUBTYPE_P2P_CLIENT, + WMI_VDEV_SUBTYPE_P2P_GO, + WMI_VDEV_SUBTYPE_PROXY_STA, + WMI_VDEV_SUBTYPE_MESH_11S, + WMI_VDEV_SUBTYPE_MESH_NON_11S, +}; + +enum wmi_vdev_subtype_legacy { + WMI_VDEV_SUBTYPE_LEGACY_NONE = 0, + WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3, + WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4, +}; + +enum wmi_vdev_subtype_10_2_4 { + WMI_VDEV_SUBTYPE_10_2_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5, +}; + +enum wmi_vdev_subtype_10_4 { + WMI_VDEV_SUBTYPE_10_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5, + WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6, }; /* values for vdev_subtype */ @@ -4247,14 +4501,14 @@ enum wmi_vdev_subtype { /* * Indicates that AP VDEV uses hidden ssid. only valid for * AP/GO */ -#define WMI_VDEV_START_HIDDEN_SSID (1<<0) +#define WMI_VDEV_START_HIDDEN_SSID (1 << 0) /* * Indicates if robust management frame/management frame * protection is enabled. For GO/AP vdevs, it indicates that * it may support station/client associations with RMF enabled. * For STA/client vdevs, it indicates that sta will * associate with AP with RMF enabled. */ -#define WMI_VDEV_START_PMF_ENABLED (1<<1) +#define WMI_VDEV_START_PMF_ENABLED (1 << 1) struct wmi_p2p_noa_descriptor { __le32 type_count; /* 255: continuous schedule, 0: reserved */ @@ -4278,9 +4532,9 @@ struct wmi_vdev_start_request_cmd { __le32 flags; /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */ struct wmi_ssid ssid; - /* beacon/probe reponse xmit rate. Applicable for SoftAP. */ + /* beacon/probe response xmit rate. Applicable for SoftAP. */ __le32 bcn_tx_rate; - /* beacon/probe reponse xmit power. Applicable for SoftAP. */ + /* beacon/probe response xmit power. Applicable for SoftAP. 
*/ __le32 bcn_tx_power; /* number of p2p NOA descriptor(s) from scan entry */ __le32 num_noa_descriptors; @@ -4493,6 +4747,7 @@ struct wmi_vdev_param_map { u32 meru_vc; u32 rx_decap_type; u32 bw_nss_ratemask; + u32 set_tsf; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -4507,7 +4762,7 @@ enum wmi_vdev_param { WMI_VDEV_PARAM_BEACON_INTERVAL, /* Listen interval in TUs */ WMI_VDEV_PARAM_LISTEN_INTERVAL, - /* muticast rate in Mbps */ + /* multicast rate in Mbps */ WMI_VDEV_PARAM_MULTICAST_RATE, /* management frame rate in Mbps */ WMI_VDEV_PARAM_MGMT_TX_RATE, @@ -4638,7 +4893,7 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_BEACON_INTERVAL, /* Listen interval in TUs */ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, - /* muticast rate in Mbps */ + /* multicast rate in Mbps */ WMI_10X_VDEV_PARAM_MULTICAST_RATE, /* management frame rate in Mbps */ WMI_10X_VDEV_PARAM_MGMT_TX_RATE, @@ -4749,6 +5004,7 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_RTS_FIXED_RATE, WMI_10X_VDEV_PARAM_VHT_SGIMASK, WMI_10X_VDEV_PARAM_VHT80_RATEMASK, + WMI_10X_VDEV_PARAM_TSF_INCREMENT, }; enum wmi_10_4_vdev_param { @@ -4818,6 +5074,12 @@ enum wmi_10_4_vdev_param { WMI_10_4_VDEV_PARAM_MERU_VC, WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + WMI_10_4_VDEV_PARAM_SENSOR_AP, + WMI_10_4_VDEV_PARAM_BEACON_RATE, + WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS, + WMI_10_4_VDEV_PARAM_STA_KICKOUT, + WMI_10_4_VDEV_PARAM_CAPABILITIES, + WMI_10_4_VDEV_PARAM_TSF_INCREMENT, }; #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) @@ -4877,7 +5139,7 @@ struct wmi_vdev_simple_event { } __packed; /* VDEV start response status codes */ -/* VDEV succesfully started */ +/* VDEV successfully started */ #define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0 /* requested VDEV not found */ @@ -5193,7 +5455,7 @@ enum wmi_sta_ps_param_pspoll_count { #define WMI_UAPSD_AC_TYPE_TRIG 1 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \ - ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1))) + (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1)) enum wmi_sta_ps_param_uapsd { WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), @@ -5406,6 +5668,16 @@ struct wmi_host_swba_event { struct wmi_bcn_info bcn_info[0]; } __packed; +struct wmi_10_2_4_bcn_info { + struct wmi_tim_info tim_info; + /* The 10.2.4 FW doesn't have p2p NOA info */ +} __packed; + +struct wmi_10_2_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_2_4_bcn_info bcn_info[0]; +} __packed; + /* 16 words = 512 client + 1 word = for guard */ #define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 @@ -5598,7 +5870,7 @@ struct wmi_rate_set { * the rates are filled from least significant byte to most * significant byte. 
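/*
 * The reformatted WMI_UAPSD_AC_BIT_MASK() above interleaves the per-AC
 * U-APSD bits: delivery-enable lands on bit 2*ac, trigger-enable on bit
 * 2*ac + 1, matching the WMI_STA_PS_UAPSD_AC*_EN values.  Tiny check program;
 * the macro is re-spelled here with local names.
 */
#include <stdio.h>

#define UAPSD_AC_TYPE_DELI 0
#define UAPSD_AC_TYPE_TRIG 1

#define UAPSD_AC_BIT_MASK(ac, type) \
        ((type) == UAPSD_AC_TYPE_DELI ? 1u << ((ac) << 1) : 1u << (((ac) << 1) + 1))

int main(void)
{
        int ac;

        for (ac = 0; ac < 4; ac++)
                printf("AC%d: delivery 0x%02x trigger 0x%02x\n", ac,
                       UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_DELI),
                       UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_TRIG));
        return 0;
}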
*/ - __le32 rates[(MAX_SUPPORTED_RATES/4)+1]; + __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1]; } __packed; struct wmi_rate_set_arg { @@ -5642,21 +5914,79 @@ struct wmi_peer_set_q_empty_callback_cmd { __le32 callback_enable; } __packed; -#define WMI_PEER_AUTH 0x00000001 -#define WMI_PEER_QOS 0x00000002 -#define WMI_PEER_NEED_PTK_4_WAY 0x00000004 -#define WMI_PEER_NEED_GTK_2_WAY 0x00000010 -#define WMI_PEER_APSD 0x00000800 -#define WMI_PEER_HT 0x00001000 -#define WMI_PEER_40MHZ 0x00002000 -#define WMI_PEER_STBC 0x00008000 -#define WMI_PEER_LDPC 0x00010000 -#define WMI_PEER_DYN_MIMOPS 0x00020000 -#define WMI_PEER_STATIC_MIMOPS 0x00040000 -#define WMI_PEER_SPATIAL_MUX 0x00200000 -#define WMI_PEER_VHT 0x02000000 -#define WMI_PEER_80MHZ 0x04000000 -#define WMI_PEER_VHT_2G 0x08000000 +struct wmi_peer_flags_map { + u32 auth; + u32 qos; + u32 need_ptk_4_way; + u32 need_gtk_2_way; + u32 apsd; + u32 ht; + u32 bw40; + u32 stbc; + u32 ldbc; + u32 dyn_mimops; + u32 static_mimops; + u32 spatial_mux; + u32 vht; + u32 bw80; + u32 vht_2g; + u32 pmf; +}; + +enum wmi_peer_flags { + WMI_PEER_AUTH = 0x00000001, + WMI_PEER_QOS = 0x00000002, + WMI_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_PEER_APSD = 0x00000800, + WMI_PEER_HT = 0x00001000, + WMI_PEER_40MHZ = 0x00002000, + WMI_PEER_STBC = 0x00008000, + WMI_PEER_LDPC = 0x00010000, + WMI_PEER_DYN_MIMOPS = 0x00020000, + WMI_PEER_STATIC_MIMOPS = 0x00040000, + WMI_PEER_SPATIAL_MUX = 0x00200000, + WMI_PEER_VHT = 0x02000000, + WMI_PEER_80MHZ = 0x04000000, + WMI_PEER_VHT_2G = 0x08000000, + WMI_PEER_PMF = 0x10000000, +}; + +enum wmi_10x_peer_flags { + WMI_10X_PEER_AUTH = 0x00000001, + WMI_10X_PEER_QOS = 0x00000002, + WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10X_PEER_APSD = 0x00000800, + WMI_10X_PEER_HT = 0x00001000, + WMI_10X_PEER_40MHZ = 0x00002000, + WMI_10X_PEER_STBC = 0x00008000, + WMI_10X_PEER_LDPC = 0x00010000, + WMI_10X_PEER_DYN_MIMOPS = 0x00020000, + WMI_10X_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10X_PEER_SPATIAL_MUX = 0x00200000, + WMI_10X_PEER_VHT = 0x02000000, + WMI_10X_PEER_80MHZ = 0x04000000, +}; + +enum wmi_10_2_peer_flags { + WMI_10_2_PEER_AUTH = 0x00000001, + WMI_10_2_PEER_QOS = 0x00000002, + WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10_2_PEER_APSD = 0x00000800, + WMI_10_2_PEER_HT = 0x00001000, + WMI_10_2_PEER_40MHZ = 0x00002000, + WMI_10_2_PEER_STBC = 0x00008000, + WMI_10_2_PEER_LDPC = 0x00010000, + WMI_10_2_PEER_DYN_MIMOPS = 0x00020000, + WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10_2_PEER_SPATIAL_MUX = 0x00200000, + WMI_10_2_PEER_VHT = 0x02000000, + WMI_10_2_PEER_80MHZ = 0x04000000, + WMI_10_2_PEER_VHT_2G = 0x08000000, + WMI_10_2_PEER_PMF = 0x10000000, +}; /* * Peer rate capabilities. 
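/*
 * The header now carries a wmi_peer_flags_map plus per-ABI enums instead of
 * the old fixed WMI_PEER_* defines, and ath10k_wmi_attach() earlier in this
 * patch points ar->wmi.peer_flags at the table that matches the running
 * firmware.  Minimal sketch of why that helps: shared code builds peer flags
 * symbolically and only the table differs per ABI.  The numeric values are
 * copied from the 10.2 enum above; the helper and its call site are made up.
 */
#include <stdio.h>
#include <stdint.h>

struct peer_flags_map { uint32_t auth, qos, ht, bw40, vht, bw80; };

static const struct peer_flags_map peer_flags_10_2 = {
        .auth = 0x00000001,
        .qos  = 0x00000002,
        .ht   = 0x00001000,
        .bw40 = 0x00002000,
        .vht  = 0x02000000,
        .bw80 = 0x04000000,
};

/* common code: knows about capabilities, not about ABI bit positions */
static uint32_t build_peer_flags(const struct peer_flags_map *map,
                                 int qos, int ht, int vht)
{
        uint32_t flags = map->auth;

        if (qos)
                flags |= map->qos;
        if (ht)
                flags |= map->ht | map->bw40;
        if (vht)
                flags |= map->vht | map->bw80;
        return flags;
}

int main(void)
{
        printf("peer flags: 0x%08x\n",
               build_peer_flags(&peer_flags_10_2, 1, 1, 1));
        return 0;
}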
@@ -5722,6 +6052,11 @@ struct wmi_10_2_peer_assoc_complete_cmd { __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */ } __packed; +struct wmi_10_4_peer_assoc_complete_cmd { + struct wmi_10_2_peer_assoc_complete_cmd cmd; + __le32 peer_bw_rxnss_override; +} __packed; + struct wmi_peer_assoc_complete_arg { u8 addr[ETH_ALEN]; u32 vdev_id; @@ -5769,6 +6104,13 @@ struct wmi_chan_info_event { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; } __packed; struct wmi_10_4_chan_info_event { @@ -5911,6 +6253,20 @@ struct wmi_dbglog_cfg_cmd { __le32 config_valid; } __packed; +struct wmi_10_4_dbglog_cfg_cmd { + /* bitmask to hold mod id config*/ + __le64 module_enable; + + /* see ATH10K_DBGLOG_CFG_ */ + __le32 config_enable; + + /* mask of module id bits to be changed */ + __le64 module_valid; + + /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */ + __le32 config_valid; +} __packed; + enum wmi_roam_reason { WMI_ROAM_REASON_BETTER_AP = 1, WMI_ROAM_REASON_BEACON_MISS = 2, @@ -5948,6 +6304,17 @@ struct wmi_scan_ev_arg { __le32 vdev_id; }; +struct wmi_peer_delete_resp_ev_arg { + __le32 vdev_id; + struct wmi_mac_addr peer_addr; +}; + +struct wmi_tlv_mgmt_tx_compl_ev_arg { + __le32 desc_id; + __le32 status; + __le32 pdev_id; +}; + struct wmi_mgmt_rx_ev_arg { __le32 channel; __le32 snr; @@ -5955,6 +6322,7 @@ struct wmi_mgmt_rx_ev_arg { __le32 phy_mode; __le32 buf_len; __le32 status; /* %WMI_RX_STATUS_ */ + struct wmi_mgmt_rx_ext_info ext_info; }; struct wmi_ch_info_ev_arg { @@ -5967,6 +6335,10 @@ struct wmi_ch_info_ev_arg { __le32 chan_tx_pwr_range; __le32 chan_tx_pwr_tp; __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; }; /* From 10.4 firmware, not sure all have the same values. 
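/*
 * The bss-chan-info handler earlier in this patch turns the raw cycle
 * counters (event layout defined just below) into survey times by dividing
 * them by the channel-counter frequency taken from hw_params.  Worked
 * standalone example; the counter values and the divisor are made up for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct bss_chan_info {
        uint32_t freq;
        int32_t  noise_floor;
        uint64_t cycle_busy, cycle_total, cycle_tx, cycle_rx, cycle_rx_bss;
};

int main(void)
{
        const uint64_t cc_freq = 88000;         /* illustrative divisor; the real
                                                 * value comes from hw_params */
        struct bss_chan_info ev = {
                .freq = 5180,
                .noise_floor = -96,
                .cycle_total  = 880000000ULL,
                .cycle_busy   = 220000000ULL,
                .cycle_tx     =  88000000ULL,
                .cycle_rx     = 132000000ULL,
                .cycle_rx_bss =  44000000ULL,
        };

        printf("freq %u noise %d: time %llu busy %llu tx %llu rx(bss) %llu\n",
               ev.freq, ev.noise_floor,
               (unsigned long long)(ev.cycle_total / cc_freq),
               (unsigned long long)(ev.cycle_busy / cc_freq),
               (unsigned long long)(ev.cycle_tx / cc_freq),
               (unsigned long long)(ev.cycle_rx_bss / cc_freq));
        return 0;
}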
*/ @@ -6011,6 +6383,8 @@ struct wmi_phyerr_hdr_arg { u32 tsf_u32; u32 buf_len; const void *phyerrs; + u32 phy_err_mask0; + u32 phy_err_mask1; }; struct wmi_svc_rdy_ev_arg { @@ -6043,11 +6417,26 @@ struct wmi_roam_ev_arg { __le32 rssi; }; +struct wmi_echo_ev_arg { + __le32 value; +}; + struct wmi_pdev_temperature_event { /* temperature value in Celcius degree */ __le32 temperature; } __packed; +struct wmi_pdev_bss_chan_info_event { + __le32 freq; + __le32 noise_floor; + __le64 cycle_busy; + __le64 cycle_total; + __le64 cycle_tx; + __le64 cycle_rx; + __le64 cycle_rx_bss; + __le32 reserved; +} __packed; + /* WOW structures */ enum wmi_wow_wakeup_event { WOW_BMISS_EVENT = 0, @@ -6246,6 +6635,21 @@ struct wmi_pdev_set_adaptive_cca_params { __le32 cca_detect_margin; } __packed; +enum wmi_host_platform_type { + WMI_HOST_PLATFORM_HIGH_PERF, + WMI_HOST_PLATFORM_LOW_PERF, +}; + +enum wmi_bss_survey_req_type { + WMI_BSS_SURVEY_REQ_TYPE_READ = 1, + WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR, +}; + +struct wmi_pdev_chan_info_req_cmd { + __le32 type; + __le32 reserved; +} __packed; + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; @@ -6288,13 +6692,17 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar, + struct sk_buff *skb); int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_tlv_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_delete_resp(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb); @@ -6343,5 +6751,8 @@ size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head); void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, char *buf); +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype); +int ath10k_wmi_barrier(struct ath10k *ar); #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 8e02b381990f..2280f47dc227 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -17,6 +17,7 @@ #include "mac.h" #include <net/mac80211.h> +#include <net/addrconf.h> #include "hif.h" #include "core.h" #include "debug.h" @@ -25,7 +26,9 @@ static const struct wiphy_wowlan_support ath10k_wowlan_support = { .flags = WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_MAGIC_PKT, + WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, .pattern_min_len = WOW_MIN_PATTERN_SIZE, .pattern_max_len = WOW_MAX_PATTERN_SIZE, .max_pkt_offset = WOW_MAX_PKT_OFFSET, @@ -82,6 +85,7 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, int ret, i; unsigned long wow_mask = 0; struct 
ath10k *ar = arvif->ar; + struct ieee80211_bss_conf *bss = &arvif->vif->bss_conf; const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; int pattern_id = 0; @@ -100,15 +104,19 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, __set_bit(WOW_RA_MATCH_EVENT, &wow_mask); break; case WMI_VDEV_TYPE_STA: - if (wowlan->disconnect) { - __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); - __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); - __set_bit(WOW_BMISS_EVENT, &wow_mask); - __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + if (arvif->is_up && bss->assoc) { + if (wowlan->disconnect) { + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_BMISS_EVENT, &wow_mask); + __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + } + + if (wowlan->magic_pkt) + __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + if (wowlan->gtk_rekey_failure) + __set_bit(WOW_GTK_ERR_EVENT, &wow_mask); } - - if (wowlan->magic_pkt) - __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); break; default: break; @@ -224,6 +232,253 @@ static int ath10k_wow_wakeup(struct ath10k *ar) return 0; } +static int +ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif, + bool enable_offload) +{ + struct in6_addr addr[TARGET_NUM_STATIONS]; + struct wmi_ns_arp_offload_req *ns; + struct wireless_dev *wdev; + struct inet6_dev *in6_dev; + struct in6_addr addr_type; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *ifaca; + struct list_head *addr_list; + u32 scope, count = 0; + int i; + + ns = &arvif->ns_offload; + if (!enable_offload) { + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE); + return 0; + } + + wdev = ieee80211_vif_to_wdev(arvif->vif); + if (!wdev) + return -ENODEV; + + in6_dev = __in6_dev_get(wdev->netdev); + if (!in6_dev) + return -ENODEV; + + memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr)); + memset(&addr_type, 0, sizeof(struct in6_addr)); + + /* Unicast Addresses */ + read_lock_bh(&in6_dev->lock); + list_for_each(addr_list, &in6_dev->addr_list) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + ifa = list_entry(addr_list, struct inet6_ifaddr, if_list); + if (ifa->flags & IFA_F_DADFAILED) + continue; + scope = ipv6_addr_src_scope(&ifa->addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifa->addr.s6_addr, + sizeof(ifa->addr.s6_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_UNICAST; + count += 1; + break; + } + } + + /* Anycast Addresses */ + for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + scope = ipv6_addr_src_scope(&ifaca->aca_addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifaca->aca_addr, + sizeof(ifaca->aca_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_ANY; + count += 1; + break; + } + } + read_unlock_bh(&in6_dev->lock); + + /* Filling up the request structure + * Filling the self_addr with solicited address + * A Solicited-Node multicast address is created by + * taking the last 24 bits of a unicast or anycast + * address and appending them to the prefix + * + * FF02:0000:0000:0000:0000:0001:FFXX:XXXX + * + * here XX is the unicast/anycast bits + */ + for (i = 0; i < count; i++) { + ns->info.self_addr[i].s6_addr[0] = 0xFF; + ns->info.self_addr[i].s6_addr[1] = 0x02; + 
ns->info.self_addr[i].s6_addr[11] = 0x01; + ns->info.self_addr[i].s6_addr[12] = 0xFF; + ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13]; + ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14]; + ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15]; + ns->info.slot_idx = i; + memcpy(&ns->info.target_addr[i], &addr[i], + sizeof(struct in6_addr)); + ns->info.target_addr_valid.s6_addr[i] = 1; + ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i]; + memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE); + ns->num_ns_offload_count = __cpu_to_le16(count); + + return 0; +} + +static int +ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif, + bool enable_offload) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + bool offload_params_found = false; + struct wireless_dev *wdev = ieee80211_vif_to_wdev(arvif->vif); + struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload; + + if (!enable_offload) { + arp->offload_type = __cpu_to_le16(WMI_IPV4_ARP_REPLY_OFFLOAD); + arp->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE); + return 0; + } + + if (!wdev) + return -ENODEV; + if (!wdev->netdev) + return -ENODEV; + in_dev = __in_dev_get_rtnl(wdev->netdev); + if (!in_dev) + return -ENODEV; + + arp->offload_type = __cpu_to_le16(WMI_IPV4_ARP_REPLY_OFFLOAD); + arp->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE); + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { + if (!memcmp(ifa->ifa_label, wdev->netdev->name, IFNAMSIZ)) { + offload_params_found = true; + break; + } + } + + if (!offload_params_found) + return -ENODEV; + memcpy(&arp->params.ipv4_addr, &ifa->ifa_local, + sizeof(arp->params.ipv4_addr)); + + return 0; +} + +static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload) +{ + struct ath10k_vif *arvif; + int ret; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + if (!arvif->is_up) + continue; + + ret = ath10k_wow_fill_vdev_arp_offload_struct(arvif, offload); + if (ret) { + ath10k_err(ar, "ARP-offload config failed, vdev: %d\n", + arvif->vdev_id); + return ret; + } + + ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload); + if (ret) { + ath10k_err(ar, "NS-offload config failed, vdev: %d\n", + arvif->vdev_id); + return ret; + } + + ret = ath10k_wmi_set_arp_ns_offload(ar, arvif); + if (ret) { + ath10k_err(ar, "failed to send offload cmd, vdev: %d\n", + arvif->vdev_id); + return ret; + } + } + + return 0; +} + +static int ath10k_config_wow_listen_interval(struct ath10k *ar) +{ + int ret; + u32 param = ar->wmi.vdev_param->listen_interval; + u8 listen_interval = ar->hw_values->default_listen_interval; + struct ath10k_vif *arvif; + + if (!listen_interval) + return 0; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + param, listen_interval); + if (ret) { + ath10k_err(ar, "failed to config LI for vdev_id: %d\n", + arvif->vdev_id); + return ret; + } + } + + return 0; +} + +static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload) +{ + struct ath10k_vif *arvif; + struct ieee80211_bss_conf *bss; + struct wmi_gtk_rekey_data *rekey_data; + int ret; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + bss = 
&arvif->vif->bss_conf; + if (!arvif->is_up || !bss->assoc) + continue; + + rekey_data = &arvif->gtk_rekey_data; + if (!rekey_data->valid) + continue; + + if (gtk_offload) + rekey_data->enable_offload = WMI_GTK_OFFLOAD_ENABLE; + else + rekey_data->enable_offload = WMI_GTK_OFFLOAD_DISABLE; + ret = ath10k_wmi_gtk_offload(ar, arvif); + if (ret) { + ath10k_err(ar, "GTK offload failed for vdev_id: %d\n", + arvif->vdev_id); + return ret; + } + } + + return 0; +} + int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { @@ -233,7 +488,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->fw_features))) { + ar->running_fw->fw_file.fw_features))) { ret = 1; goto exit; } @@ -245,10 +500,29 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto exit; } + ret = ath10k_wow_config_gtk_offload(ar, true); + if (ret) { + ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret); + goto exit; + } + + ret = ath10k_wow_enable_ns_arp_offload(ar, true); + if (ret) { + ath10k_warn(ar, "failed to enable ARP-NS offload: %d\n", ret); + goto disable_gtk_offload; + } + ret = ath10k_wow_set_wakeups(ar, wowlan); if (ret) { ath10k_warn(ar, "failed to set wow wakeup events: %d\n", ret); + goto disable_ns_arp_offload; + } + + ret = ath10k_config_wow_listen_interval(ar); + if (ret) { + ath10k_warn(ar, "failed to config wow listen interval: %d\n", + ret); goto cleanup; } @@ -272,11 +546,68 @@ wakeup: cleanup: ath10k_wow_cleanup(ar); +disable_ns_arp_offload: + ath10k_wow_enable_ns_arp_offload(ar, false); + +disable_gtk_offload: + ath10k_wow_config_gtk_offload(ar, false); exit: mutex_unlock(&ar->conf_mutex); return ret ? 1 : 0; } +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + device_set_wakeup_enable(ar->dev, enabled); + } + mutex_unlock(&ar->conf_mutex); +} + +static void ath10k_wow_op_report_wakeup_reason(struct ath10k *ar) +{ + struct cfg80211_wowlan_wakeup *wakeup = &ar->wow.wakeup; + struct ath10k_vif *arvif; + + memset(wakeup, 0, sizeof(struct cfg80211_wowlan_wakeup)); + switch (ar->wow.wakeup_reason) { + case WOW_REASON_UNSPECIFIED: + wakeup = NULL; + break; + case WOW_REASON_RECV_MAGIC_PATTERN: + wakeup->magic_pkt = true; + break; + case WOW_REASON_DEAUTH_RECVD: + case WOW_REASON_DISASSOC_RECVD: + case WOW_REASON_AP_ASSOC_LOST: + case WOW_REASON_CSA_EVENT: + wakeup->disconnect = true; + break; + case WOW_REASON_GTK_HS_ERR: + wakeup->gtk_rekey_failure = true; + break; + } + ar->wow.wakeup_reason = WOW_REASON_UNSPECIFIED; + + if (wakeup) { + wakeup->pattern_idx = -1; + list_for_each_entry(arvif, &ar->arvifs, list) { + ieee80211_report_wowlan_wakeup(arvif->vif, + wakeup, GFP_KERNEL); + if (wakeup->disconnect) + ieee80211_resume_disconnect(arvif->vif); + } + } else { + list_for_each_entry(arvif, &ar->arvifs, list) + ieee80211_report_wowlan_wakeup(arvif->vif, + NULL, GFP_KERNEL); + } +} + int ath10k_wow_op_resume(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; @@ -285,7 +616,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw) mutex_lock(&ar->conf_mutex); if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->fw_features))) { + ar->running_fw->fw_file.fw_features))) { ret = 1; goto exit; } @@ -297,8 +628,20 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw) } ret = ath10k_wow_wakeup(ar); - if (ret) 
+ if (ret) { ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); + goto exit; + } + + ret = ath10k_wow_enable_ns_arp_offload(ar, false); + if (ret) { + ath10k_warn(ar, "failed to disable ARP-NS offload: %d\n", ret); + goto exit; + } + + ret = ath10k_wow_config_gtk_offload(ar, false); + if (ret) + ath10k_warn(ar, "failed to disable GTK offload: %d\n", ret); exit: if (ret) { @@ -319,13 +662,15 @@ exit: } } + ath10k_wow_op_report_wakeup_reason(ar); mutex_unlock(&ar->conf_mutex); return ret; } int ath10k_wow_init(struct ath10k *ar) { - if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features)) + if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) return 0; if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map))) @@ -334,6 +679,15 @@ int ath10k_wow_init(struct ath10k *ar) ar->wow.wowlan_support = ath10k_wowlan_support; ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; + device_init_wakeup(ar->dev, true); return 0; } + +void ath10k_wow_deinit(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features) && + test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)) + device_init_wakeup(ar->dev, false); +} diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index abbb04b6d1bd..2ca4ba4848c9 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -17,18 +17,21 @@ #define _WOW_H_ struct ath10k_wow { + u32 wakeup_reason; u32 max_num_patterns; struct completion wakeup_completed; + struct cfg80211_wowlan_wakeup wakeup; struct wiphy_wowlan_support wowlan_support; }; #ifdef CONFIG_PM int ath10k_wow_init(struct ath10k *ar); +void ath10k_wow_deinit(struct ath10k *ar); int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int ath10k_wow_op_resume(struct ieee80211_hw *hw); - +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); #else static inline int ath10k_wow_init(struct ath10k *ar) @@ -36,5 +39,8 @@ static inline int ath10k_wow_init(struct ath10k *ar) return 0; } +void ath10k_wow_deinit(struct ath10k *ar) +{ +} #endif /* CONFIG_PM */ #endif /* _WOW_H_ */ diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index 38be2702c0e2..0624333f5430 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) ath5k_ani_set_firstep_level(ah, as->firstep_level + 1); return; - } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) { + } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) { /* beacon RSSI is low. 
in B/G mode turn of OFDM weak signal * detect and zero firstep level to maximize CCK sensitivity */ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index ba12f7f4061d..67fedb61fcc0 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1265,10 +1265,10 @@ struct ath5k_hw { void __iomem *iobase; /* address of the device */ struct mutex lock; /* dev-level lock */ struct ieee80211_hw *hw; /* IEEE 802.11 common */ - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; struct ieee80211_channel channels[ATH_CHAN_MAX]; - struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; - s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; + struct ieee80211_rate rates[NUM_NL80211_BANDS][AR5K_MAX_RATES]; + s8 rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES]; enum nl80211_iftype opmode; #ifdef CONFIG_ATH5K_DEBUG @@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah, /* Protocol Control Unit Functions */ /* Helpers */ -int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, +int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band, int len, struct ieee80211_rate *rate, bool shortpre); unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); @@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); /* PHY functions */ /* Misc PHY functions */ -u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band); +u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band); int ath5k_hw_phy_disable(struct ath5k_hw *ah); /* Gain_F optimization */ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 66b6366158b9..233054bd6b52 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 0xffffffff; ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_5GHZ); + NL80211_BAND_5GHZ); /* Try to identify radio chip based on its srev */ switch (ah->ah_radio_5ghz_revision & 0xf0) { @@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_radio = AR5K_RF5111; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); break; case AR5K_SREV_RAD_5112: case AR5K_SREV_RAD_2112: ah->ah_radio = AR5K_RF5112; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); break; case AR5K_SREV_RAD_2413: ah->ah_radio = AR5K_RF2413; @@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) ah->ah_radio = AR5K_RF5111; ah->ah_single_chip = false; ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, - IEEE80211_BAND_2GHZ); + NL80211_BAND_2GHZ); } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) || ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) || ah->ah_phy_revision == AR5K_SREV_PHY_2425) { diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 342563a3706f..b2dd6014fb5d 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ 
b/drivers/net/wireless/ath/ath5k/base.c @@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy, * Returns true for the channel numbers used. */ #ifdef CONFIG_ATH5K_TEST_CHANNELS -static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) +static bool ath5k_is_standard_channel(short chan, enum nl80211_band band) { return true; } #else -static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) +static bool ath5k_is_standard_channel(short chan, enum nl80211_band band) { - if (band == IEEE80211_BAND_2GHZ && chan <= 14) + if (band == NL80211_BAND_2GHZ && chan <= 14) return true; return /* UNII 1,2 */ @@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, unsigned int mode, unsigned int max) { unsigned int count, size, freq, ch; - enum ieee80211_band band; + enum nl80211_band band; switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */ size = 220; - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G: size = 26; - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; break; default: ATH5K_WARN(ah, "bad mode, not copying channels\n"); @@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw) int max_c, count_c = 0; int i; - BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS); + BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS); max_c = ARRAY_SIZE(ah->channels); /* 2GHz band */ - sband = &ah->sbands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; - sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0]; + sband = &ah->sbands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; + sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0]; if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) { /* G mode */ @@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11G, max_c); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) { @@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11B, max_c); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } @@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw) /* 5GHz band, A mode */ if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) { - sband = &ah->sbands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; - sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0]; + sband = &ah->sbands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; + sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0]; memcpy(sband->bitrates, &ath5k_rates[4], sizeof(struct ieee80211_rate) * 8); @@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11A, max_c); - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } ath5k_setup_rate_idx(ah, sband); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 7c5f189cace7..7fecec45f877 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -1046,14 +1046,14 @@ ath5k_debug_dump_bands(struct 
ath5k_hw *ah) BUG_ON(!ah->sbands); - for (b = 0; b < IEEE80211_NUM_BANDS; b++) { + for (b = 0; b < NUM_NL80211_BANDS; b++) { struct ieee80211_supported_band *band = &ah->sbands[b]; char bname[6]; switch (band->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: strcpy(bname, "2 GHz"); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: strcpy(bname, "5 GHz"); break; default: diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index bf29da5e90da..fc47b70988b1 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] = * bwmodes. */ int -ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, +ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; @@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah) case AR5K_BWMODE_DEFAULT: sifs = AR5K_INIT_SIFS_DEFAULT_BG; default: - if (channel->band == IEEE80211_BAND_5GHZ) + if (channel->band == NL80211_BAND_5GHZ) sifs = AR5K_INIT_SIFS_DEFAULT_A; break; } @@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah) struct ieee80211_rate *rate; unsigned int i; /* 802.11g covers both OFDM and CCK */ - u8 band = IEEE80211_BAND_2GHZ; + u8 band = NL80211_BAND_2GHZ; /* Write rate duration table */ for (i = 0; i < ah->sbands[band].n_bitrates; i++) { diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 0fce1c76638e..0ce7ab32c5ac 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -75,13 +75,13 @@ /** * ath5k_hw_radio_revision() - Get the PHY Chip revision * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band + * @band: One of enum nl80211_band * * Returns the revision number of a 2GHz, 5GHz or single chip * radio. */ u16 -ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) +ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band) { unsigned int i; u32 srev; @@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) * Set the radio chip access register */ switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0)); break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); break; default: @@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) u16 freq = channel->center_freq; /* Check if the channel is in our supported range */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) && (freq <= ah->ah_capabilities.cap_range.range_2ghz_max)) return true; - } else if (channel->band == IEEE80211_BAND_5GHZ) + } else if (channel->band == NL80211_BAND_5GHZ) if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) && (freq <= ah->ah_capabilities.cap_range.range_5ghz_max)) return true; @@ -743,7 +743,7 @@ done: /** * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw * @ah: The &struct ath5k_hw - * @band: One of enum ieee80211_band + * @band: One of enum nl80211_band * * Write initial RF gain table to set the RF sensitivity. 
* @@ -751,7 +751,7 @@ done: * with Gain_F calibration */ static int -ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) +ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band) { const struct ath5k_ini_rfgain *ath5k_rfg; unsigned int i, size, index; @@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) return -EINVAL; } - index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0; + index = (band == NL80211_BAND_2GHZ) ? 1 : 0; for (i = 0; i < size; i++) { AR5K_REG_WAIT(i); @@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, } /* Set Output and Driver bias current (OB/DB) */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { if (channel->hw_value == AR5K_MODE_11B) ee_mode = AR5K_EEPROM_MODE_11B; @@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, AR5K_RF_DB_2GHZ, true); /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */ - } else if ((channel->band == IEEE80211_BAND_5GHZ) || + } else if ((channel->band == NL80211_BAND_5GHZ) || (ah->ah_radio == AR5K_RF5111)) { /* For 11a, Turbo and XR we need to choose @@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah, } if (ah->ah_radio == AR5K_RF5413 && - channel->band == IEEE80211_BAND_2GHZ) { + channel->band == NL80211_BAND_2GHZ) { ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE, true); @@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah, */ data0 = data1 = 0; - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { /* Map 2GHz channel to 5GHz Atheros channel ID */ ret = ath5k_hw_rf5111_chan2athchan( ieee80211_frequency_to_channel(channel->center_freq), @@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, /* Convert current frequency to fbin value (the same way channels * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale * up by 2 so we can compare it later */ - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { chan_fbin = (channel->center_freq - 2300) * 10; freq_band = AR5K_EEPROM_BAND_2GHZ; } else { @@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4; break; default: - if (channel->band == IEEE80211_BAND_5GHZ) { + if (channel->band == NL80211_BAND_5GHZ) { /* Both sample_freq and chip_freq are 40MHz */ spur_delta_phase = (spur_offset << 17) / 25; spur_freq_sigma_delta = diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index ddaad712c59a..beda11ce34a7 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) { struct ieee80211_channel *channel = ah->ah_current_channel; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_rate *rate; u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; @@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) * * Also we have different lowest rate for 802.11a */ - if (channel->band == IEEE80211_BAND_5GHZ) - band = IEEE80211_BAND_5GHZ; + if (channel->band == NL80211_BAND_5GHZ) + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; switch (ah->ah_bwmode) { case AR5K_BWMODE_5MHZ: diff --git 
a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 99e62f99a182..b6d7bd441975 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) clock = AR5K_PHY_PLL_RF5111; /*Zero*/ } - if (channel->band == IEEE80211_BAND_2GHZ) { + if (channel->band == NL80211_BAND_2GHZ) { mode |= AR5K_PHY_MODE_FREQ_2GHZ; clock |= AR5K_PHY_PLL_44MHZ; @@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) else mode |= AR5K_PHY_MODE_MOD_DYN; } - } else if (channel->band == IEEE80211_BAND_5GHZ) { + } else if (channel->band == NL80211_BAND_5GHZ) { mode |= (AR5K_PHY_MODE_FREQ_5GHZ | AR5K_PHY_MODE_MOD_OFDM); @@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, u32 data; ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD, AR5K_PHY_CCKTXCTL); - if (channel->band == IEEE80211_BAND_5GHZ) + if (channel->band == NL80211_BAND_5GHZ) data = 0xffb81020; else data = 0xffb80d20; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 7653fa47508b..2a53c681e6fd 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -34,7 +34,7 @@ } #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -43,7 +43,7 @@ } #define CHAN5G(_channel, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = 5000 + (5 * (_channel)), \ .flags = (_flags), \ @@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar) } #endif -static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, +static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band, bool ht_enable) { struct ath6kl_htcap *htcap = &vif->htcap[band]; @@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, if (ht_enable) { /* Set default ht capabilities */ htcap->ht_enable = true; - htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ? + htcap->cap_info = (band == NL80211_BAND_2GHZ) ? ath6kl_g_htcap : ath6kl_a_htcap; htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K; } else /* Disable ht */ @@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif) struct wiphy *wiphy = vif->ar->wiphy; int band, ret = 0; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; @@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { struct ath6kl *ar = wiphy_priv(wiphy); - u32 rates[IEEE80211_NUM_BANDS]; + u32 rates[NUM_NL80211_BANDS]; int ret, i; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, @@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, * changed. 
*/ - for (i = 0; i < IEEE80211_NUM_BANDS; i++) + for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; @@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; vif->bg_scan_period = 0; - vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true; - vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true; + vif->htcap[NL80211_BAND_2GHZ].ht_enable = true; + vif->htcap[NL80211_BAND_5GHZ].ht_enable = true; memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); if (fw_vif_idx != 0) { @@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) wiphy->available_antennas_rx = ar->hw.rx_ant; if (band_2gig) - wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; + wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz; if (band_5gig) - wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; + wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 5f3acfe6015e..713a571a27ce 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -623,7 +623,7 @@ struct ath6kl_vif { struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; struct aggr_info *aggr_cntxt; - struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS]; + struct ath6kl_htcap htcap[NUM_NL80211_BANDS]; struct timer_list disconnect_timer; struct timer_list sched_scan_timer; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 7e1010475cfb..da3b47b87ecc 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2051,7 +2051,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, sc->no_cck = cpu_to_le32(no_cck); sc->num_ch = num_chan; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = ar->wiphy->bands[band]; if (!sband) @@ -2778,10 +2778,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx, memset(&ratemask, 0, sizeof(ratemask)); /* only check 2.4 and 5 GHz bands, skip the rest */ - for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { /* copy legacy rate mask */ ratemask[band] = mask->control[band].legacy; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ratemask[band] = mask->control[band].legacy << 4; @@ -2807,9 +2807,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx, if (mode == WMI_RATES_MODE_11A || mode == WMI_RATES_MODE_11A_HT20 || mode == WMI_RATES_MODE_11A_HT40) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; cmd->ratemask[mode] = cpu_to_le64(ratemask[band]); } @@ -2830,10 +2830,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx, memset(&ratemask, 0, sizeof(ratemask)); /* only check 2.4 and 5 GHz bands, skip the rest */ - for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { + for (band = 0; band <= NL80211_BAND_5GHZ; band++) { /* copy legacy rate mask */ ratemask[band] = mask->control[band].legacy; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ratemask[band] = mask->control[band].legacy << 4; @@ -2857,9 +2857,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, 
u8 if_idx, if (mode == WMI_RATES_MODE_11A || mode == WMI_RATES_MODE_11A_HT20 || mode == WMI_RATES_MODE_11A_HT40) - band = IEEE80211_BAND_5GHZ; + band = NL80211_BAND_5GHZ; else - band = IEEE80211_BAND_2GHZ; + band = NL80211_BAND_2GHZ; cmd->ratemask[mode] = cpu_to_le32(ratemask[band]); } @@ -3182,7 +3182,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, } int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, - enum ieee80211_band band, + enum nl80211_band band, struct ath6kl_htcap *htcap) { struct sk_buff *skb; @@ -3195,7 +3195,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, cmd = (struct wmi_set_htcap_cmd *) skb->data; /* - * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely + * NOTE: Band in firmware matches enum nl80211_band, it is unlikely * this will be changed in firmware. If at all there is any change in * band value, the host needs to be fixed. */ diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h index 05d25a94c781..3af464a73b58 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, u8 keep_alive_intvl); int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, - enum ieee80211_band band, + enum nl80211_band band, struct ath6kl_htcap *htcap); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 3e2e24e4843f..b0e4cce8236f 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah, } static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, - enum ieee80211_band band, + enum nl80211_band band, int16_t *nft) { switch (band) { - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5); break; - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2); break; default: diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 90f5773a1a61..b8f072a76600 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc) struct ieee80211_channel *chan; int i, j; - sband = &common->sbands[IEEE80211_BAND_2GHZ]; + sband = &common->sbands[NL80211_BAND_2GHZ]; if (!sband->n_channels) - sband = &common->sbands[IEEE80211_BAND_5GHZ]; + sband = &common->sbands[NL80211_BAND_5GHZ]; chan = &sband->channels[0]; for (i = 0; i < ATH9K_NUM_CHANCTX; i++) { @@ -1315,9 +1315,9 @@ void ath9k_offchannel_init(struct ath_softc *sc) struct ieee80211_channel *chan; int i; - sband = &common->sbands[IEEE80211_BAND_2GHZ]; + sband = &common->sbands[NL80211_BAND_2GHZ]; if (!sband->n_channels) - sband = &common->sbands[IEEE80211_BAND_5GHZ]; + sband = &common->sbands[NL80211_BAND_5GHZ]; chan = &sband->channels[0]; diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c index a006c1499728..8b4f7fdabf58 100644 --- a/drivers/net/wireless/ath/ath9k/common-init.c +++ b/drivers/net/wireless/ath/ath9k/common-init.c @@ -19,14 +19,14 @@ #include "common.h" #define CHAN2G(_freq, _idx) { \ - .band = IEEE80211_BAND_2GHZ, \ + 
.band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ } #define CHAN5G(_freq, _idx) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ @@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common) memcpy(channels, ath9k_2ghz_chantable, sizeof(ath9k_2ghz_chantable)); - common->sbands[IEEE80211_BAND_2GHZ].channels = channels; - common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; - common->sbands[IEEE80211_BAND_2GHZ].n_channels = + common->sbands[NL80211_BAND_2GHZ].channels = channels; + common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ; + common->sbands[NL80211_BAND_2GHZ].n_channels = ARRAY_SIZE(ath9k_2ghz_chantable); - common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; - common->sbands[IEEE80211_BAND_2GHZ].n_bitrates = + common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; + common->sbands[NL80211_BAND_2GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates); } @@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common) memcpy(channels, ath9k_5ghz_chantable, sizeof(ath9k_5ghz_chantable)); - common->sbands[IEEE80211_BAND_5GHZ].channels = channels; - common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; - common->sbands[IEEE80211_BAND_5GHZ].n_channels = + common->sbands[NL80211_BAND_5GHZ].channels = channels; + common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ; + common->sbands[NL80211_BAND_5GHZ].n_channels = ARRAY_SIZE(ath9k_5ghz_chantable); - common->sbands[IEEE80211_BAND_5GHZ].bitrates = + common->sbands[NL80211_BAND_5GHZ].bitrates = ath9k_legacy_rates + 4; - common->sbands[IEEE80211_BAND_5GHZ].n_bitrates = + common->sbands[NL80211_BAND_5GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates) - 4; } return 0; @@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah) if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) ath9k_cmn_setup_ht_cap(ah, - &common->sbands[IEEE80211_BAND_2GHZ].ht_cap); + &common->sbands[NL80211_BAND_2GHZ].ht_cap); if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) ath9k_cmn_setup_ht_cap(ah, - &common->sbands[IEEE80211_BAND_5GHZ].ht_cap); + &common->sbands[NL80211_BAND_5GHZ].ht_cap); } EXPORT_SYMBOL(ath9k_cmn_reload_chainmask); diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index e8c699446470..b80e08b13b74 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common, struct ieee80211_rx_status *rxs) { struct ieee80211_supported_band *sband; - enum ieee80211_band band; + enum nl80211_band band; unsigned int i = 0; struct ath_hw *ah = common->ah; @@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, ichan->channel = chan->center_freq; ichan->chan = chan; - if (chan->band == IEEE80211_BAND_5GHZ) + if (chan->band == NL80211_BAND_5GHZ) flags |= CHANNEL_5GHZ; switch (chandef->width) { diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index c2ca57a2ed09..b66cfa91364f 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc, } if (IS_OFDM_RATE(rs->rs_rate)) { - if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ) + if (ah->curchan->chan->band == NL80211_BAND_2GHZ) rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++; 
else rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++; @@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf, struct ath_hw *ah = sc->sc_ah; struct ath_rx_rate_stats *rstats; struct ieee80211_sta *sta = an->sta; - enum ieee80211_band band; + enum nl80211_band band; u32 len = 0, size = 4096; char *buf; size_t retval; @@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf, len += scnprintf(buf + len, size - len, "\n"); legacy: - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { PRINT_CCK_RATE("CCK-1M/LP", 0, false); PRINT_CCK_RATE("CCK-2M/LP", 1, false); PRINT_CCK_RATE("CCK-5.5M/LP", 2, false); diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index 58205a5bd74b..c702e397495b 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, struct ieee80211_tx_rate *rates = info->status.rates; rate = &common->sbands[info->band].bitrates[rates[ridx].idx]; - if (info->band == IEEE80211_BAND_2GHZ && + if (info->band == NL80211_BAND_2GHZ && !(rate->flags & IEEE80211_RATE_ERP_G)) phy = WLAN_RC_PHY_CCK; else diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 379e843fbe0d..e21df6eae634 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, sizeof(struct htc_frame_hdr) + 4; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &common->sbands[IEEE80211_BAND_2GHZ]; + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &common->sbands[NL80211_BAND_2GHZ]; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &common->sbands[IEEE80211_BAND_5GHZ]; + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &common->sbands[NL80211_BAND_5GHZ]; ath9k_cmn_reload_chainmask(ah); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index e4281438c04f..83103d25bc91 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1770,8 +1770,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask)); tmask.vif_index = avp->index; - tmask.band = IEEE80211_BAND_2GHZ; - tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy); + tmask.band = NL80211_BAND_2GHZ; + tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { @@ -1781,8 +1781,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, goto out; } - tmask.band = IEEE80211_BAND_5GHZ; - tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy); + tmask.band = NL80211_BAND_5GHZ; + tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy); WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); if (ret) { @@ -1793,8 +1793,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw, } ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", - mask->control[IEEE80211_BAND_2GHZ].legacy, - mask->control[IEEE80211_BAND_5GHZ].legacy); + mask->control[NL80211_BAND_2GHZ].legacy, + mask->control[NL80211_BAND_5GHZ].legacy); out: return ret; } diff --git 
a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 64ff52eed9f5..6a9c9b4ef2c9 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI) rate->flags |= IEEE80211_TX_RC_SHORT_GI; } else { - if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ) + if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ) rate->idx += 4; /* No CCK rates */ } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 0f5672f5c9ba..07a1594d44cc 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -714,9 +714,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc) struct ath9k_channel *curchan = ah->curchan; if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); + ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ); if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ); + ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ); ah->curchan = curchan; } @@ -902,11 +902,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) sc->ant_tx = hw->wiphy->available_antennas_tx; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &common->sbands[IEEE80211_BAND_2GHZ]; + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &common->sbands[NL80211_BAND_2GHZ]; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &common->sbands[IEEE80211_BAND_5GHZ]; + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &common->sbands[NL80211_BAND_5GHZ]; #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT ath9k_set_mcc_capab(sc, hw); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index f09168a885a5..fe4f9ec661fa 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1934,14 +1934,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, if (idx == 0) ath_update_survey_stats(sc); - sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) - sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (!sband || idx >= sband->n_channels) { spin_unlock_bh(&common->cc_lock); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 94fac5dc1b4a..e8a76f15f373 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, bool is_2ghz; struct modal_eep_header *pmodal; - is_2ghz = info->band == IEEE80211_BAND_2GHZ; + is_2ghz = info->band == NL80211_BAND_2GHZ; pmodal = &eep->modalHeader[is_2ghz]; power_ht40delta = pmodal->ht40PowerIncForPdadc; } else { @@ -1241,7 +1241,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, /* legacy rates */ rate = &common->sbands[tx_info->band].bitrates[rates[i].idx]; - if ((tx_info->band == IEEE80211_BAND_2GHZ) && + if ((tx_info->band == NL80211_BAND_2GHZ) && !(rate->flags & IEEE80211_RATE_ERP_G)) phy = WLAN_RC_PHY_CCK; else diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c index 
a2f005703c04..7d4a72dc98db 100644 --- a/drivers/net/wireless/ath/carl9170/mac.c +++ b/drivers/net/wireless/ath/carl9170/mac.c @@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar) if (conf_is_ht40(&ar->hw->conf)) val = 0x010a; else { - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) val = 0x105; else val = 0x104; @@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar) rts_rate = 0x1da; cts_rate = 0x10a; } else { - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { /* 11 mbit CCK */ rts_rate = 033; cts_rate = 003; @@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar) return 0; } - if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) || + if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) || vif->bss_conf.use_short_slot) slottime = 9; @@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar) basic |= (vif->bss_conf.basic_rates & 0xff0) << 4; rcu_read_unlock(); - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */ else mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */ @@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel) chains = AR9170_TX_PHY_TXCHAIN_1; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: power = ar->power_2G_ofdm[0] & 0x3f; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: power = ar->power_5G_leg[0] & 0x3f; break; default: diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4fe8482b4187..202f421e0e37 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1653,7 +1653,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx, return err; } - for (b = 0; b < IEEE80211_NUM_BANDS; b++) { + for (b = 0; b < NUM_NL80211_BANDS; b++) { band = ar->hw->wiphy->bands[b]; if (!band) @@ -1928,13 +1928,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) } if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &carl9170_band_2GHz; chans += carl9170_band_2GHz.n_channels; bands++; } if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { - ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = &carl9170_band_5GHz; chans += carl9170_band_5GHz.n_channels; bands++; diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c index dca6df13fd5b..34d9fd77046e 100644 --- a/drivers/net/wireless/ath/carl9170/phy.c +++ b/drivers/net/wireless/ath/carl9170/phy.c @@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar, return carl9170_regwrite_result(); } -static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) +static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band) { int i, err; u32 val; - bool is_2ghz = band == IEEE80211_BAND_2GHZ; + bool is_2ghz = band == NL80211_BAND_2GHZ; bool is_40mhz = conf_is_ht40(&ar->hw->conf); carl9170_regwrite_begin(ar); @@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar, u8 f, tmp; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: f = channel->center_freq - 2300; 
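			/* Editor's worked example, not part of the patch: channel 1
			 * at 2412 MHz gives f = 2412 - 2300 = 112 here, while the
			 * 5 GHz branch below maps 5180 MHz to (5180 - 4800) / 5 = 76.
			 */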
cal_freq_pier = ar->eeprom.cal_freq_pier_2G; i = AR5416_NUM_2G_CAL_PIERS - 1; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: f = (channel->center_freq - 4800) / 5; cal_freq_pier = ar->eeprom.cal_freq_pier_5G; i = AR5416_NUM_5G_CAL_PIERS - 1; @@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar, int j; switch (channel->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: cal_pier_data = &ar->eeprom. cal_pier_data_2G[chain][idx]; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: cal_pier_data = &ar->eeprom. cal_pier_data_5G[chain][idx]; break; @@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw) /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */ return; - if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { + if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { modes = mode_list_2ghz; nr_modes = ARRAY_SIZE(mode_list_2ghz); } else { @@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, return err; err = carl9170_init_rf_banks_0_7(ar, - channel->band == IEEE80211_BAND_5GHZ); + channel->band == NL80211_BAND_5GHZ); if (err) return err; @@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, return err; err = carl9170_init_rf_bank4_pwr(ar, - channel->band == IEEE80211_BAND_5GHZ, + channel->band == NL80211_BAND_5GHZ, channel->center_freq, bw); if (err) return err; diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index d66533cbc38a..0c34c8729dc6 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar, return -EINVAL; } - if (status->band == IEEE80211_BAND_2GHZ) + if (status->band == NL80211_BAND_2GHZ) status->rate_idx += 4; break; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index ae86a600d920..2bf04c9edc98 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, /* +1 dBm for HT40 */ *tpc += 2; - if (info->band == IEEE80211_BAND_2GHZ) + if (info->band == NL80211_BAND_2GHZ) txpower = ar->power_2G_ht40; else txpower = ar->power_5G_ht40; } else { - if (info->band == IEEE80211_BAND_2GHZ) + if (info->band == NL80211_BAND_2GHZ) txpower = ar->power_2G_ht20; else txpower = ar->power_5G_ht20; @@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, *phyrate = txrate->idx; *tpc += txpower[idx & 7]; } else { - if (info->band == IEEE80211_BAND_2GHZ) { + if (info->band == NL80211_BAND_2GHZ) { if (idx < 4) txpower = ar->power_2G_cck; else @@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar, * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); */ } else { - if (info->band == IEEE80211_BAND_2GHZ) { + if (info->band == NL80211_BAND_2GHZ) { if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M) tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK); else diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 62077bda8dde..81f0dc1cddc0 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -38,28 +38,28 @@ static int __ath_regd_init(struct ath_regulatory *reg); /* We enable active scan on these a case by case basis by regulatory domain */ #define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\ 
NL80211_RRF_NO_IR) -#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\ +#define ATH9K_2GHZ_CH14 REG_RULE(2484 - 10, 2484 + 10, 20, 0, 20,\ NL80211_RRF_NO_IR | \ NL80211_RRF_NO_OFDM) /* We allow IBSS on these on a case by case basis by regulatory domain */ -#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\ +#define ATH9K_5GHZ_5180_5320 REG_RULE(5180 - 10, 5320 + 10, 160, 0, 20,\ NL80211_RRF_NO_IR) -#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\ +#define ATH9K_5GHZ_5500_5825 REG_RULE(5500 - 10, 5825 + 10, 80, 0, 20,\ NL80211_RRF_NO_IR) -#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\ +#define ATH9K_5GHZ_5745_5825 REG_RULE(5745 - 10, 5825 + 10, 80, 0, 20,\ NL80211_RRF_NO_IR) #define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ ATH9K_2GHZ_CH12_13, \ ATH9K_2GHZ_CH14 -#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \ - ATH9K_5GHZ_5470_5850 +#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5180_5320, \ + ATH9K_5GHZ_5500_5825 /* This one skips what we call "mid band" */ -#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \ - ATH9K_5GHZ_5725_5850 +#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5180_5320, \ + ATH9K_5GHZ_5745_5825 /* Can be used for: * 0x60, 0x61, 0x62 */ @@ -260,7 +260,7 @@ static bool ath_is_radar_freq(u16 center_freq, { if (reg->country_code == CTRY_INDIA) return (center_freq >= 5500 && center_freq <= 5700); - return (center_freq >= 5260 && center_freq <= 5700); + return (center_freq >= 5260 && center_freq <= 5720); } static void ath_force_clear_no_ir_chan(struct wiphy *wiphy, @@ -340,12 +340,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy, struct ath_regulatory *reg, enum nl80211_reg_initiator initiator) { - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; unsigned int i; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; sband = wiphy->bands[band]; @@ -378,7 +378,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy, { struct ieee80211_supported_band *sband; - sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = wiphy->bands[NL80211_BAND_2GHZ]; if (!sband) return; @@ -407,10 +407,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy, struct ieee80211_channel *ch; unsigned int i; - if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + if (!wiphy->bands[NL80211_BAND_5GHZ]) return; - sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + sband = wiphy->bands[NL80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; @@ -636,6 +636,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, struct regulatory_request *request)) { const struct ieee80211_regdomain *regd; + u32 chan_num; + struct ieee80211_channel *chan; wiphy->reg_notifier = reg_notifier; wiphy->regulatory_flags |= REGULATORY_STRICT_REG | @@ -658,6 +660,20 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, } wiphy_apply_custom_regulatory(wiphy, regd); + + /* For regulatory rules similar to the following: + * REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), channels 12/13 are enabled + * due to support of 5/10 MHz. 
+ * Therefore, disable 2.4 Ghz channels that dont have 20 mhz bw + */ + for (chan_num = 0; + chan_num < wiphy->bands[IEEE80211_BAND_2GHZ]->n_channels; + chan_num++) { + chan = &wiphy->bands[IEEE80211_BAND_2GHZ]->channels[chan_num]; + if (chan->flags & IEEE80211_CHAN_NO_20MHZ) + chan->flags |= IEEE80211_CHAN_DISABLED; + } + ath_reg_apply_radar_flags(wiphy, reg); ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); return 0; @@ -777,7 +793,7 @@ ath_regd_init(struct ath_regulatory *reg, EXPORT_SYMBOL(ath_regd_init); u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, - enum ieee80211_band band) + enum nl80211_band band) { if (!reg->regpair || (reg->country_code == CTRY_DEFAULT && @@ -799,9 +815,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, } switch (band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: return reg->regpair->reg_2ghz_ctl; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: return reg->regpair->reg_5ghz_ctl; default: return NO_CTL; diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 184b6810cde9..8553ab44d930 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -260,7 +260,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)); u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, - enum ieee80211_band band); + enum nl80211_band band); void ath_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct ath_regulatory *reg); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 274d114962e8..1fe727f2b41b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); #define CHAN2G(_freq, _idx) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 25, \ } #define CHAN5G(_freq, _idx) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 25, \ @@ -516,7 +516,7 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, - enum ieee80211_band band) + enum nl80211_band band) { int i, size; u16 *rates_table; @@ -529,7 +529,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates); rates_table = sta_priv->supported_rates.dsss_rates; - if (band == IEEE80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { for (i = 0; i < size; i++) { if (rates & 0x01) { rates_table[i] = wcn_2ghz_rates[i].hw_value; @@ -958,8 +958,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); - wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz; - wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz; + wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; + wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index c9263e1c75d4..dfd1445b5489 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ 
b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn, struct ieee80211_sta *sta, struct wcn36xx_hal_config_bss_params *bss_params) { - if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn)) + if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn)) bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE; else if (sta && sta->ht_cap.ht_supported) bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE; - else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f)) + else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f)) bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE; else bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE; diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 99c21aac68bd..6c47a7336c38 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -225,7 +225,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd, /* default rate for unicast */ if (ieee80211_is_mgmt(hdr->frame_control)) - bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ? + bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ? WCN36XX_BD_RATE_CTRL : WCN36XX_BD_RATE_MGMT; else if (ieee80211_is_ctl(hdr->frame_control)) diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 7df13a684d2d..2d571fd709c2 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -41,3 +41,35 @@ config WIL6210_TRACING option if you are interested in debugging the driver. If unsure, say Y to make it easier to debug problems. + +config WIL6210_WRITE_IOCTL + bool "wil6210 write ioctl to the device" + depends on WIL6210 + default y + ---help--- + Say Y here to allow write-access from user-space to + the device memory through ioctl. This is useful for + debugging purposes only. + + If unsure, say N. + +config WIL6210_PLATFORM_MSM + bool "wil6210 MSM platform specific support" + depends on WIL6210 + depends on ARCH_QCOM + default y + ---help--- + Say Y here to enable wil6210 driver support for MSM + platform specific features + +config WIL6210_DEBUGFS + bool "wil6210 debugfs support" + depends on WIL6210 + depends on DEBUG_FS + default y + ---help--- + Say Y here to enable wil6210 debugfs support, using the + kernel debugfs infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say Y to make it easier to debug problems. 
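A note on the new CONFIG_WIL6210_DEBUGFS option above: with debugfs.o compiled only when the option is set (see the Makefile change below), the rest of the driver normally builds against inline no-op stubs when it is disabled. The following is a minimal sketch of that conventional header pattern; wil6210_debugfs_init()/wil6210_debugfs_remove() are the driver's existing entry points, but the #else stub bodies are an assumption for illustration and are not taken from this patch.

/* Illustrative only: optional debugfs component gated by a Kconfig symbol.
 * Real declarations when CONFIG_WIL6210_DEBUGFS is set, inline no-op stubs
 * otherwise, so callers need no #ifdefs of their own.
 */
#ifdef CONFIG_WIL6210_DEBUGFS
int wil6210_debugfs_init(struct wil6210_priv *wil);
void wil6210_debugfs_remove(struct wil6210_priv *wil);
#else
static inline int wil6210_debugfs_init(struct wil6210_priv *wil)
{
        return 0;
}

static inline void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
}
#endif /* CONFIG_WIL6210_DEBUGFS */
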
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index fdf63d5fe82b..94df1decae7a 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -4,7 +4,8 @@ wil6210-y := main.o wil6210-y += netdev.o wil6210-y += cfg80211.o wil6210-y += pcie_bus.o -wil6210-y += debugfs.o +wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o +wil6210-y += sysfs.o wil6210-y += wmi.o wil6210-y += interrupt.o wil6210-y += txrx.o @@ -18,8 +19,13 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o wil6210-y += ethtool.o wil6210-y += wil_crash_dump.o +wil6210-y += p2p.o +wil6210-y += ftm.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) subdir-ccflags-y += -D__CHECK_ENDIAN__ + +MSM_11AD_PATH = drivers/platform/msm/msm_11ad +CFLAGS_wil_platform.o := -I$(MSM_11AD_PATH) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 20d07ef679e8..353412b5cdd6 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,11 +15,30 @@ */ #include <linux/etherdevice.h> +#include <linux/moduleparam.h> +#include <net/netlink.h> #include "wil6210.h" #include "wmi.h" +#include "ftm.h" + +#define WIL_MAX_ROC_DURATION_MS 5000 + +bool disable_ap_sme; +module_param(disable_ap_sme, bool, 0444); +MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); + +static bool ignore_reg_hints = true; +module_param(ignore_reg_hints, bool, 0444); +MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)"); + +#ifdef CONFIG_PM +static struct wiphy_wowlan_support wil_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif #define CHAN60G(_channel, _flags) { \ - .band = IEEE80211_BAND_60GHZ, \ + .band = NL80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ @@ -34,6 +53,193 @@ static struct ieee80211_channel wil_60ghz_channels[] = { /* channel 4 not supported yet */ }; +/* Vendor id to be used in vendor specific command and events + * to user space. 
+ * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +#define QCA_NL80211_VENDOR_ID 0x001374 + +#define WIL_MAX_RF_SECTORS (128) +#define WIL_CID_ALL (0xff) + +enum qca_wlan_vendor_attr_rf_sector { + QCA_ATTR_MAC_ADDR = 6, + QCA_ATTR_PAD = 13, + QCA_ATTR_TSF = 29, + QCA_ATTR_DMG_RF_SECTOR_INDEX = 30, + QCA_ATTR_DMG_RF_SECTOR_TYPE = 31, + QCA_ATTR_DMG_RF_MODULE_MASK = 32, + QCA_ATTR_DMG_RF_SECTOR_CFG = 33, + QCA_ATTR_DMG_RF_SECTOR_MAX, +}; + +enum qca_wlan_vendor_attr_dmg_rf_sector_type { + QCA_ATTR_DMG_RF_SECTOR_TYPE_RX, + QCA_ATTR_DMG_RF_SECTOR_TYPE_TX, + QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX +}; + +enum qca_wlan_vendor_attr_dmg_rf_sector_cfg { + QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0, + QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX, + QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0, + QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1, + QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2, + QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI, + QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO, + QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16, + + /* keep last */ + QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST, + QCA_ATTR_DMG_RF_SECTOR_CFG_MAX = + QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1 +}; + +static const struct +nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = { + [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN }, + [QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 }, + [QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 }, + [QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED }, +}; + +static const struct +nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = { + [QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 }, + [QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 }, +}; + +enum qca_nl80211_vendor_subcmds { + QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128, + QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129, + QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130, + QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131, + QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132, + QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134, + QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142, +}; + +static int wil_rf_sector_get_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len); +static int wil_rf_sector_set_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len); +static int wil_rf_sector_get_selected(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len); +static int wil_rf_sector_set_selected(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len); + +/* vendor specific commands */ +static const struct wiphy_vendor_command 
wil_nl80211_vendor_commands[] = { + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_get_capabilities + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_start_session + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_abort_session + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_configure_responder + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_aoa_start_measurement + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_aoa_abort_measurement + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_rf_sector_get_cfg + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_rf_sector_set_cfg + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_rf_sector_get_selected + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_rf_sector_set_selected + }, +}; + +/* vendor specific events */ +static const struct nl80211_vendor_cmd_info wil_nl80211_vendor_events[] = { + [QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT + }, + [QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE + }, + [QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + }, +}; + static struct ieee80211_supported_band wil_band_60ghz = { .channels = wil_60ghz_channels, .n_channels = ARRAY_SIZE(wil_60ghz_channels), @@ -60,9 +266,16 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { }, [NL80211_IFTYPE_AP] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | - BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + BIT(IEEE80211_STYPE_PROBE_RESP >> 4) | + BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | - BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + 
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | @@ -76,12 +289,24 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, }; static const u32 wil_cipher_suites[] = { WLAN_CIPHER_SUITE_GCMP, }; +static const char * const key_usage_str[] = { + [WMI_KEY_USE_PAIRWISE] = "PTK", + [WMI_KEY_USE_RX_GROUP] = "RX_GTK", + [WMI_KEY_USE_TX_GROUP] = "TX_GTK", +}; + int wil_iftype_nl2wmi(enum nl80211_iftype type) { static const struct { @@ -113,7 +338,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, .interval_usec = 0, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_notify_req_done_event evt; } __packed reply; struct wil_net_stats *stats = &wil->sta[cid].stats; @@ -126,12 +351,12 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" - " BF status 0x%08x SNR 0x%08x SQI %d%%\n" + " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, - le32_to_cpu(reply.evt.snr_val), + reply.evt.rssi, reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), @@ -165,7 +390,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, if (test_bit(wil_status_fwconnected, wil->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); - sinfo->signal = reply.evt.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, + wil->fw_capabilities)) + sinfo->signal = reply.evt.rssi; + else + sinfo->signal = reply.evt.sqi; } return rc; @@ -180,7 +409,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy, int cid = wil_find_cid(wil, mac); - wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); + wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid); if (cid < 0) return cid; @@ -219,20 +448,116 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, return -ENOENT; ether_addr_copy(mac, wil->sta[cid].addr); - wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); + wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid); rc = wil_cid_fill_sinfo(wil, cid, sinfo); return rc; } +static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "start_p2p_device: entered\n"); + wil->p2p.p2p_dev_started = 1; + return 0; +} + +static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_p2p_info *p2p = &wil->p2p; + + if (!p2p->p2p_dev_started) + return; + + wil_dbg_misc(wil, "stop_p2p_device: entered\n"); + mutex_lock(&wil->mutex); + mutex_lock(&wil->p2p_wdev_mutex); + wil_p2p_stop_radio_operations(wil); + p2p->p2p_dev_started = 0; + mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->mutex); +} + +static struct wireless_dev * +wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + u32 *flags, struct vif_params *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct net_device *ndev = wil_to_ndev(wil); + 
struct wireless_dev *p2p_wdev; + + wil_dbg_misc(wil, "add_iface\n"); + + if (type != NL80211_IFTYPE_P2P_DEVICE) { + wil_err(wil, "unsupported iftype %d\n", type); + return ERR_PTR(-EINVAL); + } + + if (wil->p2p_wdev) { + wil_err(wil, "P2P_DEVICE interface already created\n"); + return ERR_PTR(-EINVAL); + } + + p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL); + if (!p2p_wdev) + return ERR_PTR(-ENOMEM); + + p2p_wdev->iftype = type; + p2p_wdev->wiphy = wiphy; + /* use our primary ethernet address */ + ether_addr_copy(p2p_wdev->address, ndev->perm_addr); + + wil->p2p_wdev = p2p_wdev; + + return p2p_wdev; +} + +static int wil_cfg80211_del_iface(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "del_iface\n"); + + if (wdev != wil->p2p_wdev) { + wil_err(wil, "delete of incorrect interface 0x%p\n", wdev); + return -EINVAL; + } + + wil_cfg80211_stop_p2p_device(wiphy, wdev); + wil_p2p_wdev_free(wil); + + return 0; +} + static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); + int rc; + + wil_dbg_misc(wil, "change_iface: type=%d\n", type); + + if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) { + wil_dbg_misc(wil, "interface is up. resetting...\n"); + mutex_lock(&wil->mutex); + __wil_down(wil); + rc = __wil_up(wil); + mutex_unlock(&wil->mutex); + + if (rc) + return rc; + } switch (type) { case NL80211_IFTYPE_STATION: @@ -260,7 +585,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = request->wdev; struct { struct wmi_start_scan_cmd cmd; u16 chnl[4]; @@ -268,15 +593,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, uint i, n; int rc; - if (wil->scan_request) { - wil_err(wil, "Already scanning\n"); - return -EAGAIN; - } + wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype); /* check we are client side */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_DEVICE: break; default: return -EOPNOTSUPP; @@ -288,14 +611,46 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } + mutex_lock(&wil->mutex); + + mutex_lock(&wil->p2p_wdev_mutex); + if (wil->scan_request || wil->p2p.discovery_started) { + wil_err(wil, "Already scanning\n"); + mutex_unlock(&wil->p2p_wdev_mutex); + rc = -EAGAIN; + goto out; + } + mutex_unlock(&wil->p2p_wdev_mutex); + + if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + if (!wil->p2p.p2p_dev_started) { + wil_err(wil, "P2P search requested on stopped P2P device\n"); + rc = -EIO; + goto out; + } + /* social scan on P2P_DEVICE is handled as p2p search */ + if (wil_p2p_is_social_scan(request)) { + wil->scan_request = request; + wil->radio_wdev = wdev; + rc = wil_p2p_search(wil, request); + if (rc) { + wil->radio_wdev = wil_to_wdev(wil); + wil->scan_request = NULL; + } + goto out; + } + } + + (void)wil_p2p_stop_discovery(wil); + wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); for (i = 0; i < request->n_ssids; i++) { wil_dbg_misc(wil, "SSID[%d]", i); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - request->ssids[i].ssid, - 
request->ssids[i].ssid_len); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + request->ssids[i].ssid, + request->ssids[i].ssid_len, true); } if (request->n_ssids) @@ -306,13 +661,14 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, if (rc) { wil_err(wil, "set SSID for scan request failed: %d\n", rc); - return rc; + goto out; } wil->scan_request = request; mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); memset(&cmd, 0, sizeof(cmd)); + cmd.cmd.scan_type = WMI_ACTIVE_SCAN; cmd.cmd.num_channels = 0; n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { @@ -331,27 +687,63 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } if (request->ie_len) - print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, - request->ie, request->ie_len); + wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1, + request->ie, request->ie_len, true); else wil_dbg_misc(wil, "Scan has no IE's\n"); rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) - goto out; + goto out_restore; + if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) { + cmd.cmd.discovery_mode = 1; + wil_dbg_misc(wil, "active scan with discovery_mode=1\n"); + } + + wil->radio_wdev = wdev; rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); -out: +out_restore: if (rc) { del_timer_sync(&wil->scan_timer); + wil->radio_wdev = wil_to_wdev(wil); wil->scan_request = NULL; } - +out: + mutex_unlock(&wil->mutex); return rc; } +static void wil_cfg80211_abort_scan(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype); + + mutex_lock(&wil->mutex); + mutex_lock(&wil->p2p_wdev_mutex); + + if (!wil->scan_request) + goto out; + + if (wdev != wil->scan_request->wdev) { + wil_dbg_misc(wil, "abort scan was called on the wrong iface\n"); + goto out; + } + + if (wil->radio_wdev == wil->p2p_wdev) + wil_p2p_stop_radio_operations(wil); + else + wil_abort_scan(wil, true); + +out: + mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->mutex); +} + static void wil_print_crypto(struct wil6210_priv *wil, struct cfg80211_crypto_settings *c) { @@ -390,6 +782,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil, print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); + wil_info(wil, " PBSS: %d\n", sme->pbss); wil_print_crypto(wil, &sme->crypto); } @@ -404,7 +797,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, const u8 *rsn_eid; int ch; int rc = 0; + enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; + wil_dbg_misc(wil, "connect\n"); wil_print_connect_params(wil, sme); if (test_bit(wil_status_fwconnecting, wil->status) || @@ -422,9 +817,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, if (sme->privacy && !rsn_eid) wil_info(wil, "WSC connection\n"); + if (sme->pbss) + bss_type = IEEE80211_BSS_TYPE_PBSS; + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, - IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); + bss_type, IEEE80211_PRIVACY_ANY); if (!bss) { wil_err(wil, "Unable to find BSS\n"); return -ENOENT; @@ -437,6 +835,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, goto out; } wil->privacy = sme->privacy; + wil->pbss = sme->pbss; if (wil->privacy) { /* For secure assoc, remove old keys */ @@ -513,9 +912,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { netif_carrier_on(ndev); + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); + wil->bss = bss; /* Connect can take lots of time */ mod_timer(&wil->connect_timer, - jiffies + msecs_to_jiffies(2000)); + jiffies + msecs_to_jiffies(5000)); } else { clear_bit(wil_status_fwconnecting, wil->status); } @@ -533,30 +934,79 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); + wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code); - rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); + if (!(test_bit(wil_status_fwconnecting, wil->status) || + test_bit(wil_status_fwconnected, wil->status))) { + wil_err(wil, "Disconnect was called while disconnected\n"); + return 0; + } + + wil->locally_generated_disc = true; + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, NULL, 0, + WIL6210_DISCONNECT_TO_MS); + if (rc) + wil_err(wil, "disconnect error %d\n", rc); return rc; } +static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + + /* these parameters are explicitly not supported */ + if (changed & (WIPHY_PARAM_RETRY_LONG | + WIPHY_PARAM_FRAG_THRESHOLD | + WIPHY_PARAM_RTS_THRESHOLD)) + return -ENOTSUPP; + + if (changed & WIPHY_PARAM_RETRY_SHORT) { + rc = wmi_set_mgmt_retry(wil, wiphy->retry_short); + if (rc) + return rc; + } + + return 0; +} + int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { const u8 *buf = params->buf; - size_t len = params->len; + size_t len = params->len, total; struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; bool tx_status = false; struct ieee80211_mgmt *mgmt_frame = (void *)buf; struct wmi_sw_tx_req_cmd *cmd; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_sw_tx_complete_event evt; } __packed evt; - cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); + /* Note, currently we do not support the "wait" parameter, user-space + * must call remain_on_channel before mgmt_tx or listen on a channel + * another way (AP/PCP or connected station) + * in addition we need to check if specified "chan" argument is + * different from currently "listened" channel and fail if it is. 
+ */ + + wil_dbg_misc(wil, "mgmt_tx\n"); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + + total = sizeof(*cmd) + len; + if (total < len) + return -EINVAL; + + cmd = kmalloc(total, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; @@ -566,7 +1016,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cmd->len = cpu_to_le16(len); memcpy(cmd->payload, buf, len); - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len, + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (rc == 0) tx_status = !evt.evt.status; @@ -582,7 +1032,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); wdev->preset_chandef = *chandef; @@ -592,22 +1042,19 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, bool pairwise) { - struct wireless_dev *wdev = wil->wdev; + struct wireless_dev *wdev = wil_to_wdev(wil); enum wmi_key_usage rc; - static const char * const key_usage_str[] = { - [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE", - [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP", - [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP", - }; if (pairwise) { rc = WMI_KEY_USE_PAIRWISE; } else { switch (wdev->iftype) { case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: rc = WMI_KEY_USE_RX_GROUP; break; case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: rc = WMI_KEY_USE_TX_GROUP; break; default: @@ -617,25 +1064,139 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, break; } } - wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); + wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]); return rc; } +static struct wil_sta_info * +wil_find_sta_by_key_usage(struct wil6210_priv *wil, + enum wmi_key_usage key_usage, const u8 *mac_addr) +{ + int cid = -EINVAL; + + if (key_usage == WMI_KEY_USE_TX_GROUP) + return NULL; /* not needed */ + + /* supplicant provides Rx group key in STA mode with NULL MAC address */ + if (mac_addr) + cid = wil_find_cid(wil, mac_addr); + else if (key_usage == WMI_KEY_USE_RX_GROUP) + cid = wil_find_cid_by_idx(wil, 0); + if (cid < 0) { + wil_err(wil, "No CID for %pM %s\n", mac_addr, + key_usage_str[key_usage]); + return ERR_PTR(cid); + } + + return &wil->sta[cid]; +} + +static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs, + struct key_params *params) +{ + struct wil_tid_crypto_rx_single *cc; + int tid; + + if (!cs) + return; + + switch (key_usage) { + case WMI_KEY_USE_PAIRWISE: + for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { + cc = &cs->tid_crypto_rx[tid].key_id[key_index]; + if (params->seq) + memcpy(cc->pn, params->seq, + IEEE80211_GCMP_PN_LEN); + else + memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN); + cc->key_set = true; + } + break; + case WMI_KEY_USE_RX_GROUP: + cc = &cs->group_crypto_rx.key_id[key_index]; + if (params->seq) + memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN); + else + memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN); + cc->key_set = true; + break; + default: + break; + } +} + +static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs) +{ + struct wil_tid_crypto_rx_single *cc; + int tid; + + if (!cs) + 
return; + + switch (key_usage) { + case WMI_KEY_USE_PAIRWISE: + for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { + cc = &cs->tid_crypto_rx[tid].key_id[key_index]; + cc->key_set = false; + } + break; + case WMI_KEY_USE_RX_GROUP: + cc = &cs->group_crypto_rx.key_id[key_index]; + cc->key_set = false; + break; + default: + break; + } +} + static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { + int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); + struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, + mac_addr); + + if (!params) { + wil_err(wil, "NULL params\n"); + return -EINVAL; + } + + wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + + if (IS_ERR(cs)) { + wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } - wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, - pairwise ? "PTK" : "GTK"); + wil_del_rx_key(key_index, key_usage, cs); - return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, - params->key, key_usage); + if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { + wil_err(wil, + "Wrong PN len %d, %pM %s[%d] PN %*phN\n", + params->seq_len, mac_addr, + key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } + + rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, + params->key, key_usage); + if (!rc) + wil_set_crypto_rx(key_index, key_usage, cs, params); + + return rc; } static int wil_cfg80211_del_key(struct wiphy *wiphy, @@ -645,9 +1206,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); + struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, + mac_addr); + + wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr, + key_usage_str[key_usage], key_index); - wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, - pairwise ? 
"PTK" : "GTK"); + if (IS_ERR(cs)) + wil_info(wil, "Not connected, %pM %s[%d]\n", + mac_addr, key_usage_str[key_usage], key_index); + + if (!IS_ERR_OR_NULL(cs)) + wil_del_rx_key(key_index, key_usage, cs); return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); } @@ -658,6 +1228,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, u8 key_index, bool unicast, bool multicast) { + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "set_default_key: entered\n"); return 0; } @@ -670,15 +1243,11 @@ static int wil_remain_on_channel(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - /* TODO: handle duration */ - wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration); - - rc = wmi_set_channel(wil, chan->hw_value); - if (rc) - return rc; - - rc = wmi_rxon(wil, true); + wil_dbg_misc(wil, + "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n", + chan->center_freq, duration, wdev->iftype); + rc = wil_p2p_listen(wil, wdev, duration, chan, cookie); return rc; } @@ -687,51 +1256,99 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - int rc; - - wil_info(wil, "%s()\n", __func__); - rc = wmi_rxon(wil, false); + wil_dbg_misc(wil, "cancel_remain_on_channel\n"); - return rc; + return wil_p2p_cancel_listen(wil, cookie); } -static void wil_print_bcon_data(struct cfg80211_beacon_data *b) +/** + * find a specific IE in a list of IEs + * return a pointer to the beginning of IE in the list + * or NULL if not found + */ +static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie, + u16 ie_len) { - print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, - b->head, b->head_len); - print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET, - b->tail, b->tail_len); - print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET, - b->beacon_ies, b->beacon_ies_len); - print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET, - b->probe_resp, b->probe_resp_len); - print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET, - b->proberesp_ies, b->proberesp_ies_len); - print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET, - b->assocresp_ies, b->assocresp_ies_len); + struct ieee80211_vendor_ie *vie; + u32 oui; + + /* IE tag at offset 0, length at offset 1 */ + if (ie_len < 2 || 2 + ie[1] > ie_len) + return NULL; + + if (ie[0] != WLAN_EID_VENDOR_SPECIFIC) + return cfg80211_find_ie(ie[0], ies, ies_len); + + /* make sure there is room for 3 bytes OUI + 1 byte OUI type */ + if (ie[1] < 4) + return NULL; + vie = (struct ieee80211_vendor_ie *)ie; + oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2]; + return cfg80211_find_vendor_ie(oui, vie->oui_type, ies, + ies_len); } -static int wil_fix_bcon(struct wil6210_priv *wil, - struct cfg80211_beacon_data *bcon) +/** + * merge the IEs in two lists into a single list. + * do not include IEs from the second list which exist in the first list. 
+ * add only vendor specific IEs from second list to keep + * the merged list sorted (since vendor-specific IE has the + * highest tag number) + * caller must free the allocated memory for merged IEs + */ +static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, + const u8 *ies2, u16 ies2_len, + u8 **merged_ies, u16 *merged_len) { - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + u8 *buf, *dpos; + const u8 *spos; - if (bcon->probe_resp_len <= hlen) + if (ies1_len == 0 && ies2_len == 0) { + *merged_ies = NULL; + *merged_len = 0; return 0; + } -/* always use IE's from full probe frame, they has more info - * notable RSN - */ - bcon->proberesp_ies = f->u.probe_resp.variable; - bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; - if (!bcon->assocresp_ies) { - bcon->assocresp_ies = bcon->proberesp_ies; - bcon->assocresp_ies_len = bcon->proberesp_ies_len; + buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, ies1, ies1_len); + dpos = buf + ies1_len; + spos = ies2; + while (spos + 1 < ies2 + ies2_len) { + /* IE tag at offset 0, length at offset 1 */ + u16 ielen = 2 + spos[1]; + + if (spos + ielen > ies2 + ies2_len) + break; + if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && + !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { + memcpy(dpos, spos, ielen); + dpos += ielen; + } + spos += ielen; } - return 1; + *merged_ies = buf; + *merged_len = dpos - buf; + return 0; +} + +static void wil_print_bcon_data(struct cfg80211_beacon_data *b) +{ + wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1, + b->head, b->head_len, true); + wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1, + b->tail, b->tail_len, true); + wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->beacon_ies, b->beacon_ies_len, true); + wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, + b->probe_resp, b->probe_resp_len, true); + wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->proberesp_ies, b->proberesp_ies_len, true); + wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->assocresp_ies, b->assocresp_ies_len, true); } /* internal functions for device reset and starting AP */ @@ -740,21 +1357,43 @@ static int _wil_cfg80211_set_ies(struct wiphy *wiphy, { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + u16 len = 0, proberesp_len = 0; + u8 *ies = NULL, *proberesp = NULL; + + if (bcon->probe_resp) { + struct ieee80211_mgmt *f = + (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + proberesp = f->u.probe_resp.variable; + proberesp_len = bcon->probe_resp_len - hlen; + } + rc = _wil_cfg80211_merge_extra_ies(proberesp, + proberesp_len, + bcon->proberesp_ies, + bcon->proberesp_ies_len, + &ies, &len); - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, - bcon->proberesp_ies); if (rc) - return rc; + goto out; - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, - bcon->assocresp_ies); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies); + if (rc) + goto out; + + if (bcon->assocresp_ies) + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, + bcon->assocresp_ies_len, bcon->assocresp_ies); + else + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies); #if 0 /* to use beacon IE's, remove this #if 0 */ if (rc) - return rc; + goto out; rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); #endif - +out: + kfree(ies); return rc; } @@ -763,12 
+1402,22 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, const u8 *ssid, size_t ssid_len, u32 privacy, int bi, u8 chan, struct cfg80211_beacon_data *bcon, - u8 hidden_ssid) + u8 hidden_ssid, u32 pbss) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO); + + if (pbss) + wmi_nettype = WMI_NETTYPE_P2P; + + wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go); + if (is_go && !pbss) { + wil_err(wil, "P2P GO must be in PBSS\n"); + return -ENOTSUPP; + } wil_set_recovery_state(wil, fw_recovery_idle); @@ -790,10 +1439,12 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, wil->privacy = privacy; wil->channel = chan; wil->hidden_ssid = hidden_ssid; + wil->pbss = pbss; netif_carrier_on(ndev); + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); - rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); + rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go); if (rc) goto err_pcp_start; @@ -807,6 +1458,7 @@ err_bcast: wmi_pcp_stop(wil); err_pcp_start: netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); out: mutex_unlock(&wil->mutex); return rc; @@ -820,17 +1472,12 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, int rc; u32 privacy = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "change_beacon\n"); wil_print_bcon_data(bcon); - if (wil_fix_bcon(wil, bcon)) { - wil_dbg_misc(wil, "Fixed bcon\n"); - wil_print_bcon_data(bcon); - } - - if (bcon->proberesp_ies && - cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, - bcon->proberesp_ies_len)) + if (bcon->tail && + cfg80211_find_ie(WLAN_EID_RSN, bcon->tail, + bcon->tail_len)) privacy = 1; /* in case privacy has changed, need to restart the AP */ @@ -844,7 +1491,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, wdev->ssid_len, privacy, wdev->beacon_interval, wil->channel, bcon, - wil->hidden_ssid); + wil->hidden_ssid, + wil->pbss); } else { rc = _wil_cfg80211_set_ies(wiphy, bcon); } @@ -863,7 +1511,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct cfg80211_crypto_settings *crypto = &info->crypto; u8 hidden_ssid; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "start_ap\n"); if (!channel) { wil_err(wil, "AP: No channel???\n"); @@ -895,20 +1543,16 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, info->hidden_ssid); wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - info->ssid, info->ssid_len); + wil_dbg_misc(wil, "PBSS %d\n", info->pbss); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + info->ssid, info->ssid_len, true); wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); - if (wil_fix_bcon(wil, bcon)) { - wil_dbg_misc(wil, "Fixed bcon\n"); - wil_print_bcon_data(bcon); - } - rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, - bcon, hidden_ssid); + bcon, hidden_ssid, info->pbss); return rc; } @@ -918,11 +1562,14 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop_ap\n"); netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); wil_set_recovery_state(wil, fw_recovery_idle); + set_bit(wil_status_resetting, wil->status); + mutex_lock(&wil->mutex); 
wmi_pcp_stop(wil); @@ -934,13 +1581,35 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_add_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid); + + if (!disable_ap_sme) { + wil_err(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (params->aid > WIL_MAX_DMG_AID) { + wil_err(wil, "invalid aid\n"); + return -EINVAL; + } + + return wmi_new_sta(wil, mac, params->aid); +} + static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, + wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac, params->reason_code); mutex_lock(&wil->mutex); @@ -950,6 +1619,52 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int authorize; + int cid, i; + struct vring_tx_data *txdata = NULL; + + wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac, + params->sta_flags_mask, params->sta_flags_set); + + if (!disable_ap_sme) { + wil_dbg_misc(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + return 0; + + cid = wil_find_cid(wil, mac); + if (cid < 0) { + wil_err(wil, "station not found\n"); + return -ENOLINK; + } + + for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) + if (wil->vring2cid_tid[i][0] == cid) { + txdata = &wil->vring_tx_data[i]; + break; + } + + if (!txdata) { + wil_err(wil, "vring data not found\n"); + return -ENOLINK; + } + + authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED); + txdata->dot1x_open = authorize ? 
1 : 0; + wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i, + txdata->dot1x_open); + + return 0; +} + /* probe_client handling */ static void wil_probe_client_handle(struct wil6210_priv *wil, struct wil_probe_client_req *req) @@ -999,7 +1714,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil) { struct wil_probe_client_req *req, *t; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "probe_client_flush\n"); mutex_lock(&wil->probe_client_mutex); @@ -1019,7 +1734,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy, struct wil_probe_client_req *req; int cid = wil_find_cid(wil, peer); - wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid); + wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid); if (cid < 0) return -ENOLINK; @@ -1047,7 +1762,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); if (params->ap_isolate >= 0) { - wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__, + wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n", wil->ap_isolate, params->ap_isolate); wil->ap_isolate = params->ap_isolate; } @@ -1055,10 +1770,68 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, + bool enabled, int timeout) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + enum wmi_ps_profile_type ps_profile; + + wil_dbg_misc(wil, "enabled=%d, timeout=%d\n", + enabled, timeout); + + if (enabled) + ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT; + else + ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED; + + return wil_ps_update(wil, ps_profile); +} + +static int wil_cfg80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wow) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + + /* Setting the wakeup trigger based on wow is TBD */ + + if (test_bit(wil_status_suspended, wil->status)) { + wil_dbg_pm(wil, "trying to suspend while suspended\n"); + return 0; + } + + rc = wil_can_suspend(wil, false); + if (rc) + goto out; + + wil_dbg_pm(wil, "suspending\n"); + + wil_p2p_stop_discovery(wil); + + wil_abort_scan(wil, true); + +out: + return rc; +} + +static int wil_cfg80211_resume(struct wiphy *wiphy) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_pm(wil, "resuming\n"); + + return 0; +} + static struct cfg80211_ops wil_cfg80211_ops = { + .add_virtual_intf = wil_cfg80211_add_iface, + .del_virtual_intf = wil_cfg80211_del_iface, .scan = wil_cfg80211_scan, + .abort_scan = wil_cfg80211_abort_scan, .connect = wil_cfg80211_connect, .disconnect = wil_cfg80211_disconnect, + .set_wiphy_params = wil_cfg80211_set_wiphy_params, .change_virtual_intf = wil_cfg80211_change_iface, .get_station = wil_cfg80211_get_station, .dump_station = wil_cfg80211_dump_station, @@ -1073,24 +1846,36 @@ static struct cfg80211_ops wil_cfg80211_ops = { .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, + .add_station = wil_cfg80211_add_station, .del_station = wil_cfg80211_del_station, + .change_station = wil_cfg80211_change_station, .probe_client = wil_cfg80211_probe_client, .change_bss = wil_cfg80211_change_bss, + /* P2P device */ + .start_p2p_device = wil_cfg80211_start_p2p_device, + .stop_p2p_device = wil_cfg80211_stop_p2p_device, + .set_power_mgmt = wil_cfg80211_set_power_mgmt, + .suspend = wil_cfg80211_suspend, + .resume = wil_cfg80211_resume, }; static void wil_wiphy_init(struct wiphy *wiphy) { wiphy->max_scan_ssids = 1; 
wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; + wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS; wiphy->max_num_pmkids = 0 /* TODO: */; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_MONITOR); - /* TODO: enable P2P when integrated with supplicant: - * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) - */ - wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | - WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | + WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (!disable_ap_sme) + wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); wiphy->probe_resp_offload = @@ -1098,15 +1883,29 @@ static void wil_wiphy_init(struct wiphy *wiphy) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; - wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; + wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz; - /* TODO: figure this out */ + /* may change after reading FW capabilities */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites); wiphy->mgmt_stypes = wil_mgmt_stypes; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; + + wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands); + wiphy->vendor_commands = wil_nl80211_vendor_commands; + wiphy->vendor_events = wil_nl80211_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events); + + if (ignore_reg_hints) { + wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS; + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + } + +#ifdef CONFIG_PM + wiphy->wowlan = &wil_wowlan_support; +#endif } struct wireless_dev *wil_cfg80211_init(struct device *dev) @@ -1130,14 +1929,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev) set_wiphy_dev(wdev->wiphy, dev); wil_wiphy_init(wdev->wiphy); - rc = wiphy_register(wdev->wiphy); - if (rc < 0) - goto out_failed_reg; - return wdev; -out_failed_reg: - wiphy_free(wdev->wiphy); out: kfree(wdev); @@ -1153,7 +1946,467 @@ void wil_wdev_free(struct wil6210_priv *wil) if (!wdev) return; - wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); } + +void wil_p2p_wdev_free(struct wil6210_priv *wil) +{ + struct wireless_dev *p2p_wdev; + + mutex_lock(&wil->p2p_wdev_mutex); + p2p_wdev = wil->p2p_wdev; + wil->p2p_wdev = NULL; + wil->radio_wdev = wil_to_wdev(wil); + mutex_unlock(&wil->p2p_wdev_mutex); + if (p2p_wdev) { + cfg80211_unregister_wdev(p2p_wdev); + kfree(p2p_wdev); + } +} + +static int wil_rf_sector_status_to_rc(u8 status) +{ + switch (status) { + case WMI_RF_SECTOR_STATUS_SUCCESS: + return 0; + case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR: + return -EINVAL; + case WMI_RF_SECTOR_STATUS_BUSY_ERROR: + return -EAGAIN; + case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +static int wil_rf_sector_get_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + int rc; + struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + u16 sector_index; + u8 sector_type; + u32 rf_modules_vec; + struct wmi_get_rf_sector_params_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_get_rf_sector_params_done_event evt; + } __packed reply; + 
struct sk_buff *msg; + struct nlattr *nl_cfgs, *nl_cfg; + u32 i; + struct wmi_rf_sector_info *si; + + if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) + return -EOPNOTSUPP; + + rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + wil_rf_sector_policy); + if (rc) { + wil_err(wil, "Invalid rf sector ATTR\n"); + return rc; + } + + if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] || + !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] || + !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) { + wil_err(wil, "Invalid rf sector spec\n"); + return -EINVAL; + } + + sector_index = nla_get_u16( + tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]); + if (sector_index >= WIL_MAX_RF_SECTORS) { + wil_err(wil, "Invalid sector index %d\n", sector_index); + return -EINVAL; + } + + sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]); + if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) { + wil_err(wil, "Invalid sector type %d\n", sector_type); + return -EINVAL; + } + + rf_modules_vec = nla_get_u32( + tb[QCA_ATTR_DMG_RF_MODULE_MASK]); + if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) { + wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec); + return -EINVAL; + } + + cmd.sector_idx = cpu_to_le16(sector_index); + cmd.sector_type = sector_type; + cmd.rf_modules_vec = rf_modules_vec & 0xFF; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd), + WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, + &reply, sizeof(reply), + 500); + if (rc) + return rc; + if (reply.evt.status) { + wil_err(wil, "get rf sector cfg failed with status %d\n", + reply.evt.status); + return wil_rf_sector_status_to_rc(reply.evt.status); + } + + msg = cfg80211_vendor_cmd_alloc_reply_skb( + wiphy, 64 * WMI_MAX_RF_MODULES_NUM); + if (!msg) + return -ENOMEM; + + if (nla_put_u64(msg, QCA_ATTR_TSF, + le64_to_cpu(reply.evt.tsf))) + goto nla_put_failure; + + nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG); + if (!nl_cfgs) + goto nla_put_failure; + for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) { + if (!(rf_modules_vec & BIT(i))) + continue; + nl_cfg = nla_nest_start(msg, i); + if (!nl_cfg) + goto nla_put_failure; + si = &reply.evt.sectors_info[i]; + if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX, + i) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0, + le32_to_cpu(si->etype0)) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1, + le32_to_cpu(si->etype1)) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2, + le32_to_cpu(si->etype2)) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI, + le32_to_cpu(si->psh_hi)) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO, + le32_to_cpu(si->psh_lo)) || + nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16, + le32_to_cpu(si->dtype_swch_off))) + goto nla_put_failure; + nla_nest_end(msg, nl_cfg); + } + + nla_nest_end(msg, nl_cfgs); + rc = cfg80211_vendor_cmd_reply(msg); + return rc; +nla_put_failure: + kfree_skb(msg); + return -ENOBUFS; +} + +static int wil_rf_sector_set_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + int rc, tmp; + struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1]; + u16 sector_index, rf_module_index; + u8 sector_type; + u32 rf_modules_vec = 0; + struct wmi_set_rf_sector_params_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_set_rf_sector_params_done_event evt; + } __packed reply; + struct nlattr *nl_cfg; + struct wmi_rf_sector_info *si; + + if 
(!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) + return -EOPNOTSUPP; + + rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + wil_rf_sector_policy); + if (rc) { + wil_err(wil, "Invalid rf sector ATTR\n"); + return rc; + } + + if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] || + !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] || + !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) { + wil_err(wil, "Invalid rf sector spec\n"); + return -EINVAL; + } + + sector_index = nla_get_u16( + tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]); + if (sector_index >= WIL_MAX_RF_SECTORS) { + wil_err(wil, "Invalid sector index %d\n", sector_index); + return -EINVAL; + } + + sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]); + if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) { + wil_err(wil, "Invalid sector type %d\n", sector_type); + return -EINVAL; + } + + memset(&cmd, 0, sizeof(cmd)); + + cmd.sector_idx = cpu_to_le16(sector_index); + cmd.sector_type = sector_type; + nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG], + tmp) { + rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX, + nl_cfg, wil_rf_sector_cfg_policy); + if (rc) { + wil_err(wil, "invalid sector cfg\n"); + return -EINVAL; + } + + if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] || + !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) { + wil_err(wil, "missing cfg params\n"); + return -EINVAL; + } + + rf_module_index = nla_get_u8( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]); + if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) { + wil_err(wil, "invalid RF module index %d\n", + rf_module_index); + return -EINVAL; + } + rf_modules_vec |= BIT(rf_module_index); + si = &cmd.sectors_info[rf_module_index]; + si->etype0 = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0])); + si->etype1 = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1])); + si->etype2 = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2])); + si->psh_hi = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI])); + si->psh_lo = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO])); + si->dtype_swch_off = cpu_to_le32(nla_get_u32( + tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16])); + } + + cmd.rf_modules_vec = rf_modules_vec & 0xFF; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd), + WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, + &reply, sizeof(reply), + 500); + if (rc) + return rc; + return wil_rf_sector_status_to_rc(reply.evt.status); +} + +static int wil_rf_sector_get_selected(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + int rc; + struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + u8 sector_type, mac_addr[ETH_ALEN]; + int cid = 0; + struct wmi_get_selected_rf_sector_index_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_get_selected_rf_sector_index_done_event evt; + } __packed reply; + struct sk_buff *msg; + + if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) + return -EOPNOTSUPP; + + rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + wil_rf_sector_policy); + if (rc) { + wil_err(wil, "Invalid rf sector ATTR\n"); + return rc; + } + + if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) { + wil_err(wil, "Invalid rf sector spec\n"); + return 
-EINVAL; + } + sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]); + if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) { + wil_err(wil, "Invalid sector type %d\n", sector_type); + return -EINVAL; + } + + if (tb[QCA_ATTR_MAC_ADDR]) { + ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR])); + cid = wil_find_cid(wil, mac_addr); + if (cid < 0) { + wil_err(wil, "invalid MAC address %pM\n", mac_addr); + return -ENOENT; + } + } else { + if (test_bit(wil_status_fwconnected, wil->status)) { + wil_err(wil, "must specify MAC address when connected\n"); + return -EINVAL; + } + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.cid = (u8)cid; + cmd.sector_type = sector_type; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, + &cmd, sizeof(cmd), + WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, + &reply, sizeof(reply), + 500); + if (rc) + return rc; + if (reply.evt.status) { + wil_err(wil, "get rf selected sector cfg failed with status %d\n", + reply.evt.status); + return wil_rf_sector_status_to_rc(reply.evt.status); + } + + msg = cfg80211_vendor_cmd_alloc_reply_skb( + wiphy, 64 * WMI_MAX_RF_MODULES_NUM); + if (!msg) + return -ENOMEM; + + if (nla_put_u64(msg, QCA_ATTR_TSF, + le64_to_cpu(reply.evt.tsf)) || + nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX, + le16_to_cpu(reply.evt.sector_idx))) + goto nla_put_failure; + + rc = cfg80211_vendor_cmd_reply(msg); + return rc; +nla_put_failure: + kfree_skb(msg); + return -ENOBUFS; +} + +static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, + u16 sector_index, + u8 sector_type, u8 cid) +{ + struct wmi_set_selected_rf_sector_index_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_set_selected_rf_sector_index_done_event evt; + } __packed reply; + int rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.sector_idx = cpu_to_le16(sector_index); + cmd.sector_type = sector_type; + cmd.cid = (u8)cid; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, + &cmd, sizeof(cmd), + WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, + &reply, sizeof(reply), + 500); + if (rc) + return rc; + return wil_rf_sector_status_to_rc(reply.evt.status); +} + +static int wil_rf_sector_set_selected(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + int rc; + struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + u16 sector_index; + u8 sector_type, mac_addr[ETH_ALEN], i; + int cid = 0; + + if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) + return -EOPNOTSUPP; + + rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + wil_rf_sector_policy); + if (rc) { + wil_err(wil, "Invalid rf sector ATTR\n"); + return rc; + } + + if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] || + !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) { + wil_err(wil, "Invalid rf sector spec\n"); + return -EINVAL; + } + + sector_index = nla_get_u16( + tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]); + if (sector_index >= WIL_MAX_RF_SECTORS && + sector_index != WMI_INVALID_RF_SECTOR_INDEX) { + wil_err(wil, "Invalid sector index %d\n", sector_index); + return -EINVAL; + } + + sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]); + if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) { + wil_err(wil, "Invalid sector type %d\n", sector_type); + return -EINVAL; + } + + if (tb[QCA_ATTR_MAC_ADDR]) { + ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR])); + if (!is_broadcast_ether_addr(mac_addr)) { + cid = wil_find_cid(wil, mac_addr); + if (cid < 0) { + 
wil_err(wil, "invalid MAC address %pM\n", + mac_addr); + return -ENOENT; + } + } else { + if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) { + wil_err(wil, "broadcast MAC valid only with unlocking\n"); + return -EINVAL; + } + cid = -1; + } + } else { + if (test_bit(wil_status_fwconnected, wil->status)) { + wil_err(wil, "must specify MAC address when connected\n"); + return -EINVAL; + } + /* otherwise, using cid=0 for unassociated station */ + } + + if (cid >= 0) { + rc = wil_rf_sector_wmi_set_selected(wil, sector_index, + sector_type, cid); + } else { + /* unlock all cids */ + rc = wil_rf_sector_wmi_set_selected( + wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type, + WIL_CID_ALL); + if (rc == -EINVAL) { + for (i = 0; i < WIL6210_MAX_CID; i++) { + rc = wil_rf_sector_wmi_set_selected( + wil, WMI_INVALID_RF_SECTOR_INDEX, + sector_type, i); + /* the FW will silently ignore and return + * success for unused cid, so abort the loop + * on any other error + */ + if (rc) { + wil_err(wil, "unlock cid %d failed with status %d\n", + i, rc); + break; + } + } + } + } + + return rc; +} diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index 3249562d93b4..217a4591bde4 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,61 +17,71 @@ #include "wil6210.h" #include "trace.h" -void wil_err(struct wil6210_priv *wil, const char *fmt, ...) +void __wil_err(struct wil6210_priv *wil, const char *fmt, ...) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); + netdev_err(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); } -void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) +void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) { - if (net_ratelimit()) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; + struct va_format vaf; + va_list args; + + if (!net_ratelimit()) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + netdev_err(wil_to_ndev(wil), "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); +} - va_start(args, fmt); - vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); - trace_wil6210_log_err(&vaf); - va_end(args); - } +void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (!net_ratelimit()) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + netdev_dbg(wil_to_ndev(wil), "%pV", &vaf); + trace_wil6210_log_dbg(&vaf); + va_end(args); } -void wil_info(struct wil6210_priv *wil, const char *fmt, ...) +void __wil_info(struct wil6210_priv *wil, const char *fmt, ...) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_info(ndev, "%pV", &vaf); + netdev_info(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_info(&vaf); va_end(args); } void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) 
{ - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; trace_wil6210_log_dbg(&vaf); va_end(args); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 2da03d69ed42..cddd1785461e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,7 +20,6 @@ #include <linux/pci.h> #include <linux/rtnetlink.h> #include <linux/power_supply.h> - #include "wil6210.h" #include "wmi.h" #include "txrx.h" @@ -30,13 +29,13 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ -u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */ enum dbg_off_type { doff_u32 = 0, doff_x32 = 1, doff_ulong = 2, doff_io32 = 3, + doff_u8 = 4 }; /* offset to "wil" */ @@ -68,13 +67,13 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, seq_puts(s, "???\n"); } - if (vring->va && (vring->size < 1025)) { + if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { uint i; for (i = 0; i < vring->size; i++) { volatile struct vring_tx_desc *d = &vring->va[i].tx; - if ((i % 64) == 0 && (i != 0)) + if ((i % 128) == 0 && (i != 0)) seq_puts(s, "\n"); seq_printf(s, "%c", (d->dma.status & BIT(0)) ? _s : (vring->ctx[i].skb ? _h : 'h')); @@ -170,6 +169,8 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, int rsize; uint i; + wil_halp_vote(wil); + wil_memcpy_fromio_32(&r, off, sizeof(r)); wil_mbox_ring_le2cpus(&r); /* @@ -235,6 +236,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, } out: seq_puts(s, "}\n"); + wil_halp_unvote(wil); } static int wil_mbox_debugfs_show(struct seq_file *s, void *data) @@ -346,6 +348,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, tbl[i].mode, dbg, base + tbl[i].off); break; + case doff_u8: + f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; default: f = ERR_PTR(-EINVAL); } @@ -356,13 +362,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, } static const struct dbg_off isr_off[] = { - {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32}, - {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32}, - {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32}, - {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32}, - {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32}, - {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32}, - {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32}, + {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32}, + {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32}, + {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32}, + {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32}, + {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32}, + {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32}, + {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32}, {}, }; @@ -382,9 +388,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, } static const struct dbg_off pseudo_isr_off[] = { - {"CAUSE", S_IRUGO, 
HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, - {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, - {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, + {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, + {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, + {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, {}, }; @@ -403,40 +409,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, } static const struct dbg_off lgc_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, {}, }; static const struct dbg_off tx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), doff_io32}, {}, }; static const struct dbg_off rx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), doff_io32}, {}, }; @@ -495,12 +501,16 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { enum { max_count = 4096 }; - struct debugfs_blob_wrapper *blob = file->private_data; + struct wil_blob_wrapper *wil_blob = file->private_data; loff_t pos = *ppos; - size_t available = blob->size; + size_t available = wil_blob->blob.size; void *buf; size_t ret; + if (test_bit(wil_status_suspending, wil_blob->wil->status) || + test_bit(wil_status_suspended, wil_blob->wil->status)) + return 0; + if (pos < 0) return -EINVAL; @@ -516,8 +526,8 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data + - pos, count); + wil_memcpy_fromio_32(buf, (const void __iomem *) + wil_blob->blob.data + pos, count); 
ret = copy_to_user(user_buf, buf, count); kfree(buf); @@ -540,9 +550,9 @@ static struct dentry *wil_debugfs_create_ioblob(const char *name, umode_t mode, struct dentry *parent, - struct debugfs_blob_wrapper *blob) + struct wil_blob_wrapper *wil_blob) { - return debugfs_create_file(name, mode, parent, blob, &fops_ioblob); + return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob); } /*---reset---*/ @@ -793,8 +803,12 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, struct wireless_dev *wdev = wil_to_wdev(wil); struct cfg80211_mgmt_tx_params params; int rc; - void *frame = kmalloc(len, GFP_KERNEL); + void *frame; + if (!len) + return -EINVAL; + + frame = kmalloc(len, GFP_KERNEL); if (!frame) return -ENOMEM; @@ -810,7 +824,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, rc = wil_cfg80211_mgmt_tx(wiphy, wdev, ¶ms, NULL); kfree(frame); - wil_info(wil, "%s() -> %d\n", __func__, rc); + wil_info(wil, "-> %d\n", rc); return len; } @@ -827,13 +841,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct wil6210_priv *wil = file->private_data; - struct wil6210_mbox_hdr_wmi *wmi; + struct wmi_cmd_hdr *wmi; void *cmd; - int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi); + int cmdlen = len - sizeof(struct wmi_cmd_hdr); u16 cmdid; int rc, rc1; - if (cmdlen <= 0) + if (cmdlen < 0) return -EINVAL; wmi = kmalloc(len, GFP_KERNEL); @@ -846,13 +860,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, return rc; } - cmd = &wmi[1]; - cmdid = le16_to_cpu(wmi->id); + cmd = (cmdlen > 0) ? &wmi[1] : NULL; + cmdid = le16_to_cpu(wmi->command_id); rc1 = wmi_send(wil, cmdid, cmd, cmdlen); kfree(wmi); - wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1); + wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1); return rc; } @@ -991,7 +1005,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) .interval_usec = 0, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_notify_req_done_event evt; } __packed reply; @@ -1011,6 +1025,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) " TSF = 0x%016llx\n" " TxMCS = %2d TxTpt = %4d\n" " SQI = %4d\n" + " RSSI = %4d\n" " Status = 0x%08x %s\n" " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" " Goodput(rx:tx) %4d:%4d\n" @@ -1020,6 +1035,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) le16_to_cpu(reply.evt.bf_mcs), le32_to_cpu(reply.evt.tx_tpt), reply.evt.sqi, + reply.evt.rssi, status, wil_bfstatus_str(status), le16_to_cpu(reply.evt.my_rx_sector), le16_to_cpu(reply.evt.my_tx_sector), @@ -1340,6 +1356,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) r->ssn_last_drop); } +static void wil_print_rxtid_crypto(struct seq_file *s, int tid, + struct wil_tid_crypto_rx *c) +{ + int i; + + for (i = 0; i < 4; i++) { + struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; + + if (cc->key_set) + goto has_keys; + } + return; + +has_keys: + if (tid < WIL_STA_TID_NUM) + seq_printf(s, " [%2d] PN", tid); + else + seq_puts(s, " [GR] PN"); + + for (i = 0; i < 4; i++) { + struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; + + seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? 
"+" : "-", + cc->pn); + } + seq_puts(s, "\n"); +} + static int wil_sta_debugfs_show(struct seq_file *s, void *data) __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) { @@ -1349,6 +1393,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + u8 aid = 0; switch (p->status) { case wil_sta_unused: @@ -1359,26 +1404,34 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) break; case wil_sta_connected: status = "connected"; + aid = p->aid; break; } - seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); + seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { struct wil_tid_ampdu_rx *r = p->tid_rx[tid]; + struct wil_tid_crypto_rx *c = + &p->tid_crypto_rx[tid]; if (r) { - seq_printf(s, "[%2d] ", tid); + seq_printf(s, " [%2d] ", tid); wil_print_rxtid(s, r); } + + wil_print_rxtid_crypto(s, tid, c); } + wil_print_rxtid_crypto(s, WIL_STA_TID_NUM, + &p->group_crypto_rx); spin_unlock_bh(&p->tid_rx_lock); seq_printf(s, - "Rx invalid frame: non-data %lu, short %lu, large %lu\n", + "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n", p->stats.rx_non_data_frame, p->stats.rx_short_frame, - p->stats.rx_large_frame); + p->stats.rx_large_frame, + p->stats.rx_replay); seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); @@ -1404,6 +1457,222 @@ static const struct file_operations fops_sta = { .llseek = seq_lseek, }; +static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[80]; + int n; + + n = snprintf(buf, sizeof(buf), + "led_id is set to %d, echo 1 to enable, 0 to disable\n", + led_id); + + n = min_t(int, n, sizeof(buf)); + + return simple_read_from_buffer(user_buf, count, ppos, + buf, n); +} + +static ssize_t wil_write_file_led_cfg(struct file *file, + const char __user *buf_, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + int val; + int rc; + + rc = kstrtoint_from_user(buf_, count, 0, &val); + if (rc) { + wil_err(wil, "Invalid argument\n"); + return rc; + } + + wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id); + rc = wmi_led_cfg(wil, val); + if (rc) { + wil_info(wil, "%s led %d failed\n", + val ? "Enabling" : "Disabling", led_id); + return rc; + } + + return count; +} + +static const struct file_operations fops_led_cfg = { + .read = wil_read_file_led_cfg, + .write = wil_write_file_led_cfg, + .open = simple_open, +}; + +/* led_blink_time, write: + * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast> + */ +static ssize_t wil_write_led_blink_time(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + int rc; + char *kbuf = kmalloc(len + 1, GFP_KERNEL); + + if (!kbuf) + return -ENOMEM; + + rc = simple_write_to_buffer(kbuf, len, ppos, buf, len); + if (rc != len) { + kfree(kbuf); + return rc >= 0 ? 
-EIO : rc; + } + + kbuf[len] = '\0'; + rc = sscanf(kbuf, "%d %d %d %d %d %d", + &led_blink_time[WIL_LED_TIME_SLOW].on_ms, + &led_blink_time[WIL_LED_TIME_SLOW].off_ms, + &led_blink_time[WIL_LED_TIME_MED].on_ms, + &led_blink_time[WIL_LED_TIME_MED].off_ms, + &led_blink_time[WIL_LED_TIME_FAST].on_ms, + &led_blink_time[WIL_LED_TIME_FAST].off_ms); + kfree(kbuf); + + if (rc < 0) + return rc; + if (rc < 6) + return -EINVAL; + + return len; +} + +static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + static char text[400]; + + snprintf(text, sizeof(text), + "To set led blink on/off time variables write:\n" + "<blink_on_slow> <blink_off_slow> <blink_on_med> " + "<blink_off_med> <blink_on_fast> <blink_off_fast>\n" + "The current values are:\n" + "%d %d %d %d %d %d\n", + led_blink_time[WIL_LED_TIME_SLOW].on_ms, + led_blink_time[WIL_LED_TIME_SLOW].off_ms, + led_blink_time[WIL_LED_TIME_MED].on_ms, + led_blink_time[WIL_LED_TIME_MED].off_ms, + led_blink_time[WIL_LED_TIME_FAST].on_ms, + led_blink_time[WIL_LED_TIME_FAST].off_ms); + + return simple_read_from_buffer(user_buf, count, ppos, text, + sizeof(text)); +} + +static const struct file_operations fops_led_blink_time = { + .read = wil_read_led_blink_time, + .write = wil_write_led_blink_time, + .open = simple_open, +}; + +/*---------FW capabilities------------*/ +static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + + seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX, + wil->fw_capabilities); + + return 0; +} + +static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_fw_capabilities_debugfs_show, + inode->i_private); +} + +static const struct file_operations fops_fw_capabilities = { + .open = wil_fw_capabilities_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +/*---------FW version------------*/ +static int wil_fw_version_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + + if (wil->fw_version[0]) + seq_printf(s, "%s\n", wil->fw_version); + else + seq_puts(s, "N/A\n"); + + return 0; +} + +static int wil_fw_version_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_fw_version_debugfs_show, + inode->i_private); +} + +static const struct file_operations fops_fw_version = { + .open = wil_fw_version_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +/*---------suspend_stats---------*/ +static ssize_t wil_write_suspend_stats(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + + memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->suspend_stats.collection_start = ktime_get(); + + return len; +} + +static ssize_t wil_read_suspend_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + static char text[400]; + int n; + unsigned long long stats_collection_time = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.collection_start)); + + n = snprintf(text, sizeof(text), + "Suspend statistics:\n" + "successful suspends:%ld failed suspends:%ld\n" + "successful resumes:%ld failed resumes:%ld\n" + "rejected by host:%ld rejected by device:%ld\n" + "total suspend time:%lld min suspend time:%lld\n" + "max 
suspend time:%lld stats collection time: %lld\n", + wil->suspend_stats.successful_suspends, + wil->suspend_stats.failed_suspends, + wil->suspend_stats.successful_resumes, + wil->suspend_stats.failed_resumes, + wil->suspend_stats.rejected_by_host, + wil->suspend_stats.rejected_by_device, + wil->suspend_stats.total_suspend_time, + wil->suspend_stats.min_suspend_time, + wil->suspend_stats.max_suspend_time, + stats_collection_time); + + n = min_t(int, n, sizeof(text)); + + return simple_read_from_buffer(user_buf, count, ppos, text, n); +} + +static const struct file_operations fops_suspend_stats = { + .read = wil_read_suspend_stats, + .write = wil_write_suspend_stats, + .open = simple_open, +}; + /*----------------*/ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, struct dentry *dbg) @@ -1412,16 +1681,18 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, char name[32]; for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { - struct debugfs_blob_wrapper *blob = &wil->blobs[i]; + struct wil_blob_wrapper *wil_blob = &wil->blobs[i]; + struct debugfs_blob_wrapper *blob = &wil_blob->blob; const struct fw_map *map = &fw_mapping[i]; if (!map->name) continue; + wil_blob->wil = wil; blob->data = (void * __force)wil->csr + HOSTADDR(map->host); blob->size = map->to - map->from; snprintf(name, sizeof(name), "blob_%s", map->name); - wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob); + wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob); } } @@ -1431,25 +1702,30 @@ static const struct { umode_t mode; const struct file_operations *fops; } dbg_files[] = { - {"mbox", S_IRUGO, &fops_mbox}, - {"vrings", S_IRUGO, &fops_vring}, - {"stations", S_IRUGO, &fops_sta}, - {"desc", S_IRUGO, &fops_txdesc}, - {"bf", S_IRUGO, &fops_bf}, - {"ssid", S_IRUGO | S_IWUSR, &fops_ssid}, - {"mem_val", S_IRUGO, &fops_memread}, - {"reset", S_IWUSR, &fops_reset}, - {"rxon", S_IWUSR, &fops_rxon}, - {"tx_mgmt", S_IWUSR, &fops_txmgmt}, - {"wmi_send", S_IWUSR, &fops_wmi}, - {"back", S_IRUGO | S_IWUSR, &fops_back}, - {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg}, - {"pmcdata", S_IRUGO, &fops_pmcdata}, - {"temp", S_IRUGO, &fops_temp}, - {"freq", S_IRUGO, &fops_freq}, - {"link", S_IRUGO, &fops_link}, - {"info", S_IRUGO, &fops_info}, - {"recovery", S_IRUGO | S_IWUSR, &fops_recovery}, + {"mbox", 0444, &fops_mbox}, + {"vrings", 0444, &fops_vring}, + {"stations", 0444, &fops_sta}, + {"desc", 0444, &fops_txdesc}, + {"bf", 0444, &fops_bf}, + {"ssid", 0644, &fops_ssid}, + {"mem_val", 0644, &fops_memread}, + {"reset", 0244, &fops_reset}, + {"rxon", 0244, &fops_rxon}, + {"tx_mgmt", 0244, &fops_txmgmt}, + {"wmi_send", 0244, &fops_wmi}, + {"back", 0644, &fops_back}, + {"pmccfg", 0644, &fops_pmccfg}, + {"pmcdata", 0444, &fops_pmcdata}, + {"temp", 0444, &fops_temp}, + {"freq", 0444, &fops_freq}, + {"link", 0444, &fops_link}, + {"info", 0444, &fops_info}, + {"recovery", 0644, &fops_recovery}, + {"led_cfg", 0644, &fops_led_cfg}, + {"led_blink_time", 0644, &fops_led_blink_time}, + {"fw_capabilities", 0444, &fops_fw_capabilities}, + {"fw_version", 0444, &fops_fw_version}, + {"suspend_stats", 0644, &fops_suspend_stats}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -1488,29 +1764,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, /* fields in struct wil6210_priv */ static const struct dbg_off dbg_wil_off[] = { - WIL_FIELD(privacy, S_IRUGO, doff_u32), - WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong), - WIL_FIELD(fw_version, S_IRUGO, doff_u32), - WIL_FIELD(hw_version, S_IRUGO, doff_x32), - 
WIL_FIELD(recovery_count, S_IRUGO, doff_u32), - WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), + WIL_FIELD(privacy, 0444, doff_u32), + WIL_FIELD(status[0], 0644, doff_ulong), + WIL_FIELD(hw_version, 0444, doff_x32), + WIL_FIELD(recovery_count, 0444, doff_u32), + WIL_FIELD(ap_isolate, 0444, doff_u32), + WIL_FIELD(discovery_mode, 0644, doff_u8), + WIL_FIELD(chip_revision, 0444, doff_u8), + WIL_FIELD(abft_len, 0644, doff_u8), + WIL_FIELD(wakeup_trigger, 0644, doff_u8), + WIL_FIELD(vring_idle_trsh, 0644, doff_u32), {}, }; static const struct dbg_off dbg_wil_regs[] = { - {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), + {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), doff_io32}, - {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, + {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, {}, }; /* static parameters */ static const struct dbg_off dbg_statics[] = { - {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, - {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, - {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh, - doff_u32}, + {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, + {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, + {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, + {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; @@ -1536,6 +1815,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil->suspend_stats.collection_start = ktime_get(); + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 7053b62ca8d3..adcfef4dabf7 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev) mutex_lock(&wil->mutex); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_begin\n"); return 0; } @@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_complete\n"); mutex_unlock(&wil->mutex); } @@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, u32 tx_itr_en, tx_itr_val = 0; u32 rx_itr_en, rx_itr_val = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_get_coalesce\n"); tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) @@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__, + wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c new file mode 100644 index 000000000000..5906b90b337d --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ftm.c @@ -0,0 +1,957 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/etherdevice.h> +#include <net/netlink.h> +#include "wil6210.h" +#include "ftm.h" +#include "wmi.h" + +/* FTM session ID we use with FW */ +#define WIL_FTM_FW_SESSION_ID 1 + +/* fixed spare allocation we reserve in NL messages we allocate */ +#define WIL_FTM_NL_EXTRA_ALLOC 32 + +/* approx maximum length for FTM_MEAS_RESULT NL80211 event */ +#define WIL_FTM_MEAS_RESULT_MAX_LENGTH 2048 + +/* timeout for waiting for standalone AOA measurement, milliseconds */ +#define WIL_AOA_MEASUREMENT_TIMEOUT 1000 + +/* maximum number of allowed FTM measurements per burst */ +#define WIL_FTM_MAX_MEAS_PER_BURST 31 + +/* initial token to use on non-secure FTM measurement */ +#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2 + +/* maximum AOA burst period, limited by FW */ +#define WIL_AOA_MAX_BURST_PERIOD 255 + +#define WIL_TOF_FTM_MAX_LCI_LENGTH (240) +#define WIL_TOF_FTM_MAX_LCR_LENGTH (240) + +static const struct +nla_policy wil_nl80211_loc_policy[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE] = { .type = NLA_U64 }, + [QCA_WLAN_VENDOR_ATTR_LOC_CAPA] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE] = { .type = NLA_FLAG }, + [QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_AOA_TYPE] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_FREQ] = { .type = NLA_U32 }, +}; + +static const struct +nla_policy wil_nl80211_ftm_peer_policy[ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] = { .len = ETH_ALEN }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD] = { .type = NLA_U16 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ] = { .type = NLA_U32 }, +}; + +static const struct +nla_policy wil_nl80211_ftm_meas_param_policy[ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD] = { .type = NLA_U16 }, +}; + +static u8 wil_ftm_get_channel(struct wil6210_priv *wil, + const u8 *mac_addr, u32 freq) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + struct cfg80211_bss *bss; + struct ieee80211_channel *chan; + 
u8 channel; + + if (freq) { + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) { + wil_err(wil, "invalid freq: %d\n", freq); + return 0; + } + channel = chan->hw_value; + } else { + bss = cfg80211_get_bss(wiphy, NULL, mac_addr, + NULL, 0, IEEE80211_BSS_TYPE_ANY, + IEEE80211_PRIVACY_ANY); + if (!bss) { + wil_err(wil, "Unable to find BSS\n"); + return 0; + } + channel = bss->channel->hw_value; + cfg80211_put_bss(wiphy, bss); + } + + wil_dbg_misc(wil, "target %pM at channel %d\n", mac_addr, channel); + return channel; +} + +static int wil_ftm_parse_meas_params(struct wil6210_priv *wil, + struct nlattr *attr, + struct wil_ftm_meas_params *params) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1]; + int rc; + + if (!attr) { + /* temporary defaults for one-shot measurement */ + params->meas_per_burst = 1; + params->burst_period = 5; /* 500 milliseconds */ + return 0; + } + rc = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX, + attr, wil_nl80211_ftm_meas_param_policy); + if (rc) { + wil_err(wil, "invalid measurement params\n"); + return rc; + } + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]) + params->meas_per_burst = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]) + params->num_of_bursts_exp = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]) + params->burst_duration = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]) + params->burst_period = nla_get_u16( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]); + return 0; +} + +static int wil_ftm_validate_meas_params(struct wil6210_priv *wil, + struct wil_ftm_meas_params *params) +{ + /* temporary allow only single-burst */ + if (params->meas_per_burst > WIL_FTM_MAX_MEAS_PER_BURST || + params->num_of_bursts_exp != 0) { + wil_err(wil, "invalid measurement params\n"); + return -EINVAL; + } + + return 0; +} + +static int wil_ftm_append_meas_params(struct wil6210_priv *wil, + struct sk_buff *msg, + struct wil_ftm_meas_params *params) +{ + struct nlattr *nl_p; + + nl_p = nla_nest_start( + msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS); + if (!nl_p) + goto out_put_failure; + if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + params->meas_per_burst) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + params->num_of_bursts_exp) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + params->burst_duration) || + nla_put_u16(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + params->burst_period)) + goto out_put_failure; + nla_nest_end(msg, nl_p); + return 0; +out_put_failure: + return -ENOBUFS; +} + +static int wil_ftm_append_peer_meas_res(struct wil6210_priv *wil, + struct sk_buff *msg, + struct wil_ftm_peer_meas_res *res) +{ + struct nlattr *nl_mres, *nl_f; + int i; + + if (nla_put(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR, + ETH_ALEN, res->mac_addr) || + nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS, + res->flags) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS, + res->status)) + goto out_put_failure; + if (res->status == QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED && + nla_put_u8(msg, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS, + res->value_seconds)) + goto out_put_failure; + if (res->has_params && + wil_ftm_append_meas_params(wil, msg, &res->params)) + goto out_put_failure; + nl_mres = nla_nest_start(msg, 
QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS); + if (!nl_mres) + goto out_put_failure; + for (i = 0; i < res->n_meas; i++) { + nl_f = nla_nest_start(msg, i); + if (!nl_f) + goto out_put_failure; + if (nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + res->meas[i].t1) || + nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + res->meas[i].t2) || + nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + res->meas[i].t3) || + nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + res->meas[i].t4)) + goto out_put_failure; + nla_nest_end(msg, nl_f); + } + nla_nest_end(msg, nl_mres); + return 0; +out_put_failure: + wil_err(wil, "fail to append peer result\n"); + return -ENOBUFS; +} + +static void wil_ftm_send_meas_result(struct wil6210_priv *wil, + struct wil_ftm_peer_meas_res *res) +{ + struct sk_buff *vendor_event = NULL; + struct nlattr *nl_res; + int rc = 0; + + wil_dbg_misc(wil, "sending %d results for peer %pM\n", + res->n_meas, res->mac_addr); + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + WIL_FTM_MEAS_RESULT_MAX_LENGTH, + QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX, + GFP_KERNEL); + if (!vendor_event) { + wil_err(wil, "fail to allocate measurement result\n"); + rc = -ENOMEM; + goto out; + } + + if (nla_put_u64( + vendor_event, QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE, + wil->ftm.session_cookie)) { + rc = -ENOBUFS; + goto out; + } + + nl_res = nla_nest_start(vendor_event, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS); + if (!nl_res) { + rc = -ENOBUFS; + goto out; + } + + rc = wil_ftm_append_peer_meas_res(wil, vendor_event, res); + if (rc) + goto out; + + nla_nest_end(vendor_event, nl_res); + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + vendor_event = NULL; +out: + if (vendor_event) + kfree_skb(vendor_event); + if (rc) + wil_err(wil, "send peer result failed, err %d\n", rc); +} + +static void wil_ftm_send_peer_res(struct wil6210_priv *wil) +{ + if (!wil->ftm.has_ftm_res || !wil->ftm.ftm_res) + return; + + wil_ftm_send_meas_result(wil, wil->ftm.ftm_res); + wil->ftm.has_ftm_res = 0; + wil->ftm.ftm_res->n_meas = 0; +} + +static void wil_aoa_measurement_timeout(struct work_struct *work) +{ + struct wil_ftm_priv *ftm = container_of(work, struct wil_ftm_priv, + aoa_timeout_work); + struct wil6210_priv *wil = container_of(ftm, struct wil6210_priv, ftm); + struct wil_aoa_meas_result res; + + wil_dbg_misc(wil, "AOA measurement timeout\n"); + + memset(&res, 0, sizeof(res)); + ether_addr_copy(res.mac_addr, wil->ftm.aoa_peer_mac_addr); + res.type = wil->ftm.aoa_type; + res.status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED; + wil_aoa_cfg80211_meas_result(wil, &res); +} + +static int +wil_ftm_cfg80211_start_session(struct wil6210_priv *wil, + struct wil_ftm_session_request *request) +{ + int rc = 0; + bool has_lci = false, has_lcr = false; + u8 max_meas = 0, channel, *ptr; + u32 i, cmd_len; + struct wmi_tof_session_start_cmd *cmd; + + mutex_lock(&wil->ftm.lock); + if (wil->ftm.session_started || wil->ftm.aoa_started) { + wil_err(wil, "FTM or AOA session already running\n"); + rc = -EAGAIN; + goto out; + } + + for (i = 0; i < request->n_peers; i++) { + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI) + has_lci = true; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR) + has_lcr = true; + max_meas = max(max_meas, + request->peers[i].params.meas_per_burst); + } + + wil->ftm.ftm_res = kzalloc(sizeof(*wil->ftm.ftm_res) + + max_meas * sizeof(struct wil_ftm_peer_meas) + + (has_lci ? 
WIL_TOF_FTM_MAX_LCI_LENGTH : 0) + + (has_lcr ? WIL_TOF_FTM_MAX_LCR_LENGTH : 0), GFP_KERNEL); + if (!wil->ftm.ftm_res) { + rc = -ENOMEM; + goto out; + } + ptr = (u8 *)wil->ftm.ftm_res; + ptr += sizeof(struct wil_ftm_peer_meas_res) + + max_meas * sizeof(struct wil_ftm_peer_meas); + if (has_lci) { + wil->ftm.ftm_res->lci = ptr; + ptr += WIL_TOF_FTM_MAX_LCI_LENGTH; + } + if (has_lcr) + wil->ftm.ftm_res->lcr = ptr; + wil->ftm.max_ftm_meas = max_meas; + + cmd_len = sizeof(struct wmi_tof_session_start_cmd) + + request->n_peers * sizeof(struct wmi_ftm_dest_info); + cmd = kzalloc(cmd_len, GFP_KERNEL); + if (!cmd) { + rc = -ENOMEM; + goto out_ftm_res; + } + + cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID); + cmd->aoa_type = request->aoa_type; + cmd->num_of_dest = cpu_to_le16(request->n_peers); + for (i = 0; i < request->n_peers; i++) { + ether_addr_copy(cmd->ftm_dest_info[i].dst_mac, + request->peers[i].mac_addr); + channel = wil_ftm_get_channel(wil, request->peers[i].mac_addr, + request->peers[i].freq); + if (!channel) { + wil_err(wil, "can't find FTM target at index %d\n", i); + rc = -EINVAL; + goto out_cmd; + } + cmd->ftm_dest_info[i].channel = channel - 1; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE) { + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_SECURED; + cmd->ftm_dest_info[i].initial_token = + request->peers[i].secure_token_id; + } else { + cmd->ftm_dest_info[i].initial_token = + WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN; + } + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_ASAP; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_LCI_REQ; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_LCR_REQ; + cmd->ftm_dest_info[i].num_of_ftm_per_burst = + request->peers[i].params.meas_per_burst; + cmd->ftm_dest_info[i].num_of_bursts_exp = + request->peers[i].params.num_of_bursts_exp; + cmd->ftm_dest_info[i].burst_duration = + request->peers[i].params.burst_duration; + cmd->ftm_dest_info[i].burst_period = + cpu_to_le16(request->peers[i].params.burst_period); + cmd->ftm_dest_info[i].num_burst_per_aoa_meas = + request->peers[i].aoa_burst_period; + } + + rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len); + + if (!rc) { + wil->ftm.session_cookie = request->session_cookie; + wil->ftm.session_started = 1; + } +out_cmd: + kfree(cmd); +out_ftm_res: + if (rc) { + kfree(wil->ftm.ftm_res); + wil->ftm.ftm_res = NULL; + } +out: + mutex_unlock(&wil->ftm.lock); + return rc; +} + +static void +wil_ftm_cfg80211_session_ended(struct wil6210_priv *wil, u32 status) +{ + struct sk_buff *vendor_event = NULL; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.session_started) { + wil_dbg_misc(wil, "FTM session not started, ignoring event\n"); + goto out; + } + + /* finish the session */ + wil_dbg_misc(wil, "finishing FTM session\n"); + + /* send left-over results if any */ + wil_ftm_send_peer_res(wil); + + wil->ftm.session_started = 0; + kfree(wil->ftm.ftm_res); + wil->ftm.ftm_res = NULL; + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + WIL_FTM_NL_EXTRA_ALLOC, + QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX, + GFP_KERNEL); + if (!vendor_event) + goto out; + + if (nla_put_u64(vendor_event, + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE, + wil->ftm.session_cookie) || 
+ nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, status)) { + wil_err(wil, "failed to fill session done event\n"); + goto out; + } + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + vendor_event = NULL; +out: + kfree_skb(vendor_event); + mutex_unlock(&wil->ftm.lock); +} + +static void wil_aoa_timer_fn(ulong x) +{ + struct wil6210_priv *wil = (void *)x; + + wil_dbg_misc(wil, "AOA timer\n"); + schedule_work(&wil->ftm.aoa_timeout_work); +} + +static int +wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil, + struct wil_aoa_meas_request *request) +{ + int rc = 0; + struct wmi_aoa_meas_cmd cmd; + u8 channel; + + mutex_lock(&wil->ftm.lock); + + if (wil->ftm.aoa_started || wil->ftm.session_started) { + wil_err(wil, "AOA or FTM measurement already running\n"); + rc = -EAGAIN; + goto out; + } + if (request->type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) { + wil_err(wil, "invalid AOA type: %d\n", request->type); + rc = -EINVAL; + goto out; + } + + channel = wil_ftm_get_channel(wil, request->mac_addr, request->freq); + if (!channel) { + rc = -EINVAL; + goto out; + } + + memset(&cmd, 0, sizeof(cmd)); + ether_addr_copy(cmd.mac_addr, request->mac_addr); + cmd.channel = channel - 1; + cmd.aoa_meas_type = request->type; + + rc = wmi_send(wil, WMI_AOA_MEAS_CMDID, &cmd, sizeof(cmd)); + if (rc) + goto out; + + ether_addr_copy(wil->ftm.aoa_peer_mac_addr, request->mac_addr); + mod_timer(&wil->ftm.aoa_timer, + jiffies + msecs_to_jiffies(WIL_AOA_MEASUREMENT_TIMEOUT)); + wil->ftm.aoa_started = 1; +out: + mutex_unlock(&wil->ftm.lock); + return rc; +} + +void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil, + struct wil_aoa_meas_result *result) +{ + struct sk_buff *vendor_event = NULL; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.aoa_started && !wil->ftm.session_started) { + wil_info(wil, "AOA/FTM not started, not sending result\n"); + goto out; + } + + wil_dbg_misc(wil, "sending AOA measurement result\n"); + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + result->length + WIL_FTM_NL_EXTRA_ALLOC, + QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX, + GFP_KERNEL); + if (!vendor_event) { + wil_err(wil, "fail to allocate measurement result\n"); + goto out; + } + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_MAC_ADDR, + ETH_ALEN, result->mac_addr) || + nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_TYPE, + result->type) || + nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, + result->status) || + nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK, + result->antenna_array_mask)) { + wil_err(wil, "failed to fill vendor event\n"); + goto out; + } + + if (result->length > 0 && + nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT, + result->length, result->data)) { + wil_err(wil, "failed to fill vendor event with AOA data\n"); + goto out; + } + + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + + del_timer_sync(&wil->ftm.aoa_timer); + wil->ftm.aoa_started = 0; +out: + mutex_unlock(&wil->ftm.lock); +} + +void wil_ftm_evt_session_ended(struct wil6210_priv *wil, + struct wmi_tof_session_end_event *evt) +{ + u32 status; + + switch (evt->status) { + case WMI_TOF_SESSION_END_NO_ERROR: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK; + break; + case WMI_TOF_SESSION_END_PARAMS_ERROR: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID; + break; + case WMI_TOF_SESSION_END_FAIL: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED; + break; + case WMI_TOF_SESSION_END_ABORTED: + status = 
QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED; + break; + default: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED; + break; + } + + wil_ftm_cfg80211_session_ended(wil, status); +} + +void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil, + struct wmi_tof_ftm_per_dest_res_event *evt) +{ + u32 i, index; + __le64 tmp = 0; + u8 n_meas; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.session_started || !wil->ftm.ftm_res) { + wil_dbg_misc(wil, "Session not running, ignoring res event\n"); + goto out; + } + if (wil->ftm.has_ftm_res && + !ether_addr_equal(evt->dst_mac, wil->ftm.ftm_res->mac_addr)) { + wil_dbg_misc(wil, + "Results for previous peer not properly terminated\n"); + wil_ftm_send_peer_res(wil); + } + + if (!wil->ftm.has_ftm_res) { + ether_addr_copy(wil->ftm.ftm_res->mac_addr, evt->dst_mac); + wil->ftm.has_ftm_res = 1; + } + + n_meas = evt->actual_ftm_per_burst; + switch (evt->status) { + case WMI_PER_DEST_RES_NO_ERROR: + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK; + break; + case WMI_PER_DEST_RES_TX_RX_FAIL: + /* FW reports corrupted results here, discard. */ + n_meas = 0; + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK; + break; + case WMI_PER_DEST_RES_PARAM_DONT_MATCH: + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID; + break; + default: + wil_err(wil, "unexpected status %d\n", evt->status); + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID; + break; + } + + for (i = 0; i < n_meas; i++) { + index = wil->ftm.ftm_res->n_meas; + if (index >= wil->ftm.max_ftm_meas) { + wil_dbg_misc(wil, "Too many measurements, some lost\n"); + break; + } + memcpy(&tmp, evt->responder_ftm_res[i].t1, + sizeof(evt->responder_ftm_res[i].t1)); + wil->ftm.ftm_res->meas[index].t1 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t2, + sizeof(evt->responder_ftm_res[i].t2)); + wil->ftm.ftm_res->meas[index].t2 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t3, + sizeof(evt->responder_ftm_res[i].t3)); + wil->ftm.ftm_res->meas[index].t3 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t4, + sizeof(evt->responder_ftm_res[i].t4)); + wil->ftm.ftm_res->meas[index].t4 = le64_to_cpu(tmp); + wil->ftm.ftm_res->n_meas++; + } + + if (evt->flags & WMI_PER_DEST_RES_BURST_REPORT_END) + wil_ftm_send_peer_res(wil); +out: + mutex_unlock(&wil->ftm.lock); +} + +void wil_aoa_evt_meas(struct wil6210_priv *wil, + struct wmi_aoa_meas_event *evt, + int len) +{ + int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data); + struct wil_aoa_meas_result *res; + + if (data_len < 0) { + wil_err(wil, "AOA event too short (%d)\n", len); + return; + } + data_len = min_t(int, le16_to_cpu(evt->length), data_len); + + res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL); + if (!res) + return; + + ether_addr_copy(res->mac_addr, evt->mac_addr); + res->type = evt->aoa_meas_type; + res->antenna_array_mask = le32_to_cpu(evt->meas_rf_mask); + res->status = evt->meas_status; + res->length = data_len; + memcpy(res->data, evt->meas_data, data_len); + + wil_dbg_misc(wil, "AOA result status %d type %d mask %d length %d\n", + res->status, res->type, + res->antenna_array_mask, res->length); + + wil_aoa_cfg80211_meas_result(wil, res); + kfree(res); +} + +int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct sk_buff *skb; + struct nlattr *attr; + + if 
(!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + /* we should get the capabilities from the FW. for now, + * report dummy capabilities for one shot measurement + */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 128); + if (!skb) + return -ENOMEM; + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA); + if (!attr || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA) || + nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS, + 1) || + nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, 1) || + nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP, + 0) || + nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST, + 4) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES, + BIT(QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE))) { + wil_err(wil, "fail to fill get_capabilities reply\n"); + kfree_skb(skb); + return -ENOMEM; + } + nla_nest_end(skb, attr); + + return cfg80211_vendor_cmd_reply(skb); +} + +int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_ftm_session_request *request; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1]; + struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1]; + struct nlattr *peer; + int rc, n_peers = 0, index = 0, tmp; + u32 aoa_type = 0; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len, + wil_nl80211_loc_policy); + if (rc) { + wil_err(wil, "Invalid ATTR\n"); + return rc; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS]) { + wil_err(wil, "no peers specified\n"); + return -EINVAL; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]) { + wil_err(wil, "session cookie not specified\n"); + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) { + aoa_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]); + if (aoa_type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) { + wil_err(wil, "invalid AOA type: %d\n", aoa_type); + return -EINVAL; + } + } + + nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], + tmp) + n_peers++; + + if (!n_peers) { + wil_err(wil, "empty peer list\n"); + return -EINVAL; + } + + /* for now only allow measurement for a single peer */ + if (n_peers != 1) { + wil_err(wil, "only single peer allowed\n"); + return -EINVAL; + } + + request = kzalloc(sizeof(*request) + + n_peers * sizeof(struct wil_ftm_meas_peer_info), + GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->session_cookie = + nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]); + request->aoa_type = aoa_type; + request->n_peers = n_peers; + nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], + tmp) { + rc = nla_parse_nested(tb2, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX, + peer, wil_nl80211_ftm_peer_policy); + if (rc) { + wil_err(wil, "Invalid peer ATTR\n"); + goto out; + } + if (!tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] || + nla_len(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]) + != ETH_ALEN) { + wil_err(wil, "Peer MAC address missing or invalid\n"); + rc = -EINVAL; + goto out; + } + memcpy(request->peers[index].mac_addr, + nla_data(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]), + ETH_ALEN); + if 
(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ]) + request->peers[index].freq = nla_get_u32( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ]); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]) + request->peers[index].flags = nla_get_u32( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]) + request->peers[index].secure_token_id = nla_get_u8( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]) { + request->peers[index].aoa_burst_period = nla_get_u16( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]); + if (request->peers[index].aoa_burst_period > + WIL_AOA_MAX_BURST_PERIOD) { + wil_err(wil, "Invalid AOA burst period at index: %d\n", + index); + rc = -EINVAL; + goto out; + } + } + + rc = wil_ftm_parse_meas_params( + wil, + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS], + &request->peers[index].params); + if (!rc) + rc = wil_ftm_validate_meas_params( + wil, &request->peers[index].params); + if (rc) + goto out; + index++; + } + + rc = wil_ftm_cfg80211_start_session(wil, request); +out: + kfree(request); + return rc; +} + +int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_aoa_meas_request request; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1]; + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + wil_dbg_misc(wil, "AOA start measurement\n"); + + rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len, + wil_nl80211_loc_policy); + if (rc) { + wil_err(wil, "Invalid ATTR\n"); + return rc; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR] || + !tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) { + wil_err(wil, "Must specify MAC address and type\n"); + return -EINVAL; + } + + memset(&request, 0, sizeof(request)); + ether_addr_copy(request.mac_addr, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR])); + request.type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]); + if (tb[QCA_WLAN_VENDOR_ATTR_FREQ]) + request.freq = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_FREQ]); + + rc = wil_aoa_cfg80211_start_measurement(wil, &request); + return rc; +} + +int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +void wil_ftm_init(struct wil6210_priv *wil) +{ + mutex_init(&wil->ftm.lock); + setup_timer(&wil->ftm.aoa_timer, wil_aoa_timer_fn, (ulong)wil); + INIT_WORK(&wil->ftm.aoa_timeout_work, wil_aoa_measurement_timeout); +} + +void wil_ftm_deinit(struct wil6210_priv *wil) +{ + del_timer_sync(&wil->ftm.aoa_timer); + cancel_work_sync(&wil->ftm.aoa_timeout_work); + kfree(wil->ftm.ftm_res); +} + +void wil_ftm_stop_operations(struct wil6210_priv *wil) +{ + wil_ftm_cfg80211_session_ended( + wil, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED); +} diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h new file 
mode 100644 index 000000000000..21923c27ec06 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ftm.h @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL6210_FTM_H__ +#define __WIL6210_FTM_H__ + +/** + * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +/** + * enum qca_vendor_attr_loc - attributes for FTM and AOA commands + * + * @QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE: Session cookie, specified in + * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. It will be provided by driver + * events and can be used to identify events targeted for this session. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA: Nested attribute containing extra + * FTM/AOA capabilities, returned by %QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA. + * see %enum qca_wlan_vendor_attr_loc_capa. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS: array of nested attributes + * containing information about each peer in measurement session + * request. See %enum qca_wlan_vendor_attr_peer_info for supported + * attributes for each peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RESULTS: nested attribute containing + * measurement results for a peer. reported by the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event. + * See %enum qca_wlan_vendor_attr_peer_result for list of supported + * attributes. + * @QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE: flag attribute for + * enabling or disabling responder functionality. + * @QCA_WLAN_VENDOR_ATTR_FTM_LCI: used in the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to + * specify the LCI report that will be sent by the responder during + * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0, + * 9.4.2.22.10 + * @QCA_WLAN_VENDOR_ATTR_FTM_LCR: provided with the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to + * specify the location civic report that will be sent by the responder during + * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0, + * 9.4.2.22.13 + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS: session/measurement completion + * status code, reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE + * and %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN: initial dialog token used + * by responder (0 if not specified) + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE: AOA measurement type. Requested in + * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS and optionally in + * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if AOA measurements + * are needed as part of an FTM session. 
+ * Reported by QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT. + * See enum qca_wlan_vendor_attr_aoa_type. + * @QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK: bit mask indicating + * which antenna arrays were used in location measurement. + * Reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and + * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT: AOA measurement data. + * Its contents depends on the AOA type and antenna array mask: + * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values, + * phase of the strongest CIR path for each antenna in the measured + * array(s). + * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16 + * values, phase and amplitude of the strongest CIR path for each + * antenna in the measured array(s) + * @QCA_WLAN_VENDOR_ATTR_FREQ: Frequency where peer is listening, in MHz. + * Unsigned 32 bit value. + */ +enum qca_wlan_vendor_attr_loc { + /* we reuse these attributes */ + QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6, + QCA_WLAN_VENDOR_ATTR_PAD = 13, + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17, + QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18, + QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19, + QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21, + QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23, + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24, + QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25, + QCA_WLAN_VENDOR_ATTR_FREQ = 28, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOC_MAX = QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_loc_capa - indoor location capabilities + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: various flags. See + * %enum qca_wlan_vendor_attr_loc_capa_flags + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number + * of measurement sessions that can run concurrently. + * Default is one session (no session concurrency) + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique + * peers that are supported in running sessions. For example, + * if the value is 8 and maximum number of sessions is 2, you can + * have one session with 8 unique peers, or 2 sessions with 4 unique + * peers each, and so on. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number + * of bursts per peer, as an exponent (2^value). Default is 0, + * meaning no multi-burst support. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number + * of measurement exchanges allowed in a single burst + * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement + * types. A bit mask (unsigned 32 bit value), each bit corresponds + * to an AOA type as defined by %enum qca_vendor_attr_aoa_type. 
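The capability attributes documented above are what wil_ftm_get_capabilities() nests under QCA_WLAN_VENDOR_ATTR_LOC_CAPA in its reply. As a minimal userspace sketch (not part of the patch; it assumes libnl-3 and that capa[] was already filled by nla_parse_nested() on that nest, using the attribute IDs and flag bits defined just below), a caller could test for initiator support like this:

#include <stdbool.h>
#include <stdint.h>
#include <netlink/attr.h>

/* capa[] is indexed by enum qca_wlan_vendor_attr_loc_capa */
static bool loc_ftm_initiator_supported(struct nlattr **capa)
{
	uint32_t flags;

	if (!capa[QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS])
		return false;
	flags = nla_get_u32(capa[QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS]);
	return flags & QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR;
}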
+ */ +enum qca_wlan_vendor_attr_loc_capa { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX = + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver + * can be configured as an FTM responder (for example, an AP that + * services FTM requests). %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * will be supported if set. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver + * can run FTM sessions. %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION + * will be supported if set. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder + * supports immediate (ASAP) response. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone + * AOA measurement using %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports + * requesting AOA measurements as part of an FTM session. + */ +enum qca_wlan_vendor_attr_loc_capa_flags { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4, +}; + +/** + * enum qca_wlan_vendor_attr_peer_info: information about + * a single peer in a measurement session. + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related + * to measurement. See %enum qca_wlan_vendor_attr_ftm_peer_meas_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of + * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.167. See %enum qca_wlan_vendor_attr_ftm_meas_param for + * list of supported attributes. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for + * secure measurement + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA + * measurement every _value_ bursts. If 0 or not specified, + * AOA measurements will be disabled for this peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ: Frequency in MHz where + * peer is listening. Optional; if not specified, use the + * entry from the kernel scan results cache. 
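A minimal sketch of the other direction (not part of the patch; it assumes libnl-3, the attribute IDs defined below, arbitrary measurement values, and that the enclosing NL80211_CMD_VENDOR message with the session cookie is built elsewhere): this is the nested peer layout that wil_ftm_start_session() above walks with nla_for_each_nested() and nla_parse_nested().

#include <stdint.h>
#include <netlink/attr.h>
#include <netlink/msg.h>

/* add one peer entry under QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS */
static int put_ftm_peer(struct nl_msg *msg, const uint8_t mac[6], uint32_t freq_mhz)
{
	struct nlattr *peers, *peer, *params;

	peers = nla_nest_start(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS);
	peer = peers ? nla_nest_start(msg, 1 /* array index, not checked by driver */) : NULL;
	if (!peer)
		return -1;
	if (nla_put(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR, 6, mac) ||
	    nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ, freq_mhz) ||
	    nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS,
			QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP))
		return -1;
	params = nla_nest_start(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS);
	if (!params)
		return -1;
	if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, 4) ||
	    nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, 0) ||
	    nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, 15) ||
	    nla_put_u16(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, 0))
		return -1;
	nla_nest_end(msg, params);
	nla_nest_end(msg, peer);
	nla_nest_end(msg, peers);
	return 0;
}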
+ */ +enum qca_wlan_vendor_attr_ftm_peer_info { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags, + * per-peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request + * immediate (ASAP) response from peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request + * LCI report from peer. The LCI report includes the absolute + * location of the peer in "official" coordinates (similar to GPS). + * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request + * Location civic report from peer. The LCR includes the location + * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0, + * 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set, + * request a secure measurement. + * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided. + */ +enum qca_wlan_vendor_attr_ftm_peer_meas_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements + * to perform in a single burst. + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to + * perform, specified as an exponent (2^value) + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst + * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167 + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts, + * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must + * be larger than %QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION + */ +enum qca_wlan_vendor_attr_ftm_meas_param { + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported + * peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement + * request for this peer. + * See %enum qca_wlan_vendor_attr_ftm_peer_result_status + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related + * to measurement results for this peer. + * See %enum qca_wlan_vendor_attr_ftm_peer_result_flags + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when + * request failed and peer requested not to send an additional request + * for this number of seconds. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received + * from peer. 
In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.10 + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when + * received from peer.In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.13 + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when peer + * overridden some measurement request parameters. See + * enum qca_wlan_vendor_attr_ftm_meas_param. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement + * for this peer. Same contents as %QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement + * results. Each entry is a nested attribute defined + * by enum qca_wlan_vendor_attr_ftm_meas. + */ +enum qca_wlan_vendor_attr_ftm_peer_result { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_status + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request sent ok and results + * will be provided. Peer may have overridden some measurement parameters, + * in which case overridden parameters will be report by + * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS attribute + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable + * of performing the measurement request. No more results will be sent + * for this peer in this session. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported request + * failed, and requested not to send an additional request for number + * of seconds specified by %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS + * attribute. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation + * failed. Request was not sent over the air. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_status { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_flags : Various flags + * for measurement result, per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set, + * measurement completed for this peer. No more results will be reported + * for this peer in this session. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0, +}; + +/** + * enum qca_vendor_attr_loc_session_status: Session completion status code + * + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed + * successfully. 
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted + * by request + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request + * was invalid and was not started + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error + * and did not complete normally (for example out of resources) + * + */ +enum qca_vendor_attr_loc_session_status { + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data + * + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure(TOD) of FTM packet as + * recorded by responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival(TOA) of FTM packet at + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at + * responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded + * during this measurement exchange. Optional and will be provided if + * the hardware can measure it. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding. + */ +enum qca_wlan_vendor_attr_ftm_meas { + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_aoa_type: AOA measurement type + * + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest + * CIR (channel impulse response) path for each antenna. + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude + * of the strongest CIR path for each antenna. 
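Stepping back to the T1..T4 measurement attributes documented earlier in this header (and carried per exchange in struct wil_ftm_peer_meas further down): those four timestamps are all that is needed to turn one measurement into a range. A minimal sketch, not part of the patch; the only constant is the speed of light expressed in mm per picosecond:

#include <stdint.h>

/* (t4 - t1) is the responder's view of the whole exchange, (t3 - t2) is the
 * initiator's turnaround between receiving the FTM frame and sending the ACK;
 * their difference is the over-the-air round trip, in picoseconds.
 */
static uint64_t ftm_rtt_ps(uint64_t t1, uint64_t t2, uint64_t t3, uint64_t t4)
{
	return (t4 - t1) - (t3 - t2);
}

/* light travels roughly 0.2998 mm per picosecond; halve for one-way distance */
static uint64_t ftm_distance_mm(uint64_t rtt_ps)
{
	return rtt_ps * 2998 / 10000 / 2;
}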
+ */ +enum qca_wlan_vendor_attr_aoa_type { + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX, +}; + +/* vendor event indices, used from both cfg80211.c and ftm.c */ +enum qca_nl80211_vendor_events_index { + QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX, + QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX, + QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX, +}; + +/* measurement parameters. Specified for each peer as part + * of measurement request, or provided with measurement + * results for peer in case peer overridden parameters + */ +struct wil_ftm_meas_params { + u8 meas_per_burst; + u8 num_of_bursts_exp; + u8 burst_duration; + u16 burst_period; +}; + +/* measurement request for a single peer */ +struct wil_ftm_meas_peer_info { + u8 mac_addr[ETH_ALEN]; + u32 freq; + u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */ + struct wil_ftm_meas_params params; + u8 secure_token_id; + u16 aoa_burst_period; /* 0 if no AOA, >0 every <value> bursts */ +}; + +/* session request, passed to wil_ftm_cfg80211_start_session */ +struct wil_ftm_session_request { + u64 session_cookie; + u32 n_peers; + u32 aoa_type; /* enum qca_wlan_vendor_attr_aoa_type */ + /* keep last, variable size according to n_peers */ + struct wil_ftm_meas_peer_info peers[0]; +}; + +/* single measurement for a peer */ +struct wil_ftm_peer_meas { + u64 t1, t2, t3, t4; +}; + +/* measurement results for a single peer */ +struct wil_ftm_peer_meas_res { + u8 mac_addr[ETH_ALEN]; + u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_result_flags */ + u8 status; /* enum qca_wlan_vendor_attr_ftm_peer_result_status */ + u8 value_seconds; + bool has_params; /* true if params is valid */ + struct wil_ftm_meas_params params; /* peer overridden params */ + u8 *lci; + u8 lci_length; + u8 *lcr; + u8 lcr_length; + u32 n_meas; + /* keep last, variable size according to n_meas */ + struct wil_ftm_peer_meas meas[0]; +}; + +/* standalone AOA measurement request */ +struct wil_aoa_meas_request { + u8 mac_addr[ETH_ALEN]; + u32 freq; + u32 type; +}; + +/* AOA measurement result */ +struct wil_aoa_meas_result { + u8 mac_addr[ETH_ALEN]; + u32 type; + u32 antenna_array_mask; + u32 status; + u32 length; + /* keep last, variable size according to length */ + u8 data[0]; +}; + +/* private data related to FTM. 
Part of the wil6210_priv structure */ +struct wil_ftm_priv { + struct mutex lock; /* protects the FTM data */ + u8 session_started; + u64 session_cookie; + struct wil_ftm_peer_meas_res *ftm_res; + u8 has_ftm_res; + u32 max_ftm_meas; + + /* standalone AOA measurement */ + u8 aoa_started; + u8 aoa_peer_mac_addr[ETH_ALEN]; + u32 aoa_type; + struct timer_list aoa_timer; + struct work_struct aoa_timeout_work; +}; + +int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); + +#endif /* __WIL6210_FTM_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 82aae2d705b4..540fc20984d8 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,8 +19,9 @@ #include "wil6210.h" #include "fw.h" -MODULE_FIRMWARE(WIL_FW_NAME); -MODULE_FIRMWARE(WIL_FW2_NAME); +MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT); +MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS); +MODULE_FIRMWARE(WIL_BOARD_FILE_NAME); static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h index 7a2c6c129ad5..2f2b910501ba 100644 --- a/drivers/net/wireless/ath/wil6210/fw.h +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */ u8 data[0]; /* free-form data [data_size], see above */ } __packed; +/* FW capabilities encoded inside a comment record */ +#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba) +struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */ + /* identifies capabilities record */ + __le32 magic; + /* capabilities (variable size), see enum wmi_fw_capability */ + u8 capabilities[0]; +}; + /* perform action * data_size = @head.size - offsetof(struct wil_fw_record_action, data) */ @@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */ /* file header * First record of every file */ +/* the FW version prefix in the comment */ +#define WIL_FW_VERSION_PREFIX "FW version: " +#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1) struct wil_fw_record_file_header { __le32 signature ; /* Wilocity signature */ __le32 reserved; diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index d30657ee7e83..77d1902947e3 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -26,14 +26,17 @@ prefix_type, rowsize, \ groupsize, buf, len, ascii) -#define FW_ADDR_CHECK(ioaddr, val, msg) do { \ - ioaddr = wmi_buffer(wil, val); \ - if (!ioaddr) { \ - wil_err_fw(wil, "bad " msg ": 0x%08x\n", \ - le32_to_cpu(val)); \ - return -EINVAL; \ - } \ - } while (0) +static bool wil_fw_addr_check(struct wil6210_priv *wil, + void __iomem **ioaddr, __le32 val, + u32 size, const char *msg) +{ + *ioaddr = wmi_buffer_block(wil, val, size); + if (!(*ioaddr)) { + wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val)); + return false; + } + return true; +} /** * wil_fw_verify - verify firmware file validity @@ -118,11 +121,33 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size) return (int)dlen; } -static int fw_handle_comment(struct wil6210_priv *wil, const void *data, +static int fw_ignore_section(struct wil6210_priv *wil, const void *data, size_t size) { - wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true); + return 0; +} + +static int +fw_handle_comment(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_capabilities *rec = data; + size_t capa_size; + + if (size < sizeof(*rec) || + le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) { + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, + data, size, true); + return 0; + } + capa_size = size - offsetof(struct wil_fw_record_capabilities, + capabilities); + bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); + memcpy(wil->fw_capabilities, rec->capabilities, + min(sizeof(wil->fw_capabilities), capa_size)); + wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1, + rec->capabilities, capa_size, false); return 0; } @@ -138,7 +163,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data, return -EINVAL; } - FW_ADDR_CHECK(dst, d->addr, "address"); + if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address")) + return -EINVAL; wil_dbg_fw(wil, "write [0x%08x] <== 
%zu bytes\n", le32_to_cpu(d->addr), s); wil_memcpy_toio_32(dst, d->data, s); @@ -170,7 +196,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data, return -EINVAL; } - FW_ADDR_CHECK(dst, d->addr, "address"); + if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address")) + return -EINVAL; v = le32_to_cpu(d->value); wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n", @@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data, wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment, sizeof(d->comment), true); + if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX, + WIL_FW_VERSION_PREFIX_LEN)) + memcpy(wil->fw_version, + d->comment + WIL_FW_VERSION_PREFIX_LEN, + min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN, + sizeof(wil->fw_version) - 1)); + return 0; } @@ -219,7 +253,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data, u32 v = le32_to_cpu(block[i].value); u32 x, y; - FW_ADDR_CHECK(dst, block[i].addr, "address"); + if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address")) + return -EINVAL; x = readl(dst); y = (x & m) | (v & ~m); @@ -285,10 +320,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n", n, gw_cmd); - FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); - FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr"); - FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); - FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, + "gateway_addr_addr") || + !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0, + "gateway_value_addr") || + !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, + "gateway_cmd_addr") || + !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, + "gateway_ctrl_address")) + return -EINVAL; wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x" " cmd 0x%08x ctl 0x%08x\n", @@ -344,12 +384,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n", n, gw_cmd); - FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); + if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, + "gateway_addr_addr")) + return -EINVAL; for (k = 0; k < ARRAY_SIZE(block->value); k++) - FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k], - "gateway_value_addr"); - FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); - FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + if (!wil_fw_addr_check(wil, &gwa_val[k], + d->gateway_value_addr[k], + 0, "gateway_value_addr")) + return -EINVAL; + if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, + "gateway_cmd_addr") || + !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, + "gateway_ctrl_address")) + return -EINVAL; wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n", le32_to_cpu(d->gateway_addr_addr), @@ -383,42 +430,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, static const struct { int type; - int (*handler)(struct wil6210_priv *wil, const void *data, size_t size); + int (*load_handler)(struct wil6210_priv *wil, const void *data, + size_t size); + int (*parse_handler)(struct wil6210_priv *wil, const void *data, + size_t size); } wil_fw_handlers[] = { - {wil_fw_type_comment, fw_handle_comment}, - 
{wil_fw_type_data, fw_handle_data}, - {wil_fw_type_fill, fw_handle_fill}, + {wil_fw_type_comment, fw_handle_comment, fw_handle_comment}, + {wil_fw_type_data, fw_handle_data, fw_ignore_section}, + {wil_fw_type_fill, fw_handle_fill, fw_ignore_section}, /* wil_fw_type_action */ /* wil_fw_type_verify */ - {wil_fw_type_file_header, fw_handle_file_header}, - {wil_fw_type_direct_write, fw_handle_direct_write}, - {wil_fw_type_gateway_data, fw_handle_gateway_data}, - {wil_fw_type_gateway_data4, fw_handle_gateway_data4}, + {wil_fw_type_file_header, fw_handle_file_header, + fw_handle_file_header}, + {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section}, + {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section}, + {wil_fw_type_gateway_data4, fw_handle_gateway_data4, + fw_ignore_section}, }; static int wil_fw_handle_record(struct wil6210_priv *wil, int type, - const void *data, size_t size) + const void *data, size_t size, bool load) { int i; - for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) { + for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) if (wil_fw_handlers[i].type == type) - return wil_fw_handlers[i].handler(wil, data, size); - } + return load ? + wil_fw_handlers[i].load_handler( + wil, data, size) : + wil_fw_handlers[i].parse_handler( + wil, data, size); wil_err_fw(wil, "unknown record type: %d\n", type); return -EINVAL; } /** - * wil_fw_load - load FW into device - * - * Load the FW and uCode code and data to the corresponding device - * memory regions + * wil_fw_process - process section from FW file + * if load is true: Load the FW and uCode code and data to the + * corresponding device memory regions, + * otherwise only parse and look for capabilities * * Return error code */ -static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) +static int wil_fw_process(struct wil6210_priv *wil, const void *data, + size_t size, bool load) { int rc = 0; const struct wil_fw_record_head *hdr; @@ -437,7 +493,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) return -EINVAL; } rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type), - &hdr[1], hdr_sz); + &hdr[1], hdr_sz, load); if (rc) return rc; } @@ -456,13 +512,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) } /** - * wil_request_firmware - Request firmware and load to device + * wil_request_firmware - Request firmware * - * Request firmware image from the file and load it to device + * Request firmware image from the file + * If load is true, load firmware to device, otherwise + * only parse and extract capabilities * * Return error code */ -int wil_request_firmware(struct wil6210_priv *wil, const char *name) +int wil_request_firmware(struct wil6210_priv *wil, const char *name, + bool load) { int rc, rc1; const struct firmware *fw; @@ -471,7 +530,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name) rc = request_firmware(&fw, name, wil_to_dev(wil)); if (rc) { - wil_err_fw(wil, "Failed to load firmware %s\n", name); + wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc); return rc; } wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size); @@ -482,7 +541,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name) rc = rc1; goto out; } - rc = wil_fw_load(wil, d, rc1); + rc = wil_fw_process(wil, d, rc1, load); if (rc < 0) goto out; } @@ -491,3 +550,24 @@ out: release_firmware(fw); return rc; } + +/** + * wil_fw_verify_file_exists - checks if firmware file exist + * + * @wil: driver 
context + * @name: firmware file name + * + * return value - boolean, true for success, false for failure + */ +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, name, wil_to_dev(wil)); + if (!rc) + release_firmware(fw); + else + wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc); + return !rc; +} diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 50c136e843c4..5cf341702dc1 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -35,15 +35,19 @@ * */ -#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL) +#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL) +#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL) #define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \ BIT_DMA_EP_RX_ICR_RX_HTRSH) +#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \ + (~(BIT_DMA_EP_RX_ICR_RX_HTRSH))) #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) -#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \ - ISR_MISC_MBOX_EVT | \ - ISR_MISC_FW_ERROR) - +#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \ + ISR_MISC_MBOX_EVT | \ + ISR_MISC_FW_ERROR) +#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \ + BIT_DMA_EP_MISC_ICR_HALP) #define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \ BIT_DMA_PSEUDO_CAUSE_TX | \ BIT_DMA_PSEUDO_CAUSE_MISC)) @@ -51,6 +55,7 @@ #if defined(CONFIG_WIL6210_ISR_COR) /* configure to Clear-On-Read mode */ #define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL) +#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL) static inline void wil_icr_clear(u32 x, void __iomem *addr) { @@ -58,6 +63,7 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr) #else /* defined(CONFIG_WIL6210_ISR_COR) */ /* configure to Write-1-to-Clear mode */ #define WIL_ICR_ICC_VALUE (0UL) +#define WIL_ICR_ICC_MISC_VALUE (0UL) static inline void wil_icr_clear(u32 x, void __iomem *addr) { @@ -86,15 +92,26 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil) WIL6210_IRQ_DISABLE); } -static void wil6210_mask_irq_misc(struct wil6210_priv *wil) +static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) { + wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n", + mask_halp ? "true" : "false"); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), - WIL6210_IRQ_DISABLE); + mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP); +} + +void wil6210_mask_halp(struct wil6210_priv *wil) +{ + wil_dbg_irq(wil, "mask_halp\n"); + + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), + BIT_DMA_EP_MISC_ICR_HALP); } static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq_pseudo\n"); wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE); @@ -109,19 +126,32 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil) void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { + bool unmask_rx_htrsh = test_bit(wil_status_fwconnected, wil->status); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC), - WIL6210_IMC_RX); + unmask_rx_htrsh ? 
WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH); } -static void wil6210_unmask_irq_misc(struct wil6210_priv *wil) +static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) { + wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n", + unmask_halp ? "true" : "false"); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), - WIL6210_IMC_MISC); + unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP); +} + +static void wil6210_unmask_halp(struct wil6210_priv *wil) +{ + wil_dbg_irq(wil, "unmask_halp\n"); + + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), + BIT_DMA_EP_MISC_ICR_HALP); } static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq_pseudo\n"); set_bit(wil_status_irqen, wil->status); @@ -130,34 +160,34 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) void wil_mask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq\n"); wil6210_mask_irq_tx(wil); wil6210_mask_irq_rx(wil); - wil6210_mask_irq_misc(wil); + wil6210_mask_irq_misc(wil, true); wil6210_mask_irq_pseudo(wil); } void wil_unmask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq\n"); wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_VALUE); wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_VALUE); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC), - WIL_ICR_ICC_VALUE); + WIL_ICR_ICC_MISC_VALUE); wil6210_unmask_irq_pseudo(wil); wil6210_unmask_irq_tx(wil); wil6210_unmask_irq_rx(wil); - wil6210_unmask_irq_misc(wil); + wil6210_unmask_irq_misc(wil, true); } void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "configure_interrupt_moderation\n"); /* disable interrupt moderation for monitor * to get better timestamp precision @@ -214,7 +244,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: RX\n"); + wil_err_ratelimited(wil, "spurious IRQ: RX\n"); return IRQ_NONE; } @@ -228,11 +258,8 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) */ if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH))) { - wil_dbg_irq(wil, "RX done\n"); - - if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)) - wil_err_ratelimited(wil, - "Received \"Rx buffer is in risk of overflow\" interrupt\n"); + wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n", + isr); isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH); @@ -242,11 +269,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_rx); } else { - wil_err(wil, + wil_err_ratelimited( + wil, "Got Rx interrupt while stopping interface\n"); } } else { - wil_err(wil, "Got Rx interrupt while in reset\n"); + wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n"); } } @@ -275,7 +303,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: TX\n"); + wil_err_ratelimited(wil, "spurious IRQ: TX\n"); return IRQ_NONE; } @@ -291,12 +319,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_tx); } else { - wil_err(wil, "Got Tx interrupt while in reset\n"); + wil_err_ratelimited(wil, "Got Tx 
interrupt while in reset\n"); } } if (unlikely(isr)) - wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); + wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n", + isr); /* Tx IRQ will be enabled when NAPI processing finished */ @@ -329,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil) wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); } +static bool wil_validate_mbox_regs(struct wil6210_priv *wil) +{ + size_t min_size = sizeof(struct wil6210_mbox_hdr) + + sizeof(struct wmi_cmd_hdr); + + if (wil->mbox_ctl.rx.entry_size < min_size) { + wil_err(wil, "rx mbox entry too small (%d)\n", + wil->mbox_ctl.rx.entry_size); + return false; + } + if (wil->mbox_ctl.tx.entry_size < min_size) { + wil_err(wil, "tx mbox entry too small (%d)\n", + wil->mbox_ctl.tx.entry_size); + return false; + } + + return true; +} + static irqreturn_t wil6210_irq_misc(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -344,7 +392,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) return IRQ_NONE; } - wil6210_mask_irq_misc(wil); + wil6210_mask_irq_misc(wil, false); if (isr & ISR_MISC_FW_ERROR) { u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE); @@ -364,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) if (isr & ISR_MISC_FW_READY) { wil_dbg_irq(wil, "IRQ: FW ready\n"); wil_cache_mbox_regs(wil); - set_bit(wil_status_mbox_ready, wil->status); + if (wil_validate_mbox_regs(wil)) + set_bit(wil_status_mbox_ready, wil->status); /** * Actual FW ready indicated by the * WMI_FW_READY_EVENTID @@ -372,12 +421,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) isr &= ~ISR_MISC_FW_READY; } + if (isr & BIT_DMA_EP_MISC_ICR_HALP) { + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); + wil6210_mask_halp(wil); + isr &= ~BIT_DMA_EP_MISC_ICR_HALP; + complete(&wil->halp.comp); + } + wil->isr_misc = isr; if (isr) { return IRQ_WAKE_THREAD; } else { - wil6210_unmask_irq_misc(wil); + wil6210_unmask_irq_misc(wil, false); return IRQ_HANDLED; } } @@ -391,12 +447,18 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_FW_ERROR) { + wil->recovery_state = fw_recovery_pending; wil_fw_core_dump(wil); wil_notify_fw_error(wil); isr &= ~ISR_MISC_FW_ERROR; - wil_fw_error_recovery(wil); + if (wil->platform_ops.notify) { + wil_err(wil, "notify platform driver about FW crash"); + wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_FW_CRASH); + } else { + wil_fw_error_recovery(wil); + } } - if (isr & ISR_MISC_MBOX_EVT) { wil_dbg_irq(wil, "MBOX event\n"); wmi_recv_cmd(wil); @@ -408,7 +470,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil->isr_misc = 0; - wil6210_unmask_irq_misc(wil); + wil6210_unmask_irq_misc(wil, false); return IRQ_HANDLED; } @@ -427,6 +489,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie) wil6210_unmask_irq_pseudo(wil); + if (wil->suspend_resp_rcvd) { + wil_dbg_irq(wil, "set suspend_resp_comp to true\n"); + wil->suspend_resp_comp = true; + wake_up_interruptible(&wil->wq); + } + return IRQ_HANDLED; } @@ -463,6 +531,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) offsetof(struct RGF_ICR, ICR)); u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMV)); + + /* HALP interrupt can be unmasked when misc interrupts are + * masked + */ + if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP) + return 0; + wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n" "Rx icm:icr:imv 0x%08x 
0x%08x 0x%08x\n" "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" @@ -550,11 +625,28 @@ void wil6210_clear_irq(struct wil6210_priv *wil) wmb(); /* make sure write completed */ } +void wil6210_set_halp(struct wil6210_priv *wil) +{ + wil_dbg_irq(wil, "set_halp\n"); + + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS), + BIT_DMA_EP_MISC_ICR_HALP); +} + +void wil6210_clear_halp(struct wil6210_priv *wil) +{ + wil_dbg_irq(wil, "clear_halp\n"); + + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR), + BIT_DMA_EP_MISC_ICR_HALP); + wil6210_unmask_halp(wil); +} + int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) { int rc; - wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx"); + wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); rc = request_threaded_irq(irq, wil6210_hardirq, wil6210_thread_irq, @@ -565,7 +657,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) void wil6210_fini_irq(struct wil6210_priv *wil, int irq) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "fini_irq:\n"); wil_mask_irq(wil); free_irq(irq, wil); diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c index f7f948621951..f8d2c209482c 100644 --- a/drivers/net/wireless/ath/wil6210/ioctl.c +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,6 +24,14 @@ DUMP_PREFIX_OFFSET, 16, 1, buf, len, true) #define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg) +#define WIL_PRIV_DATA_MAX_LEN 8192 +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" + +struct wil_android_priv_data { + char *buf; + int used_len; + int total_len; +}; static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr, uint32_t size, enum wil_memio_op op) { @@ -46,7 +54,7 @@ static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr, } off = a - wil->csr; - if (size >= WIL6210_MEM_SIZE - off) { + if (size >= wil->bar_size - off) { wil_err(wil, "Requested block does not fit into memory: " "off = 0x%08x size = 0x%08x\n", off, size); return NULL; @@ -79,10 +87,12 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data) io.val = readl(a); need_copy = true; break; +#if defined(CONFIG_WIL6210_WRITE_IOCTL) case wil_mmio_write: writel(io.val, a); wmb(); /* make sure write propagated to HW */ break; +#endif default: wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); return -EINVAL; @@ -139,6 +149,7 @@ static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data) goto out_free; } break; +#if defined(CONFIG_WIL6210_WRITE_IOCTL) case wil_mmio_write: if (copy_from_user(block, io.block, io.size)) { rc = -EFAULT; @@ -148,6 +159,7 @@ static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data) wmb(); /* make sure write propagated to HW */ wil_hex_dump_ioctl("Write ", block, io.size); break; +#endif default: wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); rc = -EINVAL; @@ -159,15 +171,71 @@ out_free: return rc; } +static int wil_ioc_android(struct wil6210_priv *wil, void __user *data) +{ + int rc = 0; + char *command; + struct wil_android_priv_data priv_data; + + wil_dbg_ioctl(wil, "%s()\n", __func__); + + if (copy_from_user(&priv_data, data, sizeof(priv_data))) + 
return -EFAULT; + + if (priv_data.total_len <= 0 || + priv_data.total_len >= WIL_PRIV_DATA_MAX_LEN) { + wil_err(wil, "%s: invalid data len %d\n", + __func__, priv_data.total_len); + return -EINVAL; + } + + command = kmalloc(priv_data.total_len + 1, GFP_KERNEL); + if (!command) + return -ENOMEM; + + if (copy_from_user(command, priv_data.buf, priv_data.total_len)) { + rc = -EFAULT; + goto out_free; + } + + /* Make sure the command is NUL-terminated */ + command[priv_data.total_len] = '\0'; + + wil_dbg_ioctl(wil, "%s(command = %s)\n", __func__, command); + + /* P2P not supported, but WPS is (in AP mode). + * Ignore those in order not to block WPS functionality + * in non-P2P mode. + */ + if (strncasecmp(command, CMD_SET_AP_WPS_P2P_IE, + strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) + rc = 0; + else + rc = -ENOIOCTLCMD; + +out_free: + kfree(command); + return rc; +} int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd) { + int ret; + switch (cmd) { case WIL_IOCTL_MEMIO: - return wil_ioc_memio_dword(wil, data); + ret = wil_ioc_memio_dword(wil, data); + break; case WIL_IOCTL_MEMIO_BLOCK: - return wil_ioc_memio_block(wil, data); + ret = wil_ioc_memio_block(wil, data); + break; + case (SIOCDEVPRIVATE + 1): + ret = wil_ioc_android(wil, data); + break; default: wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd); return -ENOIOCTLCMD; } + + wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); + return ret; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c377937aae1c..b5e6711c7a13 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -23,22 +23,27 @@ #include "wmi.h" #include "boot_loader.h" -#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 -#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 +#define WAIT_FOR_HALP_VOTE_MS 100 +#define WAIT_FOR_SCAN_ABORT_MS 1000 bool debug_fw; /* = false; */ -module_param(debug_fw, bool, S_IRUGO); +module_param(debug_fw, bool, 0444); MODULE_PARM_DESC(debug_fw, " do not perform card reset. 
For FW debug"); +static u8 oob_mode; +module_param(oob_mode, byte, 0444); +MODULE_PARM_DESC(oob_mode, + " enable out of the box (OOB) mode in FW, for diagnostics and certification"); + bool no_fw_recovery; -module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); +module_param(no_fw_recovery, bool, 0644); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); /* if not set via modparam, will be set to default value of 1/8 of * rx ring size during init flow */ unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT; -module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO); +module_param(rx_ring_overflow_thrsh, ushort, 0444); MODULE_PARM_DESC(rx_ring_overflow_thrsh, " RX ring overflow threshold in descriptors."); @@ -68,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = { .get = param_get_uint, }; -module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO); +module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444); MODULE_PARM_DESC(mtu_max, " Max MTU value."); static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; @@ -97,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = { .get = param_get_uint, }; -module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO); +module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444); MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order"); -module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO); +module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444); MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order"); -module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO); +module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444); MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order"); #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ @@ -164,12 +169,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct wil_sta_info *sta = &wil->sta[cid]; might_sleep(); - wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, - sta->status); - + wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n", + cid, sta->status); + /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { - if (!from_event) - wmi_disconnect_sta(wil, sta->addr, reason_code); + if (!from_event) { + bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ? 
+ disable_ap_sme : false; + wmi_disconnect_sta(wil, sta->addr, reason_code, + true, del_sta); + } switch (wdev->iftype) { case NL80211_IFTYPE_AP: @@ -181,8 +190,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) break; } sta->status = wil_sta_unused; + sta->fst_link_loss = false; } - + /* reorder buffers */ for (i = 0; i < WIL_STA_TID_NUM; i++) { struct wil_tid_ampdu_rx *r; @@ -194,13 +204,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) spin_unlock_bh(&sta->tid_rx_lock); } + /* crypto context */ + memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); + memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); + /* release vrings */ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { if (wil->vring2cid_tid[i][0] == cid) wil_vring_fini_tx(wil, i); } + /* statistics */ memset(&sta->stats, 0, sizeof(sta->stats)); } +static bool wil_is_connected(struct wil6210_priv *wil) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + if (wil->sta[i].status == wil_sta_connected) + return true; + } + + return false; +} + static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event) { @@ -208,9 +235,12 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; + if (unlikely(!ndev)) + return; + might_sleep(); - wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, - reason_code, from_event ? "+" : "-"); + wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid, + reason_code, from_event ? "+" : "-"); /* Cases are: * - disconnect single STA, still connected @@ -240,20 +270,34 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: wil_bcast_fini(wil); - netif_tx_stop_all_queues(ndev); + wil_update_net_queues_bh(wil, NULL, true); netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); if (test_bit(wil_status_fwconnected, wil->status)) { clear_bit(wil_status_fwconnected, wil->status); cfg80211_disconnected(ndev, reason_code, - NULL, 0, false, GFP_KERNEL); + NULL, 0, + wil->locally_generated_disc, + GFP_KERNEL); + wil->locally_generated_disc = false; } else if (test_bit(wil_status_fwconnecting, wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + wil->bss = NULL; } clear_bit(wil_status_fwconnecting, wil->status); break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + if (!wil_is_connected(wil)) { + wil_update_net_queues_bh(wil, NULL, true); + clear_bit(wil_status_fwconnected, wil->status); + } else { + wil_update_net_queues_bh(wil, NULL, false); + } + break; default: break; } @@ -263,22 +307,49 @@ static void wil_disconnect_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, disconnect_worker); + struct net_device *ndev = wil_to_ndev(wil); + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_disconnect_event evt; + } __packed reply; - mutex_lock(&wil->mutex); - _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false); - mutex_unlock(&wil->mutex); + if (test_bit(wil_status_fwconnected, wil->status)) + /* connect succeeded after all */ + return; + + if (!test_bit(wil_status_fwconnecting, wil->status)) + /* already disconnected */ + return; + + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), + 
WIL6210_DISCONNECT_TO_MS); + if (rc) { + wil_err(wil, "disconnect error %d\n", rc); + return; + } + + wil_update_net_queues_bh(wil, NULL, true); + netif_carrier_off(ndev); + cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + clear_bit(wil_status_fwconnecting, wil->status); } static void wil_connect_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; + bool q; - wil_dbg_misc(wil, "Connect timeout\n"); + wil_err(wil, "Connect timeout detected, disconnect station\n"); /* reschedule to thread context - disconnect won't - * run from atomic context + * run from atomic context. + * queue on wmi_wq to prevent race with connect event. */ - schedule_work(&wil->disconnect_worker); + q = queue_work(wil->wmi_wq, &wil->disconnect_worker); + wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); } static void wil_scan_timer_fn(ulong x) @@ -307,22 +378,28 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil) void wil_set_recovery_state(struct wil6210_priv *wil, int state) { - wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__, + wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n", wil->recovery_state, state); wil->recovery_state = state; wake_up_interruptible(&wil->wq); } +bool wil_is_recovery_blocked(struct wil6210_priv *wil) +{ + return no_fw_recovery && (wil->recovery_state == fw_recovery_pending); +} + static void wil_fw_error_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "fw error worker\n"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { wil_info(wil, "No recovery - interface is down\n"); return; } @@ -384,6 +461,32 @@ static int wil_find_free_vring(struct wil6210_priv *wil) return -EINVAL; } +int wil_tx_init(struct wil6210_priv *wil, int cid) +{ + int rc = -EINVAL, ringid; + + if (cid < 0) { + wil_err(wil, "No connection pending\n"); + goto out; + } + ringid = wil_find_free_vring(wil); + if (ringid < 0) { + wil_err(wil, "No free vring found\n"); + goto out; + } + + wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", + cid, ringid); + + rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); + if (rc) + wil_err(wil, "wil_vring_init_tx for CID %d vring %d failed\n", + cid, ringid); + +out: + return rc; +} + int wil_bcast_init(struct wil6210_priv *wil) { int ri = wil->bcast_vring, rc; @@ -414,72 +517,50 @@ void wil_bcast_fini(struct wil6210_priv *wil) wil_vring_fini_tx(wil, ri); } -static void wil_connect_worker(struct work_struct *work) -{ - int rc; - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - connect_worker); - struct net_device *ndev = wil_to_ndev(wil); - - int cid = wil->pending_connect_cid; - int ringid = wil_find_free_vring(wil); - - if (cid < 0) { - wil_err(wil, "No connection pending\n"); - return; - } - - wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid); - - rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); - wil->pending_connect_cid = -1; - if (rc == 0) { - wil->sta[cid].status = wil_sta_connected; - netif_tx_wake_all_queues(ndev); - } else { - wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true); - } -} - int wil_priv_init(struct wil6210_priv *wil) { uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_init\n"); memset(wil->sta, 0, sizeof(wil->sta)); for (i = 0; i < WIL6210_MAX_CID; i++) 
spin_lock_init(&wil->sta[i].tid_rx_lock); + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) + spin_lock_init(&wil->vring_tx_data[i].lock); + mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); - mutex_init(&wil->back_rx_mutex); - mutex_init(&wil->back_tx_mutex); mutex_init(&wil->probe_client_mutex); + mutex_init(&wil->p2p_wdev_mutex); + mutex_init(&wil->halp.lock); init_completion(&wil->wmi_ready); init_completion(&wil->wmi_call); + init_completion(&wil->halp.comp); - wil->pending_connect_cid = -1; wil->bcast_vring = -1; setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); + setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn, + (ulong)wil); - INIT_WORK(&wil->connect_worker, wil_connect_worker); INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); - INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker); - INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker); INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker); + INIT_WORK(&wil->p2p.delayed_listen_work, wil_p2p_delayed_listen_work); INIT_LIST_HEAD(&wil->pending_wmi_ev); - INIT_LIST_HEAD(&wil->back_rx_pending); - INIT_LIST_HEAD(&wil->back_tx_pending); INIT_LIST_HEAD(&wil->probe_client_pending); spin_lock_init(&wil->wmi_ev_lock); + spin_lock_init(&wil->net_queue_lock); + wil->net_queue_stopped = 1; init_waitqueue_head(&wil->wq); + wil_ftm_init(wil); + wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi"); if (!wil->wmi_wq) return -EAGAIN; @@ -496,6 +577,15 @@ int wil_priv_init(struct wil6210_priv *wil) if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT) rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT; + + wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT; + + wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | + WMI_WAKEUP_TRIGGER_BCAST; + memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->vring_idle_trsh = 16; + return 0; out_wmi_wq: @@ -504,6 +594,14 @@ out_wmi_wq: return -EAGAIN; } +void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps) +{ + if (wil->platform_ops.bus_request) { + wil->bus_request_kbps = kbps; + wil->platform_ops.bus_request(wil->platform_handle, kbps); + } +} + /** * wil6210_disconnect - disconnect one connection * @wil: driver context @@ -517,7 +615,7 @@ out_wmi_wq: void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "disconnect\n"); del_timer_sync(&wil->connect_timer); _wil6210_disconnect(wil, bssid, reason_code, from_event); @@ -525,20 +623,20 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, void wil_priv_deinit(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_deinit\n"); + wil_ftm_deinit(wil); wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); + del_timer_sync(&wil->p2p.discovery_timer); cancel_work_sync(&wil->disconnect_worker); cancel_work_sync(&wil->fw_error_worker); + cancel_work_sync(&wil->p2p.discovery_expired_work); + cancel_work_sync(&wil->p2p.delayed_listen_work); mutex_lock(&wil->mutex); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); mutex_unlock(&wil->mutex); wmi_event_flush(wil); - wil_back_rx_flush(wil); - cancel_work_sync(&wil->back_rx_worker); - wil_back_tx_flush(wil); - 
cancel_work_sync(&wil->back_tx_worker); wil_probe_client_flush(wil); cancel_work_sync(&wil->probe_client_worker); destroy_workqueue(wil->wq_service); @@ -557,6 +655,27 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) wil_w(wil, RGF_USER_USER_CPU_0, 1); } +static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) +{ + wil_info(wil, "oob_mode to %d\n", mode); + switch (mode) { + case 0: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE | + BIT_USER_OOB_R2_MODE); + break; + case 1: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); + wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + break; + case 2: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); + break; + default: + wil_err(wil, "invalid oob_mode: %d\n", mode); + } +} + static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; @@ -640,6 +759,40 @@ static int wil_target_reset(struct wil6210_priv *wil) return 0; } +static void wil_collect_fw_info(struct wil6210_priv *wil) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + u8 retry_short; + int rc; + + wil_refresh_fw_capabilities(wil); + + rc = wmi_get_mgmt_retry(wil, &retry_short); + if (!rc) { + wiphy->retry_short = retry_short; + wil_dbg_misc(wil, "FW retry_short: %d\n", retry_short); + } +} + +void wil_refresh_fw_capabilities(struct wil6210_priv *wil) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + + wil->keep_radio_on_during_sleep = + wil->platform_ops.keep_radio_on_during_sleep && + wil->platform_ops.keep_radio_on_during_sleep( + wil->platform_handle) && + test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities); + + wil_info(wil, "keep_radio_on_during_sleep (%d)\n", + wil->keep_radio_on_during_sleep); + + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + else + wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; +} + void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { le32_to_cpus(&r->base); @@ -652,6 +805,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + struct wiphy *wiphy = wil_to_wiphy(wil); union { struct bl_dedicated_registers_v0 bl0; struct bl_dedicated_registers_v1 bl1; @@ -696,6 +850,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil) } ether_addr_copy(ndev->perm_addr, mac); + ether_addr_copy(wiphy->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) ether_addr_copy(ndev->dev_addr, mac); @@ -754,6 +909,72 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) return 0; } +void wil_abort_scan(struct wil6210_priv *wil, bool sync) +{ + int rc; + + lockdep_assert_held(&wil->p2p_wdev_mutex); + + if (!wil->scan_request) + return; + + wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request); + del_timer_sync(&wil->scan_timer); + mutex_unlock(&wil->p2p_wdev_mutex); + rc = wmi_abort_scan(wil); + if (!rc && sync) + wait_event_interruptible_timeout(wil->wq, !wil->scan_request, + msecs_to_jiffies( + WAIT_FOR_SCAN_ABORT_MS)); + + mutex_lock(&wil->p2p_wdev_mutex); + if (wil->scan_request) { + cfg80211_scan_done(wil->scan_request, true); + wil->scan_request = NULL; + } +} + +int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) +{ + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) { + wil_err(wil, "set_power_mgmt not supported\n"); + return -EOPNOTSUPP; + } + + rc = wmi_ps_dev_profile_cfg(wil, ps_profile); + if (rc) + wil_err(wil, 
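wil_abort_scan() in the hunk above releases p2p_wdev_mutex around the blocking wmi_abort_scan() call and re-checks wil->scan_request once the lock is retaken, since the scan may already have completed while the lock was dropped. A minimal sketch of that drop-lock/recheck pattern, using pthreads purely for illustration; ctx, pending and blocking_abort are invented names:

#include <pthread.h>
#include <stddef.h>

struct ctx {
        pthread_mutex_t lock;
        void *pending;                  /* stands in for wil->scan_request */
};

static void blocking_abort(void)        /* stands in for wmi_abort_scan() */
{
}

/* Caller holds c->lock, as wil_abort_scan() asserts with lockdep. */
static void abort_pending(struct ctx *c)
{
        if (!c->pending)
                return;

        pthread_mutex_unlock(&c->lock); /* never block with the lock held */
        blocking_abort();
        pthread_mutex_lock(&c->lock);

        if (c->pending)                 /* state may have changed meanwhile */
                c->pending = NULL;      /* complete the request exactly once */
}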
"wmi_ps_dev_profile_cfg failed (%d)\n", rc); + else + wil->ps_profile = ps_profile; + + return rc; +} + +static void wil_pre_fw_config(struct wil6210_priv *wil) +{ + /* Mark FW as loaded from host */ + wil_s(wil, RGF_USER_USAGE_6, 1); + + /* clear any interrupts which on-card-firmware + * may have set + */ + wil6210_clear_irq(wil); + /* CAF_ICR - clear and mask */ + /* it is W1C, clear by writing back same value */ + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */ + wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0); + + if (wil->fw_calib_result > 0) { + __le32 val = cpu_to_le32(wil->fw_calib_result | + (CALIB_RESULT_SIGNATURE << 8)); + wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val); + } +} + /* * We reset all the structures, and we reset the UMAC. * After calling this routine, you're expected to reload @@ -763,7 +984,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "reset\n"); WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); @@ -782,22 +1003,35 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (wil->hw_version == HW_VER_UNKNOWN) return -ENODEV; + wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n"); + wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP | + BIT_USER_SUPPORT_T_POWER_ON_0); + + if (wil->platform_ops.notify) { + rc = wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_PRE_RESET); + if (rc) + wil_err(wil, "PRE_RESET platform notify failed, rc %d\n", + rc); + } + set_bit(wil_status_resetting, wil->status); cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); - /* prevent NAPI from being scheduled */ - bitmap_zero(wil->status, wil_status_last); + /* Disable device led before reset*/ + wmi_led_cfg(wil, false); - if (wil->scan_request) { - wil_dbg_misc(wil, "Abort scan_request 0x%p\n", - wil->scan_request); - del_timer_sync(&wil->scan_timer); - cfg80211_scan_done(wil->scan_request, true); - wil->scan_request = NULL; - } + mutex_lock(&wil->p2p_wdev_mutex); + wil_abort_scan(wil, false); + mutex_unlock(&wil->p2p_wdev_mutex); + + /* prevent NAPI from being scheduled and prevent wmi commands */ + mutex_lock(&wil->wmi_mutex); + bitmap_zero(wil->status, wil_status_last); + mutex_unlock(&wil->wmi_mutex); wil_mask_irq(wil); @@ -807,7 +1041,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wmi_wq); wil_bl_crash_info(wil, false); + wil_disable_irq(wil); rc = wil_target_reset(wil); + wil6210_clear_irq(wil); + wil_enable_irq(wil); wil_rx_fini(wil); if (rc) { wil_bl_crash_info(wil, true); @@ -820,39 +1057,30 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) return rc; + wil_set_oob_mode(wil, oob_mode); if (load_fw) { - wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, - WIL_FW2_NAME); + wil_info(wil, "Use firmware <%s> + board <%s>\n", + wil->wil_fw_name, WIL_BOARD_FILE_NAME); wil_halt_cpu(wil); + memset(wil->fw_version, 0, sizeof(wil->fw_version)); /* Loading f/w from the file */ - rc = wil_request_firmware(wil, WIL_FW_NAME); + rc = wil_request_firmware(wil, wil->wil_fw_name, true); if (rc) return rc; - rc = wil_request_firmware(wil, WIL_FW2_NAME); + rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true); if (rc) return rc; - /* Mark FW as 
loaded from host */ - wil_s(wil, RGF_USER_USAGE_6, 1); - - /* clear any interrupts which on-card-firmware - * may have set - */ - wil6210_clear_irq(wil); - /* CAF_ICR - clear and mask */ - /* it is W1C, clear by writing back same value */ - wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); - + wil_pre_fw_config(wil); wil_release_cpu(wil); } /* init after reset */ - wil->pending_connect_cid = -1; wil->ap_isolate = 0; reinit_completion(&wil->wmi_ready); reinit_completion(&wil->wmi_call); + reinit_completion(&wil->halp.comp); if (load_fw) { wil_configure_interrupt_moderation(wil); @@ -860,8 +1088,37 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* we just started MAC, wait for FW ready */ rc = wil_wait_for_fw_ready(wil); - if (rc == 0) /* check FW is responsive */ - rc = wmi_echo(wil); + if (rc) + return rc; + + /* check FW is responsive */ + rc = wmi_echo(wil); + if (rc) { + wil_err(wil, "wmi_echo failed, rc %d\n", rc); + return rc; + } + + wil_collect_fw_info(wil); + + if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT) + wil_ps_update(wil, wil->ps_profile); + + if (wil->tt_data_set) + wmi_set_tt_cfg(wil, &wil->tt_data); + + if (wil->snr_thresh.enabled) + wmi_set_snr_thresh(wil, wil->snr_thresh.omni, + wil->snr_thresh.direct); + + if (wil->platform_ops.notify) { + rc = wil->platform_ops.notify(wil->platform_handle, + WIL_PLATFORM_EVT_FW_RDY); + if (rc) { + wil_err(wil, "FW_RDY notify failed, rc %d\n", + rc); + rc = 0; + } + } } return rc; @@ -931,9 +1188,7 @@ int __wil_up(struct wil6210_priv *wil) napi_enable(&wil->napi_tx); set_bit(wil_status_napi_en, wil->status); - if (wil->platform_ops.bus_request) - wil->platform_ops.bus_request(wil->platform_handle, - WIL_MAX_BUS_REQUEST_KBPS); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); return 0; } @@ -942,7 +1197,7 @@ int wil_up(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "up\n"); mutex_lock(&wil->mutex); rc = __wil_up(wil); @@ -953,13 +1208,11 @@ int wil_up(struct wil6210_priv *wil) int __wil_down(struct wil6210_priv *wil) { - int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS / - WAIT_FOR_DISCONNECT_INTERVAL_MS; - WARN_ON(!mutex_is_locked(&wil->mutex)); - if (wil->platform_ops.bus_request) - wil->platform_ops.bus_request(wil->platform_handle, 0); + set_bit(wil_status_resetting, wil->status); + + wil6210_bus_request(wil, 0); wil_disable_irq(wil); if (test_and_clear_bit(wil_status_napi_en, wil->status)) { @@ -969,31 +1222,12 @@ int __wil_down(struct wil6210_priv *wil) } wil_enable_irq(wil); - if (wil->scan_request) { - wil_dbg_misc(wil, "Abort scan_request 0x%p\n", - wil->scan_request); - del_timer_sync(&wil->scan_timer); - cfg80211_scan_done(wil->scan_request, true); - wil->scan_request = NULL; - } + wil_ftm_stop_operations(wil); - if (test_bit(wil_status_fwconnected, wil->status) || - test_bit(wil_status_fwconnecting, wil->status)) - wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); - - /* make sure wil is idle (not connected) */ - mutex_unlock(&wil->mutex); - while (iter--) { - int idle = !test_bit(wil_status_fwconnected, wil->status) && - !test_bit(wil_status_fwconnecting, wil->status); - if (idle) - break; - msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS); - } - mutex_lock(&wil->mutex); - - if (!iter) - wil_err(wil, "timeout waiting for idle FW/HW\n"); + mutex_lock(&wil->p2p_wdev_mutex); + wil_p2p_stop_radio_operations(wil); + wil_abort_scan(wil, false); + mutex_unlock(&wil->p2p_wdev_mutex); wil_reset(wil, false); @@ 
-1004,7 +1238,7 @@ int wil_down(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "down\n"); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -1029,3 +1263,54 @@ int wil_find_cid(struct wil6210_priv *wil, const u8 *mac) return rc; } + +void wil_halp_vote(struct wil6210_priv *wil) +{ + unsigned long rc; + unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS); + + mutex_lock(&wil->halp.lock); + + wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n", + wil->halp.ref_cnt); + + if (++wil->halp.ref_cnt == 1) { + reinit_completion(&wil->halp.comp); + wil6210_set_halp(wil); + rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); + if (!rc) { + wil_err(wil, "HALP vote timed out\n"); + /* Mask HALP as done in case the interrupt is raised */ + wil6210_mask_halp(wil); + } else { + wil_dbg_irq(wil, + "halp_vote: HALP vote completed after %d ms\n", + jiffies_to_msecs(to_jiffies - rc)); + } + } + + wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n", + wil->halp.ref_cnt); + + mutex_unlock(&wil->halp.lock); +} + +void wil_halp_unvote(struct wil6210_priv *wil) +{ + WARN_ON(wil->halp.ref_cnt == 0); + + mutex_lock(&wil->halp.lock); + + wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n", + wil->halp.ref_cnt); + + if (--wil->halp.ref_cnt == 0) { + wil6210_clear_halp(wil); + wil_dbg_irq(wil, "HALP unvote\n"); + } + + wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n", + wil->halp.ref_cnt); + + mutex_unlock(&wil->halp.lock); +} diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index e3b3c8fb4605..364dc3334bbf 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,18 +14,24 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
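The wil_halp_vote()/wil_halp_unvote() pair added at the end of main.c above is a mutex-protected reference count: only the 0->1 transition asserts HALP (and waits on a completion with a timeout), and only the 1->0 transition clears it. A simplified user-space analogue of the refcount part, with the completion wait omitted; halp_like, resource_on and resource_off are invented names:

#include <pthread.h>

struct halp_like {
        pthread_mutex_t lock;
        int ref_cnt;
};

static struct halp_like g_halp = { PTHREAD_MUTEX_INITIALIZER, 0 };

static void resource_on(void)  { /* driver: wil6210_set_halp() + wait */ }
static void resource_off(void) { /* driver: wil6210_clear_halp() */ }

static void vote(struct halp_like *h)
{
        pthread_mutex_lock(&h->lock);
        if (++h->ref_cnt == 1)
                resource_on();          /* first voter turns it on */
        pthread_mutex_unlock(&h->lock);
}

static void unvote(struct halp_like *h)
{
        pthread_mutex_lock(&h->lock);
        if (--h->ref_cnt == 0)
                resource_off();         /* last voter turns it off */
        pthread_mutex_unlock(&h->lock);
}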
*/ +#include <linux/moduleparam.h> #include <linux/etherdevice.h> #include "wil6210.h" #include "txrx.h" +static bool alt_ifname; /* = false; */ +module_param(alt_ifname, bool, S_IRUGO); +MODULE_PARM_DESC(alt_ifname, " use an alternate interface name wigigN instead of wlanN"); + static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "open\n"); - if (debug_fw) { - wil_err(wil, "%s() while in debug_fw mode\n", __func__); + if (debug_fw || + test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) { + wil_err(wil, "while in debug_fw or wmi_only mode\n"); return -EINVAL; } @@ -36,7 +42,7 @@ static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop\n"); return wil_down(wil); } @@ -60,11 +66,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { struct wil6210_priv *wil = ndev_to_wil(ndev); - int ret = wil_ioctl(wil, ifr->ifr_data, cmd); - - wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); - - return ret; + return wil_ioctl(wil, ifr->ifr_data, cmd); } static const struct net_device_ops wil_netdev_ops = { @@ -88,7 +90,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) done = budget - quota; if (done < budget) { - napi_complete(napi); + napi_complete_done(napi, done); wil6210_unmask_irq_rx(wil); wil_dbg_txrx(wil, "NAPI RX complete\n"); } @@ -108,8 +110,9 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) /* always process ALL Tx complete, regardless budget - it is fast */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { struct vring *vring = &wil->vring_tx[i]; + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - if (!vring->va) + if (!vring->va || !txdata->enabled) continue; tx_done += wil_tx_complete(wil, i); @@ -139,6 +142,7 @@ void *wil_if_alloc(struct device *dev) struct wil6210_priv *wil; struct ieee80211_channel *ch; int rc = 0; + const char *ifname = alt_ifname ? 
"wigig%d" : "wlan%d"; wdev = wil_cfg80211_init(dev); if (IS_ERR(wdev)) { @@ -148,8 +152,9 @@ void *wil_if_alloc(struct device *dev) wil = wdev_to_wil(wdev); wil->wdev = wdev; + wil->radio_wdev = wdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_alloc\n"); rc = wil_priv_init(wil); if (rc) { @@ -159,10 +164,10 @@ void *wil_if_alloc(struct device *dev) wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */ /* default monitor channel */ - ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels; + ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels; cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); - ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup); + ndev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, wil_dev_setup); if (!ndev) { dev_err(dev, "alloc_netdev_mqs failed\n"); rc = -ENOMEM; @@ -181,13 +186,6 @@ void *wil_if_alloc(struct device *dev) SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; - netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, - WIL6210_NAPI_BUDGET); - netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, - WIL6210_NAPI_BUDGET); - - netif_tx_stop_all_queues(ndev); - return wil; out_priv: @@ -203,7 +201,7 @@ void wil_if_free(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_free\n"); if (!ndev) return; @@ -218,25 +216,48 @@ void wil_if_free(struct wil6210_priv *wil) int wil_if_add(struct wil6210_priv *wil) { + struct wireless_dev *wdev = wil_to_wdev(wil); + struct wiphy *wiphy = wdev->wiphy; struct net_device *ndev = wil_to_ndev(wil); int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "entered"); + + strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version)); + + rc = wiphy_register(wiphy); + if (rc < 0) { + wil_err(wil, "failed to register wiphy, err %d\n", rc); + return rc; + } + + netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, + WIL6210_NAPI_BUDGET); + netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, + WIL6210_NAPI_BUDGET); + + wil_update_net_queues_bh(wil, NULL, true); rc = register_netdev(ndev); if (rc < 0) { dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc); - return rc; + goto out_wiphy; } return 0; + +out_wiphy: + wiphy_unregister(wdev->wiphy); + return rc; } void wil_if_remove(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + struct wireless_dev *wdev = wil_to_wdev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_remove\n"); unregister_netdev(ndev); + wiphy_unregister(wdev->wiphy); } diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c new file mode 100644 index 000000000000..ccd98487ea09 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" +#include "wmi.h" + +#define P2P_WILDCARD_SSID "DIRECT-" +#define P2P_DMG_SOCIAL_CHANNEL 2 +#define P2P_SEARCH_DURATION_MS 500 +#define P2P_DEFAULT_BI 100 + +static int wil_p2p_start_listen(struct wil6210_priv *wil) +{ + struct wil_p2p_info *p2p = &wil->p2p; + u8 channel = p2p->listen_chan.hw_value; + int rc; + + lockdep_assert_held(&wil->mutex); + + rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI); + if (rc) { + wil_err(wil, "wmi_p2p_cfg failed\n"); + goto out; + } + + rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + if (rc) { + wil_err(wil, "wmi_set_ssid failed\n"); + goto out_stop; + } + + rc = wmi_start_listen(wil); + if (rc) { + wil_err(wil, "wmi_start_listen failed\n"); + goto out_stop; + } + + INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired); + mod_timer(&p2p->discovery_timer, + jiffies + msecs_to_jiffies(p2p->listen_duration)); +out_stop: + if (rc) + wmi_stop_discovery(wil); + +out: + return rc; +} + +bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request) +{ + return (request->n_channels == 1) && + (request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL); +} + +void wil_p2p_discovery_timer_fn(ulong x) +{ + struct wil6210_priv *wil = (void *)x; + + wil_dbg_misc(wil, "p2p_discovery_timer_fn\n"); + + schedule_work(&wil->p2p.discovery_expired_work); +} + +int wil_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request) +{ + int rc; + struct wil_p2p_info *p2p = &wil->p2p; + + wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL); + + lockdep_assert_held(&wil->mutex); + + if (p2p->discovery_started) { + wil_err(wil, "search failed. discovery already ongoing\n"); + rc = -EBUSY; + goto out; + } + + rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); + if (rc) { + wil_err(wil, "wmi_p2p_cfg failed\n"); + goto out; + } + + rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + if (rc) { + wil_err(wil, "wmi_set_ssid failed\n"); + goto out_stop; + } + + /* Set application IE to probe request and probe response */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, + request->ie_len, request->ie); + if (rc) { + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n"); + goto out_stop; + } + + /* supplicant doesn't provide Probe Response IEs. 
As a workaround - + * re-use Probe Request IEs + */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, + request->ie_len, request->ie); + if (rc) { + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n"); + goto out_stop; + } + + rc = wmi_start_search(wil); + if (rc) { + wil_err(wil, "wmi_start_search failed\n"); + goto out_stop; + } + + p2p->discovery_started = 1; + INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired); + mod_timer(&p2p->discovery_timer, + jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS)); + +out_stop: + if (rc) + wmi_stop_discovery(wil); + +out: + return rc; +} + +int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, + unsigned int duration, struct ieee80211_channel *chan, + u64 *cookie) +{ + struct wil_p2p_info *p2p = &wil->p2p; + int rc; + + if (!chan) + return -EINVAL; + + wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration); + + mutex_lock(&wil->mutex); + + if (p2p->discovery_started) { + wil_err(wil, "discovery already ongoing\n"); + rc = -EBUSY; + goto out; + } + + memcpy(&p2p->listen_chan, chan, sizeof(*chan)); + *cookie = ++p2p->cookie; + p2p->listen_duration = duration; + + mutex_lock(&wil->p2p_wdev_mutex); + if (wil->scan_request) { + wil_dbg_misc(wil, "Delaying p2p listen until scan done\n"); + p2p->pending_listen_wdev = wdev; + p2p->discovery_started = 1; + rc = 0; + mutex_unlock(&wil->p2p_wdev_mutex); + goto out; + } + mutex_unlock(&wil->p2p_wdev_mutex); + + rc = wil_p2p_start_listen(wil); + if (rc) + goto out; + + p2p->discovery_started = 1; + wil->radio_wdev = wdev; + + cfg80211_ready_on_channel(wdev, *cookie, chan, duration, + GFP_KERNEL); + +out: + mutex_unlock(&wil->mutex); + return rc; +} + +u8 wil_p2p_stop_discovery(struct wil6210_priv *wil) +{ + struct wil_p2p_info *p2p = &wil->p2p; + u8 started = p2p->discovery_started; + + if (p2p->discovery_started) { + if (p2p->pending_listen_wdev) { + /* discovery not really started, only pending */ + p2p->pending_listen_wdev = NULL; + } else { + del_timer_sync(&p2p->discovery_timer); + wmi_stop_discovery(wil); + } + p2p->discovery_started = 0; + } + + return started; +} + +int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) +{ + struct wil_p2p_info *p2p = &wil->p2p; + u8 started; + + mutex_lock(&wil->mutex); + + if (cookie != p2p->cookie) { + wil_info(wil, "Cookie mismatch: 0x%016llx vs. 
0x%016llx\n", + p2p->cookie, cookie); + mutex_unlock(&wil->mutex); + return -ENOENT; + } + + started = wil_p2p_stop_discovery(wil); + + mutex_unlock(&wil->mutex); + + if (!started) { + wil_err(wil, "listen not started\n"); + return -ENOENT; + } + + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_remain_on_channel_expired(wil->radio_wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); + return 0; +} + +void wil_p2p_listen_expired(struct work_struct *work) +{ + struct wil_p2p_info *p2p = container_of(work, + struct wil_p2p_info, discovery_expired_work); + struct wil6210_priv *wil = container_of(p2p, + struct wil6210_priv, p2p); + u8 started; + + wil_dbg_misc(wil, "p2p_listen_expired\n"); + + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + mutex_unlock(&wil->mutex); + + if (started) { + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_remain_on_channel_expired(wil->radio_wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + wil->radio_wdev = wil->wdev; + mutex_unlock(&wil->p2p_wdev_mutex); + } + +} + +void wil_p2p_search_expired(struct work_struct *work) +{ + struct wil_p2p_info *p2p = container_of(work, + struct wil_p2p_info, discovery_expired_work); + struct wil6210_priv *wil = container_of(p2p, + struct wil6210_priv, p2p); + u8 started; + + wil_dbg_misc(wil, "p2p_search_expired\n"); + + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + mutex_unlock(&wil->mutex); + + if (started) { + mutex_lock(&wil->p2p_wdev_mutex); + if (wil->scan_request) { + cfg80211_scan_done(wil->scan_request, 0); + wil->scan_request = NULL; + wil->radio_wdev = wil->wdev; + } + mutex_unlock(&wil->p2p_wdev_mutex); + } +} + +void wil_p2p_delayed_listen_work(struct work_struct *work) +{ + struct wil_p2p_info *p2p = container_of(work, + struct wil_p2p_info, delayed_listen_work); + struct wil6210_priv *wil = container_of(p2p, + struct wil6210_priv, p2p); + int rc; + + mutex_lock(&wil->mutex); + + wil_dbg_misc(wil, "Checking delayed p2p listen\n"); + if (!p2p->discovery_started || !p2p->pending_listen_wdev) + goto out; + + mutex_lock(&wil->p2p_wdev_mutex); + if (wil->scan_request) { + /* another scan started, wait again... 
*/ + mutex_unlock(&wil->p2p_wdev_mutex); + goto out; + } + mutex_unlock(&wil->p2p_wdev_mutex); + + rc = wil_p2p_start_listen(wil); + + mutex_lock(&wil->p2p_wdev_mutex); + if (rc) { + cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + wil->radio_wdev = wil->wdev; + } else { + cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie, + &p2p->listen_chan, + p2p->listen_duration, GFP_KERNEL); + wil->radio_wdev = p2p->pending_listen_wdev; + } + p2p->pending_listen_wdev = NULL; + mutex_unlock(&wil->p2p_wdev_mutex); + +out: + mutex_unlock(&wil->mutex); +} + +void wil_p2p_stop_radio_operations(struct wil6210_priv *wil) +{ + struct wil_p2p_info *p2p = &wil->p2p; + + lockdep_assert_held(&wil->mutex); + lockdep_assert_held(&wil->p2p_wdev_mutex); + + if (wil->radio_wdev != wil->p2p_wdev) + goto out; + + if (!p2p->discovery_started) { + /* Regular scan on the p2p device */ + if (wil->scan_request && + wil->scan_request->wdev == wil->p2p_wdev) + wil_abort_scan(wil, true); + goto out; + } + + /* Search or listen on p2p device */ + mutex_unlock(&wil->p2p_wdev_mutex); + wil_p2p_stop_discovery(wil); + mutex_lock(&wil->p2p_wdev_mutex); + + if (wil->scan_request) { + /* search */ + cfg80211_scan_done(wil->scan_request, 1); + wil->scan_request = NULL; + } else { + /* listen */ + cfg80211_remain_on_channel_expired(wil->radio_wdev, + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + } + +out: + wil->radio_wdev = wil->wdev; +} diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 1a3142c332e1..5432b319a52e 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,32 +18,73 @@ #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> - +#include <linux/suspend.h> #include "wil6210.h" +#include <linux/rtnetlink.h> static bool use_msi = true; -module_param(use_msi, bool, S_IRUGO); +module_param(use_msi, bool, 0444); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +static bool ftm_mode; +module_param(ftm_mode, bool, 0444); +MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false"); + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + static void wil_set_capabilities(struct wil6210_priv *wil) { - u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + const char *wil_fw_name; + u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) & + RGF_USER_REVISION_ID_MASK); bitmap_zero(wil->hw_capabilities, hw_capability_last); - - switch (rev_id) { - case JTAG_DEV_ID_SPARROW_B0: - wil->hw_name = "Sparrow B0"; - wil->hw_version = HW_VER_SPARROW_B0; + bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); + wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT : + WIL_FW_NAME_DEFAULT; + wil->chip_revision = chip_revision; + + switch (jtag_id) { + case JTAG_DEV_ID_SPARROW: + switch (chip_revision) { + case REVISION_ID_SPARROW_D0: + wil->hw_name = "Sparrow D0"; + wil->hw_version = HW_VER_SPARROW_D0; + wil_fw_name = ftm_mode ? 
WIL_FW_NAME_FTM_SPARROW_PLUS : + WIL_FW_NAME_SPARROW_PLUS; + + if (wil_fw_verify_file_exists(wil, wil_fw_name)) + wil->wil_fw_name = wil_fw_name; + break; + case REVISION_ID_SPARROW_B0: + wil->hw_name = "Sparrow B0"; + wil->hw_version = HW_VER_SPARROW_B0; + break; + default: + wil->hw_name = "Unknown"; + wil->hw_version = HW_VER_UNKNOWN; + break; + } break; default: - wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id); + wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", + jtag_id, chip_revision); wil->hw_name = "Unknown"; wil->hw_version = HW_VER_UNKNOWN; } wil_info(wil, "Board hardware is %s\n", wil->hw_name); + + /* extract FW capabilities from file without loading the FW */ + wil_request_firmware(wil, wil->wil_fw_name, false); + wil_refresh_fw_capabilities(wil); } void wil_disable_irq(struct wil6210_priv *wil) @@ -67,10 +108,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) */ int msi_only = pdev->msi_enabled; bool _use_msi = use_msi; + bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, + wil->fw_capabilities); - wil_dbg_misc(wil, "%s()\n", __func__); - - pdev->msi_enabled = 0; + wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only); pci_set_master(pdev); @@ -91,9 +132,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) if (rc) goto stop_master; - /* need reset here to obtain MAC */ + /* need reset here to obtain MAC or in case of WMI-only FW, full reset + * and fw loading takes place + */ mutex_lock(&wil->mutex); - rc = wil_reset(wil, false); + rc = wil_reset(wil, wmi_only); mutex_unlock(&wil->mutex); if (rc) goto release_irq; @@ -113,7 +156,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) { struct pci_dev *pdev = wil->pdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_pcie_disable\n"); pci_clear_master(pdev); /* disable and release IRQ */ @@ -125,21 +168,49 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) return 0; } +static int wil_platform_rop_ramdump(void *wil_handle, void *buf, uint32_t size) +{ + struct wil6210_priv *wil = wil_handle; + + if (!wil) + return -EINVAL; + + return wil_fw_copy_crash_dump(wil, buf, size); +} + +static int wil_platform_rop_fw_recovery(void *wil_handle) +{ + struct wil6210_priv *wil = wil_handle; + + if (!wil) + return -EINVAL; + + wil_fw_error_recovery(wil); + + return 0; +} + static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct wil6210_priv *wil; struct device *dev = &pdev->dev; int rc; + const struct wil_platform_rops rops = { + .ramdump = wil_platform_rop_ramdump, + .fw_recovery = wil_platform_rop_fw_recovery, + }; + u32 bar_size = pci_resource_len(pdev, 0); /* check HW */ dev_info(&pdev->dev, WIL_NAME - " device found [%04x:%04x] (rev %x)\n", - (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); - - if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) { - dev_err(&pdev->dev, "Not " WIL_NAME "? 
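wil_set_capabilities() above now picks a chip-revision-specific firmware image and keeps the default when the preferred file is not present (wil_fw_verify_file_exists()). A hedged sketch of that selection logic; the file names and revision value below are placeholders, not the driver's real constants:

#include <stdbool.h>

#define REV_SPARROW_D0 0xd0     /* placeholder revision id for the sketch */

static const char *pick_fw_name(unsigned int revision,
                                bool (*file_exists)(const char *name))
{
        const char *name = "wil6210_default.fw";        /* hypothetical */

        if (revision == REV_SPARROW_D0 &&
            file_exists("wil6210_sparrow_plus.fw"))     /* hypothetical */
                name = "wil6210_sparrow_plus.fw";

        return name;    /* default kept when the better image is absent */
}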
" - "BAR0 size is %lu while expecting %lu\n", - (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE); + " device found [%04x:%04x] (rev %x) bar size 0x%x\n", + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision, + bar_size); + + if ((bar_size < WIL6210_MIN_MEM_SIZE) || + (bar_size > WIL6210_MAX_MEM_SIZE)) { + dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n", + bar_size); return -ENODEV; } @@ -149,12 +220,14 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "wil_if_alloc failed: %d\n", rc); return rc; } + wil->pdev = pdev; pci_set_drvdata(pdev, wil); + wil->bar_size = bar_size; /* rollback to if_free */ wil->platform_handle = - wil_platform_init(&pdev->dev, &wil->platform_ops); + wil_platform_init(&pdev->dev, &wil->platform_ops, &rops, wil); if (!wil->platform_handle) { rc = -ENODEV; wil_err(wil, "wil_platform_init failed\n"); @@ -162,8 +235,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* rollback to err_plat */ - rc = pci_enable_device(pdev); + /* device supports 48bit addresses */ + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (rc) { + dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc); + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, + "dma_set_mask_and_coherent(32) failed: %d\n", + rc); + goto err_plat; + } + } else { + wil->use_extended_dma_addr = 1; + } + + rc = pci_enable_device(pdev); + if (rc && pdev->msi_enabled == 0) { wil_err(wil, "pci_enable_device failed, retry with MSI only\n"); /* Work around for platforms that can't allocate IRQ: @@ -178,6 +266,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_plat; } /* rollback to err_disable_pdev */ + pci_set_power_state(pdev, PCI_D0); rc = pci_request_region(pdev, 0, WIL_NAME); if (rc) { @@ -212,8 +301,20 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto bus_disable; } - wil6210_debugfs_init(wil); +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + wil->pm_notify.notifier_call = wil6210_pm_notify; + rc = register_pm_notifier(&wil->pm_notify); + if (rc) + /* Do not fail the driver initialization, as suspend can + * be prevented in a later phase if needed + */ + wil_err(wil, "register_pm_notifier failed: %d\n", rc); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_debugfs_init(wil); + wil6210_sysfs_init(wil); return 0; @@ -239,9 +340,19 @@ static void wil_pcie_remove(struct pci_dev *pdev) struct wil6210_priv *wil = pci_get_drvdata(pdev); void __iomem *csr = wil->csr; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "pcie_remove\n"); + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&wil->pm_notify); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_sysfs_remove(wil); wil6210_debugfs_remove(wil); + rtnl_lock(); + wil_p2p_wdev_free(wil); + rtnl_unlock(); wil_if_remove(wil); wil_if_pcie_disable(wil); pci_iounmap(pdev, csr); @@ -268,23 +379,23 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); rc = wil_can_suspend(wil, is_runtime); if (rc) goto out; rc = wil_suspend(wil, is_runtime); - if (rc) - goto out; - - /* TODO: how do I bring card in low power state? 
*/ - - /* disable bus mastering */ - pci_clear_master(pdev); - /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */ + if (!rc) { + wil->suspend_stats.successful_suspends++; + /* If platform device supports keep_radio_on_during_sleep + * it will control PCIe master + */ + if (!wil->keep_radio_on_during_sleep) + /* disable bus mastering */ + pci_clear_master(pdev); + } out: return rc; } @@ -295,16 +406,63 @@ static int wil6210_resume(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); - - /* allow master */ - pci_set_master(pdev); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); + /* If platform device supports keep_radio_on_during_sleep it will + * control PCIe master + */ + if (!wil->keep_radio_on_during_sleep) + /* allow master */ + pci_set_master(pdev); rc = wil_resume(wil, is_runtime); - if (rc) - pci_clear_master(pdev); + if (rc) { + wil_err(wil, "device failed to resume (%d)\n", rc); + wil->suspend_stats.failed_resumes++; + if (!wil->keep_radio_on_during_sleep) + pci_clear_master(pdev); + } else { + wil->suspend_stats.successful_resumes++; + } + + return rc; +} + +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + struct wil6210_priv *wil = container_of( + notify_block, struct wil6210_priv, pm_notify); + int rc = 0; + enum wil_platform_event evt; + + wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + rc = wil_can_suspend(wil, false); + if (rc) + break; + evt = WIL_PLATFORM_EVT_PRE_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + evt = WIL_PLATFORM_EVT_POST_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + default: + wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode); + break; + } + wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 0b7ecbcac19c..8f5d1b447aaa 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,19 +15,42 @@ */ #include "wil6210.h" +#include <linux/jiffies.h> int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? 
"runtime" : "system"); + if (!(ndev->flags & IFF_UP)) { + /* can always sleep when down */ + wil_dbg_pm(wil, "Interface is down\n"); + goto out; + } + if (test_bit(wil_status_resetting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when resetting\n"); + rc = -EBUSY; + goto out; + } + if (wil->recovery_state != fw_recovery_idle) { + wil_dbg_pm(wil, "Delay suspend during recovery\n"); + rc = -EBUSY; + goto out; + } + /* interface is running */ switch (wdev->iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(wil_status_fwconnecting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when connecting\n"); + rc = -EBUSY; + goto out; + } break; /* AP-like interface - can't suspend */ default: @@ -36,19 +59,176 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) break; } - wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, +out: + wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n", is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); + if (rc) + wil->suspend_stats.rejected_by_host++; + return rc; } -int wil_suspend(struct wil6210_priv *wil, bool is_runtime) +static int wil_resume_keep_radio_on(struct wil6210_priv *wil) +{ + int rc = 0; + + /* wil_status_resuming will be cleared when getting + * WMI_TRAFFIC_RESUME_EVENTID + */ + set_bit(wil_status_resuming, wil->status); + clear_bit(wil_status_suspended, wil->status); + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + wil_unmask_irq(wil); + + wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend); + + /* Send WMI resume request to the device */ + rc = wmi_resume(wil); + if (rc) { + wil_err(wil, "device failed to resume (%d)\n", rc); + if (no_fw_recovery) + goto out; + rc = wil_down(wil); + if (rc) { + wil_err(wil, "wil_down failed (%d)\n", rc); + goto out; + } + rc = wil_up(wil); + if (rc) { + wil_err(wil, "wil_up failed (%d)\n", rc); + goto out; + } + } + + /* Wake all queues */ + if (test_bit(wil_status_fwconnected, wil->status)) + wil_update_net_queues_bh(wil, NULL, false); + +out: + if (rc) + set_bit(wil_status_suspended, wil->status); + return rc; +} + +static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) +{ + int rc = 0; + unsigned long start, data_comp_to; + + wil_dbg_pm(wil, "suspend keep radio on\n"); + + /* Prevent handling of new tx and wmi commands */ + set_bit(wil_status_suspending, wil->status); + wil_update_net_queues_bh(wil, NULL, true); + + if (!wil_is_tx_idle(wil)) { + wil_dbg_pm(wil, "Pending TX data, reject suspend\n"); + wil->suspend_stats.rejected_by_host++; + goto reject_suspend; + } + + if (!wil_is_rx_idle(wil)) { + wil_dbg_pm(wil, "Pending RX data, reject suspend\n"); + wil->suspend_stats.rejected_by_host++; + goto reject_suspend; + } + + if (!wil_is_wmi_idle(wil)) { + wil_dbg_pm(wil, "Pending WMI events, reject suspend\n"); + wil->suspend_stats.rejected_by_host++; + goto reject_suspend; + } + + /* Send WMI suspend request to the device */ + rc = wmi_suspend(wil); + if (rc) { + wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n", + rc); + goto reject_suspend; + } + + /* Wait for completion of the pending RX packets */ + start = jiffies; + data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS); + if (test_bit(wil_status_napi_en, wil->status)) { + while (!wil_is_rx_idle(wil)) { + if (time_after(jiffies, data_comp_to)) { + if (wil_is_rx_idle(wil)) + break; + wil_err(wil, + "TO waiting for idle RX, suspend failed\n"); + wil->suspend_stats.failed_suspends++; + goto resume_after_fail; + } + 
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n"); + napi_synchronize(&wil->napi_rx); + msleep(20); + } + } + + /* In case of pending WMI events, reject the suspend + * and resume the device. + * This can happen if the device sent the WMI events before + * approving the suspend. + */ + if (!wil_is_wmi_idle(wil)) { + wil_err(wil, "suspend failed due to pending WMI events\n"); + wil->suspend_stats.failed_suspends++; + goto resume_after_fail; + } + + wil_mask_irq(wil); + + /* Disable device reset on PERST */ + wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + + if (wil->platform_ops.suspend) { + rc = wil->platform_ops.suspend(wil->platform_handle, true); + if (rc) { + wil_err(wil, "platform device failed to suspend (%d)\n", + rc); + wil->suspend_stats.failed_suspends++; + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + wil_unmask_irq(wil); + goto resume_after_fail; + } + } + + /* Save the current bus request to return to the same in resume */ + wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps; + wil6210_bus_request(wil, 0); + + set_bit(wil_status_suspended, wil->status); + clear_bit(wil_status_suspending, wil->status); + + return rc; + +resume_after_fail: + set_bit(wil_status_resuming, wil->status); + clear_bit(wil_status_suspending, wil->status); + rc = wmi_resume(wil); + /* if resume succeeded, reject the suspend */ + if (!rc) { + rc = -EBUSY; + if (test_bit(wil_status_fwconnected, wil->status)) + wil_update_net_queues_bh(wil, NULL, false); + } + return rc; + +reject_suspend: + clear_bit(wil_status_suspending, wil->status); + if (test_bit(wil_status_fwconnected, wil->status)) + wil_update_net_queues_bh(wil, NULL, false); + return -EBUSY; +} + +static int wil_suspend_radio_off(struct wil6210_priv *wil) { int rc = 0; struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend radio off\n"); /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { @@ -59,12 +239,71 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) } } - if (wil->platform_ops.suspend) - rc = wil->platform_ops.suspend(wil->platform_handle); + /* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */ + wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n"); + wil_disable_irq(wil); + + if (wil->platform_ops.suspend) { + rc = wil->platform_ops.suspend(wil->platform_handle, false); + if (rc) { + wil_enable_irq(wil); + goto out; + } + } + + set_bit(wil_status_suspended, wil->status); out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + wil_dbg_pm(wil, "suspend radio off: %d\n", rc); + + return rc; +} + +static int wil_resume_radio_off(struct wil6210_priv *wil) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "Enabling PCIe IRQ\n"); + wil_enable_irq(wil); + /* if netif up, bring hardware up + * During open(), IFF_UP set after actual device method + * invocation. This prevent recursive call to wil_up() + * wil_status_suspended will be cleared in wil_reset + */ + if (ndev->flags & IFF_UP) + rc = wil_up(wil); + else + clear_bit(wil_status_suspended, wil->status); + + return rc; +} + +int wil_suspend(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + bool keep_radio_on = ndev->flags & IFF_UP && + wil->keep_radio_on_during_sleep; + + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? 
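The suspend-with-radio-on path above waits for the RX path to go idle by polling under a jiffies deadline and rejects the suspend if the deadline expires. A stand-alone sketch of that bounded-poll pattern using wall-clock time instead of jiffies; is_idle() and the timeout value are stand-ins for wil_is_rx_idle() and WIL_DATA_COMPLETION_TO_MS:

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

#define IDLE_TIMEOUT_SEC 1      /* placeholder timeout for the sketch */

static bool is_idle(void) { return true; }      /* stub for wil_is_rx_idle() */

static int wait_for_idle(void)
{
        time_t deadline = time(NULL) + IDLE_TIMEOUT_SEC;

        while (!is_idle()) {
                if (time(NULL) > deadline)
                        return -1;      /* timed out: reject the suspend */
                usleep(20 * 1000);      /* back off, like the 20 ms msleep() */
        }
        return 0;
}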
"runtime" : "system"); + + if (test_bit(wil_status_suspended, wil->status)) { + wil_dbg_pm(wil, "trying to suspend while suspended\n"); + return 0; + } + + if (!keep_radio_on) + rc = wil_suspend_radio_off(wil); + else + rc = wil_suspend_keep_radio_on(wil); + + wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); + + if (!rc) + wil->suspend_stats.suspend_start_time = ktime_get(); + return rc; } @@ -72,27 +311,40 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct net_device *ndev = wil_to_ndev(wil); + bool keep_radio_on = ndev->flags & IFF_UP && + wil->keep_radio_on_during_sleep; + unsigned long long suspend_time_usec = 0; - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); if (wil->platform_ops.resume) { - rc = wil->platform_ops.resume(wil->platform_handle); + rc = wil->platform_ops.resume(wil->platform_handle, + keep_radio_on); if (rc) { wil_err(wil, "platform_ops.resume : %d\n", rc); goto out; } } - /* if netif up, bring hardware up - * During open(), IFF_UP set after actual device method - * invocation. This prevent recursive call to wil_up() - */ - if (ndev->flags & IFF_UP) - rc = wil_up(wil); + if (keep_radio_on) + rc = wil_resume_keep_radio_on(wil); + else + rc = wil_resume_radio_off(wil); + + if (rc) + goto out; + + suspend_time_usec = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.suspend_start_time)); + wil->suspend_stats.total_suspend_time += suspend_time_usec; + if (suspend_time_usec < wil->suspend_stats.min_suspend_time) + wil->suspend_stats.min_suspend_time = suspend_time_usec; + if (suspend_time_usec > wil->suspend_stats.max_suspend_time) + wil->suspend_stats.max_suspend_time = suspend_time_usec; out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, - is_runtime ? "runtime" : "system", rc); + wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n", + is_runtime ? "runtime" : "system", rc, suspend_time_usec); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 5ca0307a3274..2e301b6b32a9 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -54,53 +54,90 @@ void wil_pmc_alloc(struct wil6210_priv *wil, struct pmc_ctx *pmc = &wil->pmc; struct device *dev = wil_to_dev(wil); struct wmi_pmc_cmd pmc_cmd = {0}; + int last_cmd_err = -ENOMEM; mutex_lock(&pmc->lock); if (wil_is_pmc_allocated(pmc)) { /* sanity check */ - wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__); + wil_err(wil, "ERROR pmc is already allocated\n"); + goto no_release_err; + } + if ((num_descriptors <= 0) || (descriptor_size <= 0)) { + wil_err(wil, + "Invalid params num_descriptors(%d), descriptor_size(%d)\n", + num_descriptors, descriptor_size); + last_cmd_err = -EINVAL; + goto no_release_err; + } + + if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) { + wil_err(wil, + "num_descriptors(%d) exceeds max ring size %d\n", + num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX); + last_cmd_err = -EINVAL; + goto no_release_err; + } + + if (num_descriptors > INT_MAX / descriptor_size) { + wil_err(wil, + "Overflow in num_descriptors(%d)*descriptor_size(%d)\n", + num_descriptors, descriptor_size); + last_cmd_err = -EINVAL; goto no_release_err; } pmc->num_descriptors = num_descriptors; pmc->descriptor_size = descriptor_size; - wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n", - __func__, num_descriptors, descriptor_size); + wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n", + num_descriptors, descriptor_size); /* allocate descriptors info list in pmc context*/ pmc->descriptors = kcalloc(num_descriptors, sizeof(struct desc_alloc_info), GFP_KERNEL); if (!pmc->descriptors) { - wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__); + wil_err(wil, "ERROR allocating pmc skb list\n"); goto no_release_err; } - wil_dbg_misc(wil, - "%s: allocated descriptors info list %p\n", - __func__, pmc->descriptors); + wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n", + pmc->descriptors); /* Allocate pring buffer and descriptors. * vring->va should be aligned on its size rounded up to power of 2 - * This is granted by the dma_alloc_coherent + * This is granted by the dma_alloc_coherent. + * + * HW has limitation that all vrings addresses must share the same + * upper 16 msb bits part of 48 bits address. To workaround that, + * if we are using 48 bit addresses switch to 32 bit allocation + * before allocating vring memory. + * + * There's no check for the return value of dma_set_mask_and_coherent, + * since we assume if we were able to set the mask during + * initialization in this system it will not fail if we set it again */ + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + pmc->pring_va = dma_alloc_coherent(dev, sizeof(struct vring_tx_desc) * num_descriptors, &pmc->pring_pa, GFP_KERNEL); + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + wil_dbg_misc(wil, - "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", - __func__, + "pmc_alloc: allocated pring %p => %pad. 
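wil_pmc_alloc() above now rejects bad sizes up front, including a guard against the num_descriptors * descriptor_size multiplication overflowing. The same validation in isolation; MAX_RING_ORDER is a stand-in for the driver's WIL_RING_SIZE_ORDER_MAX:

#include <limits.h>

#define MAX_RING_ORDER 15       /* placeholder value for the sketch */

static int validate_pmc_params(int num_descriptors, int descriptor_size)
{
        if (num_descriptors <= 0 || descriptor_size <= 0)
                return -1;      /* sizes must be positive */

        if (num_descriptors > (1 << MAX_RING_ORDER))
                return -1;      /* larger than any supported ring */

        if (num_descriptors > INT_MAX / descriptor_size)
                return -1;      /* num * size would overflow an int */

        return 0;
}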
%zd x %d = total %zd bytes\n", pmc->pring_va, &pmc->pring_pa, sizeof(struct vring_tx_desc), num_descriptors, sizeof(struct vring_tx_desc) * num_descriptors); if (!pmc->pring_va) { - wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__); + wil_err(wil, "ERROR allocating pmc pring\n"); goto release_pmc_skb_list; } @@ -119,9 +156,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); if (unlikely(!pmc->descriptors[i].va)) { - wil_err(wil, - "%s: ERROR allocating pmc descriptor %d", - __func__, i); + wil_err(wil, "ERROR allocating pmc descriptor %d", i); goto release_pmc_skbs; } @@ -141,21 +176,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil, *_d = *d; } - wil_dbg_misc(wil, "%s: allocated successfully\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n"); pmc_cmd.op = WMI_PMC_ALLOCATE; pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors); pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa); - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n"); pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with ALLOCATE op failed with status %d", + pmc->last_cmd_status); goto release_pmc_skbs; } @@ -164,8 +199,8 @@ void wil_pmc_alloc(struct wil6210_priv *wil, return; release_pmc_skbs: - wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__); - for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) { + wil_err(wil, "exit on error: Releasing skbs...\n"); + for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) { dma_free_coherent(dev, descriptor_size, pmc->descriptors[i].va, @@ -173,7 +208,7 @@ release_pmc_skbs: pmc->descriptors[i].va = NULL; } - wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__); + wil_err(wil, "exit on error: Releasing pring...\n"); dma_free_coherent(dev, sizeof(struct vring_tx_desc) * num_descriptors, @@ -183,13 +218,12 @@ release_pmc_skbs: pmc->pring_va = NULL; release_pmc_skb_list: - wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n", - __func__); + wil_err(wil, "exit on error: Releasing descriptors info list...\n"); kfree(pmc->descriptors); pmc->descriptors = NULL; no_release_err: - pmc->last_cmd_status = -ENOMEM; + pmc->last_cmd_status = last_cmd_err; mutex_unlock(&pmc->lock); } @@ -208,24 +242,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) pmc->last_cmd_status = 0; if (!wil_is_pmc_allocated(pmc)) { - wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n", - __func__); + wil_dbg_misc(wil, + "pmc_free: Error, can't free - not allocated\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return; } if (send_pmc_cmd) { - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n", - __func__); + wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n"); pmc_cmd.op = WMI_PMC_RELEASE; pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s WMI_PMC_CMD with RELEASE op failed, status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with RELEASE op failed, status %d", + pmc->last_cmd_status); /* There's nothing we can do with this error. * Normally, it should never occur. * Continue to freeing all memory allocated for pmc. 
@@ -237,8 +270,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) size_t buf_size = sizeof(struct vring_tx_desc) * pmc->num_descriptors; - wil_dbg_misc(wil, "%s: free pring va %p\n", - __func__, pmc->pring_va); + wil_dbg_misc(wil, "pmc_free: free pring va %p\n", + pmc->pring_va); dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa); pmc->pring_va = NULL; @@ -250,18 +283,18 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) int i; for (i = 0; - pmc->descriptors[i].va && i < pmc->num_descriptors; i++) { + i < pmc->num_descriptors && pmc->descriptors[i].va; i++) { dma_free_coherent(dev, pmc->descriptor_size, pmc->descriptors[i].va, pmc->descriptors[i].pa); pmc->descriptors[i].va = NULL; } - wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n", - __func__, i, pmc->num_descriptors); + wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i, + pmc->num_descriptors); wil_dbg_misc(wil, - "%s: free pmc descriptors info list %p\n", - __func__, pmc->descriptors); + "pmc_free: free pmc descriptors info list %p\n", + pmc->descriptors); kfree(pmc->descriptors); pmc->descriptors = NULL; } else { @@ -277,7 +310,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) */ int wil_pmc_last_cmd_status(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s: status %d\n", __func__, + wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n", wil->pmc.last_cmd_status); return wil->pmc.last_cmd_status; @@ -295,20 +328,22 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, size_t retval = 0; unsigned long long idx; loff_t offset; - size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + size_t pmc_size; mutex_lock(&pmc->lock); if (!wil_is_pmc_allocated(pmc)) { - wil_err(wil, "%s: error, pmc is not allocated!\n", __func__); + wil_err(wil, "error, pmc is not allocated!\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return -EPERM; } + pmc_size = pmc->descriptor_size * pmc->num_descriptors; + wil_dbg_misc(wil, - "%s: size %u, pos %lld\n", - __func__, (unsigned)count, *f_pos); + "pmc_read: size %u, pos %lld\n", + (u32)count, *f_pos); pmc->last_cmd_status = 0; @@ -317,15 +352,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, offset = *f_pos - (idx * pmc->descriptor_size); if (*f_pos >= pmc_size) { - wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n", - __func__, *f_pos, (unsigned)pmc_size); + wil_dbg_misc(wil, + "pmc_read: reached end of pmc buf: %lld >= %u\n", + *f_pos, (u32)pmc_size); pmc->last_cmd_status = -ERANGE; goto out; } wil_dbg_misc(wil, - "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", - __func__, *f_pos, idx, offset, count); + "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", + *f_pos, idx, offset, count); /* if no errors, return the copied byte count */ retval = simple_read_from_buffer(buf, @@ -345,7 +381,18 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence) loff_t newpos; struct wil6210_priv *wil = filp->private_data; struct pmc_ctx *pmc = &wil->pmc; - size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + size_t pmc_size; + + mutex_lock(&pmc->lock); + + if (!wil_is_pmc_allocated(pmc)) { + wil_err(wil, "error, pmc is not allocated!\n"); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return -EPERM; + } + + pmc_size = pmc->descriptor_size * pmc->num_descriptors; switch (whence) { case 0: /* SEEK_SET */ @@ -361,15 +408,21 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence) 
break; default: /* can't happen */ - return -EINVAL; + newpos = -EINVAL; + goto out; } - if (newpos < 0) - return -EINVAL; + if (newpos < 0) { + newpos = -EINVAL; + goto out; + } if (newpos > pmc_size) newpos = pmc_size; filp->f_pos = newpos; +out: + mutex_unlock(&pmc->lock); + return newpos; } diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index e3d1be82f314..a43cffcf1bbf 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -261,9 +261,19 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, struct wil_tid_ampdu_rx *r) { + int i; + if (!r) return; - wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size); + + /* Do not pass remaining frames to the network stack - it may be + * not expecting to get any more Rx. Rx from here may lead to + * kernel OOPS since some per-socket accounting info was already + * released. + */ + for (i = 0; i < r->buf_size; i++) + kfree_skb(r->reorder_buf[i]); + kfree(r->reorder_buf); kfree(r->reorder_time); kfree(r); @@ -281,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) return min(max_agg_size, req_agg_wsize); } -/* Block Ack - Rx side (recipient */ +/* Block Ack - Rx side (recipient) */ int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl) -{ - struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL); - - if (!req) - return -ENOMEM; - - req->cidxtid = cidxtid; - req->dialog_token = dialog_token; - req->ba_param_set = le16_to_cpu(ba_param_set); - req->ba_timeout = le16_to_cpu(ba_timeout); - req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl); - - mutex_lock(&wil->back_rx_mutex); - list_add_tail(&req->list, &wil->back_rx_pending); - mutex_unlock(&wil->back_rx_mutex); - - queue_work(wil->wq_service, &wil->back_rx_worker); - - return 0; -} - -static void wil_back_rx_handle(struct wil6210_priv *wil, - struct wil_back_rx *req) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { + u16 param_set = le16_to_cpu(ba_param_set); + u16 agg_timeout = le16_to_cpu(ba_timeout); + u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl); struct wil_sta_info *sta; u8 cid, tid; u16 agg_wsize = 0; @@ -318,47 +308,59 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) * bits 2..5: TID * bits 6..15: buffer size */ - u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15); - bool agg_amsdu = !!(req->ba_param_set & BIT(0)); - int ba_policy = req->ba_param_set & BIT(1); - u16 agg_timeout = req->ba_timeout; + u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15); + bool agg_amsdu = !!(param_set & BIT(0)); + int ba_policy = param_set & BIT(1); u16 status = WLAN_STATUS_SUCCESS; - u16 ssn = req->ba_seq_ctrl >> 4; + u16 ssn = seq_ctrl >> 4; struct wil_tid_ampdu_rx *r; - int rc; + int rc = 0; might_sleep(); - parse_cidxtid(req->cidxtid, &cid, &tid); + parse_cidxtid(cidxtid, &cid, &tid); /* sanity checks */ if (cid >= WIL6210_MAX_CID) { wil_err(wil, "BACK: invalid CID %d\n", cid); - return; + rc = -EINVAL; + goto out; } sta = &wil->sta[cid]; if (sta->status != wil_sta_connected) { wil_err(wil, "BACK: CID %d 
not connected\n", cid); - return; + rc = -EINVAL; + goto out; } wil_dbg_wmi(wil, "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n", - cid, sta->addr, tid, req_agg_wsize, req->ba_timeout, - agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn); + cid, sta->addr, tid, req_agg_wsize, agg_timeout, + agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn); /* apply policies */ if (ba_policy) { wil_err(wil, "BACK requested unsupported ba_policy == 1\n"); status = WLAN_STATUS_INVALID_QOS_PARAM; } - if (status == WLAN_STATUS_SUCCESS) - agg_wsize = wil_agg_size(wil, req_agg_wsize); + if (status == WLAN_STATUS_SUCCESS) { + if (req_agg_wsize == 0) { + wil_dbg_misc(wil, "Suggest BACK wsize %d\n", + WIL_MAX_AGG_WSIZE); + agg_wsize = WIL_MAX_AGG_WSIZE; + } else { + agg_wsize = min_t(u16, + WIL_MAX_AGG_WSIZE, req_agg_wsize); + } + } - rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status, + rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); - if (rc || (status != WLAN_STATUS_SUCCESS)) - return; + if (rc || (status != WLAN_STATUS_SUCCESS)) { + wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, + status); + goto out; + } /* apply */ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn); @@ -366,143 +368,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]); sta->tid_rx[tid] = r; spin_unlock_bh(&sta->tid_rx_lock); -} - -void wil_back_rx_flush(struct wil6210_priv *wil) -{ - struct wil_back_rx *evt, *t; - - wil_dbg_misc(wil, "%s()\n", __func__); - - mutex_lock(&wil->back_rx_mutex); - - list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) { - list_del(&evt->list); - kfree(evt); - } - - mutex_unlock(&wil->back_rx_mutex); -} - -/* Retrieve next ADDBA request from the pending list */ -static struct list_head *next_back_rx(struct wil6210_priv *wil) -{ - struct list_head *ret = NULL; - - mutex_lock(&wil->back_rx_mutex); - - if (!list_empty(&wil->back_rx_pending)) { - ret = wil->back_rx_pending.next; - list_del(ret); - } - - mutex_unlock(&wil->back_rx_mutex); - - return ret; -} - -void wil_back_rx_worker(struct work_struct *work) -{ - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - back_rx_worker); - struct wil_back_rx *evt; - struct list_head *lh; - - while ((lh = next_back_rx(wil)) != NULL) { - evt = list_entry(lh, struct wil_back_rx, list); - wil_back_rx_handle(wil, evt); - kfree(evt); - } +out: + return rc; } -/* BACK - Tx (originator) side */ -static void wil_back_tx_handle(struct wil6210_priv *wil, - struct wil_back_tx *req) +/* BACK - Tx side (originator) */ +int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) { - struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid]; - int rc; + u8 agg_wsize = wil_agg_size(wil, wsize); + u16 agg_timeout = 0; + struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; + int rc = 0; if (txdata->addba_in_progress) { wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n", - req->ringid); - return; + ringid); + goto out; } if (txdata->agg_wsize) { wil_dbg_misc(wil, - "ADDBA for vring[%d] already established wsize %d\n", - req->ringid, txdata->agg_wsize); - return; + "ADDBA for vring[%d] already done for wsize %d\n", + ringid, txdata->agg_wsize); + goto out; } txdata->addba_in_progress = true; - rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout); - if (rc) + rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout); + if (rc) { + 
wil_err(wil, "wmi_addba failed, rc (%d)", rc); txdata->addba_in_progress = false; -} - -static struct list_head *next_back_tx(struct wil6210_priv *wil) -{ - struct list_head *ret = NULL; - - mutex_lock(&wil->back_tx_mutex); - - if (!list_empty(&wil->back_tx_pending)) { - ret = wil->back_tx_pending.next; - list_del(ret); } - mutex_unlock(&wil->back_tx_mutex); - - return ret; -} - -void wil_back_tx_worker(struct work_struct *work) -{ - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - back_tx_worker); - struct wil_back_tx *evt; - struct list_head *lh; - - while ((lh = next_back_tx(wil)) != NULL) { - evt = list_entry(lh, struct wil_back_tx, list); - - wil_back_tx_handle(wil, evt); - kfree(evt); - } -} - -void wil_back_tx_flush(struct wil6210_priv *wil) -{ - struct wil_back_tx *evt, *t; - - wil_dbg_misc(wil, "%s()\n", __func__); - - mutex_lock(&wil->back_tx_mutex); - - list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) { - list_del(&evt->list); - kfree(evt); - } - - mutex_unlock(&wil->back_tx_mutex); -} - -int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) -{ - struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL); - - if (!req) - return -ENOMEM; - - req->ringid = ringid; - req->agg_wsize = wil_agg_size(wil, wsize); - req->agg_timeout = 0; - - mutex_lock(&wil->back_tx_mutex); - list_add_tail(&req->list, &wil->back_tx_pending); - mutex_unlock(&wil->back_tx_mutex); - - queue_work(wil->wq_service, &wil->back_tx_worker); - - return 0; +out: + return rc; } diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c new file mode 100644 index 000000000000..7c9a7900c5ed --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/sysfs.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/device.h> +#include <linux/sysfs.h> + +#include "wil6210.h" +#include "wmi.h" + +static ssize_t +wil_ftm_txrx_offset_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + struct { + struct wmi_cmd_hdr wmi; + struct wmi_tof_get_tx_rx_offset_event evt; + } __packed reply; + int rc; + ssize_t len; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -EOPNOTSUPP; + + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_TOF_GET_TX_RX_OFFSET_CMDID, NULL, 0, + WMI_TOF_GET_TX_RX_OFFSET_EVENTID, + &reply, sizeof(reply), 100); + if (rc < 0) + return rc; + if (reply.evt.status) { + wil_err(wil, "get_tof_tx_rx_offset failed, error %d\n", + reply.evt.status); + return -EIO; + } + len = snprintf(buf, PAGE_SIZE, "%u %u\n", + le32_to_cpu(reply.evt.tx_offset), + le32_to_cpu(reply.evt.rx_offset)); + return len; +} + +static ssize_t +wil_ftm_txrx_offset_sysfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + struct wmi_tof_set_tx_rx_offset_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_tof_set_tx_rx_offset_event evt; + } __packed reply; + unsigned int tx_offset, rx_offset; + int rc; + + if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2) + return -EINVAL; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -EOPNOTSUPP; + + memset(&cmd, 0, sizeof(cmd)); + cmd.tx_offset = cpu_to_le32(tx_offset); + cmd.rx_offset = cpu_to_le32(rx_offset); + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, &cmd, sizeof(cmd), + WMI_TOF_SET_TX_RX_OFFSET_EVENTID, + &reply, sizeof(reply), 100); + if (rc < 0) + return rc; + if (reply.evt.status) { + wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n", + reply.evt.status); + return -EIO; + } + return count; +} + +static DEVICE_ATTR(ftm_txrx_offset, 0644, + wil_ftm_txrx_offset_sysfs_show, + wil_ftm_txrx_offset_sysfs_store); + +static ssize_t +wil_tt_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + ssize_t len; + struct wmi_tt_data tt_data; + int i, rc; + + rc = wmi_get_tt_cfg(wil, &tt_data); + if (rc) + return rc; + + len = snprintf(buf, PAGE_SIZE, " high max critical\n"); + + len += snprintf(buf + len, PAGE_SIZE - len, "bb: "); + if (tt_data.bb_enabled) + for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) + len += snprintf(buf + len, PAGE_SIZE - len, + "%03d-%03d ", + tt_data.bb_zones[i].temperature_high, + tt_data.bb_zones[i].temperature_low); + else + len += snprintf(buf + len, PAGE_SIZE - len, "* disabled *"); + len += snprintf(buf + len, PAGE_SIZE - len, "\nrf: "); + if (tt_data.rf_enabled) + for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) + len += snprintf(buf + len, PAGE_SIZE - len, + "%03d-%03d ", + tt_data.rf_zones[i].temperature_high, + tt_data.rf_zones[i].temperature_low); + else + len += snprintf(buf + len, PAGE_SIZE - len, "* disabled *"); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + + return len; +} + +static ssize_t +wil_tt_sysfs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + int i, rc = -EINVAL; + char *token, *dupbuf, *tmp; + struct wmi_tt_data tt_data = { + .bb_enabled = 0, + .rf_enabled = 0, + }; + + tmp = kmemdup(buf, count + 1, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + tmp[count] = '\0'; + 
dupbuf = tmp; + + /* Format for writing is 12 unsigned bytes separated by spaces: + * <bb_z1_h> <bb_z1_l> <bb_z2_h> <bb_z2_l> <bb_z3_h> <bb_z3_l> \ + * <rf_z1_h> <rf_z1_l> <rf_z2_h> <rf_z2_l> <rf_z3_h> <rf_z3_l> + * To disable thermal throttling for bb or for rf, use 0 for all + * its six set points. + */ + + /* bb */ + for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) { + token = strsep(&dupbuf, " "); + if (!token) + goto out; + if (kstrtou8(token, 0, &tt_data.bb_zones[i].temperature_high)) + goto out; + token = strsep(&dupbuf, " "); + if (!token) + goto out; + if (kstrtou8(token, 0, &tt_data.bb_zones[i].temperature_low)) + goto out; + + if (tt_data.bb_zones[i].temperature_high > 0 || + tt_data.bb_zones[i].temperature_low > 0) + tt_data.bb_enabled = 1; + } + /* rf */ + for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) { + token = strsep(&dupbuf, " "); + if (!token) + goto out; + if (kstrtou8(token, 0, &tt_data.rf_zones[i].temperature_high)) + goto out; + token = strsep(&dupbuf, " "); + if (!token) + goto out; + if (kstrtou8(token, 0, &tt_data.rf_zones[i].temperature_low)) + goto out; + + if (tt_data.rf_zones[i].temperature_high > 0 || + tt_data.rf_zones[i].temperature_low > 0) + tt_data.rf_enabled = 1; + } + + rc = wmi_set_tt_cfg(wil, &tt_data); + if (rc) + goto out; + + rc = count; +out: + kfree(tmp); + return rc; +} + +static DEVICE_ATTR(thermal_throttling, 0644, + wil_tt_sysfs_show, wil_tt_sysfs_store); + +static ssize_t +wil_fst_link_loss_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + ssize_t len = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) + if (wil->sta[i].status == wil_sta_connected) + len += snprintf(buf + len, PAGE_SIZE - len, + "[%d] %pM %s\n", i, wil->sta[i].addr, + wil->sta[i].fst_link_loss ? 
+ "On" : "Off"); + + return len; +} + +static ssize_t +wil_fst_link_loss_sysfs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + u8 addr[ETH_ALEN]; + char *token, *dupbuf, *tmp; + int rc = -EINVAL; + bool fst_link_loss; + + tmp = kmemdup(buf, count + 1, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp[count] = '\0'; + dupbuf = tmp; + + token = strsep(&dupbuf, " "); + if (!token) + goto out; + + /* mac address */ + if (sscanf(token, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &addr[0], &addr[1], &addr[2], + &addr[3], &addr[4], &addr[5]) != 6) + goto out; + + /* On/Off */ + if (strtobool(dupbuf, &fst_link_loss)) + goto out; + + wil_dbg_misc(wil, "set [%pM] with %d\n", addr, fst_link_loss); + + rc = wmi_link_maintain_cfg_write(wil, addr, fst_link_loss); + if (!rc) + rc = count; + +out: + kfree(tmp); + return rc; +} + +static DEVICE_ATTR(fst_link_loss, 0644, + wil_fst_link_loss_sysfs_show, + wil_fst_link_loss_sysfs_store); + +static ssize_t +wil_snr_thresh_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + ssize_t len = 0; + + if (wil->snr_thresh.enabled) + len = snprintf(buf, PAGE_SIZE, "omni=%d, direct=%d\n", + wil->snr_thresh.omni, wil->snr_thresh.direct); + + return len; +} + +static ssize_t +wil_snr_thresh_sysfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + int rc; + short omni, direct; + + /* to disable snr threshold, set both omni and direct to 0 */ + if (sscanf(buf, "%hd %hd", &omni, &direct) != 2) + return -EINVAL; + + rc = wmi_set_snr_thresh(wil, omni, direct); + if (!rc) + rc = count; + + return rc; +} + +static DEVICE_ATTR(snr_thresh, 0644, + wil_snr_thresh_sysfs_show, + wil_snr_thresh_sysfs_store); + +static struct attribute *wil6210_sysfs_entries[] = { + &dev_attr_ftm_txrx_offset.attr, + &dev_attr_thermal_throttling.attr, + &dev_attr_fst_link_loss.attr, + &dev_attr_snr_thresh.attr, + NULL +}; + +static struct attribute_group wil6210_attribute_group = { + .name = "wil6210", + .attrs = wil6210_sysfs_entries, +}; + +int wil6210_sysfs_init(struct wil6210_priv *wil) +{ + struct device *dev = wil_to_dev(wil); + int err; + + err = sysfs_create_group(&dev->kobj, &wil6210_attribute_group); + if (err) { + wil_err(wil, "failed to create sysfs group: %d\n", err); + return err; + } + + kobject_uevent(&dev->kobj, KOBJ_CHANGE); + + return 0; +} + +void wil6210_sysfs_remove(struct wil6210_priv *wil) +{ + struct device *dev = wil_to_dev(wil); + + sysfs_remove_group(&dev->kobj, &wil6210_attribute_group); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); +} diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index e59239d22b94..c4db2a9d9f7f 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {} #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ DECLARE_EVENT_CLASS(wil6210_wmi, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len), TP_STRUCT__entry( __field(u8, mid) - __field(u16, id) - __field(u32, timestamp) + __field(u16, command_id) + __field(u32, fw_timestamp) __field(u16, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( __entry->mid = wmi->mid; - __entry->id = le16_to_cpu(wmi->id); - __entry->timestamp = le32_to_cpu(wmi->timestamp); + __entry->command_id = le16_to_cpu(wmi->command_id); + __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( "MID %d id 0x%04x len %d timestamp %d", - __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp + __entry->mid, __entry->command_id, __entry->buf_len, + __entry->fw_timestamp ) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, - TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len), TP_ARGS(wmi, buf, buf_len) ); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index af436292190b..41dec9e25f14 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -29,14 +29,18 @@ #include "trace.h" static bool rtap_include_phy_info; -module_param(rtap_include_phy_info, bool, S_IRUGO); +module_param(rtap_include_phy_info, bool, 0444); MODULE_PARM_DESC(rtap_include_phy_info, " Include PHY info in the radiotap header, default - no"); bool rx_align_2; -module_param(rx_align_2, bool, S_IRUGO); +module_param(rx_align_2, bool, 0444); MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); +bool rx_large_buf; +module_param(rx_large_buf, bool, 0444); +MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no"); + static inline uint wil_rx_snaplen(void) { return rx_align_2 ? 
6 : 0; @@ -88,6 +92,63 @@ static inline int wil_vring_wmark_high(struct vring *vring) return vring->size/4; } +/* returns true if num avail descriptors is lower than wmark_low */ +static inline int wil_vring_avail_low(struct vring *vring) +{ + return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring); +} + +/* returns true if num avail descriptors is higher than wmark_high */ +static inline int wil_vring_avail_high(struct vring *vring) +{ + return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring); +} + +/* returns true when all tx vrings are empty */ +bool wil_is_tx_idle(struct wil6210_priv *wil) +{ + int i; + unsigned long data_comp_to; + + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + struct vring *vring = &wil->vring_tx[i]; + int vring_index = vring - wil->vring_tx; + struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + + spin_lock(&txdata->lock); + + if (!vring->va || !txdata->enabled) { + spin_unlock(&txdata->lock); + continue; + } + + data_comp_to = jiffies + msecs_to_jiffies( + WIL_DATA_COMPLETION_TO_MS); + if (test_bit(wil_status_napi_en, wil->status)) { + while (!wil_vring_is_empty(vring)) { + if (time_after(jiffies, data_comp_to)) { + wil_dbg_pm(wil, + "TO waiting for idle tx\n"); + spin_unlock(&txdata->lock); + return false; + } + wil_dbg_ratelimited(wil, + "tx vring is not empty -> NAPI\n"); + spin_unlock(&txdata->lock); + napi_synchronize(&wil->napi_tx); + msleep(20); + spin_lock(&txdata->lock); + if (!vring->va || !txdata->enabled) + break; + } + } + + spin_unlock(&txdata->lock); + } + + return true; +} + /* wil_val_in_range - check if value in [min,max) */ static inline bool wil_val_in_range(int val, int min, int max) { @@ -100,7 +161,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) size_t sz = vring->size * sizeof(vring->va[0]); uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "vring_alloc:\n"); BUILD_BUG_ON(sizeof(vring->va[0]) != 32); @@ -111,15 +172,32 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) vring->va = NULL; return -ENOMEM; } + /* vring->va should be aligned on its size rounded up to power of 2 - * This is granted by the dma_alloc_coherent + * This is granted by the dma_alloc_coherent. + * + * HW has limitation that all vrings addresses must share the same + * upper 16 msb bits part of 48 bits address. To workaround that, + * if we are using 48 bit addresses switch to 32 bit allocation + * before allocating vring memory. 
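wil_is_tx_idle() above is essentially a bounded wait: it re-checks ring emptiness, lets NAPI run, and gives up once WIL_DATA_COMPLETION_TO_MS has elapsed. A simplified sketch of that polling shape, with a hypothetical check_idle callback standing in for the per-ring test:

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Illustrative bounded poll; check_idle() is a placeholder callback. */
static bool wait_until_idle(bool (*check_idle)(void *), void *arg,
			    unsigned int to_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(to_ms);

	while (!check_idle(arg)) {
		if (time_after(jiffies, deadline))
			return false;	/* timed out, still busy */
		msleep(20);		/* let completions make progress */
	}

	return true;
}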
+ * + * There's no check for the return value of dma_set_mask_and_coherent, + * since we assume if we were able to set the mask during + * initialization in this system it will not fail if we set it again */ + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); if (!vring->va) { kfree(vring->ctx); vring->ctx = NULL; return -ENOMEM; } + + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + /* initially, all descriptors are SW owned * For Tx and Rx, ownership bit is at the same location, thus * we can use any @@ -160,6 +238,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, struct device *dev = wil_to_dev(wil); size_t sz = vring->size * sizeof(vring->va[0]); + lockdep_assert_held(&wil->mutex); if (tx) { int vring_index = vring - wil->vring_tx; @@ -183,6 +262,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, &vring->va[vring->swtail].tx; ctx = &vring->ctx[vring->swtail]; + if (!ctx) { + wil_dbg_txrx(wil, + "ctx(%d) was already completed\n", + vring->swtail); + vring->swtail = wil_vring_next_tail(vring); + continue; + } *d = *_d; wil_txdesc_unmap(dev, d, ctx); if (ctx->skb) @@ -218,7 +304,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, u32 i, int headroom) { struct device *dev = wil_to_dev(wil); - unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen(); + unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); struct vring_rx_desc dd, *d = ⅆ volatile struct vring_rx_desc *_d = &vring->va[i].rx; dma_addr_t pa; @@ -365,6 +451,18 @@ static inline int wil_is_back_req(u8 fc) (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); } +bool wil_is_rx_idle(struct wil6210_priv *wil) +{ + struct vring_rx_desc *_d; + struct vring *vring = &wil->vring_rx; + + _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx; + if (_d->dma.status & RX_DMA_STATUS_DU) + return false; + + return true; +} + /** * reap 1 frame from @swhead * @@ -382,7 +480,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, struct sk_buff *skb; dma_addr_t pa; unsigned int snaplen = wil_rx_snaplen(); - unsigned int sz = mtu_max + ETH_HLEN + snaplen; + unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen; u16 dmalen; u8 ftype; int cid; @@ -543,11 +641,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) break; } } + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + wil_w(wil, v->hwtail, v->swtail); return rc; } +/** + * reverse_memcmp - Compare two areas of memory, in reverse order + * @cs: One area of memory + * @ct: Another area of memory + * @count: The size of the area. + * + * Cut'n'paste from original memcmp (see lib/string.c) + * with minimal modifications + */ +static int reverse_memcmp(const void *cs, const void *ct, size_t count) +{ + const unsigned char *su1, *su2; + int res = 0; + + for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; + --su1, --su2, count--) { + res = *su1 - *su2; + if (res) + break; + } + return res; +} + +static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + int cid = wil_rxdesc_cid(d); + int tid = wil_rxdesc_tid(d); + int key_id = wil_rxdesc_key_id(d); + int mc = wil_rxdesc_mcast(d); + struct wil_sta_info *s = &wil->sta[cid]; + struct wil_tid_crypto_rx *c = mc ? 
&s->group_crypto_rx : + &s->tid_crypto_rx[tid]; + struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; + const u8 *pn = (u8 *)&d->mac.pn_15_0; + + if (!cc->key_set) { + wil_err_ratelimited(wil, + "Key missing. CID %d TID %d MCast %d KEY_ID %d\n", + cid, tid, mc, key_id); + return -EINVAL; + } + + if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) { + wil_err_ratelimited(wil, + "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n", + cid, tid, mc, key_id, pn, cc->pn); + return -EINVAL; + } + memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN); + + return 0; +} + /* * Pass Rx packet to the netif. Update statistics. * Called in softirq context (NAPI poll). @@ -560,6 +718,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) unsigned int len = skb->len; struct vring_rx_desc *d = wil_skb_rxdesc(skb); int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ + int security = wil_rxdesc_security(d); struct ethhdr *eth = (void *)skb->data; /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication * is not suitable, need to look at data @@ -585,6 +744,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); + if (security && (wil_rx_crypto_check(wil, skb) != 0)) { + rc = GRO_DROP; + dev_kfree_skb(skb); + stats->rx_replay++; + goto stats; + } + if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in @@ -626,6 +792,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", len, gro_res_str[rc]); } +stats: /* statistics. rc set to GRO_NORMAL for AP bridging */ if (unlikely(rc == GRO_DROP)) { ndev->stats.rx_dropped++; @@ -656,7 +823,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return; } - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "rx_handle\n"); while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { (*quota)--; @@ -674,18 +841,34 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_rx_refill(wil, v->size); } +static void wil_rx_buf_len_init(struct wil6210_priv *wil) +{ + wil->rx_buf_len = rx_large_buf ? 
+ WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; + if (mtu_max > wil->rx_buf_len) { + /* do not allow RX buffers to be smaller than mtu_max, for + * backward compatibility (mtu_max parameter was also used + * to support receiving large packets) + */ + wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max); + wil->rx_buf_len = mtu_max; + } +} + int wil_rx_init(struct wil6210_priv *wil, u16 size) { struct vring *vring = &wil->vring_rx; int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_init\n"); if (vring->va) { wil_err(wil, "Rx ring already allocated\n"); return -EINVAL; } + wil_rx_buf_len_init(wil); + vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -710,12 +893,27 @@ void wil_rx_fini(struct wil6210_priv *wil) { struct vring *vring = &wil->vring_rx; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_fini\n"); if (vring->va) wil_vring_free(wil, vring, 0); } +static inline void wil_tx_data_init(struct vring_tx_data *txdata) +{ + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = 0; + txdata->enabled = 0; + txdata->idle = 0; + txdata->last_idle = 0; + txdata->begin = 0; + txdata->agg_wsize = 0; + txdata->agg_timeout = 0; + txdata->agg_amsdu = 0; + txdata->addba_in_progress = false; + spin_unlock_bh(&txdata->lock); +} + int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, int cid, int tid) { @@ -741,14 +939,15 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, }, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; } __packed reply; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); + lockdep_assert_held(&wil->mutex); if (vring->va) { wil_err(wil, "Tx ring [%d] already allocated\n", id); @@ -756,8 +955,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, goto out; } - memset(txdata, 0, sizeof(*txdata)); - spin_lock_init(&txdata->lock); + wil_tx_data_init(txdata); vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -781,17 +979,25 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, rc = -EINVAL; goto out_free; } - vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); + spin_lock_bh(&txdata->lock); + vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); txdata->enabled = 1; + spin_unlock_bh(&txdata->lock); + if (txdata->dot1x_open && (agg_wsize >= 0)) wil_addba_tx_request(wil, id, agg_wsize); return 0; out_free: + spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; txdata->enabled = 0; + spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring, 1); + wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; + wil->vring2cid_tid[id][1] = 0; + out: return rc; @@ -813,14 +1019,15 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) }, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; } __packed reply; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); + lockdep_assert_held(&wil->mutex); if (vring->va) { wil_err(wil, "Tx ring [%d] already allocated\n", id); @@ -828,8 +1035,7 @@ int wil_vring_init_bcast(struct 
wil6210_priv *wil, int id, int size) goto out; } - memset(txdata, 0, sizeof(*txdata)); - spin_lock_init(&txdata->lock); + wil_tx_data_init(txdata); vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -853,14 +1059,18 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) rc = -EINVAL; goto out_free; } - vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); + spin_lock_bh(&txdata->lock); + vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); txdata->enabled = 1; + spin_unlock_bh(&txdata->lock); return 0; out_free: + spin_lock_bh(&txdata->lock); txdata->enabled = 0; txdata->dot1x_open = false; + spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring, 1); out: @@ -872,23 +1082,29 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - WARN_ON(!mutex_is_locked(&wil->mutex)); + lockdep_assert_held(&wil->mutex); if (!vring->va) return; - wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); + wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); + /* napi_synchronize waits for completion of the current NAPI but will + * not prevent the next NAPI run. + * Add a memory barrier to guarantee that txdata->enabled is zeroed + * before napi_synchronize so that the next scheduled NAPI will not + * handle this vring + */ + wmb(); /* make sure NAPI won't touch this vring */ if (test_bit(wil_status_napi_en, wil->status)) napi_synchronize(&wil->napi_tx); wil_vring_free(wil, vring, 1); - memset(txdata, 0, sizeof(*txdata)); } static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, @@ -908,13 +1124,16 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, continue; if (wil->vring2cid_tid[i][0] == cid) { struct vring *v = &wil->vring_tx[i]; + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", - __func__, eth->h_dest, i); - if (v->va) { + wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", + eth->h_dest, i); + if (v->va && txdata->enabled) { return v; } else { - wil_dbg_txrx(wil, "vring[%d] not valid\n", i); + wil_dbg_txrx(wil, + "find_tx_ucast: vring[%d] not valid\n", + i); return NULL; } } @@ -932,6 +1151,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, struct vring *v; int i; u8 cid; + struct vring_tx_data *txdata; /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. 
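Every wil_find_tx_*() helper in this hunk and the ones that follow gains the same guard: a ring may only be used if its descriptor memory is allocated and txdata->enabled is set. A type-simplified sketch of that selection loop (the struct names here are illustrative, not the driver's):

struct ring     { void *va; };
struct ring_txd { int enabled; };

/* return the first usable ring index, or -1 if none qualifies */
static int first_usable_ring(const struct ring *r,
			     const struct ring_txd *txd, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (r[i].va && txd[i].enabled)
			return i;

	return -1;
}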
@@ -939,7 +1159,8 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) continue; cid = wil->vring2cid_tid[i][0]; @@ -975,12 +1196,14 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, struct sk_buff *skb) { struct vring *v; + struct vring_tx_data *txdata; int i = wil->bcast_vring; if (i < 0) return NULL; v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) return NULL; if (!wil->vring_tx_data[i].dot1x_open && (skb->protocol != cpu_to_be16(ETH_P_PAE))) @@ -1007,11 +1230,13 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, u8 cid; struct ethhdr *eth = (void *)skb->data; char *src = eth->h_source; + struct vring_tx_data *txdata; /* find 1-st vring eligible for data */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) continue; cid = wil->vring2cid_tid[i][0]; @@ -1064,17 +1289,6 @@ found: return v; } -static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, - struct sk_buff *skb) -{ - struct wireless_dev *wdev = wil->wdev; - - if (wdev->iftype != NL80211_IFTYPE_AP) - return wil_find_tx_bcast_2(wil, skb); - - return wil_find_tx_bcast_1(wil, skb); -} - static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, int vring_index) { @@ -1244,8 +1458,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, int gso_type; int rc = -EINVAL; - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1452,13 +1666,20 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + descs_used)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", vring_index, used, used + descs_used); } + /* Make sure to advance the head only after descriptor update is done. + * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. 
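The wmb() added a few lines below is a classic producer-side publish barrier: the descriptor contents must be globally visible before the index that advertises them is advanced. A stripped-down sketch of that ordering, with a made-up descriptor type:

#include <linux/types.h>
#include <asm/barrier.h>

struct desc { u64 addr; u32 len; u32 flags; };	/* illustrative layout */

static void publish_desc(volatile struct desc *slot, const struct desc *d,
			 u32 *head, u32 ring_size)
{
	*slot = *d;			/* 1. write the descriptor body     */
	wmb();				/* 2. order body before the index   */
	*head = (*head + 1) % ring_size;/* 3. only now may a consumer see it */
}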
+ */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, descs_used); wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); @@ -1475,7 +1696,7 @@ mem_error: while (descs_used > 0) { struct wil_ctx *ctx; - i = (swhead + descs_used) % vring->size; + i = (swhead + descs_used - 1) % vring->size; d = (struct vring_tx_desc *)&vring->va[i].tx; _desc = &vring->va[i].tx; *d = *_desc; @@ -1507,8 +1728,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, bool mcast = (vring_index == wil->bcast_vring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1592,13 +1813,20 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", vring_index, used, used + nr_frags + 1); } + /* Make sure to advance the head only after descriptor update is done. + * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. + */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, @@ -1641,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, spin_lock(&txdata->lock); + if (test_bit(wil_status_suspending, wil->status) || + test_bit(wil_status_suspended, wil->status) || + test_bit(wil_status_resuming, wil->status)) { + wil_dbg_txrx(wil, + "suspend/resume in progress. drop packet\n"); + spin_unlock(&txdata->lock); + return -EINVAL; + } + rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) (wil, vring, skb); @@ -1649,6 +1886,94 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, return rc; } +/** + * Check status of tx vrings and stop/wake net queues if needed + * + * This function does one of two checks: + * In case check_stop is true, will check if net queues need to be stopped. If + * the conditions for stopping are met, netif_tx_stop_all_queues() is called. + * In case check_stop is false, will check if net queues need to be waked. If + * the conditions for waking are met, netif_tx_wake_all_queues() is called. + * vring is the vring which is currently being modified by either adding + * descriptors (tx) into it or removing descriptors (tx complete) from it. Can + * be null when irrelevant (e.g. connect/disconnect events). + * + * The implementation is to stop net queues if modified vring has low + * descriptor availability. Wake if all vrings are not in low descriptor + * availability and modified vring has high descriptor availability. 
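As a concrete illustration of the wake threshold described above: with the new default Tx ring order of 12 (see WIL_TX_RING_SIZE_ORDER_DEFAULT later in wil6210.h, i.e. 4096 descriptors), wil_vring_wmark_high() evaluates to 4096 / 4 = 1024, so queues stopped because the ring being filled ran low are only woken again once more than a quarter of that ring is free and no other enabled ring is still below its low watermark.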
+ */ +static inline void __wil_update_net_queues(struct wil6210_priv *wil, + struct vring *vring, + bool check_stop) +{ + int i; + + if (vring) + wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d", + (int)(vring - wil->vring_tx), check_stop, + wil->net_queue_stopped); + else + wil_dbg_txrx(wil, "check_stop=%d, stopped=%d", + check_stop, wil->net_queue_stopped); + + if (check_stop == wil->net_queue_stopped) + /* net queues already in desired state */ + return; + + if (check_stop) { + if (!vring || unlikely(wil_vring_avail_low(vring))) { + /* not enough room in the vring */ + netif_tx_stop_all_queues(wil_to_ndev(wil)); + wil->net_queue_stopped = true; + wil_dbg_txrx(wil, "netif_tx_stop called\n"); + } + return; + } + + /* Do not wake the queues in suspend flow */ + if (test_bit(wil_status_suspending, wil->status) || + test_bit(wil_status_suspended, wil->status)) + return; + + /* check wake */ + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + struct vring *cur_vring = &wil->vring_tx[i]; + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + + if (!cur_vring->va || !txdata->enabled || cur_vring == vring) + continue; + + if (wil_vring_avail_low(cur_vring)) { + wil_dbg_txrx(wil, "vring %d full, can't wake\n", + (int)(cur_vring - wil->vring_tx)); + return; + } + } + + if (!vring || wil_vring_avail_high(vring)) { + /* enough room in the vring */ + wil_dbg_txrx(wil, "calling netif_tx_wake\n"); + netif_tx_wake_all_queues(wil_to_ndev(wil)); + wil->net_queue_stopped = false; + } +} + +void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring, + bool check_stop) +{ + spin_lock(&wil->net_queue_lock); + __wil_update_net_queues(wil, vring, check_stop); + spin_unlock(&wil->net_queue_lock); +} + +void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring, + bool check_stop) +{ + spin_lock_bh(&wil->net_queue_lock); + __wil_update_net_queues(wil, vring, check_stop); + spin_unlock_bh(&wil->net_queue_lock); +} + netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); @@ -1658,7 +1983,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) static bool pr_once_fw; int rc; - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "start_xmit\n"); if (unlikely(!test_bit(wil_status_fwready, wil->status))) { if (!pr_once_fw) { wil_err(wil, "FW not ready\n"); @@ -1667,7 +1992,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) goto drop; } if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { - wil_err_ratelimited(wil, "FW not connected\n"); + wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n"); goto drop; } if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { @@ -1677,12 +2002,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) pr_once_fw = false; /* find vring */ - if (wil->wdev->iftype == NL80211_IFTYPE_STATION) { - /* in STA mode (ESS), all to same VRING */ + if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) { + /* in STA mode (ESS), all to same VRING (to AP) */ vring = wil_find_tx_vring_sta(wil, skb); - } else { /* direct communication, find matching VRING */ - vring = bcast ? 
wil_find_tx_bcast(wil, skb) : - wil_find_tx_ucast(wil, skb); + } else if (bcast) { + if (wil->pbss) + /* in pbss, no bcast VRING - duplicate skb in + * all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + else if (wil->wdev->iftype == NL80211_IFTYPE_AP) + /* AP has a dedicated bcast VRING */ + vring = wil_find_tx_bcast_1(wil, skb); + else + /* unexpected combination, fallback to duplicating + * the skb in all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + } else { + /* unicast, find specific VRING by dest. address */ + vring = wil_find_tx_ucast(wil, skb); } if (unlikely(!vring)) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); @@ -1691,14 +2030,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* set up vring entry */ rc = wil_tx_vring(wil, vring, skb); - /* do we still have enough room in the vring? */ - if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) { - netif_tx_stop_all_queues(wil_to_ndev(wil)); - wil_dbg_txrx(wil, "netif_tx_stop : ring full\n"); - } - switch (rc) { case 0: + /* shall we stop net queues? */ + wil_update_net_queues_bh(wil, vring, true); /* statistics will be updated on the tx_complete */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -1760,7 +2095,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) return 0; } - wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); + wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); used_before_complete = wil_vring_used_tx(vring); @@ -1822,6 +2157,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) wil_consume_skb(skb, d->dma.error == 0); } memset(ctx, 0, sizeof(*ctx)); + /* Make sure the ctx is zeroed before updating the tail + * to prevent a case where wil_tx_vring will see + * this descriptor as used and handle it before ctx zero + * is completed. + */ + wmb(); /* There is no need to touch HW descriptor: * - ststus bit TX_DMA_STATUS_DU is set by design, * so hardware will not try to process this desc., @@ -1834,17 +2175,16 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* performance monitoring */ used_new = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used_new, used_before_complete)) { wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", ringid, used_before_complete, used_new); txdata->last_idle = get_cycles(); } - if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) { - wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n"); - netif_tx_wake_all_queues(wil_to_ndev(wil)); - } + /* shall we wake net queues? */ + if (done) + wil_update_net_queues(wil, vring, false); return done; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index ee7c7b4b9a17..fcdffaa8251b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d) return WIL_GET_BITS(d->mac.d0, 28, 31); } +static inline int wil_rxdesc_key_id(struct vring_rx_desc *d) +{ + return WIL_GET_BITS(d->mac.d1, 4, 5); +} + +static inline int wil_rxdesc_security(struct vring_rx_desc *d) +{ + return WIL_GET_BITS(d->mac.d1, 7, 7); +} + static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d) { return WIL_GET_BITS(d->mac.d1, 8, 9); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index ade5f3b8274b..0df216689d20 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,25 +17,36 @@ #ifndef __WIL6210_H__ #define __WIL6210_H__ +#include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include <linux/timex.h> #include <linux/types.h> +#include "wmi.h" #include "wil_platform.h" +#include "ftm.h" extern bool no_fw_recovery; extern unsigned int mtu_max; extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; -extern u32 vring_idle_trsh; extern bool rx_align_2; +extern bool rx_large_buf; extern bool debug_fw; +extern bool disable_ap_sme; #define WIL_NAME "wil6210" -#define WIL_FW_NAME "wil6210.fw" /* code */ -#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */ +#define WIL_FW_NAME_DEFAULT "wil6210.fw" +#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw" + +#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" +#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw" + +#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ + +#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ /** @@ -47,11 +58,12 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1); } -#define WIL6210_MEM_SIZE (2*1024*1024UL) +#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL) +#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL) #define WIL_TX_Q_LEN_DEFAULT (4000) #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) -#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10) +#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12) #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ /* limit ring size in range [32..32k] */ @@ -71,6 +83,20 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) */ #define WIL_MAX_MPDU_OVERHEAD (62) +struct wil_suspend_stats { + unsigned long successful_suspends; + unsigned long failed_suspends; + unsigned long successful_resumes; + unsigned long failed_resumes; + unsigned long rejected_by_device; + unsigned long rejected_by_host; + unsigned long long total_suspend_time; + unsigned long long min_suspend_time; + unsigned long long max_suspend_time; + ktime_t collection_start; + ktime_t suspend_start_time; +}; + /* Calculate MAC buffer size for the firmware. 
It includes all overhead, * as it will go over the air, and need to be 8 byte aligned */ @@ -92,9 +118,13 @@ static inline u32 wil_mtu2macbuf(u32 mtu) #define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */ #define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000) #define WIL6210_SCAN_TO msecs_to_jiffies(10000) +#define WIL6210_DISCONNECT_TO_MS (2000) #define WIL6210_RX_HIGH_TRSH_INIT (0) #define WIL6210_RX_HIGH_TRSH_DEFAULT \ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3)) +#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see + * 802.11REVmc/D5.0, section 9.4.1.8) + */ /* Hardware definitions begin */ /* @@ -130,6 +160,11 @@ struct RGF_ICR { /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) + #define BIT_USER_OOB_MODE BIT(31) + #define BIT_USER_OOB_R2_MODE BIT(30) +#define RGF_USER_USAGE_8 (0x880020) + #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0) + #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1) #define RGF_USER_HW_MACHINE_STATE (0x8801dc) #define HW_MACHINE_BOOT_DONE (0x3fffffd) #define RGF_USER_USER_CPU_0 (0x8801e0) @@ -139,6 +174,10 @@ struct RGF_ICR { #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_BL (0x880A3C) /* Boot Loader */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ +#define RGF_USER_FW_CALIB_RESULT (0x880a90) /* b0-7:result + * b8-15:signature + */ + #define CALIB_RESULT_SIGNATURE (0x11) #define RGF_USER_CLKS_CTL_0 (0x880abc) #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */ @@ -156,6 +195,9 @@ struct RGF_ICR { #define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2) +#define RGF_DMA_PEDI_DIF (0x8812C4) /* struct RGF_PEDI_DIF */ + #define BIT_DMA_WR_CMD_ATTR_NO_SNOOP BIT(4) + #define BIT_DMA_WR_CMD_ATTR_RELAXED_ORDERING BIT(5) #define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0) #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */ @@ -165,6 +207,7 @@ struct RGF_ICR { #define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0) #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1) + #define BIT_DMA_EP_MISC_ICR_HALP BIT(27) #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */ /* Legacy interrupt moderation control (before Sparrow v2)*/ @@ -232,6 +275,7 @@ struct RGF_ICR { #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2) #define RGF_HP_CTRL (0x88265c) +#define RGF_PAL_UNIT_ICR (0x88266c) /* struct RGF_ICR */ #define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4) /* MAC timer, usec, for packet lifetime */ @@ -244,7 +288,12 @@ struct RGF_ICR { #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ - #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f) + #define JTAG_DEV_ID_SPARROW (0x2632072f) + +#define RGF_USER_REVISION_ID (0x88afe4) +#define RGF_USER_REVISION_ID_MASK (3) + #define REVISION_ID_SPARROW_B0 (0x0) + #define REVISION_ID_SPARROW_D0 (0x3) /* crash codes for FW/Ucode stored here */ #define RGF_FW_ASSERT_CODE (0x91f020) @@ -252,7 +301,8 @@ struct RGF_ICR { enum { HW_VER_UNKNOWN, - HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ + HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ + HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ }; /* popular locations */ @@ -265,16 +315,19 @@ enum { #define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1) #define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3) +#define WIL_DATA_COMPLETION_TO_MS 200 + /* 
Hardware definitions end */ struct fw_map { u32 from; /* linker address - from, inclusive */ u32 to; /* linker address - to, exclusive */ u32 host; /* PCI/Host address - BAR0 + 0x880000 */ const char *name; /* for debugfs */ + bool fw; /* true if FW mapping, false if UCODE mapping */ }; /* array size should be in sync with actual definition in the wmi.c */ -extern const struct fw_map fw_mapping[8]; +extern const struct fw_map fw_mapping[10]; /** * mk_cidxtid - construct @cidxtid field @@ -333,29 +386,11 @@ struct wil6210_mbox_hdr { /* max. value for wil6210_mbox_hdr.len */ #define MAX_MBOXITEM_SIZE (240) -/** - * struct wil6210_mbox_hdr_wmi - WMI header - * - * @mid: MAC ID - * 00 - default, created by FW - * 01..0f - WiFi ports, driver to create - * 10..fe - debug - * ff - broadcast - * @id: command/event ID - * @timestamp: FW fills for events, free-running msec timer - */ -struct wil6210_mbox_hdr_wmi { - u8 mid; - u8 reserved; - __le16 id; - __le32 timestamp; -} __packed; - struct pending_wmi_event { struct list_head list; struct { struct wil6210_mbox_hdr hdr; - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; u8 data[0]; } __packed event; }; @@ -410,6 +445,9 @@ enum { /* for wil6210_priv.status */ wil_status_irqen, /* FIXME: interrupts enabled - for debug */ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ wil_status_resetting, /* reset in progress */ + wil_status_suspending, /* suspend in progress */ + wil_status_suspended, /* suspend completed, device is suspended */ + wil_status_resuming, /* resume in progress */ wil_status_last /* keep last */ }; @@ -454,6 +492,33 @@ struct wil_tid_ampdu_rx { bool first_time; /* is it 1-st time this buffer used? */ }; +/** + * struct wil_tid_crypto_rx_single - TID crypto information (Rx). 
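Illustrative sketch (not part of the patch): the header above declares mk_cidxtid(), whose body is not shown in this hunk. The unpack side is visible later in this patch in wmi_delba_rx(), which takes CID from the low nibble and TID from the high nibble of cidxtid, so a pack/unpack pair consistent with that would look like the standalone C below; the nibble layout is inferred, not quoted.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* CID in the low nibble, TID in the high nibble of one byte */
	static uint8_t mk_cidxtid(uint8_t cid, uint8_t tid)
	{
		return (cid & 0xf) | ((tid & 0xf) << 4);
	}

	static void parse_cidxtid(uint8_t cidxtid, uint8_t *cid, uint8_t *tid)
	{
		*cid = cidxtid & 0xf;
		*tid = (cidxtid >> 4) & 0xf;
	}

	int main(void)
	{
		uint8_t cid, tid;

		parse_cidxtid(mk_cidxtid(3, 5), &cid, &tid);
		assert(cid == 3 && tid == 5);
		printf("cid %u tid %u\n", cid, tid);
		return 0;
	}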
+ * + * @pn: GCMP PN for the session + * @key_set: valid key present + */ +struct wil_tid_crypto_rx_single { + u8 pn[IEEE80211_GCMP_PN_LEN]; + bool key_set; +}; + +struct wil_tid_crypto_rx { + struct wil_tid_crypto_rx_single key_id[4]; +}; + +struct wil_p2p_info { + struct ieee80211_channel listen_chan; + u8 discovery_started; + u8 p2p_dev_started; + u64 cookie; + struct wireless_dev *pending_listen_wdev; + unsigned int listen_duration; + struct timer_list discovery_timer; /* listen/search duration */ + struct work_struct discovery_expired_work; /* listen/search expire */ + struct work_struct delayed_listen_work; /* listen after scan done */ +}; + enum wil_sta_status { wil_sta_unused = 0, wil_sta_conn_pending = 1, @@ -473,6 +538,7 @@ struct wil_net_stats { unsigned long rx_non_data_frame; unsigned long rx_short_frame; unsigned long rx_large_frame; + unsigned long rx_replay; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; }; @@ -494,6 +560,10 @@ struct wil_sta_info { spinlock_t tid_rx_lock; /* guarding tid_rx array */ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)]; unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; + struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM]; + struct wil_tid_crypto_rx group_crypto_rx; + u8 aid; /* 1-254; 0 if unknown/not reported */ + bool fst_link_loss; }; enum { @@ -506,24 +576,6 @@ enum { hw_capability_last }; -struct wil_back_rx { - struct list_head list; - /* request params, converted to CPU byte order - what we asked for */ - u8 cidxtid; - u8 dialog_token; - u16 ba_param_set; - u16 ba_timeout; - u16 ba_seq_ctrl; -}; - -struct wil_back_tx { - struct list_head list; - /* request params, converted to CPU byte order - what we asked for */ - u8 ringid; - u8 agg_wsize; - u16 agg_timeout; -}; - struct wil_probe_client_req { struct list_head list; u64 cookie; @@ -541,15 +593,54 @@ struct pmc_ctx { int descriptor_size; }; +struct wil_halp { + struct mutex lock; /* protect halp ref_cnt */ + unsigned int ref_cnt; + struct completion comp; +}; + +struct wil_blob_wrapper { + struct wil6210_priv *wil; + struct debugfs_blob_wrapper blob; +}; + +#define WIL_LED_MAX_ID (2) +#define WIL_LED_INVALID_ID (0xF) +#define WIL_LED_BLINK_ON_SLOW_MS (300) +#define WIL_LED_BLINK_OFF_SLOW_MS (300) +#define WIL_LED_BLINK_ON_MED_MS (200) +#define WIL_LED_BLINK_OFF_MED_MS (200) +#define WIL_LED_BLINK_ON_FAST_MS (100) +#define WIL_LED_BLINK_OFF_FAST_MS (100) +enum { + WIL_LED_TIME_SLOW = 0, + WIL_LED_TIME_MED, + WIL_LED_TIME_FAST, + WIL_LED_TIME_LAST, +}; + +struct blink_on_off_time { + u32 on_ms; + u32 off_ms; +}; + +extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST]; +extern u8 led_id; +extern u8 led_polarity; + struct wil6210_priv { struct pci_dev *pdev; + u32 bar_size; struct wireless_dev *wdev; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); - u32 fw_version; + u8 fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; + u8 chip_revision; const char *hw_name; + const char *wil_fw_name; DECLARE_BITMAP(hw_capabilities, hw_capability_last); + DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX); u8 n_mids; /* number of additional MIDs as reported by FW */ u32 recovery_count; /* num of FW recovery attempts in a short time */ u32 recovery_state; /* FW recovery state machine */ @@ -562,6 +653,8 @@ struct wil6210_priv { u16 channel; /* relevant in AP mode */ int sinfo_gen; u32 ap_isolate; /* no intra-BSS communication */ + struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ + int locally_generated_disc; /* 
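Illustrative sketch (not part of the patch): the new wil_tid_crypto_rx_single above keeps the last GCMP PN per key index plus a key_set flag, and wil_net_stats gains an rx_replay counter. The per-frame check itself is not in this hunk; the standalone C below only illustrates the monotonic-PN idea, and the assumption that the PN bytes are stored most-significant byte first is mine.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define GCMP_PN_LEN 6

	struct tid_crypto_rx_single {
		uint8_t pn[GCMP_PN_LEN];	/* last accepted PN, MSB first (assumed) */
		bool key_set;
	};

	/* Accept a frame only if its PN is strictly greater than the stored one;
	 * remember the new PN on acceptance. Returns true when the frame looks
	 * like a replay (or no key is installed yet).
	 */
	static bool gcmp_replay_check(struct tid_crypto_rx_single *cc,
				      const uint8_t new_pn[GCMP_PN_LEN])
	{
		if (!cc->key_set)
			return true;
		if (memcmp(new_pn, cc->pn, GCMP_PN_LEN) <= 0)
			return true;		/* not monotonically increasing */
		memcpy(cc->pn, new_pn, GCMP_PN_LEN);
		return false;
	}

	int main(void)
	{
		struct tid_crypto_rx_single cc = {
			.pn = {0, 0, 0, 0, 0, 5}, .key_set = true };
		const uint8_t same[GCMP_PN_LEN]  = {0, 0, 0, 0, 0, 5};
		const uint8_t newer[GCMP_PN_LEN] = {0, 0, 0, 0, 0, 6};

		printf("same PN is a replay: %d\n", gcmp_replay_check(&cc, same));
		printf("newer PN is a replay: %d\n", gcmp_replay_check(&cc, newer));
		return 0;
	}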
relevant in STA mode */ /* interrupt moderation */ u32 tx_max_burst_duration; u32 tx_interframe_timeout; @@ -581,12 +674,10 @@ struct wil6210_priv { struct workqueue_struct *wmi_wq; /* for deferred calls */ struct work_struct wmi_event_worker; struct workqueue_struct *wq_service; - struct work_struct connect_worker; struct work_struct disconnect_worker; struct work_struct fw_error_worker; /* for FW error recovery */ struct timer_list connect_timer; struct timer_list scan_timer; /* detect scan timeout */ - int pending_connect_cid; struct list_head pending_wmi_ev; /* * protect pending_wmi_ev @@ -594,26 +685,24 @@ struct wil6210_priv { * - consumed in thread by wmi_event_worker */ spinlock_t wmi_ev_lock; + spinlock_t net_queue_lock; /* guarding stop/wake netif queue */ + int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ struct napi_struct napi_rx; struct napi_struct napi_tx; - /* BACK */ - struct list_head back_rx_pending; - struct mutex back_rx_mutex; /* protect @back_rx_pending */ - struct work_struct back_rx_worker; - struct list_head back_tx_pending; - struct mutex back_tx_mutex; /* protect @back_tx_pending */ - struct work_struct back_tx_worker; /* keep alive */ struct list_head probe_client_pending; struct mutex probe_client_mutex; /* protect @probe_client_pending */ struct work_struct probe_client_worker; /* DMA related */ struct vring vring_rx; + unsigned int rx_buf_len; struct vring vring_tx[WIL6210_MAX_TX_RINGS]; struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; + u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ + bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ /* scan */ struct cfg80211_scan_request *scan_request; @@ -622,12 +711,53 @@ struct wil6210_priv { atomic_t isr_count_rx, isr_count_tx; /* debugfs */ struct dentry *debug; - struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; + struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; + u8 discovery_mode; + u8 abft_len; + u8 wakeup_trigger; + struct wil_suspend_stats suspend_stats; void *platform_handle; struct wil_platform_ops platform_ops; + bool keep_radio_on_during_sleep; struct pmc_ctx pmc; + + bool pbss; + + struct wil_p2p_info p2p; + + /* P2P_DEVICE vif */ + struct wireless_dev *p2p_wdev; + struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */ + struct wireless_dev *radio_wdev; + + /* High Access Latency Policy voting */ + struct wil_halp halp; + + enum wmi_ps_profile_type ps_profile; + + struct wil_ftm_priv ftm; + bool tt_data_set; + struct wmi_tt_data tt_data; + struct { + bool enabled; + short omni; + short direct; + } snr_thresh; + + int fw_calib_result; + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notify; +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + + bool suspend_resp_rcvd; + bool suspend_resp_comp; + u32 bus_request_kbps; + u32 bus_request_kbps_pre_suspend; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -641,11 +771,13 @@ struct wil6210_priv { __printf(2, 3) void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); __printf(2, 3) -void wil_err(struct wil6210_priv *wil, const char *fmt, ...); +void __wil_err(struct wil6210_priv *wil, const char *fmt, ...); +__printf(2, 3) +void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...); __printf(2, 3) -void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...); +void 
__wil_info(struct wil6210_priv *wil, const char *fmt, ...); __printf(2, 3) -void wil_info(struct wil6210_priv *wil, const char *fmt, ...); +void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg(wil, fmt, arg...) do { \ netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ wil_dbg_trace(wil, fmt, ##arg); \ @@ -656,6 +788,10 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg) #define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg) +#define wil_err(wil, fmt, arg...) __wil_err(wil, "%s: " fmt, __func__, ##arg) +#define wil_info(wil, fmt, arg...) __wil_info(wil, "%s: " fmt, __func__, ##arg) +#define wil_err_ratelimited(wil, fmt, arg...) \ + __wil_err_ratelimited(wil, "%s: " fmt, __func__, ##arg) /* target operations */ /* register read */ @@ -695,6 +831,12 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) print_hex_dump_debug("DBG[ WMI]" prefix_str,\ prefix_type, rowsize, \ groupsize, buf, len, ascii) + +#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump_debug("DBG[MISC]" prefix_str,\ + prefix_type, rowsize, \ + groupsize, buf, len, ascii) #else /* defined(CONFIG_DYNAMIC_DEBUG) */ static inline void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize, @@ -707,6 +849,12 @@ void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } + +static inline +void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize, + int groupsize, const void *buf, size_t len, bool ascii) +{ +} #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, @@ -720,17 +868,22 @@ int wil_if_add(struct wil6210_priv *wil); void wil_if_remove(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil); +int wil_ps_update(struct wil6210_priv *wil, + enum wmi_ps_profile_type ps_profile); int wil_reset(struct wil6210_priv *wil, bool no_fw); void wil_fw_error_recovery(struct wil6210_priv *wil); void wil_set_recovery_state(struct wil6210_priv *wil, int state); +bool wil_is_recovery_blocked(struct wil6210_priv *wil); int wil_up(struct wil6210_priv *wil); int __wil_up(struct wil6210_priv *wil); int wil_down(struct wil6210_priv *wil); int __wil_down(struct wil6210_priv *wil); +void wil_refresh_fw_capabilities(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); void wil_set_ethtoolops(struct net_device *ndev); +void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size); void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, @@ -753,23 +906,26 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int 
wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason); +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool full_disconnect, bool del_sta); int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, u16 agg_wsize, u16 timeout); +int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, + enum wmi_ps_profile_type ps_profile); +int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short); +int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short); +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid); +int wmi_set_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data); +int wmi_get_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data); int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); -void wil_back_rx_worker(struct work_struct *work); -void wil_back_rx_flush(struct wil6210_priv *wil); int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); -void wil_back_tx_worker(struct work_struct *work); -void wil_back_tx_flush(struct wil6210_priv *wil); void wil6210_clear_irq(struct wil6210_priv *wil); int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); @@ -779,22 +935,59 @@ void wil_unmask_irq(struct wil6210_priv *wil); void wil_configure_interrupt_moderation(struct wil6210_priv *wil); void wil_disable_irq(struct wil6210_priv *wil); void wil_enable_irq(struct wil6210_priv *wil); +void wil6210_mask_halp(struct wil6210_priv *wil); + +/* P2P */ +bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request); +void wil_p2p_discovery_timer_fn(ulong x); +int wil_p2p_search(struct wil6210_priv *wil, + struct cfg80211_scan_request *request); +int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, + unsigned int duration, struct ieee80211_channel *chan, + u64 *cookie); +u8 wil_p2p_stop_discovery(struct wil6210_priv *wil); +int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie); +void wil_p2p_listen_expired(struct work_struct *work); +void wil_p2p_search_expired(struct work_struct *work); +void wil_p2p_stop_radio_operations(struct wil6210_priv *wil); +void wil_p2p_delayed_listen_work(struct work_struct *work); + +/* WMI for P2P */ +int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi); +int wmi_start_listen(struct wil6210_priv *wil); +int wmi_start_search(struct wil6210_priv *wil); +int wmi_stop_discovery(struct wil6210_priv *wil); + int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); +#if defined(CONFIG_WIL6210_DEBUGFS) int wil6210_debugfs_init(struct wil6210_priv *wil); void wil6210_debugfs_remove(struct wil6210_priv *wil); +#else +static inline int wil6210_debugfs_init(struct wil6210_priv *wil) { return 0; } +static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {} +#endif +int wil6210_sysfs_init(struct wil6210_priv *wil); +void wil6210_sysfs_remove(struct wil6210_priv *wil); int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct station_info *sinfo); struct wireless_dev *wil_cfg80211_init(struct device *dev); void wil_wdev_free(struct wil6210_priv *wil); +void wil_p2p_wdev_free(struct wil6210_priv *wil); int 
wmi_set_mac_address(struct wil6210_priv *wil, void *addr); int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid); + u8 chan, u8 hidden_ssid, u8 is_go); int wmi_pcp_stop(struct wil6210_priv *wil); +int wmi_led_cfg(struct wil6210_priv *wil, bool enable); +int wmi_aoa_meas(struct wil6210_priv *wil, const void *mac_addr, u8 chan, + u8 type); +int wmi_abort_scan(struct wil6210_priv *wil); +void wil_abort_scan(struct wil6210_priv *wil, bool sync); +void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps); void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event); void wil_probe_client_flush(struct wil6210_priv *wil); @@ -807,10 +1000,15 @@ void wil_rx_fini(struct wil6210_priv *wil); int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, int cid, int tid); void wil_vring_fini_tx(struct wil6210_priv *wil, int id); +int wil_tx_init(struct wil6210_priv *wil, int cid); int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size); int wil_bcast_init(struct wil6210_priv *wil); void wil_bcast_fini(struct wil6210_priv *wil); +void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring, + bool should_stop); +void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring, + bool check_stop); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); int wil_tx_complete(struct wil6210_priv *wil, int ringid); void wil6210_unmask_irq_tx(struct wil6210_priv *wil); @@ -822,12 +1020,44 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); -int wil_request_firmware(struct wil6210_priv *wil, const char *name); +int wil_request_firmware(struct wil6210_priv *wil, const char *name, + bool load); +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name); int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_resume(struct wil6210_priv *wil, bool is_runtime); +bool wil_is_wmi_idle(struct wil6210_priv *wil); +int wmi_resume(struct wil6210_priv *wil); +int wmi_suspend(struct wil6210_priv *wil); +bool wil_is_tx_idle(struct wil6210_priv *wil); +bool wil_is_rx_idle(struct wil6210_priv *wil); +int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size); void wil_fw_core_dump(struct wil6210_priv *wil); +void wil_halp_vote(struct wil6210_priv *wil); +void wil_halp_unvote(struct wil6210_priv *wil); +void wil6210_set_halp(struct wil6210_priv *wil); +void wil6210_clear_halp(struct wil6210_priv *wil); + +void wil_ftm_init(struct wil6210_priv *wil); +void wil_ftm_deinit(struct wil6210_priv *wil); +void wil_ftm_stop_operations(struct wil6210_priv *wil); +void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil, + struct wil_aoa_meas_result *result); + +void wil_ftm_evt_session_ended(struct wil6210_priv *wil, + struct wmi_tof_session_end_event *evt); +void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil, + struct wmi_tof_ftm_per_dest_res_event *evt); +void wil_aoa_evt_meas(struct wil6210_priv *wil, + struct wmi_aoa_meas_event *evt, + int len); +/* link loss */ +int wmi_link_maintain_cfg_write(struct wil6210_priv *wil, + const u8 *addr, + bool fst_link_loss); + +int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c 
b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index 7e70934990ae..e53cf0cf7031 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -36,6 +36,9 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; + if (!map->fw) + continue; + if (map->host < host_min) host_min = map->host; @@ -51,8 +54,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, return 0; } -static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, - u32 size) +int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) { int i; const struct fw_map *map; @@ -60,13 +62,13 @@ static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 host_min, dump_size, offset, len; if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) { - wil_err(wil, "%s: fail to obtain crash dump size\n", __func__); + wil_err(wil, "fail to obtain crash dump size\n"); return -EINVAL; } if (dump_size > size) { - wil_err(wil, "%s: not enough space for dump. Need %d have %d\n", - __func__, dump_size, size); + wil_err(wil, "not enough space for dump. Need %d have %d\n", + dump_size, size); return -EINVAL; } @@ -74,12 +76,16 @@ static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; + if (!map->fw) + continue; + data = (void * __force)wil->csr + HOSTADDR(map->host); len = map->to - map->from; offset = map->host - host_min; - wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n", - __func__, fw_mapping[i].name, len, offset); + wil_dbg_misc(wil, + "fw_copy_crash_dump: - dump %s, size %d, offset %d\n", + fw_mapping[i].name, len, offset); wil_memcpy_fromio_32((void * __force)(dest + offset), (const void __iomem * __force)data, len); @@ -94,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil) u32 fw_dump_size; if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) { - wil_err(wil, "%s: fail to get fw dump size\n", __func__); + wil_err(wil, "fail to get fw dump size\n"); return; } @@ -110,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil) * after 5 min */ dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL); - wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__, - fw_dump_size); + wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size); } diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c index 2e831bf20117..263740a9b980 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.c +++ b/drivers/net/wireless/ath/wil6210/wil_platform.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. 
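Illustrative sketch (not part of the patch): with the new fw flag in struct fw_map, wil_fw_get_crash_dump_bounds() and wil_fw_copy_crash_dump() above skip ucode regions, and each firmware region is copied at offset map->host - host_min with length map->to - map->from. The hunk shows the host_min side of the bounds pass; the matching host_max pass is assumed symmetric here. The table below is abbreviated; the full fw_mapping[] appears in wmi.c later in this patch.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct fw_map {
		uint32_t from;		/* linker address, inclusive */
		uint32_t to;		/* linker address, exclusive */
		uint32_t host;		/* PCI/host address */
		const char *name;
		bool fw;		/* true: FW region, false: ucode region */
	};

	/* abbreviated copy of fw_mapping[] for illustration only */
	static const struct fw_map fw_mapping[] = {
		{0x000000, 0x040000, 0x8c0000, "fw_code", true},
		{0x800000, 0x808000, 0x900000, "fw_data", true},
		{0x000000, 0x020000, 0x920000, "uc_code", false}, /* not dumped */
	};

	int main(void)
	{
		uint32_t host_min = UINT32_MAX, host_max = 0;
		size_t i, n = sizeof(fw_mapping) / sizeof(fw_mapping[0]);

		for (i = 0; i < n; i++) {
			const struct fw_map *map = &fw_mapping[i];
			uint32_t end = map->host + (map->to - map->from);

			if (!map->fw)
				continue;	/* ucode areas are excluded */
			if (map->host < host_min)
				host_min = map->host;
			if (end > host_max)
				host_max = end;
		}

		printf("dump size %u bytes\n", (unsigned)(host_max - host_min));
		for (i = 0; i < n; i++) {
			const struct fw_map *map = &fw_mapping[i];

			if (!map->fw)
				continue;
			printf("%-8s len %6u offset %6u\n", map->name,
			       (unsigned)(map->to - map->from),
			       (unsigned)(map->host - host_min));
		}
		return 0;
	}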
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -16,14 +16,16 @@ #include <linux/device.h> #include "wil_platform.h" +#include "msm_11ad.h" int __init wil_platform_modinit(void) { - return 0; + return msm_11ad_modinit(); } void wil_platform_modexit(void) { + msm_11ad_modexit(); } /** @@ -33,9 +35,10 @@ void wil_platform_modexit(void) * It returns a handle which is used with the rest of the API * */ -void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops) +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops, + const struct wil_platform_rops *rops, void *wil_handle) { - void *handle = ops; /* to return some non-NULL for 'void' impl. */ + void *handle; if (!ops) { dev_err(dev, @@ -43,7 +46,7 @@ void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops) return NULL; } - /* platform specific init functions should be called here */ + handle = msm_11ad_dev_init(dev, ops, rops, wil_handle); return handle; } diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index d7fa19b7886d..621005b0c5ca 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,17 +19,58 @@ struct device; +enum wil_platform_event { + WIL_PLATFORM_EVT_FW_CRASH = 0, + WIL_PLATFORM_EVT_PRE_RESET = 1, + WIL_PLATFORM_EVT_FW_RDY = 2, + WIL_PLATFORM_EVT_PRE_SUSPEND = 3, + WIL_PLATFORM_EVT_POST_SUSPEND = 4, +}; + /** - * struct wil_platform_ops - wil platform module callbacks + * struct wil_platform_ops - wil platform module calls from this + * driver to platform driver */ struct wil_platform_ops { int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */); - int (*suspend)(void *handle); - int (*resume)(void *handle); + int (*suspend)(void *handle, bool keep_device_power); + int (*resume)(void *handle, bool device_powered_on); void (*uninit)(void *handle); + int (*notify)(void *handle, enum wil_platform_event evt); + bool (*keep_radio_on_during_sleep)(void *handle); }; -void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops); +/** + * struct wil_platform_rops - wil platform module callbacks from + * platform driver to this driver + * @ramdump: store a ramdump from the wil firmware. The platform + * driver may add additional data to the ramdump to + * generate the final crash dump. + * @fw_recovery: start a firmware recovery process. Called as + * part of a crash recovery process which may include other + * related platform subsystems. + */ +struct wil_platform_rops { + int (*ramdump)(void *wil_handle, void *buf, uint32_t size); + int (*fw_recovery)(void *wil_handle); +}; + +/** + * wil_platform_init - initialize the platform driver + * + * @dev - pointer to the wil6210 device + * @ops - structure with platform driver operations. Platform + * driver will fill this structure with function pointers. + * @rops - structure with callbacks from platform driver to + * this driver. The platform driver copies the structure to + * its own storage. Can be NULL if this driver does not + * support crash recovery. 
+ * @wil_handle - context for this driver that will be passed + * when platform driver invokes one of the callbacks in + * rops. May be NULL if rops is NULL. + */ +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops, + const struct wil_platform_rops *rops, void *wil_handle); int __init wil_platform_modinit(void); void wil_platform_modexit(void); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 7af8479acb98..22f2be38d1c5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,16 +22,24 @@ #include "txrx.h" #include "wmi.h" #include "trace.h" +#include "ftm.h" static uint max_assoc_sta = WIL6210_MAX_CID; -module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); +module_param(max_assoc_sta, uint, 0644); MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); int agg_wsize; /* = 0; */ -module_param(agg_wsize, int, S_IRUGO | S_IWUSR); +module_param(agg_wsize, int, 0644); MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;" " 0 - use default; < 0 - don't auto-establish"); +u8 led_id = WIL_LED_INVALID_ID; +module_param(led_id, byte, 0444); +MODULE_PARM_DESC(led_id, + " 60G device led enablement. Set the led ID (0-2) to enable"); + +#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200 + /** * WMI event receiving - theory of operations * @@ -79,23 +87,41 @@ MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;" * array size should be in sync with the declaration in the wil6210.h */ const struct fw_map fw_mapping[] = { - {0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM 256k */ - {0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM 32k */ - {0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */ - {0x880000, 0x88a000, 0x880000, "rgf"}, /* various RGF 40k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf 4k */ - {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext"}, /* mac_ext_rgf 512b */ - {0x8c0000, 0x949000, 0x8c0000, "upper"}, /* upper area 548k */ - /* - * 920000..930000 ucode code RAM - * 930000..932000 ucode data RAM - * 932000..949000 back-door debug data + /* FW code RAM 256k */ + {0x000000, 0x040000, 0x8c0000, "fw_code", true}, + /* FW data RAM 32k */ + {0x800000, 0x808000, 0x900000, "fw_data", true}, + /* periph data 128k */ + {0x840000, 0x860000, 0x908000, "fw_peri", true}, + /* various RGF 40k */ + {0x880000, 0x88a000, 0x880000, "rgf", true}, + /* AGC table 4k */ + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + /* Pcie_ext_rgf 4k */ + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + /* mac_ext_rgf 512b */ + {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true}, + /* upper area 548k */ + {0x8c0000, 0x949000, 0x8c0000, "upper", true}, + /* UCODE areas - accessible by debugfs blobs but not by + * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! 
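Illustrative sketch (not part of the patch): wmi_buffer_block(), added a little further below in wmi.c, validates a WMI buffer against the real BAR size (wil->bar_size) instead of the old fixed constant, and additionally rejects blocks whose offset plus size overflows or runs past the BAR. The HOSTADDR() translation is omitted here, so this standalone C takes the already-translated BAR offset; the alignment check on the offset mirrors the "DWORD aligned" rule stated in the function's comment.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Does a block at BAR offset 'off' of 'size' bytes fit a BAR of
	 * 'bar_size' bytes? Mirrors the checks in wmi_buffer_block().
	 */
	static bool wmi_block_fits_bar(uint32_t off, uint32_t size, uint32_t bar_size)
	{
		if (off & 3)
			return false;		/* buffers must be dword aligned */
		if (off > bar_size - 4)
			return false;
		if (size && (off + size > bar_size || off + size < off))
			return false;		/* second test catches u32 wraparound */
		return true;
	}

	int main(void)
	{
		uint32_t bar = 2 * 1024 * 1024;	/* example 2 MB BAR, illustrative */

		printf("%d\n", wmi_block_fits_bar(0x1000, 256, bar));		/* 1: fits */
		printf("%d\n", wmi_block_fits_bar(bar - 8, 256, bar));		/* 0: past end */
		printf("%d\n", wmi_block_fits_bar(0x1000, 0xffffffffu, bar));	/* 0: wraps */
		return 0;
	}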
*/ + /* ucode code RAM 128k */ + {0x000000, 0x020000, 0x920000, "uc_code", false}, + /* ucode data RAM 16k */ + {0x800000, 0x804000, 0x940000, "uc_data", false}, }; +struct blink_on_off_time led_blink_time[] = { + {WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS}, + {WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS}, + {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS}, +}; + +u8 led_polarity = LED_POLARITY_LOW_ACTIVE; + /** - * return AHB address for given firmware/ucode internal (linker) address + * return AHB address for given firmware internal (linker) address * @x - internal address * If address have no valid AHB mapping, return 0 */ @@ -104,7 +130,8 @@ static u32 wmi_addr_remap(u32 x) uint i; for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { - if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)) + if (fw_mapping[i].fw && + ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))) return x + fw_mapping[i].host - fw_mapping[i].from; } @@ -114,13 +141,15 @@ static u32 wmi_addr_remap(u32 x) /** * Check address validity for WMI buffer; remap if needed * @ptr - internal (linker) fw/ucode address + * @size - if non zero, validate the block does not + * exceed the device memory (bar) * * Valid buffer should be DWORD aligned * * return address for accessing buffer from the host; * if buffer is not valid, return NULL. */ -void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) +void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size) { u32 off; u32 ptr = le32_to_cpu(ptr_); @@ -133,12 +162,19 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) return NULL; off = HOSTADDR(ptr); - if (off > WIL6210_MEM_SIZE - 4) + if (off > wil->bar_size - 4) + return NULL; + if (size && ((off + size > wil->bar_size) || (off + size < off))) return NULL; return wil->csr + off; } +void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) +{ + return wmi_buffer_block(wil, ptr_, 0); +} + /** * Check address validity */ @@ -153,7 +189,7 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr) return NULL; off = HOSTADDR(ptr); - if (off > WIL6210_MEM_SIZE - 4) + if (off > wil->bar_size - 4) return NULL; return wil->csr + off; @@ -176,7 +212,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) { struct { struct wil6210_mbox_hdr hdr; - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; } __packed cmd = { .hdr = { .type = WIL_MBOX_HDR_TYPE_WMI, @@ -185,7 +221,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) }, .wmi = { .mid = 0, - .id = cpu_to_le16(cmdid), + .command_id = cpu_to_le16(cmdid), }, }; struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; @@ -194,8 +230,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) void __iomem *dst; void __iomem *head = wmi_addr(wil, r->head); uint retry; + int rc = 0; - if (sizeof(cmd) + len > r->entry_size) { + if (len > r->entry_size - sizeof(cmd)) { wil_err(wil, "WMI size too large: %d bytes, max is %d\n", (int)(sizeof(cmd) + len), r->entry_size); return -ERANGE; @@ -208,10 +245,23 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) return -EAGAIN; } + /* Allow sending only suspend / resume commands during susepnd flow */ + if ((test_bit(wil_status_suspending, wil->status) || + test_bit(wil_status_suspended, wil->status) || + test_bit(wil_status_resuming, wil->status)) && + ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) && + (cmdid != WMI_TRAFFIC_RESUME_CMDID))) { + wil_err(wil, "WMI: reject 
send_command during suspend\n"); + return -EINVAL; + } + if (!head) { wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head); return -EINVAL; } + + wil_halp_vote(wil); + /* read Tx head till it is not busy */ for (retry = 5; retry > 0; retry--) { wil_memcpy_fromio_32(&d_head, head, sizeof(d_head)); @@ -221,13 +271,19 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } if (d_head.sync != 0) { wil_err(wil, "WMI head busy\n"); - return -EBUSY; + rc = -EBUSY; + goto out; } /* next head */ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { + if (!test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "WMI: cannot send command while FW not ready\n"); + rc = -EAGAIN; + goto out; + } r->tail = wil_r(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.tail)); if (next_head != r->tail) @@ -236,13 +292,15 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } if (next_head == r->tail) { wil_err(wil, "WMI ring full\n"); - return -EBUSY; + rc = -EBUSY; + goto out; } dst = wmi_buffer(wil, d_head.addr); if (!dst) { wil_err(wil, "invalid WMI buffer: 0x%08x\n", le32_to_cpu(d_head.addr)); - return -EINVAL; + rc = -EAGAIN; + goto out; } cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); /* set command */ @@ -265,7 +323,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS), SW_INT_MBOX); - return 0; +out: + wil_halp_unvote(wil); + return rc; } int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) @@ -285,15 +345,20 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) struct wireless_dev *wdev = wil->wdev; struct wmi_ready_event *evt = d; - wil->fw_version = le32_to_cpu(evt->sw_version); wil->n_mids = evt->numof_additional_mids; - wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version, + wil_info(wil, "FW ver. 
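Illustrative sketch (not part of the patch): __wmi_send() above advances the mailbox head with modular arithmetic relative to the ring base and treats the ring as full when the advanced head would meet the firmware's tail. The field names below are illustrative (the driver advances by sizeof(d_head), the per-entry descriptor); the values in main() are made up.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct mbox_ring {
		uint32_t base;		/* device address of the first descriptor */
		uint32_t size;		/* total ring size in bytes */
		uint32_t head;		/* producer (driver) position */
		uint32_t tail;		/* consumer (firmware) position */
		uint32_t desc_size;	/* size of one on-ring descriptor */
	};

	static uint32_t ring_next(const struct mbox_ring *r, uint32_t pos)
	{
		return r->base + (pos - r->base + r->desc_size) % r->size;
	}

	static bool ring_full(const struct mbox_ring *r)
	{
		return ring_next(r, r->head) == r->tail;
	}

	int main(void)
	{
		/* 4 descriptors of 16 bytes starting at device address 0x1000 */
		struct mbox_ring r = { .base = 0x1000, .size = 64, .head = 0x1030,
				       .tail = 0x1000, .desc_size = 16 };

		printf("next head 0x%x, full %d\n", ring_next(&r, r.head), ring_full(&r));
		return 0;
	}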
%s(SW %d); MAC %pM; %d MID's\n", + wil->fw_version, le32_to_cpu(evt->sw_version), evt->mac, wil->n_mids); /* ignore MAC address, we already have it from the boot loader */ - snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), - "%d", wil->fw_version); + strlcpy(wdev->wiphy->fw_version, wil->fw_version, + sizeof(wdev->wiphy->fw_version)); + if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) { + wil_dbg_wmi(wil, "rfc calibration result %d\n", + evt->rfc_read_calib_result); + wil->fw_calib_result = evt->rfc_read_calib_result; + } wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, wil->status); /* let the reset sequence continue */ @@ -313,7 +378,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) s32 signal; __le16 fc; u32 d_len; - u16 d_status; + s16 snr; if (flen < 0) { wil_err(wil, "MGMT Rx: short event, len %d\n", len); @@ -329,16 +394,19 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) } ch_no = data->info.channel + 1; - freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); + freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ); channel = ieee80211_get_channel(wiphy, freq); - signal = data->info.sqi; - d_status = le16_to_cpu(data->info.status); + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + signal = 100 * data->info.rssi; + else + signal = data->info.sqi; + snr = le16_to_cpu(data->info.snr); /* 1/4 dB units */ fc = rx_mgmt_frame->frame_control; - wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", - data->info.channel, data->info.mcs, data->info.snr, + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n", + data->info.channel, data->info.mcs, data->info.rssi, data->info.sqi); - wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, + wil_dbg_wmi(wil, "snr %ddB len %d fc 0x%04x\n", snr / 4, d_len, le16_to_cpu(fc)); wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); @@ -364,6 +432,13 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf, ie_len, true); + wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); + + if (wil->snr_thresh.enabled && snr < wil->snr_thresh.omni) { + wil_dbg_wmi(wil, "snr below threshold. 
dropping\n"); + return; + } + bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, d_len, signal, GFP_KERNEL); if (bss) { @@ -374,8 +449,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); } } else { - cfg80211_rx_mgmt(wil->wdev, freq, signal, + mutex_lock(&wil->p2p_wdev_mutex); + cfg80211_rx_mgmt(wil->radio_wdev, freq, signal, (void *)rx_mgmt_frame, d_len, 0); + mutex_unlock(&wil->p2p_wdev_mutex); } } @@ -393,20 +470,30 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, void *d, int len) { + mutex_lock(&wil->p2p_wdev_mutex); if (wil->scan_request) { struct wmi_scan_complete_event *data = d; - bool aborted = (data->status != WMI_SCAN_SUCCESS); + int status = le32_to_cpu(data->status); + bool aborted = (status != WMI_SCAN_SUCCESS) && + (status != WMI_SCAN_ABORT_REJECTED); - wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); + wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status); wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", wil->scan_request, aborted); del_timer_sync(&wil->scan_timer); cfg80211_scan_done(wil->scan_request, aborted); + wil->radio_wdev = wil->wdev; wil->scan_request = NULL; + wake_up_interruptible(&wil->wq); + if (wil->p2p.pending_listen_wdev) { + wil_dbg_misc(wil, "Scheduling delayed listen\n"); + schedule_work(&wil->p2p.delayed_listen_work); + } } else { wil_err(wil, "SCAN_COMPLETE while not scanning\n"); } + mutex_unlock(&wil->p2p_wdev_mutex); } static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) @@ -422,6 +509,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) const size_t assoc_req_ie_offset = sizeof(u16) * 2; /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ const size_t assoc_resp_ie_offset = sizeof(u16) * 3; + int rc; if (len < sizeof(*evt)) { wil_err(wil, "Connect event too short : %d bytes\n", len); @@ -441,8 +529,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } ch = evt->channel + 1; - wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", - evt->bssid, ch, evt->cid); + wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n", + evt->bssid, ch, evt->cid, evt->aid); wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); @@ -464,20 +552,80 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) assoc_resp_ielen = 0; } + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel connect event, CID %d\n", + evt->cid); + /* no need for cleanup, wil_reset will do that */ + return; + } + + mutex_lock(&wil->mutex); + if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (!test_bit(wil_status_fwconnecting, wil->status)) { wil_err(wil, "Not in connecting state\n"); + mutex_unlock(&wil->mutex); return; } del_timer_sync(&wil->connect_timer); - cfg80211_connect_result(ndev, evt->bssid, - assoc_req_ie, assoc_req_ielen, - assoc_resp_ie, assoc_resp_ielen, - WLAN_STATUS_SUCCESS, GFP_KERNEL); + } else if ((wdev->iftype == NL80211_IFTYPE_AP) || + (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (wil->sta[evt->cid].status != wil_sta_unused) { + wil_err(wil, "AP: Invalid status %d for CID %d\n", + wil->sta[evt->cid].status, evt->cid); + mutex_unlock(&wil->mutex); + 
return; + } + } + + /* FIXME FW can transmit only ucast frames to peer */ + /* FIXME real ring_id instead of hard coded 0 */ + ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); + wil->sta[evt->cid].status = wil_sta_conn_pending; + + rc = wil_tx_init(wil, evt->cid); + if (rc) { + wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", + evt->cid, rc); + wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, + WLAN_REASON_UNSPECIFIED, false, false); + } else { + wil_info(wil, "successful connection to CID %d\n", evt->cid); + } + if ((wdev->iftype == NL80211_IFTYPE_STATION) || + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { + if (rc) { + netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); + wil_err(wil, "cfg80211_connect_result with failure\n"); + cfg80211_connect_result(ndev, evt->bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + goto out; + } else { + struct wiphy *wiphy = wil_to_wiphy(wil); + + cfg80211_ref_bss(wiphy, wil->bss); + cfg80211_connect_bss(ndev, evt->bssid, wil->bss, + assoc_req_ie, assoc_req_ielen, + assoc_resp_ie, assoc_resp_ielen, + WLAN_STATUS_SUCCESS, GFP_KERNEL, + NL80211_TIMEOUT_UNSPECIFIED); + } + wil->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (rc) { + if (disable_ap_sme) + /* notify new_sta has failed */ + cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL); + goto out; + } + memset(&sinfo, 0, sizeof(sinfo)); sinfo.generation = wil->sinfo_gen++; @@ -488,17 +636,22 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); + } else { + wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype, + evt->cid); + goto out; } - clear_bit(wil_status_fwconnecting, wil->status); - set_bit(wil_status_fwconnected, wil->status); - /* FIXME FW can transmit only ucast frames to peer */ - /* FIXME real ring_id instead of hard coded 0 */ - ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); - wil->sta[evt->cid].status = wil_sta_conn_pending; + wil->sta[evt->cid].status = wil_sta_connected; + wil->sta[evt->cid].aid = evt->aid; + set_bit(wil_status_fwconnected, wil->status); + wil_update_net_queues_bh(wil, NULL, false); - wil->pending_connect_cid = evt->cid; - queue_work(wil->wq_service, &wil->connect_worker); +out: + if (rc) + wil->sta[evt->cid].status = wil_sta_unused; + clear_bit(wil_status_fwconnecting, wil->status); + mutex_unlock(&wil->mutex); } static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, @@ -507,11 +660,18 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, struct wmi_disconnect_event *evt = d; u16 reason_code = le16_to_cpu(evt->protocol_reason_status); - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", - evt->bssid, reason_code, evt->disconnect_reason); + wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + evt->bssid, reason_code, evt->disconnect_reason); wil->sinfo_gen++; + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel disconnect event\n"); + /* no need for cleanup, wil_reset will do that */ + return; + } + mutex_lock(&wil->mutex); wil6210_disconnect(wil, evt->bssid, reason_code, true); mutex_unlock(&wil->mutex); @@ -575,6 +735,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_vring_en_event *evt = d; u8 vri = evt->vring_index; + struct wireless_dev *wdev = 
wil_to_wdev(wil); wil_dbg_wmi(wil, "Enable vring %d\n", vri); @@ -582,7 +743,12 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "Enable for invalid vring %d\n", vri); return; } - wil->vring_tx_data[vri].dot1x_open = true; + + if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme) + /* in AP mode with disable_ap_sme, this is done by + * wil_cfg80211_change_station() + */ + wil->vring_tx_data[vri].dot1x_open = true; if (vri == wil->bcast_vring) /* no BA for bcast */ return; if (agg_wsize >= 0) @@ -592,7 +758,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, int len) { - struct wmi_vring_ba_status_event *evt = d; + struct wmi_ba_status_event *evt = d; struct vring_tx_data *txdata; wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n", @@ -678,6 +844,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) spin_unlock_bh(&sta->tid_rx_lock); } +static void wmi_evt_aoa_meas(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_aoa_meas_event *evt = d; + + wil_aoa_evt_meas(wil, evt, len); +} + +static void wmi_evt_ftm_session_ended(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_tof_session_end_event *evt = d; + + wil_ftm_evt_session_ended(wil, evt); +} + +static void wmi_evt_per_dest_res(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_tof_ftm_per_dest_res_event *evt = d; + + wil_ftm_evt_per_dest_res(wil, evt); +} + /** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" @@ -705,6 +895,13 @@ static const struct { {WMI_DELBA_EVENTID, wmi_evt_delba}, {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, + {WMI_AOA_MEAS_EVENTID, wmi_evt_aoa_meas}, + {WMI_TOF_SESSION_END_EVENTID, wmi_evt_ftm_session_ended}, + {WMI_TOF_GET_CAPABILITIES_EVENTID, wmi_evt_ignore}, + {WMI_TOF_SET_LCR_EVENTID, wmi_evt_ignore}, + {WMI_TOF_SET_LCI_EVENTID, wmi_evt_ignore}, + {WMI_TOF_FTM_PER_DEST_RES_EVENTID, wmi_evt_per_dest_res}, + {WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore}, }; /* @@ -723,15 +920,22 @@ void wmi_recv_cmd(struct wil6210_priv *wil) void __iomem *src; ulong flags; unsigned n; + unsigned int num_immed_reply = 0; if (!test_bit(wil_status_mbox_ready, wil->status)) { wil_err(wil, "Reset in progress. Cannot handle WMI event\n"); return; } + if (test_bit(wil_status_suspended, wil->status)) { + wil_err(wil, "suspended. 
cannot handle WMI event\n"); + return; + } + for (n = 0;; n++) { u16 len; bool q; + bool immed_reply = false; r->head = wil_r(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, rx.head)); @@ -776,10 +980,33 @@ void wmi_recv_cmd(struct wil6210_priv *wil) offsetof(struct wil6210_mbox_ring_desc, sync), 0); /* indicate */ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && - (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; - u16 id = le16_to_cpu(wmi->id); - u32 tstamp = le32_to_cpu(wmi->timestamp); + (len >= sizeof(struct wmi_cmd_hdr))) { + struct wmi_cmd_hdr *wmi = &evt->event.wmi; + u16 id = le16_to_cpu(wmi->command_id); + u32 tstamp = le32_to_cpu(wmi->fw_timestamp); + if (test_bit(wil_status_resuming, wil->status)) { + if (id == WMI_TRAFFIC_RESUME_EVENTID) + clear_bit(wil_status_resuming, + wil->status); + else + wil_err(wil, + "WMI evt %d while resuming\n", + id); + } + spin_lock_irqsave(&wil->wmi_ev_lock, flags); + if (wil->reply_id && wil->reply_id == id) { + if (wil->reply_buf) { + memcpy(wil->reply_buf, wmi, + min(len, wil->reply_size)); + immed_reply = true; + } + if (id == WMI_TRAFFIC_SUSPEND_EVENTID) { + wil_dbg_wmi(wil, + "set suspend_resp_rcvd\n"); + wil->suspend_resp_rcvd = true; + } + } + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", id, wmi->mid, tstamp); @@ -795,15 +1022,24 @@ void wmi_recv_cmd(struct wil6210_priv *wil) wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); - /* add to the pending list */ - spin_lock_irqsave(&wil->wmi_ev_lock, flags); - list_add_tail(&evt->list, &wil->pending_wmi_ev); - spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); - q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); - wil_dbg_wmi(wil, "queue_work -> %d\n", q); + if (immed_reply) { + wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n", + wil->reply_id); + kfree(evt); + num_immed_reply++; + complete(&wil->wmi_call); + } else { + /* add to the pending list */ + spin_lock_irqsave(&wil->wmi_ev_lock, flags); + list_add_tail(&evt->list, &wil->pending_wmi_ev); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); + q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); + wil_dbg_wmi(wil, "queue_work -> %d\n", q); + } } /* normally, 1 event per IRQ should be processed */ - wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n); + wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n", + n - num_immed_reply, num_immed_reply); } int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, @@ -814,13 +1050,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, mutex_lock(&wil->wmi_mutex); + spin_lock(&wil->wmi_ev_lock); + wil->reply_id = reply_id; + wil->reply_buf = reply; + wil->reply_size = reply_size; + reinit_completion(&wil->wmi_call); + spin_unlock(&wil->wmi_ev_lock); + rc = __wmi_send(wil, cmdid, buf, len); if (rc) goto out; - wil->reply_id = reply_id; - wil->reply_buf = reply; - wil->reply_size = reply_size; remain = wait_for_completion_timeout(&wil->wmi_call, msecs_to_jiffies(to_msec)); if (0 == remain) { @@ -833,10 +1073,14 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, cmdid, reply_id, to_msec - jiffies_to_msecs(remain)); } + +out: + spin_lock(&wil->wmi_ev_lock); wil->reply_id = 0; wil->reply_buf = NULL; wil->reply_size = 0; - out: + spin_unlock(&wil->wmi_ev_lock); + mutex_unlock(&wil->wmi_mutex); return rc; @@ -863,8 +1107,62 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) return 
wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); } +int wmi_led_cfg(struct wil6210_priv *wil, bool enable) +{ + int rc = 0; + struct wmi_led_cfg_cmd cmd = { + .led_mode = enable, + .id = led_id, + .slow_blink_cfg.blink_on = + cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms), + .slow_blink_cfg.blink_off = + cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms), + .medium_blink_cfg.blink_on = + cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms), + .medium_blink_cfg.blink_off = + cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms), + .fast_blink_cfg.blink_on = + cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms), + .fast_blink_cfg.blink_off = + cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms), + .led_polarity = led_polarity, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_led_cfg_done_event evt; + } __packed reply; + + if (led_id == WIL_LED_INVALID_ID) + goto out; + + if (led_id > WIL_LED_MAX_ID) { + wil_err(wil, "Invalid led id %d\n", led_id); + rc = -EINVAL; + goto out; + } + + wil_dbg_wmi(wil, + "%s led %d\n", + enable ? "enabling" : "disabling", led_id); + + rc = wmi_call(wil, WMI_LED_CFG_CMDID, &cmd, sizeof(cmd), + WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply), + 100); + if (rc) + goto out; + + if (reply.evt.status) { + wil_err(wil, "led %d cfg failed with status %d\n", + led_id, le32_to_cpu(reply.evt.status)); + rc = -EINVAL; + } + +out: + return rc; +} + int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid) + u8 chan, u8 hidden_ssid, u8 is_go) { int rc; @@ -875,9 +1173,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .channel = chan - 1, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, + .is_go = is_go, + .disable_ap_sme = disable_ap_sme, + .abft_len = wil->abft_len, }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_pcp_started_event evt; } __packed reply; @@ -892,6 +1193,13 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; } + if (disable_ap_sme && + !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + wil->fw_capabilities)) { + wil_err(wil, "disable_ap_sme not supported by FW\n"); + return -EOPNOTSUPP; + } + /* * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP @@ -904,11 +1212,21 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, if (reply.evt.status != WMI_FW_STATUS_SUCCESS) rc = -EINVAL; + if (wmi_nettype != WMI_NETTYPE_P2P) + /* Don't fail due to error in the led configuration */ + wmi_led_cfg(wil, true); + return rc; } int wmi_pcp_stop(struct wil6210_priv *wil) { + int rc; + + rc = wmi_led_cfg(wil, false); + if (rc) + return rc; + return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0, WMI_PCP_STOPPED_EVENTID, NULL, 0, 20); } @@ -931,7 +1249,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_set_ssid_cmd cmd; } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ @@ -964,7 +1282,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_set_pcp_channel_cmd cmd; } __packed reply; @@ -981,14 +1299,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) return 0; } -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) +int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi) { + int rc; 
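Illustrative sketch (not part of the patch): wmi_call() above now registers reply_id/reply_buf/reply_size under wil->wmi_ev_lock before sending, and wmi_recv_cmd() copies a matching event straight into that buffer and completes the waiter instead of queueing it. The standalone C below is only a userspace analogue of that handshake, with a pthread mutex and condition variable standing in for the spinlock and completion; the event id 0x1007 and the "scan done" payload are invented for the demo.

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t ev_done = PTHREAD_COND_INITIALIZER;
	static uint16_t reply_id;	/* 0 means "nobody is waiting" */
	static void *reply_buf;
	static size_t reply_size;
	static int reply_ready;

	static void *fake_event_source(void *arg)
	{
		const char payload[] = "scan done";

		pthread_mutex_lock(&ev_lock);
		if (reply_id == 0x1007 && reply_buf) {	/* id match: immediate reply */
			size_t n = sizeof(payload) < reply_size ?
				   sizeof(payload) : reply_size;

			memcpy(reply_buf, payload, n);
			reply_ready = 1;
			pthread_cond_signal(&ev_done);
		}
		pthread_mutex_unlock(&ev_lock);
		return arg;
	}

	int main(void)
	{
		char buf[32] = "";
		pthread_t rx;

		pthread_mutex_lock(&ev_lock);	/* register interest before sending */
		reply_id = 0x1007;
		reply_buf = buf;
		reply_size = sizeof(buf);
		reply_ready = 0;
		pthread_mutex_unlock(&ev_lock);

		pthread_create(&rx, NULL, fake_event_source, NULL); /* "send" command */

		pthread_mutex_lock(&ev_lock);
		while (!reply_ready)
			pthread_cond_wait(&ev_done, &ev_lock);
		reply_id = 0;			/* un-register, as wmi_call() does */
		reply_buf = NULL;
		pthread_mutex_unlock(&ev_lock);

		pthread_join(rx, NULL);
		printf("reply: %s\n", buf);
		return 0;
	}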
struct wmi_p2p_cfg_cmd cmd = { - .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD, + .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER, + .bcon_interval = cpu_to_le16(bi), .channel = channel - 1, }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_p2p_cfg_done_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); + + rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd), + WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status); + rc = -EINVAL; + } - return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); + return rc; +} + +int wmi_start_listen(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_listen_started_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); + + rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, + WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "device failed to start listen. status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_start_search(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_search_started_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); + + rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0, + WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300); + if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "device failed to start search. status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_stop_discovery(struct wil6210_priv *wil) +{ + int rc; + + wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n"); + + rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0, + WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100); + + if (rc) + wil_err(wil, "Failed to stop discovery\n"); + + return rc; } int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, @@ -1078,11 +1468,11 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) { int rc; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; } __packed reply; - wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off"); + wil_info(wil, "(%s)\n", on ? 
"on" : "off"); if (on) { rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, @@ -1105,7 +1495,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) struct wmi_cfg_rx_chain_cmd cmd = { .action = WMI_RX_CHAIN_ADD, .rx_sw_ring = { - .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)), + .max_mpdu_size = cpu_to_le16( + wil_mtu2macbuf(wil->rx_buf_len)), .ring_mem_base = cpu_to_le64(vring->pa), .ring_size = cpu_to_le16(vring->size), }, @@ -1115,7 +1506,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_cfg_rx_chain_done_event evt; } __packed evt; int rc; @@ -1169,7 +1560,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_temp_sense_done_event evt; } __packed reply; @@ -1186,43 +1577,56 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool full_disconnect, bool del_sta) { int rc; u16 reason_code; - struct wmi_disconnect_sta_cmd cmd = { + struct wmi_disconnect_sta_cmd disc_sta_cmd = { + .disconnect_reason = cpu_to_le16(reason), + }; + struct wmi_del_sta_cmd del_sta_cmd = { .disconnect_reason = cpu_to_le16(reason), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_disconnect_event evt; } __packed reply; - ether_addr_copy(cmd.dst_mac, mac); - - wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); + wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); - rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd), - WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); + wil->locally_generated_disc = true; + if (del_sta) { + ether_addr_copy(del_sta_cmd.dst_mac, mac); + rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, + sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } else { + ether_addr_copy(disc_sta_cmd.dst_mac, mac); + rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd, + sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } /* failure to disconnect in reasonable time treated as FW error */ if (rc) { wil_fw_error_recovery(wil); return rc; } - /* call event handler manually after processing wmi_call, - * to avoid deadlock - disconnect event handler acquires wil->mutex - * while it is already held here - */ - reason_code = le16_to_cpu(reply.evt.protocol_reason_status); - - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", - reply.evt.bssid, reason_code, - reply.evt.disconnect_reason); + if (full_disconnect) { + /* call event handler manually after processing wmi_call, + * to avoid deadlock - disconnect event handler acquires + * wil->mutex while it is already held here + */ + reason_code = le16_to_cpu(reply.evt.protocol_reason_status); - wil->sinfo_gen++; - wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + reply.evt.bssid, reason_code, + reply.evt.disconnect_reason); + wil->sinfo_gen++; + wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + } return 0; } @@ -1235,8 +1639,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 
timeout) .amsdu = 0, }; - wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__, - ringid, size, timeout); + wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, + timeout); return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd)); } @@ -1248,8 +1652,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__, - ringid, reason); + wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd)); } @@ -1261,8 +1664,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__, - cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason); + wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, + (cidxtid >> 4) & 0xf, reason); return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd)); } @@ -1285,7 +1688,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, .ba_timeout = cpu_to_le16(timeout), }; struct { - struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cmd_hdr wmi; struct wmi_rcp_addba_resp_sent_event evt; } __packed reply; @@ -1308,16 +1711,340 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, return rc; } +int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, + enum wmi_ps_profile_type ps_profile) +{ + int rc; + struct wmi_ps_dev_profile_cfg_cmd cmd = { + .ps_profile = ps_profile, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_ps_dev_profile_cfg_event evt; + } __packed reply; + u32 status; + + wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile); + + reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR); + + rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, &cmd, sizeof(cmd), + WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply), + 100); + if (rc) + return rc; + + status = le32_to_cpu(reply.evt.status); + + if (status != WMI_PS_CFG_CMD_STATUS_SUCCESS) { + wil_err(wil, "ps dev profile cfg failed with status %d\n", + status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) +{ + int rc; + struct wmi_set_mgmt_retry_limit_cmd cmd = { + .mgmt_retry_limit = retry_short, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_set_mgmt_retry_limit_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short); + + if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) + return -ENOTSUPP; + + reply.evt.status = WMI_FW_STATUS_FAILURE; + + rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, &cmd, sizeof(cmd), + WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), + 100); + if (rc) + return rc; + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "set mgmt retry limit failed with status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_get_mgmt_retry_limit_event evt; + } __packed reply; + + wil_dbg_wmi(wil, "getting mgmt retry short\n"); + + if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) + return -ENOTSUPP; + + reply.evt.mgmt_retry_limit = 0; + rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, NULL, 0, + WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), + 100); + if (rc) + return rc; + + if (retry_short) + *retry_short = 
reply.evt.mgmt_retry_limit; + + return 0; +} + +int wmi_abort_scan(struct wil6210_priv *wil) +{ + int rc; + + wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n"); + + rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, NULL, 0); + if (rc) + wil_err(wil, "Failed to abort scan (%d)\n", rc); + + return rc; +} + +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) +{ + int rc; + struct wmi_new_sta_cmd cmd = { + .aid = aid, + }; + + wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid); + + ether_addr_copy(cmd.dst_mac, mac); + + rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd)); + if (rc) + wil_err(wil, "Failed to send new sta (%d)\n", rc); + + return rc; +} + +int wmi_set_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data) +{ + int rc; + struct wmi_set_thermal_throttling_cfg_cmd cmd = { + .tt_data = *tt_data, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_set_thermal_throttling_cfg_event evt; + } __packed reply; + + if (!test_bit(WMI_FW_CAPABILITY_THERMAL_THROTTLING, + wil->fw_capabilities)) + return -EOPNOTSUPP; + + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_SET_THERMAL_THROTTLING_CFG_CMDID, &cmd, + sizeof(cmd), WMI_SET_THERMAL_THROTTLING_CFG_EVENTID, + &reply, sizeof(reply), 100); + if (rc) { + wil_err(wil, "failed to set thermal throttling\n"); + return rc; + } + if (reply.evt.status) { + wil_err(wil, "set thermal throttling failed, error %d\n", + reply.evt.status); + return -EIO; + } + + wil->tt_data = *tt_data; + wil->tt_data_set = true; + + return 0; +} + +int wmi_get_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_get_thermal_throttling_cfg_event evt; + } __packed reply; + + if (!test_bit(WMI_FW_CAPABILITY_THERMAL_THROTTLING, + wil->fw_capabilities)) + return -EOPNOTSUPP; + + rc = wmi_call(wil, WMI_GET_THERMAL_THROTTLING_CFG_CMDID, NULL, 0, + WMI_GET_THERMAL_THROTTLING_CFG_EVENTID, &reply, + sizeof(reply), 100); + if (rc) { + wil_err(wil, "failed to get thermal throttling\n"); + return rc; + } + + if (tt_data) + *tt_data = reply.evt.tt_data; + + return 0; +} + void wmi_event_flush(struct wil6210_priv *wil) { + ulong flags; struct pending_wmi_event *evt, *t; - wil_dbg_wmi(wil, "%s()\n", __func__); + wil_dbg_wmi(wil, "event_flush\n"); + + spin_lock_irqsave(&wil->wmi_ev_lock, flags); list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); kfree(evt); } + + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); +} + +int wmi_link_maintain_cfg_write(struct wil6210_priv *wil, + const u8 *addr, + bool fst_link_loss) +{ + int rc; + int cid = wil_find_cid(wil, addr); + u32 cfg_type; + struct wmi_link_maintain_cfg_write_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_link_maintain_cfg_write_done_event evt; + } __packed reply; + + if (cid < 0) + return cid; + + switch (wil->wdev->iftype) { + case NL80211_IFTYPE_STATION: + cfg_type = fst_link_loss ? + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA : + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA; + break; + case NL80211_IFTYPE_AP: + cfg_type = fst_link_loss ? 
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP : + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP; + break; + default: + wil_err(wil, "Unsupported for iftype %d", wil->wdev->iftype); + return -EINVAL; + } + + wil_dbg_misc(wil, "Setting cid:%d with cfg_type:%d\n", cid, cfg_type); + + cmd.cfg_type = cpu_to_le32(cfg_type); + cmd.cid = cpu_to_le32(cid); + + reply.evt.status = cpu_to_le32(WMI_FW_STATUS_FAILURE); + + rc = wmi_call(wil, WMI_LINK_MAINTAIN_CFG_WRITE_CMDID, &cmd, sizeof(cmd), + WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID, &reply, + sizeof(reply), 250); + if (rc) { + wil_err(wil, "Failed to %s FST link loss", + fst_link_loss ? "enable" : "disable"); + } else if (reply.evt.status == WMI_FW_STATUS_SUCCESS) { + wil->sta[cid].fst_link_loss = fst_link_loss; + } else { + wil_err(wil, "WMI_LINK_MAINTAIN_CFG_WRITE_CMDID returned status %d", + reply.evt.status); + rc = -EINVAL; + } + return rc; +} + +int wmi_suspend(struct wil6210_priv *wil) +{ + int rc; + struct wmi_traffic_suspend_cmd cmd = { + .wakeup_trigger = wil->wakeup_trigger, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_traffic_suspend_event evt; + } __packed reply; + u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP; + + wil->suspend_resp_rcvd = false; + wil->suspend_resp_comp = false; + + reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED; + + rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd), + WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), + suspend_to); + if (rc) { + wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc); + if (rc == -ETIME) + /* wmi_call TO */ + wil->suspend_stats.rejected_by_device++; + else + wil->suspend_stats.rejected_by_host++; + goto out; + } + + wil_dbg_wmi(wil, "waiting for suspend_response_completed\n"); + + rc = wait_event_interruptible_timeout(wil->wq, + wil->suspend_resp_comp, + msecs_to_jiffies(suspend_to)); + if (rc == 0) { + wil_err(wil, "TO waiting for suspend_response_completed\n"); + if (wil->suspend_resp_rcvd) + /* Device responded but we TO due to another reason */ + wil->suspend_stats.rejected_by_host++; + else + wil->suspend_stats.rejected_by_device++; + rc = -EBUSY; + goto out; + } + + wil_dbg_wmi(wil, "suspend_response_completed rcvd\n"); + if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) { + wil_dbg_pm(wil, "device rejected the suspend\n"); + wil->suspend_stats.rejected_by_device++; + } + rc = reply.evt.status; + +out: + wil->suspend_resp_rcvd = false; + wil->suspend_resp_comp = false; + + return rc; +} + +int wmi_resume(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_traffic_resume_event evt; + } __packed reply; + + reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; + + rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0, + WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), + WIL_WAIT_FOR_SUSPEND_RESUME_COMP); + if (rc) + return rc; + + return reply.evt.status; } static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, @@ -1341,23 +2068,20 @@ static void wmi_event_handle(struct wil6210_priv *wil, u16 len = le16_to_cpu(hdr->len); if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && - (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); + (len >= sizeof(struct wmi_cmd_hdr))) { + struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]); void *evt_data = (void *)(&wmi[1]); - u16 id = le16_to_cpu(wmi->id); + u16 id = le16_to_cpu(wmi->command_id); wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", id, wil->reply_id); /* check if someone waits for this event */ if (wil->reply_id && 
wil->reply_id == id) { - if (wil->reply_buf) { - memcpy(wil->reply_buf, wmi, - min(len, wil->reply_size)); - } else { - wmi_evt_call_handler(wil, id, evt_data, - len - sizeof(*wmi)); - } - wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); + WARN_ON(wil->reply_buf); + wmi_evt_call_handler(wil, id, evt_data, + len - sizeof(*wmi)); + wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n", + id); complete(&wil->wmi_call); return; } @@ -1404,11 +2128,73 @@ void wmi_event_worker(struct work_struct *work) struct pending_wmi_event *evt; struct list_head *lh; - wil_dbg_wmi(wil, "Start %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Start\n"); while ((lh = next_wmi_ev(wil)) != NULL) { evt = list_entry(lh, struct pending_wmi_event, list); wmi_event_handle(wil, &evt->event.hdr); kfree(evt); } - wil_dbg_wmi(wil, "Finished %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Finished\n"); +} + +bool wil_is_wmi_idle(struct wil6210_priv *wil) +{ + ulong flags; + struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx; + bool rc = false; + + spin_lock_irqsave(&wil->wmi_ev_lock, flags); + + /* Check if there are pending WMI events in the events queue */ + if (!list_empty(&wil->pending_wmi_ev)) { + wil_dbg_pm(wil, "Pending WMI events in queue\n"); + goto out; + } + + /* Check if there is a pending WMI call */ + if (wil->reply_id) { + wil_dbg_pm(wil, "Pending WMI call\n"); + goto out; + } + + /* Check if there are pending RX events in mbox */ + r->head = wil_r(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, rx.head)); + if (r->tail != r->head) + wil_dbg_pm(wil, "Pending WMI mbox events\n"); + else + rc = true; + +out: + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); + return rc; +} + +int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct) +{ + int rc; + struct wmi_set_connect_snr_thr_cmd cmd = { + .enable = true, + .omni_snr_thr = cpu_to_le16(omni), + .direct_snr_thr = cpu_to_le16(direct), + }; + + if (!test_bit(WMI_FW_CAPABILITY_CONNECT_SNR_THR, wil->fw_capabilities)) + return -ENOTSUPP; + + if (omni == 0 && direct == 0) + cmd.enable = false; + + wil_dbg_wmi(wil, "%s snr thresh omni=%d, direct=%d (1/4 dB units)\n", + cmd.enable ? "enable" : "disable", omni, direct); + + rc = wmi_send(wil, WMI_SET_CONNECT_SNR_THR_CMDID, &cmd, sizeof(cmd)); + if (rc) + return rc; + + wil->snr_thresh.enabled = cmd.enable; + wil->snr_thresh.omni = omni; + wil->snr_thresh.direct = direct; + + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 6e90e78f1554..35752885320e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. - * Copyright (c) 2006-2012 Wilocity . + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2006-2012 Wilocity * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,187 +17,271 @@ /* * This file contains the definitions of the WMI protocol specified in the - * Wireless Module Interface (WMI) for the Wilocity - * MARLON 60 Gigabit wireless solution. + * Wireless Module Interface (WMI) for the Qualcomm + * 60 GHz wireless solution. * It includes definitions of all the commands and events. * Commands are messages from the host to the WM. * Events are messages from the WM to the host. + * + * This is an automatically generated file. 
*/ #ifndef __WILOCITY_WMI_H__ #define __WILOCITY_WMI_H__ /* General */ -#define WILOCITY_MAX_ASSOC_STA (8) -#define WILOCITY_DEFAULT_ASSOC_STA (1) -#define WMI_MAC_LEN (6) -#define WMI_PROX_RANGE_NUM (3) -#define WMI_MAX_LOSS_DMG_BEACONS (32) +#define WMI_MAX_ASSOC_STA (8) +#define WMI_DEFAULT_ASSOC_STA (1) +#define WMI_MAC_LEN (6) +#define WMI_PROX_RANGE_NUM (3) +#define WMI_MAX_LOSS_DMG_BEACONS (20) +#define MAX_NUM_OF_SECTORS (128) +#define WMI_SCHED_MAX_ALLOCS_PER_CMD (4) +#define WMI_RF_DTYPE_LENGTH (3) +#define WMI_RF_ETYPE_LENGTH (3) +#define WMI_RF_RX2TX_LENGTH (3) +#define WMI_RF_ETYPE_VAL_PER_RANGE (5) + +/* Mailbox interface + * used for commands and events + */ +enum wmi_mid { + MID_DEFAULT = 0x00, + FIRST_DBG_MID_ID = 0x10, + LAST_DBG_MID_ID = 0xFE, + MID_BROADCAST = 0xFF, +}; + +/* FW capability IDs + * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to + * the host + */ +enum wmi_fw_capability { + WMI_FW_CAPABILITY_FTM = 0, + WMI_FW_CAPABILITY_PS_CONFIG = 1, + WMI_FW_CAPABILITY_RF_SECTORS = 2, + WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, + WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_WMI_ONLY = 5, + WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, + WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_LONG_RANGE = 9, + WMI_FW_CAPABILITY_FIXED_SCHEDULING = 10, + WMI_FW_CAPABILITY_MULTI_DIRECTED_OMNIS = 11, + WMI_FW_CAPABILITY_RSSI_REPORTING = 12, + WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13, + WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, + WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16, + WMI_FW_CAPABILITY_MAX, +}; + +/* WMI_CMD_HDR */ +struct wmi_cmd_hdr { + u8 mid; + u8 reserved; + __le16 command_id; + __le32 fw_timestamp; +} __packed; /* List of Commands */ enum wmi_command_id { - WMI_CONNECT_CMDID = 0x0001, - WMI_DISCONNECT_CMDID = 0x0003, - WMI_DISCONNECT_STA_CMDID = 0x0004, - WMI_START_SCAN_CMDID = 0x0007, - WMI_SET_BSS_FILTER_CMDID = 0x0009, - WMI_SET_PROBED_SSID_CMDID = 0x000a, - WMI_SET_LISTEN_INT_CMDID = 0x000b, - WMI_BCON_CTRL_CMDID = 0x000f, - WMI_ADD_CIPHER_KEY_CMDID = 0x0016, - WMI_DELETE_CIPHER_KEY_CMDID = 0x0017, - WMI_SET_APPIE_CMDID = 0x003f, - WMI_SET_WSC_STATUS_CMDID = 0x0041, - WMI_PXMT_RANGE_CFG_CMDID = 0x0042, - WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, -/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */ - WMI_MEM_READ_CMDID = 0x0800, - WMI_MEM_WR_CMDID = 0x0801, - WMI_ECHO_CMDID = 0x0803, - WMI_DEEP_ECHO_CMDID = 0x0804, - WMI_CONFIG_MAC_CMDID = 0x0805, - WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806, - WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808, - WMI_PHY_GET_STATISTICS_CMDID = 0x0809, - WMI_FS_TUNE_CMDID = 0x080a, - WMI_CORR_MEASURE_CMDID = 0x080b, - WMI_READ_RSSI_CMDID = 0x080c, - WMI_TEMP_SENSE_CMDID = 0x080e, - WMI_DC_CALIB_CMDID = 0x080f, - WMI_SEND_TONE_CMDID = 0x0810, - WMI_IQ_TX_CALIB_CMDID = 0x0811, - WMI_IQ_RX_CALIB_CMDID = 0x0812, - WMI_SET_UCODE_IDLE_CMDID = 0x0813, - WMI_SET_WORK_MODE_CMDID = 0x0815, - WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816, - WMI_MARLON_R_READ_CMDID = 0x0818, - WMI_MARLON_R_WRITE_CMDID = 0x0819, - WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a, - MAC_IO_STATIC_PARAMS_CMDID = 0x081b, - MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c, - WMI_SILENT_RSSI_CALIB_CMDID = 0x081d, - WMI_RF_RX_TEST_CMDID = 0x081e, - WMI_CFG_RX_CHAIN_CMDID = 0x0820, - WMI_VRING_CFG_CMDID = 0x0821, - WMI_BCAST_VRING_CFG_CMDID = 0x0822, - WMI_VRING_BA_EN_CMDID = 0x0823, - WMI_VRING_BA_DIS_CMDID = 0x0824, - WMI_RCP_ADDBA_RESP_CMDID = 0x0825, - WMI_RCP_DELBA_CMDID = 0x0826, - WMI_SET_SSID_CMDID = 0x0827, - WMI_GET_SSID_CMDID = 0x0828, - WMI_SET_PCP_CHANNEL_CMDID = 0x0829, - 
WMI_GET_PCP_CHANNEL_CMDID = 0x082a, - WMI_SW_TX_REQ_CMDID = 0x082b, - WMI_READ_MAC_RXQ_CMDID = 0x0830, - WMI_READ_MAC_TXQ_CMDID = 0x0831, - WMI_WRITE_MAC_RXQ_CMDID = 0x0832, - WMI_WRITE_MAC_TXQ_CMDID = 0x0833, - WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834, - WMI_MLME_PUSH_CMDID = 0x0835, - WMI_BEAMFORMING_MGMT_CMDID = 0x0836, - WMI_BF_TXSS_MGMT_CMDID = 0x0837, - WMI_BF_SM_MGMT_CMDID = 0x0838, - WMI_BF_RXSS_MGMT_CMDID = 0x0839, - WMI_BF_TRIG_CMDID = 0x083A, - WMI_SET_SECTORS_CMDID = 0x0849, - WMI_MAINTAIN_PAUSE_CMDID = 0x0850, - WMI_MAINTAIN_RESUME_CMDID = 0x0851, - WMI_RS_MGMT_CMDID = 0x0852, - WMI_RF_MGMT_CMDID = 0x0853, - WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854, - WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855, + WMI_CONNECT_CMDID = 0x01, + WMI_DISCONNECT_CMDID = 0x03, + WMI_DISCONNECT_STA_CMDID = 0x04, + WMI_START_SCAN_CMDID = 0x07, + WMI_SET_BSS_FILTER_CMDID = 0x09, + WMI_SET_PROBED_SSID_CMDID = 0x0A, + /* deprecated */ + WMI_SET_LISTEN_INT_CMDID = 0x0B, + WMI_BCON_CTRL_CMDID = 0x0F, + WMI_ADD_CIPHER_KEY_CMDID = 0x16, + WMI_DELETE_CIPHER_KEY_CMDID = 0x17, + WMI_PCP_CONF_CMDID = 0x18, + WMI_SET_APPIE_CMDID = 0x3F, + WMI_SET_WSC_STATUS_CMDID = 0x41, + WMI_PXMT_RANGE_CFG_CMDID = 0x42, + WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43, + WMI_MEM_READ_CMDID = 0x800, + WMI_MEM_WR_CMDID = 0x801, + WMI_ECHO_CMDID = 0x803, + WMI_DEEP_ECHO_CMDID = 0x804, + WMI_CONFIG_MAC_CMDID = 0x805, + /* deprecated */ + WMI_CONFIG_PHY_DEBUG_CMDID = 0x806, + WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808, + WMI_PHY_GET_STATISTICS_CMDID = 0x809, + /* deprecated */ + WMI_FS_TUNE_CMDID = 0x80A, + /* deprecated */ + WMI_CORR_MEASURE_CMDID = 0x80B, + WMI_READ_RSSI_CMDID = 0x80C, + WMI_TEMP_SENSE_CMDID = 0x80E, + WMI_DC_CALIB_CMDID = 0x80F, + /* deprecated */ + WMI_SEND_TONE_CMDID = 0x810, + /* deprecated */ + WMI_IQ_TX_CALIB_CMDID = 0x811, + /* deprecated */ + WMI_IQ_RX_CALIB_CMDID = 0x812, + WMI_SET_WORK_MODE_CMDID = 0x815, + WMI_LO_LEAKAGE_CALIB_CMDID = 0x816, + WMI_LO_POWER_CALIB_FROM_OTP_CMDID = 0x817, + WMI_SILENT_RSSI_CALIB_CMDID = 0x81D, + /* deprecated */ + WMI_RF_RX_TEST_CMDID = 0x81E, + WMI_CFG_RX_CHAIN_CMDID = 0x820, + WMI_VRING_CFG_CMDID = 0x821, + WMI_BCAST_VRING_CFG_CMDID = 0x822, + WMI_VRING_BA_EN_CMDID = 0x823, + WMI_VRING_BA_DIS_CMDID = 0x824, + WMI_RCP_ADDBA_RESP_CMDID = 0x825, + WMI_RCP_DELBA_CMDID = 0x826, + WMI_SET_SSID_CMDID = 0x827, + WMI_GET_SSID_CMDID = 0x828, + WMI_SET_PCP_CHANNEL_CMDID = 0x829, + WMI_GET_PCP_CHANNEL_CMDID = 0x82A, + WMI_SW_TX_REQ_CMDID = 0x82B, + WMI_MLME_PUSH_CMDID = 0x835, + WMI_BEAMFORMING_MGMT_CMDID = 0x836, + WMI_BF_TXSS_MGMT_CMDID = 0x837, + WMI_BF_SM_MGMT_CMDID = 0x838, + WMI_BF_RXSS_MGMT_CMDID = 0x839, + WMI_BF_TRIG_CMDID = 0x83A, + WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842, + WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843, + WMI_SET_SECTORS_CMDID = 0x849, + WMI_MAINTAIN_PAUSE_CMDID = 0x850, + WMI_MAINTAIN_RESUME_CMDID = 0x851, + WMI_RS_MGMT_CMDID = 0x852, + WMI_RF_MGMT_CMDID = 0x853, + WMI_RF_XPM_READ_CMDID = 0x856, + WMI_RF_XPM_WRITE_CMDID = 0x857, + WMI_LED_CFG_CMDID = 0x858, + WMI_SET_CONNECT_SNR_THR_CMDID = 0x85B, + WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, + WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, /* Performance monitoring commands */ - WMI_BF_CTRL_CMDID = 0x0862, - WMI_NOTIFY_REQ_CMDID = 0x0863, - WMI_GET_STATUS_CMDID = 0x0864, - WMI_UNIT_TEST_CMDID = 0x0900, - WMI_HICCUP_CMDID = 0x0901, - WMI_FLASH_READ_CMDID = 0x0902, - WMI_FLASH_WRITE_CMDID = 0x0903, - WMI_SECURITY_UNIT_TEST_CMDID = 0x0904, - /*P2P*/ - WMI_P2P_CFG_CMDID = 0x0910, - 
WMI_PORT_ALLOCATE_CMDID = 0x0911, - WMI_PORT_DELETE_CMDID = 0x0912, - WMI_POWER_MGMT_CFG_CMDID = 0x0913, - WMI_START_LISTEN_CMDID = 0x0914, - WMI_START_SEARCH_CMDID = 0x0915, - WMI_DISCOVERY_START_CMDID = 0x0916, - WMI_DISCOVERY_STOP_CMDID = 0x0917, - WMI_PCP_START_CMDID = 0x0918, - WMI_PCP_STOP_CMDID = 0x0919, - WMI_GET_PCP_FACTOR_CMDID = 0x091b, - - WMI_SET_MAC_ADDRESS_CMDID = 0xf003, - WMI_ABORT_SCAN_CMDID = 0xf007, - WMI_SET_PMK_CMDID = 0xf028, - - WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041, - WMI_GET_PMK_CMDID = 0xf048, - WMI_SET_PASSPHRASE_CMDID = 0xf049, - WMI_SEND_ASSOC_RES_CMDID = 0xf04a, - WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b, - WMI_EAPOL_TX_CMDID = 0xf04c, - WMI_MAC_ADDR_REQ_CMDID = 0xf04d, - WMI_FW_VER_CMDID = 0xf04e, - WMI_PMC_CMDID = 0xf04f, + WMI_BF_CTRL_CMDID = 0x862, + WMI_NOTIFY_REQ_CMDID = 0x863, + WMI_GET_STATUS_CMDID = 0x864, + WMI_GET_RF_STATUS_CMDID = 0x866, + WMI_GET_BASEBAND_TYPE_CMDID = 0x867, + WMI_UNIT_TEST_CMDID = 0x900, + WMI_FLASH_READ_CMDID = 0x902, + WMI_FLASH_WRITE_CMDID = 0x903, + /* Power management */ + WMI_TRAFFIC_SUSPEND_CMDID = 0x904, + WMI_TRAFFIC_RESUME_CMDID = 0x905, + /* P2P */ + WMI_P2P_CFG_CMDID = 0x910, + WMI_PORT_ALLOCATE_CMDID = 0x911, + WMI_PORT_DELETE_CMDID = 0x912, + WMI_POWER_MGMT_CFG_CMDID = 0x913, + WMI_START_LISTEN_CMDID = 0x914, + WMI_START_SEARCH_CMDID = 0x915, + WMI_DISCOVERY_START_CMDID = 0x916, + WMI_DISCOVERY_STOP_CMDID = 0x917, + WMI_PCP_START_CMDID = 0x918, + WMI_PCP_STOP_CMDID = 0x919, + WMI_GET_PCP_FACTOR_CMDID = 0x91B, + /* Power Save Configuration Commands */ + WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C, + WMI_RS_CFG_CMDID = 0x921, + WMI_GET_DETAILED_RS_RES_CMDID = 0x922, + WMI_AOA_MEAS_CMDID = 0x923, + WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924, + WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930, + WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931, + WMI_NEW_STA_CMDID = 0x935, + WMI_DEL_STA_CMDID = 0x936, + WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940, + WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, + /* Read Power Save profile type */ + WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, + WMI_TOF_SESSION_START_CMDID = 0x991, + WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, + WMI_TOF_SET_LCR_CMDID = 0x993, + WMI_TOF_SET_LCI_CMDID = 0x994, + WMI_TOF_CFG_RESPONDER_CMDID = 0x996, + WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997, + WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998, + WMI_TOF_CHANNEL_INFO_CMDID = 0x999, + WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0, + WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1, + WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2, + WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A3, + WMI_SET_RF_SECTOR_ON_CMDID = 0x9A4, + WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, + WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, + WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_SCHEDULING_SCHEME_CMDID = 0xA01, + WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, + WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, + WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, + WMI_SET_MAC_ADDRESS_CMDID = 0xF003, + WMI_ABORT_SCAN_CMDID = 0xF007, + WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, + /* deprecated */ + WMI_GET_PMK_CMDID = 0xF048, + WMI_SET_PASSPHRASE_CMDID = 0xF049, + /* deprecated */ + WMI_SEND_ASSOC_RES_CMDID = 0xF04A, + /* deprecated */ + WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B, + WMI_MAC_ADDR_REQ_CMDID = 0xF04D, + WMI_FW_VER_CMDID = 0xF04E, + WMI_PMC_CMDID = 0xF04F, }; -/* - * Commands data structures - */ - -/* - * WMI_CONNECT_CMDID - */ +/* WMI_CONNECT_CMDID */ enum wmi_network_type { WMI_NETTYPE_INFRA = 0x01, WMI_NETTYPE_ADHOC = 0x02, WMI_NETTYPE_ADHOC_CREATOR = 
0x04, WMI_NETTYPE_AP = 0x10, WMI_NETTYPE_P2P = 0x20, - WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */ + /* PCIE over 60g */ + WMI_NETTYPE_WBE = 0x40, }; enum wmi_dot11_auth_mode { - WMI_AUTH11_OPEN = 0x01, - WMI_AUTH11_SHARED = 0x02, - WMI_AUTH11_LEAP = 0x04, - WMI_AUTH11_WSC = 0x08, + WMI_AUTH11_OPEN = 0x01, + WMI_AUTH11_SHARED = 0x02, + WMI_AUTH11_LEAP = 0x04, + WMI_AUTH11_WSC = 0x08, }; enum wmi_auth_mode { - WMI_AUTH_NONE = 0x01, - WMI_AUTH_WPA = 0x02, - WMI_AUTH_WPA2 = 0x04, - WMI_AUTH_WPA_PSK = 0x08, - WMI_AUTH_WPA2_PSK = 0x10, - WMI_AUTH_WPA_CCKM = 0x20, - WMI_AUTH_WPA2_CCKM = 0x40, + WMI_AUTH_NONE = 0x01, + WMI_AUTH_WPA = 0x02, + WMI_AUTH_WPA2 = 0x04, + WMI_AUTH_WPA_PSK = 0x08, + WMI_AUTH_WPA2_PSK = 0x10, + WMI_AUTH_WPA_CCKM = 0x20, + WMI_AUTH_WPA2_CCKM = 0x40, }; enum wmi_crypto_type { - WMI_CRYPT_NONE = 0x01, - WMI_CRYPT_WEP = 0x02, - WMI_CRYPT_TKIP = 0x04, - WMI_CRYPT_AES = 0x08, - WMI_CRYPT_AES_GCMP = 0x20, + WMI_CRYPT_NONE = 0x01, + WMI_CRYPT_AES_GCMP = 0x20, }; enum wmi_connect_ctrl_flag_bits { - WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, - WMI_CONNECT_SEND_REASSOC = 0x0002, - WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004, - WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008, - WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010, - WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020, - WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040, - WMI_CONNECT_DO_NOT_DEAUTH = 0x0080, + WMI_CONNECT_ASSOC_POLICY_USER = 0x01, + WMI_CONNECT_SEND_REASSOC = 0x02, + WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04, + WMI_CONNECT_PROFILE_MATCH_DONE = 0x08, + WMI_CONNECT_IGNORE_AAC_BEACON = 0x10, + WMI_CONNECT_CSA_FOLLOW_BSS = 0x20, + WMI_CONNECT_DO_WPA_OFFLOAD = 0x40, + WMI_CONNECT_DO_NOT_DEAUTH = 0x80, }; -#define WMI_MAX_SSID_LEN (32) +#define WMI_MAX_SSID_LEN (32) +/* WMI_CONNECT_CMDID */ struct wmi_connect_cmd { u8 network_type; u8 dot11_auth_mode; @@ -216,31 +300,17 @@ struct wmi_connect_cmd { u8 reserved1[2]; } __packed; -/* - * WMI_DISCONNECT_STA_CMDID - */ +/* WMI_DISCONNECT_STA_CMDID */ struct wmi_disconnect_sta_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 disconnect_reason; } __packed; -/* - * WMI_SET_PMK_CMDID - */ - -#define WMI_MIN_KEY_INDEX (0) #define WMI_MAX_KEY_INDEX (3) #define WMI_MAX_KEY_LEN (32) #define WMI_PASSPHRASE_LEN (64) -#define WMI_PMK_LEN (32) - -struct wmi_set_pmk_cmd { - u8 pmk[WMI_PMK_LEN]; -} __packed; -/* - * WMI_SET_PASSPHRASE_CMDID - */ +/* WMI_SET_PASSPHRASE_CMDID */ struct wmi_set_passphrase_cmd { u8 ssid[WMI_MAX_SSID_LEN]; u8 passphrase[WMI_PASSPHRASE_LEN]; @@ -248,36 +318,34 @@ struct wmi_set_passphrase_cmd { u8 passphrase_len; } __packed; -/* - * WMI_ADD_CIPHER_KEY_CMDID - */ +/* WMI_ADD_CIPHER_KEY_CMDID */ enum wmi_key_usage { - WMI_KEY_USE_PAIRWISE = 0, - WMI_KEY_USE_RX_GROUP = 1, - WMI_KEY_USE_TX_GROUP = 2, + WMI_KEY_USE_PAIRWISE = 0x00, + WMI_KEY_USE_RX_GROUP = 0x01, + WMI_KEY_USE_TX_GROUP = 0x02, }; struct wmi_add_cipher_key_cmd { u8 key_index; u8 key_type; - u8 key_usage; /* enum wmi_key_usage */ + /* enum wmi_key_usage */ + u8 key_usage; u8 key_len; - u8 key_rsc[8]; /* key replay sequence counter */ + /* key replay sequence counter */ + u8 key_rsc[8]; u8 key[WMI_MAX_KEY_LEN]; - u8 key_op_ctrl; /* Additional Key Control information */ + /* Additional Key Control information */ + u8 key_op_ctrl; u8 mac[WMI_MAC_LEN]; } __packed; -/* - * WMI_DELETE_CIPHER_KEY_CMDID - */ +/* WMI_DELETE_CIPHER_KEY_CMDID */ struct wmi_delete_cipher_key_cmd { u8 key_index; u8 mac[WMI_MAC_LEN]; } __packed; -/* - * WMI_START_SCAN_CMDID +/* WMI_START_SCAN_CMDID * * Start L1 scan operation * @@ -286,146 +354,126 @@ struct wmi_delete_cipher_key_cmd 
{ * - WMI_SCAN_COMPLETE_EVENTID */ enum wmi_scan_type { - WMI_LONG_SCAN = 0, - WMI_SHORT_SCAN = 1, - WMI_PBC_SCAN = 2, - WMI_DIRECT_SCAN = 3, - WMI_ACTIVE_SCAN = 4, + WMI_ACTIVE_SCAN = 0x00, + WMI_SHORT_SCAN = 0x01, + WMI_PASSIVE_SCAN = 0x02, + WMI_DIRECT_SCAN = 0x03, + WMI_LONG_SCAN = 0x04, }; +/* WMI_START_SCAN_CMDID */ struct wmi_start_scan_cmd { - u8 direct_scan_mac_addr[6]; - u8 reserved[2]; - __le32 home_dwell_time; /* Max duration in the home channel(ms) */ - __le32 force_scan_interval; /* Time interval between scans (ms)*/ - u8 scan_type; /* wmi_scan_type */ - u8 num_channels; /* how many channels follow */ + u8 direct_scan_mac_addr[WMI_MAC_LEN]; + /* run scan with discovery beacon. Relevant for ACTIVE scan only. */ + u8 discovery_mode; + u8 reserved; + /* Max duration in the home channel(ms) */ + __le32 dwell_time; + /* Time interval between scans (ms) */ + __le32 force_scan_interval; + /* enum wmi_scan_type */ + u8 scan_type; + /* how many channels follow */ + u8 num_channels; + /* channels ID's: + * 0 - 58320 MHz + * 1 - 60480 MHz + * 2 - 62640 MHz + */ struct { u8 channel; u8 reserved; - } channel_list[0]; /* channels ID's */ - /* 0 - 58320 MHz */ - /* 1 - 60480 MHz */ - /* 2 - 62640 MHz */ + } channel_list[0]; } __packed; -/* - * WMI_SET_PROBED_SSID_CMDID - */ +/* WMI_SET_PROBED_SSID_CMDID */ #define MAX_PROBED_SSID_INDEX (3) enum wmi_ssid_flag { - WMI_SSID_FLAG_DISABLE = 0, /* disables entry */ - WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */ - WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */ + /* disables entry */ + WMI_SSID_FLAG_DISABLE = 0x00, + /* probes specified ssid */ + WMI_SSID_FLAG_SPECIFIC = 0x01, + /* probes for any ssid */ + WMI_SSID_FLAG_ANY = 0x02, }; struct wmi_probed_ssid_cmd { - u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */ - u8 flag; /* enum wmi_ssid_flag */ + /* 0 to MAX_PROBED_SSID_INDEX */ + u8 entry_index; + /* enum wmi_ssid_flag */ + u8 flag; u8 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_SET_APPIE_CMDID +/* WMI_SET_APPIE_CMDID * Add Application specified IE to a management frame */ -#define WMI_MAX_IE_LEN (1024) +#define WMI_MAX_IE_LEN (1024) -/* - * Frame Types - */ +/* Frame Types */ enum wmi_mgmt_frame_type { - WMI_FRAME_BEACON = 0, - WMI_FRAME_PROBE_REQ = 1, - WMI_FRAME_PROBE_RESP = 2, - WMI_FRAME_ASSOC_REQ = 3, - WMI_FRAME_ASSOC_RESP = 4, - WMI_NUM_MGMT_FRAME, + WMI_FRAME_BEACON = 0x00, + WMI_FRAME_PROBE_REQ = 0x01, + WMI_FRAME_PROBE_RESP = 0x02, + WMI_FRAME_ASSOC_REQ = 0x03, + WMI_FRAME_ASSOC_RESP = 0x04, + WMI_NUM_MGMT_FRAME = 0x05, }; struct wmi_set_appie_cmd { - u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ + /* enum wmi_mgmt_frame_type */ + u8 mgmt_frm_type; u8 reserved; - __le16 ie_len; /* Length of the IE to be added to MGMT frame */ + /* Length of the IE to be added to MGMT frame */ + __le16 ie_len; u8 ie_info[0]; } __packed; -/* - * WMI_PXMT_RANGE_CFG_CMDID - */ +/* WMI_PXMT_RANGE_CFG_CMDID */ struct wmi_pxmt_range_cfg_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 range; } __packed; -/* - * WMI_PXMT_SNR2_RANGE_CFG_CMDID - */ +/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */ struct wmi_pxmt_snr2_range_cfg_cmd { - s8 snr2range_arr[WMI_PROX_RANGE_NUM-1]; + s8 snr2range_arr[2]; } __packed; -/* - * WMI_RF_MGMT_CMDID - */ +/* WMI_RF_MGMT_CMDID */ enum wmi_rf_mgmt_type { - WMI_RF_MGMT_W_DISABLE = 0, - WMI_RF_MGMT_W_ENABLE = 1, - WMI_RF_MGMT_GET_STATUS = 2, + WMI_RF_MGMT_W_DISABLE = 0x00, + WMI_RF_MGMT_W_ENABLE = 0x01, + WMI_RF_MGMT_GET_STATUS = 0x02, }; +/* WMI_RF_MGMT_CMDID */ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } 
__packed; -/* - * WMI_THERMAL_THROTTLING_CTRL_CMDID - */ -#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF) - -struct wmi_thermal_throttling_ctrl_cmd { - __le32 time_on_usec; - __le32 time_off_usec; - __le32 max_txop_length_usec; -} __packed; - -/* - * WMI_RF_RX_TEST_CMDID - */ -struct wmi_rf_rx_test_cmd { - __le32 sector; -} __packed; - -/* - * WMI_CORR_MEASURE_CMDID - */ +/* WMI_CORR_MEASURE_CMDID */ struct wmi_corr_measure_cmd { - s32 freq_mhz; + __le32 freq_mhz; __le32 length_samples; __le32 iterations; } __packed; -/* - * WMI_SET_SSID_CMDID - */ +/* WMI_SET_SSID_CMDID */ struct wmi_set_ssid_cmd { __le32 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_SET_PCP_CHANNEL_CMDID - */ +/* WMI_SET_PCP_CHANNEL_CMDID */ struct wmi_set_pcp_channel_cmd { u8 channel; u8 reserved[3]; } __packed; -/* - * WMI_BCON_CTRL_CMDID - */ +/* WMI_BCON_CTRL_CMDID */ struct wmi_bcon_ctrl_cmd { __le16 bcon_interval; __le16 frag_num; @@ -434,214 +482,221 @@ struct wmi_bcon_ctrl_cmd { u8 pcp_max_assoc_sta; u8 disable_sec_offload; u8 disable_sec; + u8 hidden_ssid; + u8 is_go; + u8 reserved[2]; } __packed; -/******* P2P ***********/ - -/* - * WMI_PORT_ALLOCATE_CMDID - */ +/* WMI_PORT_ALLOCATE_CMDID */ enum wmi_port_role { - WMI_PORT_STA = 0, - WMI_PORT_PCP = 1, - WMI_PORT_AP = 2, - WMI_PORT_P2P_DEV = 3, - WMI_PORT_P2P_CLIENT = 4, - WMI_PORT_P2P_GO = 5, + WMI_PORT_STA = 0x00, + WMI_PORT_PCP = 0x01, + WMI_PORT_AP = 0x02, + WMI_PORT_P2P_DEV = 0x03, + WMI_PORT_P2P_CLIENT = 0x04, + WMI_PORT_P2P_GO = 0x05, }; +/* WMI_PORT_ALLOCATE_CMDID */ struct wmi_port_allocate_cmd { u8 mac[WMI_MAC_LEN]; u8 port_role; u8 mid; } __packed; -/* - * WMI_PORT_DELETE_CMDID - */ -struct wmi_delete_port_cmd { +/* WMI_PORT_DELETE_CMDID */ +struct wmi_port_delete_cmd { u8 mid; u8 reserved[3]; } __packed; -/* - * WMI_P2P_CFG_CMDID - */ +/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */ +enum wmi_wakeup_trigger { + WMI_WAKEUP_TRIGGER_UCAST = 0x01, + WMI_WAKEUP_TRIGGER_BCAST = 0x02, +}; + +/* WMI_TRAFFIC_SUSPEND_CMDID */ +struct wmi_traffic_suspend_cmd { + /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */ + u8 wakeup_trigger; +} __packed; + +/* WMI_P2P_CFG_CMDID */ enum wmi_discovery_mode { - WMI_DISCOVERY_MODE_NON_OFFLOAD = 0, - WMI_DISCOVERY_MODE_OFFLOAD = 1, - WMI_DISCOVERY_MODE_PEER2PEER = 2, + WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00, + WMI_DISCOVERY_MODE_OFFLOAD = 0x01, + WMI_DISCOVERY_MODE_PEER2PEER = 0x02, }; struct wmi_p2p_cfg_cmd { - u8 discovery_mode; /* wmi_discovery_mode */ + /* enum wmi_discovery_mode */ + u8 discovery_mode; u8 channel; - __le16 bcon_interval; /* base to listen/search duration calculation */ + /* base to listen/search duration calculation */ + __le16 bcon_interval; } __packed; -/* - * WMI_POWER_MGMT_CFG_CMDID - */ +/* WMI_POWER_MGMT_CFG_CMDID */ enum wmi_power_source_type { - WMI_POWER_SOURCE_BATTERY = 0, - WMI_POWER_SOURCE_OTHER = 1, + WMI_POWER_SOURCE_BATTERY = 0x00, + WMI_POWER_SOURCE_OTHER = 0x01, }; struct wmi_power_mgmt_cfg_cmd { - u8 power_source; /* wmi_power_source_type */ + /* enum wmi_power_source_type */ + u8 power_source; u8 reserved[3]; } __packed; -/* - * WMI_PCP_START_CMDID - */ - -enum wmi_hidden_ssid { - WMI_HIDDEN_SSID_DISABLED = 0, - WMI_HIDDEN_SSID_SEND_EMPTY = 1, - WMI_HIDDEN_SSID_CLEAR = 2, -}; - +/* WMI_PCP_START_CMDID */ struct wmi_pcp_start_cmd { __le16 bcon_interval; u8 pcp_max_assoc_sta; u8 hidden_ssid; - u8 reserved0[8]; + u8 is_go; + u8 reserved0[5]; + /* A-BFT length override if non-0 */ + u8 abft_len; + u8 disable_ap_sme; u8 
network_type; u8 channel; u8 disable_sec_offload; u8 disable_sec; } __packed; -/* - * WMI_SW_TX_REQ_CMDID - */ +/* WMI_SW_TX_REQ_CMDID */ struct wmi_sw_tx_req_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 len; u8 payload[0]; } __packed; -/* - * WMI_VRING_CFG_CMDID - */ - struct wmi_sw_ring_cfg { __le64 ring_mem_base; __le16 ring_size; __le16 max_mpdu_size; } __packed; +/* wmi_vring_cfg_schd */ struct wmi_vring_cfg_schd { __le16 priority; __le16 timeslot_us; } __packed; enum wmi_vring_cfg_encap_trans_type { - WMI_VRING_ENC_TYPE_802_3 = 0, - WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1, + WMI_VRING_ENC_TYPE_802_3 = 0x00, + WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01, }; enum wmi_vring_cfg_ds_cfg { - WMI_VRING_DS_PBSS = 0, - WMI_VRING_DS_STATION = 1, - WMI_VRING_DS_AP = 2, - WMI_VRING_DS_ADDR4 = 3, + WMI_VRING_DS_PBSS = 0x00, + WMI_VRING_DS_STATION = 0x01, + WMI_VRING_DS_AP = 0x02, + WMI_VRING_DS_ADDR4 = 0x03, }; enum wmi_vring_cfg_nwifi_ds_trans_type { - WMI_NWIFI_TX_TRANS_MODE_NO = 0, - WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1, - WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2, + WMI_NWIFI_TX_TRANS_MODE_NO = 0x00, + WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01, + WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02, }; enum wmi_vring_cfg_schd_params_priority { - WMI_SCH_PRIO_REGULAR = 0, - WMI_SCH_PRIO_HIGH = 1, + WMI_SCH_PRIO_REGULAR = 0x00, + WMI_SCH_PRIO_HIGH = 0x01, }; -#define CIDXTID_CID_POS (0) -#define CIDXTID_CID_LEN (4) -#define CIDXTID_CID_MSK (0xF) -#define CIDXTID_TID_POS (4) -#define CIDXTID_TID_LEN (4) -#define CIDXTID_TID_MSK (0xF0) +#define CIDXTID_CID_POS (0) +#define CIDXTID_CID_LEN (4) +#define CIDXTID_CID_MSK (0xF) +#define CIDXTID_TID_POS (4) +#define CIDXTID_TID_LEN (4) +#define CIDXTID_TID_MSK (0xF0) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1) +#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1) +#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2) +#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0) +#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6) +#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F) struct wmi_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; - u8 ringid; /* 0-23 vrings */ - + /* 0-23 vrings */ + u8 ringid; u8 cidxtid; - u8 encap_trans_type; - u8 ds_cfg; /* 802.3 DS cfg */ + /* 802.3 DS cfg */ + u8 ds_cfg; u8 nwifi_ds_trans_type; - - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0) - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1) - #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1) - #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2) u8 mac_ctrl; - - #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0) - #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6) - #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F) u8 to_resolution; u8 agg_max_wsize; struct wmi_vring_cfg_schd schd_params; } __packed; enum wmi_vring_cfg_cmd_action { - WMI_VRING_CMD_ADD = 0, - WMI_VRING_CMD_MODIFY = 1, - WMI_VRING_CMD_DELETE = 2, + WMI_VRING_CMD_ADD = 0x00, + WMI_VRING_CMD_MODIFY = 0x01, + WMI_VRING_CMD_DELETE = 0x02, }; +/* WMI_VRING_CFG_CMDID */ struct wmi_vring_cfg_cmd { __le32 action; struct wmi_vring_cfg vring_cfg; } __packed; -/* - * WMI_BCAST_VRING_CFG_CMDID - */ struct wmi_bcast_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; - u8 ringid; /* 0-23 vrings */ + /* 0-23 vrings */ + u8 ringid; u8 encap_trans_type; - u8 ds_cfg; /* 802.3 DS cfg */ + /* 802.3 DS cfg */ + u8 ds_cfg; u8 nwifi_ds_trans_type; } __packed; +/* WMI_BCAST_VRING_CFG_CMDID */ struct 
wmi_bcast_vring_cfg_cmd { __le32 action; struct wmi_bcast_vring_cfg vring_cfg; } __packed; -/* - * WMI_VRING_BA_EN_CMDID - */ +/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */ +struct wmi_lo_power_calib_from_otp_cmd { + /* index to read from OTP. zero based */ + u8 index; + u8 reserved[3]; +} __packed; + +/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */ +struct wmi_lo_power_calib_from_otp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_VRING_BA_EN_CMDID */ struct wmi_vring_ba_en_cmd { u8 ringid; u8 agg_max_wsize; __le16 ba_timeout; u8 amsdu; + u8 reserved[3]; } __packed; -/* - * WMI_VRING_BA_DIS_CMDID - */ +/* WMI_VRING_BA_DIS_CMDID */ struct wmi_vring_ba_dis_cmd { u8 ringid; u8 reserved; __le16 reason; } __packed; -/* - * WMI_NOTIFY_REQ_CMDID - */ +/* WMI_NOTIFY_REQ_CMDID */ struct wmi_notify_req_cmd { u8 cid; u8 year; @@ -654,102 +709,118 @@ struct wmi_notify_req_cmd { u8 miliseconds; } __packed; -/* - * WMI_CFG_RX_CHAIN_CMDID - */ +/* WMI_CFG_RX_CHAIN_CMDID */ enum wmi_sniffer_cfg_mode { - WMI_SNIFFER_OFF = 0, - WMI_SNIFFER_ON = 1, + WMI_SNIFFER_OFF = 0x00, + WMI_SNIFFER_ON = 0x01, }; +/* WMI_SILENT_RSSI_TABLE */ +enum wmi_silent_rssi_table { + RF_TEMPERATURE_CALIB_DEFAULT_DB = 0x00, + RF_TEMPERATURE_CALIB_HIGH_POWER_DB = 0x01, +}; + +/* WMI_SILENT_RSSI_STATUS */ +enum wmi_silent_rssi_status { + SILENT_RSSI_SUCCESS = 0x00, + SILENT_RSSI_FAILURE = 0x01, +}; + +/* WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID */ +struct wmi_set_active_silent_rssi_table_cmd { + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + enum wmi_sniffer_cfg_phy_info_mode { - WMI_SNIFFER_PHY_INFO_DISABLED = 0, - WMI_SNIFFER_PHY_INFO_ENABLED = 1, + WMI_SNIFFER_PHY_INFO_DISABLED = 0x00, + WMI_SNIFFER_PHY_INFO_ENABLED = 0x01, }; enum wmi_sniffer_cfg_phy_support { - WMI_SNIFFER_CP = 0, - WMI_SNIFFER_DP = 1, - WMI_SNIFFER_BOTH_PHYS = 2, + WMI_SNIFFER_CP = 0x00, + WMI_SNIFFER_DP = 0x01, + WMI_SNIFFER_BOTH_PHYS = 0x02, }; +/* wmi_sniffer_cfg */ struct wmi_sniffer_cfg { - __le32 mode; /* enum wmi_sniffer_cfg_mode */ - __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */ - __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */ + /* enum wmi_sniffer_cfg_mode */ + __le32 mode; + /* enum wmi_sniffer_cfg_phy_info_mode */ + __le32 phy_info_mode; + /* enum wmi_sniffer_cfg_phy_support */ + __le32 phy_support; u8 channel; u8 reserved[3]; } __packed; enum wmi_cfg_rx_chain_cmd_action { - WMI_RX_CHAIN_ADD = 0, - WMI_RX_CHAIN_DEL = 1, + WMI_RX_CHAIN_ADD = 0x00, + WMI_RX_CHAIN_DEL = 0x01, }; enum wmi_cfg_rx_chain_cmd_decap_trans_type { - WMI_DECAP_TYPE_802_3 = 0, - WMI_DECAP_TYPE_NATIVE_WIFI = 1, - WMI_DECAP_TYPE_NONE = 2, + WMI_DECAP_TYPE_802_3 = 0x00, + WMI_DECAP_TYPE_NATIVE_WIFI = 0x01, + WMI_DECAP_TYPE_NONE = 0x02, }; enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type { - WMI_NWIFI_RX_TRANS_MODE_NO = 0, - WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1, - WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2, + WMI_NWIFI_RX_TRANS_MODE_NO = 0x00, + WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01, + WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02, }; enum wmi_cfg_rx_chain_cmd_reorder_type { - WMI_RX_HW_REORDER = 0, - WMI_RX_SW_REORDER = 1, + WMI_RX_HW_REORDER = 0x00, + WMI_RX_SW_REORDER = 0x01, }; +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) +#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1) +#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2) +#define 
L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1) +#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1) +#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1) +#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1) +#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1) +#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2) +#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2) +#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4) +#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3) +#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1) +#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8) + +/* WMI_CFG_RX_CHAIN_CMDID */ struct wmi_cfg_rx_chain_cmd { __le32 action; struct wmi_sw_ring_cfg rx_sw_ring; u8 mid; u8 decap_trans_type; - - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) - #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1) - #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2) u8 l2_802_3_offload_ctrl; - - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1) - #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2) u8 l2_nwifi_offload_ctrl; - u8 vlan_id; u8 nwifi_ds_trans_type; - - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0) - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1) - #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1) - #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2) u8 l3_l4_ctrl; - - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0) - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1) - #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1) - #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2) - #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2) - #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4) - #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3) - #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1) - #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8) u8 ring_ctrl; - __le16 prefetch_thrsh; __le16 wb_thrsh; __le32 itr_value; @@ -757,31 +828,27 @@ struct wmi_cfg_rx_chain_cmd { u8 reorder_type; u8 reserved; struct wmi_sniffer_cfg sniffer_cfg; + __le16 max_rx_pl_per_desc; } __packed; -/* - * WMI_RCP_ADDBA_RESP_CMDID - */ +/* WMI_RCP_ADDBA_RESP_CMDID */ struct wmi_rcp_addba_resp_cmd { u8 cidxtid; u8 dialog_token; __le16 status_code; - __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */ + /* ieee80211_ba_parameterset field to send */ + __le16 ba_param_set; __le16 ba_timeout; } __packed; -/* - * WMI_RCP_DELBA_CMDID - */ +/* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { u8 
cidxtid; u8 reserved; __le16 reason; } __packed; -/* - * WMI_RCP_ADDBA_REQ_CMDID - */ +/* WMI_RCP_ADDBA_REQ_CMDID */ struct wmi_rcp_addba_req_cmd { u8 cidxtid; u8 dialog_token; @@ -792,32 +859,16 @@ struct wmi_rcp_addba_req_cmd { __le16 ba_seq_ctrl; } __packed; -/* - * WMI_SET_MAC_ADDRESS_CMDID - */ +/* WMI_SET_MAC_ADDRESS_CMDID */ struct wmi_set_mac_address_cmd { u8 mac[WMI_MAC_LEN]; u8 reserved[2]; } __packed; -/* -* WMI_EAPOL_TX_CMDID -*/ -struct wmi_eapol_tx_cmd { - u8 dst_mac[WMI_MAC_LEN]; - __le16 eapol_len; - u8 eapol[0]; -} __packed; - -/* - * WMI_ECHO_CMDID - * +/* WMI_ECHO_CMDID * Check FW is alive - * * WMI_DEEP_ECHO_CMDID - * * Check FW and ucode are alive - * * Returned event: WMI_ECHO_RSP_EVENTID * same event for both commands */ @@ -825,161 +876,506 @@ struct wmi_echo_cmd { __le32 value; } __packed; -/* - * WMI_TEMP_SENSE_CMDID +/* WMI_RF_PWR_ON_DELAY_CMDID + * set FW time parameters used through RF resetting + * RF reset consists of bringing its power down for a period of time, then + * bringing the power up + * Returned event: WMI_RF_PWR_ON_DELAY_RSP_EVENTID + */ +struct wmi_rf_pwr_on_delay_cmd { + /* time in usec the FW waits after bringing the RF PWR down, + * set 0 for default + */ + __le16 down_delay_usec; + /* time in usec the FW waits after bringing the RF PWR up, + * set 0 for default + */ + __le16 up_delay_usec; +} __packed; + +/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID + * This API controls the Tx and Rx gain over temperature. + * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. + * It also controls the Tx gain index, by controlling the Rx to Tx gain index + * offset. + * The control is divided by 3 temperature values to 4 temperature ranges. + * Each parameter uses its own temperature values. + * Returned event: WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID + */ +struct wmi_set_high_power_table_params_cmd { + /* Temperature range for Tx D-type parameters */ + u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved0; + /* Tx D-type values to be used for each temperature range */ + __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx D-type parameters */ + u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved1; + /* Rx D-type values to be used for each temperature range */ + __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx E-type parameters */ + u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved2; + /* Rx E-type values to be used for each temperature range. 
+ * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + /* Temperature range for rx_2_tx_offs parameters */ + u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; + u8 reserved3; + /* Rx to Tx gain index offset */ + s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; +} __packed; + +/* CMD: WMI_RF_XPM_READ_CMDID */ +struct wmi_rf_xpm_read_cmd { + u8 rf_id; + u8 reserved[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; +} __packed; + +/* CMD: WMI_RF_XPM_WRITE_CMDID */ +struct wmi_rf_xpm_write_cmd { + u8 rf_id; + u8 reserved0[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; + /* boolean flag indicating whether FW should verify the write + * operation + */ + u8 verify; + u8 reserved1[3]; + /* actual size=num_bytes */ + u8 data_bytes[0]; +} __packed; + +/* WMI_TEMP_SENSE_CMDID * * Measure MAC and radio temperatures + * + * Possible modes for temperature measurement */ - -/* Possible modes for temperature measurement */ enum wmi_temperature_measure_mode { - TEMPERATURE_USE_OLD_VALUE = 0x1, - TEMPERATURE_MEASURE_NOW = 0x2, + TEMPERATURE_USE_OLD_VALUE = 0x01, + TEMPERATURE_MEASURE_NOW = 0x02, }; +/* WMI_TEMP_SENSE_CMDID */ struct wmi_temp_sense_cmd { __le32 measure_baseband_en; __le32 measure_rf_en; __le32 measure_mode; } __packed; -/* - * WMI_PMC_CMDID - */ -enum wmi_pmc_op_e { - WMI_PMC_ALLOCATE = 0, - WMI_PMC_RELEASE = 1, +enum wmi_pmc_op { + WMI_PMC_ALLOCATE = 0x00, + WMI_PMC_RELEASE = 0x01, }; +/* WMI_PMC_CMDID */ struct wmi_pmc_cmd { - u8 op; /* enum wmi_pmc_cmd_op_type */ + /* enum wmi_pmc_cmd_op_type */ + u8 op; u8 reserved; __le16 ring_size; __le64 mem_base; } __packed; -/* - * WMI Events +enum wmi_aoa_meas_type { + WMI_AOA_PHASE_MEAS = 0x00, + WMI_AOA_PHASE_AMP_MEAS = 0x01, +}; + +/* WMI_AOA_MEAS_CMDID */ +struct wmi_aoa_meas_cmd { + u8 mac_addr[WMI_MAC_LEN]; + /* channels IDs: + * 0 - 58320 MHz + * 1 - 60480 MHz + * 2 - 62640 MHz + */ + u8 channel; + /* enum wmi_aoa_meas_type */ + u8 aoa_meas_type; + __le32 meas_rf_mask; +} __packed; + +/* WMI_SET_MGMT_RETRY_LIMIT_CMDID */ +struct wmi_set_mgmt_retry_limit_cmd { + /* MAC retransmit limit for mgmt frames */ + u8 mgmt_retry_limit; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +/* Zones: HIGH, MAX, CRITICAL */ +#define WMI_NUM_OF_TT_ZONES (3) + +struct wmi_tt_zone_limits { + /* Above this temperature this zone is active */ + u8 temperature_high; + /* Below this temperature the adjacent lower zone is active */ + u8 temperature_low; + u8 reserved[2]; +} __packed; + +/* Struct used for both configuration and status commands of thermal + * throttling */ +struct wmi_tt_data { + /* Enable/Disable TT algorithm for baseband */ + u8 bb_enabled; + u8 reserved0[3]; + /* Define zones for baseband */ + struct wmi_tt_zone_limits bb_zones[WMI_NUM_OF_TT_ZONES]; + /* Enable/Disable TT algorithm for radio */ + u8 rf_enabled; + u8 reserved1[3]; + /* Define zones for all radio chips */ + struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES]; +} __packed; -/* +/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */ +struct wmi_set_thermal_throttling_cfg_cmd { + /* Command data */ + struct wmi_tt_data tt_data; +} __packed; + +/* WMI_NEW_STA_CMDID */ +struct wmi_new_sta_cmd { + u8 dst_mac[WMI_MAC_LEN]; + u8 aid; +} __packed; + +/* WMI_DEL_STA_CMDID */ +struct wmi_del_sta_cmd { + u8 
dst_mac[WMI_MAC_LEN]; + __le16 disconnect_reason; +} __packed; + +enum wmi_tof_burst_duration { + WMI_TOF_BURST_DURATION_250_USEC = 2, + WMI_TOF_BURST_DURATION_500_USEC = 3, + WMI_TOF_BURST_DURATION_1_MSEC = 4, + WMI_TOF_BURST_DURATION_2_MSEC = 5, + WMI_TOF_BURST_DURATION_4_MSEC = 6, + WMI_TOF_BURST_DURATION_8_MSEC = 7, + WMI_TOF_BURST_DURATION_16_MSEC = 8, + WMI_TOF_BURST_DURATION_32_MSEC = 9, + WMI_TOF_BURST_DURATION_64_MSEC = 10, + WMI_TOF_BURST_DURATION_128_MSEC = 11, + WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15, +}; + +enum wmi_tof_session_start_flags { + WMI_TOF_SESSION_START_FLAG_SECURED = 0x1, + WMI_TOF_SESSION_START_FLAG_ASAP = 0x2, + WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4, + WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8, +}; + +/* WMI_TOF_SESSION_START_CMDID */ +struct wmi_ftm_dest_info { + u8 channel; + /* wmi_tof_session_start_flags_e */ + u8 flags; + u8 initial_token; + u8 num_of_ftm_per_burst; + u8 num_of_bursts_exp; + /* wmi_tof_burst_duration_e */ + u8 burst_duration; + /* Burst Period indicate interval between two consecutive burst + * instances, in units of 100 ms + */ + __le16 burst_period; + u8 dst_mac[WMI_MAC_LEN]; + u8 reserved; + u8 num_burst_per_aoa_meas; +} __packed; + +/* WMI_TOF_SESSION_START_CMDID */ +struct wmi_tof_session_start_cmd { + __le32 session_id; + u8 reserved1; + u8 aoa_type; + __le16 num_of_dest; + u8 reserved[4]; + struct wmi_ftm_dest_info ftm_dest_info[0]; +} __packed; + +/* WMI_TOF_CFG_RESPONDER_CMDID */ +struct wmi_tof_cfg_responder_cmd { + u8 enable; + u8 reserved[3]; +} __packed; + +enum wmi_tof_channel_info_report_type { + WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1, + WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2, + WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4, + WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8, + WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10, +}; + +/* WMI_TOF_CHANNEL_INFO_CMDID */ +struct wmi_tof_channel_info_cmd { + /* wmi_tof_channel_info_report_type_e */ + __le32 channel_info_report_request; +} __packed; + +/* WMI_TOF_SET_TX_RX_OFFSET_CMDID */ +struct wmi_tof_set_tx_rx_offset_cmd { + /* TX delay offset */ + __le32 tx_offset; + /* RX delay offset */ + __le32 rx_offset; + /* Mask to define which RFs to configure. 
0 means all RFs */ + __le32 rf_mask; + /* Offset to strongest tap of CIR */ + __le32 precursor; +} __packed; + +/* WMI_TOF_GET_TX_RX_OFFSET_CMDID */ +struct wmi_tof_get_tx_rx_offset_cmd { + /* rf index to read offsets from */ + u8 rf_index; + u8 reserved[3]; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_map_mcs_to_schd_params { + u8 mcs; + /* time in usec from start slot to start tx flow - default 15 */ + u8 time_in_usec_before_initiate_tx; + /* RD enable - if yes consider RD according to STA mcs */ + u8 rd_enabled; + u8 reserved; + /* time in usec from start slot to stop vring */ + __le16 time_in_usec_to_stop_vring; + /* timeout to force flush from start of slot */ + __le16 flush_to_in_usec; + /* per mcs the mac buffer limit size in bytes */ + __le32 mac_buff_size_in_bytes; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID */ +struct wmi_fixed_scheduling_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +#define WMI_NUM_MCS (13) + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_fixed_scheduling_config_cmd { + /* defaults in the SAS table */ + struct wmi_map_mcs_to_schd_params mcs_to_schd_params_map[WMI_NUM_MCS]; + /* default 150 uSec */ + __le16 max_sta_rd_ppdu_duration_in_usec; + /* default 300 uSec */ + __le16 max_sta_grant_ppdu_duration_in_usec; + /* default 1000 uSec */ + __le16 assoc_slot_duration_in_usec; + /* default 360 uSec */ + __le16 virtual_slot_duration_in_usec; + /* each this field value slots start with grant frame to the station + * - default 2 + */ + u8 number_of_ap_slots_for_initiate_grant; + u8 reserved[3]; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_CMDID */ +struct wmi_enable_fixed_scheduling_cmd { + __le32 reserved; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID */ +struct wmi_enable_fixed_scheduling_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID */ +struct wmi_set_multi_directed_omnis_config_cmd { + /* number of directed omnis at destination AP */ + u8 dest_ap_num_directed_omnis; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID */ +struct wmi_set_multi_directed_omnis_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_CMDID */ +struct wmi_set_long_range_config_cmd { + __le32 reserved; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID */ +struct wmi_set_long_range_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI Events * List of Events (target to host) */ enum wmi_event_id { - WMI_READY_EVENTID = 0x1001, - WMI_CONNECT_EVENTID = 0x1002, - WMI_DISCONNECT_EVENTID = 0x1003, - WMI_SCAN_COMPLETE_EVENTID = 0x100a, - WMI_REPORT_STATISTICS_EVENTID = 0x100b, - WMI_RD_MEM_RSP_EVENTID = 0x1800, - WMI_FW_READY_EVENTID = 0x1801, - WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200, - WMI_ECHO_RSP_EVENTID = 0x1803, - WMI_FS_TUNE_DONE_EVENTID = 0x180a, - WMI_CORR_MEASURE_EVENTID = 0x180b, - WMI_READ_RSSI_EVENTID = 0x180c, - WMI_TEMP_SENSE_DONE_EVENTID = 0x180e, - WMI_DC_CALIB_DONE_EVENTID = 0x180f, - WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, - WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, - WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, - WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, - WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, - WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, - WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, - WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d, - 
WMI_RF_RX_TEST_DONE_EVENTID = 0x181e, - WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, - WMI_VRING_CFG_DONE_EVENTID = 0x1821, - WMI_BA_STATUS_EVENTID = 0x1823, - WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, - WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, - WMI_DELBA_EVENTID = 0x1826, - WMI_GET_SSID_EVENTID = 0x1828, - WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, - WMI_SW_TX_COMPLETE_EVENTID = 0x182b, - - WMI_READ_MAC_RXQ_EVENTID = 0x1830, - WMI_READ_MAC_TXQ_EVENTID = 0x1831, - WMI_WRITE_MAC_RXQ_EVENTID = 0x1832, - WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, - WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, - - WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, - WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, - WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, - WMI_RS_MGMT_DONE_EVENTID = 0x1852, - WMI_RF_MGMT_STATUS_EVENTID = 0x1853, - WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855, - WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, - WMI_RX_MGMT_PACKET_EVENTID = 0x1840, - WMI_TX_MGMT_PACKET_EVENTID = 0x1841, - + WMI_READY_EVENTID = 0x1001, + WMI_CONNECT_EVENTID = 0x1002, + WMI_DISCONNECT_EVENTID = 0x1003, + WMI_SCAN_COMPLETE_EVENTID = 0x100A, + WMI_REPORT_STATISTICS_EVENTID = 0x100B, + WMI_RD_MEM_RSP_EVENTID = 0x1800, + WMI_FW_READY_EVENTID = 0x1801, + WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, + WMI_ECHO_RSP_EVENTID = 0x1803, + /* deprecated */ + WMI_FS_TUNE_DONE_EVENTID = 0x180A, + /* deprecated */ + WMI_CORR_MEASURE_EVENTID = 0x180B, + WMI_READ_RSSI_EVENTID = 0x180C, + WMI_TEMP_SENSE_DONE_EVENTID = 0x180E, + WMI_DC_CALIB_DONE_EVENTID = 0x180F, + /* deprecated */ + WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, + /* deprecated */ + WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, + WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, + WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, + WMI_LO_POWER_CALIB_FROM_OTP_EVENTID = 0x1817, + WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D, + /* deprecated */ + WMI_RF_RX_TEST_DONE_EVENTID = 0x181E, + WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, + WMI_VRING_CFG_DONE_EVENTID = 0x1821, + WMI_BA_STATUS_EVENTID = 0x1823, + WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, + WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, + WMI_DELBA_EVENTID = 0x1826, + WMI_GET_SSID_EVENTID = 0x1828, + WMI_GET_PCP_CHANNEL_EVENTID = 0x182A, + WMI_SW_TX_COMPLETE_EVENTID = 0x182B, + WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, + WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, + WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, + WMI_RS_MGMT_DONE_EVENTID = 0x1852, + WMI_RF_MGMT_STATUS_EVENTID = 0x1853, + WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, + WMI_RX_MGMT_PACKET_EVENTID = 0x1840, + WMI_TX_MGMT_PACKET_EVENTID = 0x1841, + WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842, + WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843, + WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856, + WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857, + WMI_LED_CFG_DONE_EVENTID = 0x1858, + WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, + WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, /* Performance monitoring events */ - WMI_DATA_PORT_OPEN_EVENTID = 0x1860, - WMI_WBE_LINK_DOWN_EVENTID = 0x1861, - - WMI_BF_CTRL_DONE_EVENTID = 0x1862, - WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, - WMI_GET_STATUS_DONE_EVENTID = 0x1864, - WMI_VRING_EN_EVENTID = 0x1865, - - WMI_UNIT_TEST_EVENTID = 0x1900, - WMI_FLASH_READ_DONE_EVENTID = 0x1902, - WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, - /*P2P*/ - WMI_P2P_CFG_DONE_EVENTID = 0x1910, - WMI_PORT_ALLOCATED_EVENTID = 0x1911, - WMI_PORT_DELETED_EVENTID = 0x1912, - WMI_LISTEN_STARTED_EVENTID = 0x1914, - WMI_SEARCH_STARTED_EVENTID = 0x1915, - WMI_DISCOVERY_STARTED_EVENTID = 0x1916, - 
WMI_DISCOVERY_STOPPED_EVENTID = 0x1917, - WMI_PCP_STARTED_EVENTID = 0x1918, - WMI_PCP_STOPPED_EVENTID = 0x1919, - WMI_PCP_FACTOR_EVENTID = 0x191a, - WMI_SET_CHANNEL_EVENTID = 0x9000, - WMI_ASSOC_REQ_EVENTID = 0x9001, - WMI_EAPOL_RX_EVENTID = 0x9002, - WMI_MAC_ADDR_RESP_EVENTID = 0x9003, - WMI_FW_VER_EVENTID = 0x9004, + WMI_DATA_PORT_OPEN_EVENTID = 0x1860, + WMI_WBE_LINK_DOWN_EVENTID = 0x1861, + WMI_BF_CTRL_DONE_EVENTID = 0x1862, + WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, + WMI_GET_STATUS_DONE_EVENTID = 0x1864, + WMI_VRING_EN_EVENTID = 0x1865, + WMI_GET_RF_STATUS_EVENTID = 0x1866, + WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, + WMI_UNIT_TEST_EVENTID = 0x1900, + WMI_FLASH_READ_DONE_EVENTID = 0x1902, + WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, + /* Power management */ + WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904, + WMI_TRAFFIC_RESUME_EVENTID = 0x1905, + /* P2P */ + WMI_P2P_CFG_DONE_EVENTID = 0x1910, + WMI_PORT_ALLOCATED_EVENTID = 0x1911, + WMI_PORT_DELETED_EVENTID = 0x1912, + WMI_LISTEN_STARTED_EVENTID = 0x1914, + WMI_SEARCH_STARTED_EVENTID = 0x1915, + WMI_DISCOVERY_STARTED_EVENTID = 0x1916, + WMI_DISCOVERY_STOPPED_EVENTID = 0x1917, + WMI_PCP_STARTED_EVENTID = 0x1918, + WMI_PCP_STOPPED_EVENTID = 0x1919, + WMI_PCP_FACTOR_EVENTID = 0x191A, + /* Power Save Configuration Events */ + WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C, + WMI_RS_CFG_DONE_EVENTID = 0x1921, + WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, + WMI_AOA_MEAS_EVENTID = 0x1923, + WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924, + WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930, + WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, + WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940, + WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, + /* return the Power Save profile */ + WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, + WMI_TOF_SESSION_END_EVENTID = 0x1991, + WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, + WMI_TOF_SET_LCR_EVENTID = 0x1993, + WMI_TOF_SET_LCI_EVENTID = 0x1994, + WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995, + WMI_TOF_CFG_RESPONDER_EVENTID = 0x1996, + WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997, + WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998, + WMI_TOF_CHANNEL_INFO_EVENTID = 0x1999, + WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0, + WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1, + WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2, + WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A3, + WMI_SET_RF_SECTOR_ON_DONE_EVENTID = 0x19A4, + WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, + WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, + WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, + WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, + WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, + WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, + WMI_SET_CHANNEL_EVENTID = 0x9000, + WMI_ASSOC_REQ_EVENTID = 0x9001, + WMI_EAPOL_RX_EVENTID = 0x9002, + WMI_MAC_ADDR_RESP_EVENTID = 0x9003, + WMI_FW_VER_EVENTID = 0x9004, + WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; -/* - * Events data structures - */ - +/* Events data structures */ enum wmi_fw_status { - WMI_FW_STATUS_SUCCESS, - WMI_FW_STATUS_FAILURE, + WMI_FW_STATUS_SUCCESS = 0x00, + WMI_FW_STATUS_FAILURE = 0x01, }; -/* - * WMI_RF_MGMT_STATUS_EVENTID - */ +/* WMI_RF_MGMT_STATUS_EVENTID */ enum wmi_rf_status { - WMI_RF_ENABLED = 0, - WMI_RF_DISABLED_HW = 1, - WMI_RF_DISABLED_SW = 2, - WMI_RF_DISABLED_HW_SW = 3, + WMI_RF_ENABLED = 0x00, + WMI_RF_DISABLED_HW = 0x01, + 
WMI_RF_DISABLED_SW = 0x02, + WMI_RF_DISABLED_HW_SW = 0x03, }; +/* WMI_RF_MGMT_STATUS_EVENTID */ struct wmi_rf_mgmt_status_event { __le32 rf_status; } __packed; -/* - * WMI_THERMAL_THROTTLING_STATUS_EVENTID - */ -struct wmi_thermal_throttling_status_event { - __le32 time_on_usec; - __le32 time_off_usec; - __le32 max_txop_length_usec; -} __packed; - -/* - * WMI_GET_STATUS_DONE_EVENTID - */ +/* WMI_GET_STATUS_DONE_EVENTID */ struct wmi_get_status_done_event { __le32 is_associated; u8 cid; @@ -995,19 +1391,93 @@ struct wmi_get_status_done_event { __le32 is_secured; } __packed; -/* - * WMI_FW_VER_EVENTID - */ +/* WMI_FW_VER_EVENTID */ struct wmi_fw_ver_event { - u8 major; - u8 minor; - __le16 subminor; - __le16 build; + /* FW image version */ + __le32 fw_major; + __le32 fw_minor; + __le32 fw_subminor; + __le32 fw_build; + /* FW image build time stamp */ + __le32 hour; + __le32 minute; + __le32 second; + __le32 day; + __le32 month; + __le32 year; + /* Boot Loader image version */ + __le32 bl_major; + __le32 bl_minor; + __le32 bl_subminor; + __le32 bl_build; + /* The number of entries in the FW capabilities array */ + u8 fw_capabilities_len; + u8 reserved[3]; + /* FW capabilities info + * Must be the last member of the struct + */ + __le32 fw_capabilities[0]; } __packed; -/* -* WMI_MAC_ADDR_RESP_EVENTID -*/ +/* WMI_GET_RF_STATUS_EVENTID */ +enum rf_type { + RF_UNKNOWN = 0x00, + RF_MARLON = 0x01, + RF_SPARROW = 0x02, +}; + +/* WMI_GET_RF_STATUS_EVENTID */ +enum board_file_rf_type { + BF_RF_MARLON = 0x00, + BF_RF_SPARROW = 0x01, +}; + +/* WMI_GET_RF_STATUS_EVENTID */ +enum rf_status { + RF_OK = 0x00, + RF_NO_COMM = 0x01, + RF_WRONG_BOARD_FILE = 0x02, +}; + +/* WMI_GET_RF_STATUS_EVENTID */ +struct wmi_get_rf_status_event { + /* enum rf_type */ + __le32 rf_type; + /* attached RFs bit vector */ + __le32 attached_rf_vector; + /* enabled RFs bit vector */ + __le32 enabled_rf_vector; + /* enum rf_status, refers to enabled RFs */ + u8 rf_status[32]; + /* enum board file RF type */ + __le32 board_file_rf_type; + /* board file platform type */ + __le32 board_file_platform_type; + /* board file version */ + __le32 board_file_version; + /* enabled XIFs bit vector */ + __le32 enabled_xif_vector; + __le32 reserved; +} __packed; + +/* WMI_GET_BASEBAND_TYPE_EVENTID */ +enum baseband_type { + BASEBAND_UNKNOWN = 0x00, + BASEBAND_SPARROW_M_A0 = 0x03, + BASEBAND_SPARROW_M_A1 = 0x04, + BASEBAND_SPARROW_M_B0 = 0x05, + BASEBAND_SPARROW_M_C0 = 0x06, + BASEBAND_SPARROW_M_D0 = 0x07, + BASEBAND_TALYN_M_A0 = 0x08, +}; + +/* WMI_GET_BASEBAND_TYPE_EVENTID */ +struct wmi_get_baseband_type_event { + /* enum baseband_type */ + __le32 baseband_type; +} __packed; + +/* WMI_MAC_ADDR_RESP_EVENTID */ struct wmi_mac_addr_resp_event { u8 mac[WMI_MAC_LEN]; u8 auth_mode; @@ -1015,44 +1485,44 @@ struct wmi_mac_addr_resp_event { __le32 offload_mode; } __packed; -/* -* WMI_EAPOL_RX_EVENTID -*/ +/* WMI_EAPOL_RX_EVENTID */ struct wmi_eapol_rx_event { u8 src_mac[WMI_MAC_LEN]; __le16 eapol_len; u8 eapol[0]; } __packed; -/* -* WMI_READY_EVENTID -*/ +/* WMI_READY_EVENTID */ enum wmi_phy_capability { - WMI_11A_CAPABILITY = 1, - WMI_11G_CAPABILITY = 2, - WMI_11AG_CAPABILITY = 3, - WMI_11NA_CAPABILITY = 4, - WMI_11NG_CAPABILITY = 5, - WMI_11NAG_CAPABILITY = 6, - WMI_11AD_CAPABILITY = 7, - WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY, + WMI_11A_CAPABILITY = 0x01, + WMI_11G_CAPABILITY = 0x02, + WMI_11AG_CAPABILITY = 0x03, + WMI_11NA_CAPABILITY = 0x04, + WMI_11NG_CAPABILITY = 0x05, + WMI_11NAG_CAPABILITY = 0x06, + 
WMI_11AD_CAPABILITY = 0x07, + WMI_11N_CAPABILITY_OFFSET = 0x03, }; struct wmi_ready_event { __le32 sw_version; __le32 abi_version; u8 mac[WMI_MAC_LEN]; - u8 phy_capability; /* enum wmi_phy_capability */ + /* enum wmi_phy_capability */ + u8 phy_capability; u8 numof_additional_mids; + /* rfc read calibration result. 5..15 */ + u8 rfc_read_calib_result; + u8 reserved[3]; } __packed; -/* - * WMI_NOTIFY_REQ_DONE_EVENTID - */ +/* WMI_NOTIFY_REQ_DONE_EVENTID */ struct wmi_notify_req_done_event { - __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */ + /* beamforming status, 0: fail; 1: OK; 2: retrying */ + __le32 status; __le64 tsf; - __le32 snr_val; + s8 rssi; + u8 reserved0[3]; __le32 tx_tpt; __le32 tx_goodput; __le32 rx_goodput; @@ -1066,9 +1536,7 @@ struct wmi_notify_req_done_event { u8 reserved[3]; } __packed; -/* - * WMI_CONNECT_EVENTID - */ +/* WMI_CONNECT_EVENTID */ struct wmi_connect_event { u8 channel; u8 reserved0; @@ -1081,69 +1549,106 @@ struct wmi_connect_event { u8 assoc_req_len; u8 assoc_resp_len; u8 cid; - u8 reserved2[3]; + u8 aid; + u8 reserved2[2]; + /* not in use */ u8 assoc_info[0]; } __packed; -/* - * WMI_DISCONNECT_EVENTID - */ +/* disconnect_reason */ enum wmi_disconnect_reason { - WMI_DIS_REASON_NO_NETWORK_AVAIL = 1, - WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */ - WMI_DIS_REASON_DISCONNECT_CMD = 3, - WMI_DIS_REASON_BSS_DISCONNECTED = 4, - WMI_DIS_REASON_AUTH_FAILED = 5, - WMI_DIS_REASON_ASSOC_FAILED = 6, - WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7, - WMI_DIS_REASON_CSERV_DISCONNECT = 8, - WMI_DIS_REASON_INVALID_PROFILE = 10, - WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11, - WMI_DIS_REASON_PROFILE_MISMATCH = 12, - WMI_DIS_REASON_CONNECTION_EVICTED = 13, - WMI_DIS_REASON_IBSS_MERGE = 14, + WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01, + /* bmiss */ + WMI_DIS_REASON_LOST_LINK = 0x02, + WMI_DIS_REASON_DISCONNECT_CMD = 0x03, + WMI_DIS_REASON_BSS_DISCONNECTED = 0x04, + WMI_DIS_REASON_AUTH_FAILED = 0x05, + WMI_DIS_REASON_ASSOC_FAILED = 0x06, + WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07, + WMI_DIS_REASON_CSERV_DISCONNECT = 0x08, + WMI_DIS_REASON_INVALID_PROFILE = 0x0A, + WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B, + WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C, + WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D, + WMI_DIS_REASON_IBSS_MERGE = 0x0E, }; +/* WMI_DISCONNECT_EVENTID */ struct wmi_disconnect_event { - __le16 protocol_reason_status; /* reason code, see 802.11 spec. */ - u8 bssid[WMI_MAC_LEN]; /* set if known */ - u8 disconnect_reason; /* see wmi_disconnect_reason */ - u8 assoc_resp_len; /* not used */ - u8 assoc_info[0]; /* not used */ + /* reason code, see 802.11 spec. 
*/ + __le16 protocol_reason_status; + /* set if known */ + u8 bssid[WMI_MAC_LEN]; + /* see enum wmi_disconnect_reason */ + u8 disconnect_reason; + /* last assoc req may passed to host - not in used */ + u8 assoc_resp_len; + /* last assoc req may passed to host - not in used */ + u8 assoc_info[0]; } __packed; -/* - * WMI_SCAN_COMPLETE_EVENTID - */ +/* WMI_SCAN_COMPLETE_EVENTID */ enum scan_status { - WMI_SCAN_SUCCESS = 0, - WMI_SCAN_FAILED = 1, - WMI_SCAN_ABORTED = 2, - WMI_SCAN_REJECTED = 3, + WMI_SCAN_SUCCESS = 0x00, + WMI_SCAN_FAILED = 0x01, + WMI_SCAN_ABORTED = 0x02, + WMI_SCAN_REJECTED = 0x03, + WMI_SCAN_ABORT_REJECTED = 0x04, }; struct wmi_scan_complete_event { - __le32 status; /* scan_status */ + /* enum scan_status */ + __le32 status; } __packed; -/* - * WMI_BA_STATUS_EVENTID - */ +/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */ +enum wmi_acs_info_bitmask { + WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01, + WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02, + WMI_ACS_INFO_BITMASK_TX_TIME = 0x04, + WMI_ACS_INFO_BITMASK_RX_TIME = 0x08, + WMI_ACS_INFO_BITMASK_NOISE = 0x10, +}; + +struct scan_acs_info { + u8 channel; + u8 beacon_found; + /* msec */ + __le16 busy_time; + __le16 tx_time; + __le16 rx_time; + u8 noise; + u8 reserved[3]; +} __packed; + +struct wmi_acs_passive_scan_complete_event { + __le32 dwell_time; + /* valid fields within channel info according to + * their appearance in struct order + */ + __le16 filled; + u8 num_scanned_channels; + u8 reserved; + struct scan_acs_info scan_info_list[0]; +} __packed; + +/* WMI_BA_STATUS_EVENTID */ enum wmi_vring_ba_status { - WMI_BA_AGREED = 0, - WMI_BA_NON_AGREED = 1, + WMI_BA_AGREED = 0x00, + WMI_BA_NON_AGREED = 0x01, /* BA_EN in middle of teardown flow */ - WMI_BA_TD_WIP = 2, + WMI_BA_TD_WIP = 0x02, /* BA_DIS or BA_EN in middle of BA SETUP flow */ - WMI_BA_SETUP_WIP = 3, + WMI_BA_SETUP_WIP = 0x03, /* BA_EN when the BA session is already active */ - WMI_BA_SESSION_ACTIVE = 4, + WMI_BA_SESSION_ACTIVE = 0x04, /* BA_DIS when the BA session is not active */ - WMI_BA_SESSION_NOT_ACTIVE = 5, + WMI_BA_SESSION_NOT_ACTIVE = 0x05, }; -struct wmi_vring_ba_status_event { - __le16 status; /* enum wmi_vring_ba_status */ +struct wmi_ba_status_event { + /* enum wmi_vring_ba_status */ + __le16 status; u8 reserved[2]; u8 ringid; u8 agg_wsize; @@ -1151,18 +1656,14 @@ struct wmi_vring_ba_status_event { u8 amsdu; } __packed; -/* - * WMI_DELBA_EVENTID - */ +/* WMI_DELBA_EVENTID */ struct wmi_delba_event { u8 cidxtid; u8 from_initiator; __le16 reason; } __packed; -/* - * WMI_VRING_CFG_DONE_EVENTID - */ +/* WMI_VRING_CFG_DONE_EVENTID */ struct wmi_vring_cfg_done_event { u8 ringid; u8 status; @@ -1170,215 +1671,1195 @@ struct wmi_vring_cfg_done_event { __le32 tx_vring_tail_ptr; } __packed; -/* - * WMI_RCP_ADDBA_RESP_SENT_EVENTID - */ +/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { u8 cidxtid; u8 reserved; __le16 status; } __packed; -/* - * WMI_RCP_ADDBA_REQ_EVENTID - */ +/* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { u8 cidxtid; u8 dialog_token; - __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */ + /* ieee80211_ba_parameterset as it received */ + __le16 ba_param_set; __le16 ba_timeout; - __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it received */ + /* ieee80211_ba_seqstrl field as it received */ + __le16 ba_seq_ctrl; } __packed; -/* - * WMI_CFG_RX_CHAIN_DONE_EVENTID - */ +/* WMI_CFG_RX_CHAIN_DONE_EVENTID */ enum wmi_cfg_rx_chain_done_event_status { - WMI_CFG_RX_CHAIN_SUCCESS = 1, + WMI_CFG_RX_CHAIN_SUCCESS = 
0x01, }; struct wmi_cfg_rx_chain_done_event { - __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */ + /* V-Ring Tail pointer */ + __le32 rx_ring_tail_ptr; __le32 status; } __packed; -/* - * WMI_WBE_LINK_DOWN_EVENTID - */ +/* WMI_WBE_LINK_DOWN_EVENTID */ enum wmi_wbe_link_down_event_reason { - WMI_WBE_REASON_USER_REQUEST = 0, - WMI_WBE_REASON_RX_DISASSOC = 1, - WMI_WBE_REASON_BAD_PHY_LINK = 2, + WMI_WBE_REASON_USER_REQUEST = 0x00, + WMI_WBE_REASON_RX_DISASSOC = 0x01, + WMI_WBE_REASON_BAD_PHY_LINK = 0x02, }; +/* WMI_WBE_LINK_DOWN_EVENTID */ struct wmi_wbe_link_down_event { u8 cid; u8 reserved[3]; __le32 reason; } __packed; -/* - * WMI_DATA_PORT_OPEN_EVENTID - */ +/* WMI_DATA_PORT_OPEN_EVENTID */ struct wmi_data_port_open_event { u8 cid; u8 reserved[3]; } __packed; -/* - * WMI_VRING_EN_EVENTID - */ +/* WMI_VRING_EN_EVENTID */ struct wmi_vring_en_event { u8 vring_index; u8 reserved[3]; } __packed; -/* - * WMI_GET_PCP_CHANNEL_EVENTID - */ +/* WMI_GET_PCP_CHANNEL_EVENTID */ struct wmi_get_pcp_channel_event { u8 channel; u8 reserved[3]; } __packed; -/* - * WMI_P2P_CFG_DONE_EVENTID - */ +/* WMI_P2P_CFG_DONE_EVENTID */ struct wmi_p2p_cfg_done_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* -* WMI_PORT_ALLOCATED_EVENTID -*/ +/* WMI_PORT_ALLOCATED_EVENTID */ struct wmi_port_allocated_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* -* WMI_PORT_DELETED_EVENTID -*/ +/* WMI_PORT_DELETED_EVENTID */ struct wmi_port_deleted_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_LISTEN_STARTED_EVENTID - */ +/* WMI_LISTEN_STARTED_EVENTID */ struct wmi_listen_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_SEARCH_STARTED_EVENTID - */ +/* WMI_SEARCH_STARTED_EVENTID */ struct wmi_search_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_PCP_STARTED_EVENTID - */ +/* WMI_PCP_STARTED_EVENTID */ struct wmi_pcp_started_event { - u8 status; /* wmi_fw_status */ + /* wmi_fw_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_PCP_FACTOR_EVENTID - */ +/* WMI_PCP_FACTOR_EVENTID */ struct wmi_pcp_factor_event { __le32 pcp_factor; } __packed; -/* - * WMI_SW_TX_COMPLETE_EVENTID - */ enum wmi_sw_tx_status { - WMI_TX_SW_STATUS_SUCCESS = 0, - WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1, - WMI_TX_SW_STATUS_FAILED_TX = 2, + WMI_TX_SW_STATUS_SUCCESS = 0x00, + WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01, + WMI_TX_SW_STATUS_FAILED_TX = 0x02, }; +/* WMI_SW_TX_COMPLETE_EVENTID */ struct wmi_sw_tx_complete_event { - u8 status; /* enum wmi_sw_tx_status */ + /* enum wmi_sw_tx_status */ + u8 status; u8 reserved[3]; } __packed; -/* - * WMI_CORR_MEASURE_EVENTID - */ +/* WMI_CORR_MEASURE_EVENTID - deprecated */ struct wmi_corr_measure_event { - s32 i; - s32 q; - s32 image_i; - s32 image_q; + /* signed */ + __le32 i; + /* signed */ + __le32 q; + /* signed */ + __le32 image_i; + /* signed */ + __le32 image_q; } __packed; -/* - * WMI_READ_RSSI_EVENTID - */ +/* WMI_READ_RSSI_EVENTID */ struct wmi_read_rssi_event { __le32 ina_rssi_adc_dbm; } __packed; -/* - * WMI_GET_SSID_EVENTID - */ +/* WMI_GET_SSID_EVENTID */ struct wmi_get_ssid_event { __le32 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* - * WMI_RX_MGMT_PACKET_EVENTID - */ +/* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; - s8 snr; + s8 rssi; u8 
range; u8 sqi; __le16 stype; - __le16 status; + __le16 snr; __le32 len; + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 qid; + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 mid; u8 cid; - u8 channel; /* From Radio MNGR */ + /* From Radio MNGR */ + u8 channel; } __packed; -/* - * WMI_TX_MGMT_PACKET_EVENTID - */ +/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */ +struct wmi_rf_xpm_read_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; + /* requested num_bytes of data */ + u8 data_bytes[0]; +} __packed; + +/* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */ +struct wmi_rf_xpm_write_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TX_MGMT_PACKET_EVENTID */ struct wmi_tx_mgmt_packet_event { u8 payload[0]; } __packed; +/* WMI_RX_MGMT_PACKET_EVENTID */ struct wmi_rx_mgmt_packet_event { struct wmi_rx_mgmt_info info; u8 payload[0]; } __packed; -/* - * WMI_ECHO_RSP_EVENTID - */ -struct wmi_echo_event { +/* WMI_ECHO_RSP_EVENTID */ +struct wmi_echo_rsp_event { __le32 echoed_value; } __packed; -/* - * WMI_TEMP_SENSE_DONE_EVENTID +/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */ +struct wmi_rf_pwr_on_delay_rsp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID */ +struct wmi_set_high_power_table_params_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures */ struct wmi_temp_sense_done_event { + /* Temperature times 1000 (actual temperature will be achieved by + * dividing the value by 1000) + */ __le32 baseband_t1000; + /* Temperature times 1000 (actual temperature will be achieved by + * dividing the value by 1000) + */ __le32 rf_t1000; } __packed; +#define WMI_SCAN_DWELL_TIME_MS (100) +#define WMI_SURVEY_TIMEOUT_MS (10000) + +enum wmi_hidden_ssid { + WMI_HIDDEN_SSID_DISABLED = 0x00, + WMI_HIDDEN_SSID_SEND_EMPTY = 0x10, + WMI_HIDDEN_SSID_CLEAR = 0xFE, +}; + +/* WMI_LED_CFG_CMDID + * + * Configure LED On\Off\Blinking operation + * + * Returned events: + * - WMI_LED_CFG_DONE_EVENTID + */ +enum led_mode { + LED_DISABLE = 0x00, + LED_ENABLE = 0x01, +}; + +/* The names of the led as + * described on HW schemes. + */ +enum wmi_led_id { + WMI_LED_WLAN = 0x00, + WMI_LED_WPAN = 0x01, + WMI_LED_WWAN = 0x02, +}; + +/* Led polarity mode. 
*/ +enum wmi_led_polarity { + LED_POLARITY_HIGH_ACTIVE = 0x00, + LED_POLARITY_LOW_ACTIVE = 0x01, +}; + +/* Combination of on and off + * creates the blinking period + */ +struct wmi_led_blink_mode { + __le32 blink_on; + __le32 blink_off; +} __packed; + +/* WMI_LED_CFG_CMDID */ +struct wmi_led_cfg_cmd { + /* enum led_mode_e */ + u8 led_mode; + /* enum wmi_led_id_e */ + u8 id; + /* slow speed blinking combination */ + struct wmi_led_blink_mode slow_blink_cfg; + /* medium speed blinking combination */ + struct wmi_led_blink_mode medium_blink_cfg; + /* high speed blinking combination */ + struct wmi_led_blink_mode fast_blink_cfg; + /* polarity of the led */ + u8 led_polarity; + /* reserved */ + u8 reserved; +} __packed; + +/* \WMI_SET_CONNECT_SNR_THR_CMDID */ +struct wmi_set_connect_snr_thr_cmd { + u8 enable; + u8 reserved; + /* 1/4 Db units */ + __le16 omni_snr_thr; + /* 1/4 Db units */ + __le16 direct_snr_thr; +} __packed; + +/* WMI_LED_CFG_DONE_EVENTID */ +struct wmi_led_cfg_done_event { + /* led config status */ + __le32 status; +} __packed; + +/* Rate search parameters configuration per connection */ +struct wmi_rs_cfg { + /* The maximal allowed PER for each MCS + * MCS will be considered as failed if PER during RS is higher + */ + u8 per_threshold[WMI_NUM_MCS]; + /* Number of MPDUs for each MCS + * this is the minimal statistic required to make an educated + * decision + */ + u8 min_frame_cnt[WMI_NUM_MCS]; + /* stop threshold [0-100] */ + u8 stop_th; + /* MCS1 stop threshold [0-100] */ + u8 mcs1_fail_th; + u8 max_back_failure_th; + /* Debug feature for disabling internal RS trigger (which is + * currently triggered by BF Done) + */ + u8 dbg_disable_internal_trigger; + __le32 back_failure_mask; + __le32 mcs_en_vec; +} __packed; + +/* Slot types */ +enum wmi_sched_scheme_slot_type { + WMI_SCHED_SLOT_SP = 0x0, + WMI_SCHED_SLOT_CBAP = 0x1, + WMI_SCHED_SLOT_IDLE = 0x2, + WMI_SCHED_SLOT_ANNOUNCE_NO_ACK = 0x3, + WMI_SCHED_SLOT_DISCOVERY = 0x4, +}; + +enum wmi_sched_scheme_slot_flags { + WMI_SCHED_SCHEME_SLOT_PERIODIC = 0x1, +}; + +struct wmi_sched_scheme_slot { + /* in microsecond */ + __le32 tbtt_offset; + /* wmi_sched_scheme_slot_flags */ + u8 flags; + /* wmi_sched_scheme_slot_type */ + u8 type; + /* in microsecond */ + __le16 duration; + /* frame_exchange_sequence_duration */ + __le16 tx_op; + /* time in microseconds between two consecutive slots + * relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + */ + __le16 period; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * number of times to repeat allocation + */ + u8 num_of_blocks; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * every idle_period allocation will be idle + */ + u8 idle_period; + u8 src_aid; + u8 dest_aid; + __le32 reserved; +} __packed; + +enum wmi_sched_scheme_flags { + /* should not be set when clearing scheduling scheme */ + WMI_SCHED_SCHEME_ENABLE = 0x01, + WMI_SCHED_PROTECTED_SP = 0x02, + /* should be set only on first WMI fragment of scheme */ + WMI_SCHED_FIRST = 0x04, + /* should be set only on last WMI fragment of scheme */ + WMI_SCHED_LAST = 0x08, + WMI_SCHED_IMMEDIATE_START = 0x10, +}; + +enum wmi_sched_scheme_advertisment { + /* ESE is not advertised at all, STA has to be configured with WMI + * also + */ + WMI_ADVERTISE_ESE_DISABLED = 0x0, + WMI_ADVERTISE_ESE_IN_BEACON = 0x1, + WMI_ADVERTISE_ESE_IN_ANNOUNCE_FRAME = 0x2, +}; + +/* WMI_SCHEDULING_SCHEME_CMD */ +struct wmi_scheduling_scheme_cmd { + u8 serial_num; + /* wmi_sched_scheme_advertisment */ + u8 ese_advertisment; + /* 
wmi_sched_scheme_flags */ + __le16 flags; + u8 num_allocs; + u8 reserved[3]; + __le64 start_tbtt; + /* allocations list */ + struct wmi_sched_scheme_slot allocs[WMI_SCHED_MAX_ALLOCS_PER_CMD]; +} __packed; + +enum wmi_sched_scheme_failure_type { + WMI_SCHED_SCHEME_FAILURE_NO_ERROR = 0x00, + WMI_SCHED_SCHEME_FAILURE_OLD_START_TSF_ERR = 0x01, +}; + +/* WMI_SCHEDULING_SCHEME_EVENTID */ +struct wmi_scheduling_scheme_event { + /* wmi_fw_status_e */ + u8 status; + /* serial number given in command */ + u8 serial_num; + /* wmi_sched_scheme_failure_type */ + u8 failure_type; + /* alignment to 32b */ + u8 reserved[1]; +} __packed; + +/* WMI_RS_CFG_CMDID */ +struct wmi_rs_cfg_cmd { + /* connection id */ + u8 cid; + /* enable or disable rate search */ + u8 rs_enable; + /* rate search configuration */ + struct wmi_rs_cfg rs_cfg; +} __packed; + +/* WMI_RS_CFG_DONE_EVENTID */ +struct wmi_rs_cfg_done_event { + u8 cid; + /* enum wmi_fw_status */ + u8 status; + u8 reserved[2]; +} __packed; + +/* WMI_GET_DETAILED_RS_RES_CMDID */ +struct wmi_get_detailed_rs_res_cmd { + /* connection id */ + u8 cid; + u8 reserved[3]; +} __packed; + +/* RS results status */ +enum wmi_rs_results_status { + WMI_RS_RES_VALID = 0x00, + WMI_RS_RES_INVALID = 0x01, +}; + +/* Rate search results */ +struct wmi_rs_results { + /* number of sent MPDUs */ + u8 num_of_tx_pkt[WMI_NUM_MCS]; + /* number of non-acked MPDUs */ + u8 num_of_non_acked_pkt[WMI_NUM_MCS]; + /* RS timestamp */ + __le32 tsf; + /* RS selected MCS */ + u8 mcs; +} __packed; + +/* WMI_GET_DETAILED_RS_RES_EVENTID */ +struct wmi_get_detailed_rs_res_event { + u8 cid; + /* enum wmi_rs_results_status */ + u8 status; + /* detailed rs results */ + struct wmi_rs_results rs_results; + u8 reserved[3]; +} __packed; + +/* BRP antenna limit mode */ +enum wmi_brp_ant_limit_mode { + /* Disable BRP force antenna limit */ + WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00, + /* Define maximal antennas limit. 
Only effective antennas will be + * actually used + */ + WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01, + /* Force a specific number of antennas */ + WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02, + /* number of BRP antenna limit modes */ + WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03, +}; + +/* WMI_BRP_SET_ANT_LIMIT_CMDID */ +struct wmi_brp_set_ant_limit_cmd { + /* connection id */ + u8 cid; + /* enum wmi_brp_ant_limit_mode */ + u8 limit_mode; + /* antenna limit count, 1-27 + * disable_mode - ignored + * effective_mode - upper limit to number of antennas to be used + * force_mode - exact number of antennas to be used + */ + u8 ant_limit; + u8 reserved; +} __packed; + +/* WMI_BRP_SET_ANT_LIMIT_EVENTID */ +struct wmi_brp_set_ant_limit_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* broadcast connection ID */ +#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF) + +/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */ +enum wmi_link_maintain_cfg_type { + /* AP/PCP default normal (non-FST) configuration settings */ + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00, + /* AP/PCP default FST configuration settings */ + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01, + /* STA default normal (non-FST) configuration settings */ + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02, + /* STA default FST configuration settings */ + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03, + /* custom configuration settings */ + WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04, + /* number of defined configuration types */ + WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05, +}; + +/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */ +enum wmi_link_maintain_cfg_response_status { + /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished + */ + WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00, + /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ + * command request + */ + WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01, +}; + +/* Link Loss and Keep Alive configuration */ +struct wmi_link_maintain_cfg { + /* link_loss_enable_detectors_vec */ + __le32 link_loss_enable_detectors_vec; + /* detectors check period usec */ + __le32 check_link_loss_period_usec; + /* max allowed tx ageing */ + __le32 tx_ageing_threshold_usec; + /* keep alive period for high SNR */ + __le32 keep_alive_period_usec_high_snr; + /* keep alive period for low SNR */ + __le32 keep_alive_period_usec_low_snr; + /* lower snr limit for keep alive period update */ + __le32 keep_alive_snr_threshold_low_db; + /* upper snr limit for keep alive period update */ + __le32 keep_alive_snr_threshold_high_db; + /* num of successive bad bcons causing link-loss */ + __le32 bad_beacons_num_threshold; + /* SNR limit for bad_beacons_detector */ + __le32 bad_beacons_snr_threshold_db; +} __packed; + +/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */ +struct wmi_link_maintain_cfg_write_cmd { + /* enum wmi_link_maintain_cfg_type_e - type of requested default + * configuration to be applied + */ + __le32 cfg_type; + /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */ + __le32 cid; + /* custom configuration settings to be applied (relevant only if + * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM) + */ + struct wmi_link_maintain_cfg lm_cfg; +} __packed; + +/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */ +struct wmi_link_maintain_cfg_read_cmd { + /* connection ID which configuration settings are requested */ + __le32 cid; +} __packed; + +/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */ +struct 
wmi_link_maintain_cfg_write_done_event { + /* requested connection ID */ + __le32 cid; + /* wmi_link_maintain_cfg_response_status_e - write status */ + __le32 status; +} __packed; + +/* \WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENT */ +struct wmi_link_maintain_cfg_read_done_event { + /* requested connection ID */ + __le32 cid; + /* wmi_link_maintain_cfg_response_status_e - read status */ + __le32 status; + /* Retrieved configuration settings */ + struct wmi_link_maintain_cfg lm_cfg; +} __packed; + +enum wmi_traffic_suspend_status { + WMI_TRAFFIC_SUSPEND_APPROVED = 0x0, + WMI_TRAFFIC_SUSPEND_REJECTED = 0x1, +}; + +/* WMI_TRAFFIC_SUSPEND_EVENTID */ +struct wmi_traffic_suspend_event { + /* enum wmi_traffic_suspend_status_e */ + u8 status; +} __packed; + +enum wmi_traffic_resume_status { + WMI_TRAFFIC_RESUME_SUCCESS = 0x0, + WMI_TRAFFIC_RESUME_FAILED = 0x1, +}; + +/* WMI_TRAFFIC_RESUME_EVENTID */ +struct wmi_traffic_resume_event { + /* enum wmi_traffic_resume_status_e */ + u8 status; +} __packed; + +/* Power Save command completion status codes */ +enum wmi_ps_cfg_cmd_status { + WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00, + WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01, + /* other error */ + WMI_PS_CFG_CMD_STATUS_ERROR = 0x02, +}; + +/* Device Power Save Profiles */ +enum wmi_ps_profile_type { + WMI_PS_PROFILE_TYPE_DEFAULT = 0x00, + WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01, + WMI_PS_PROFILE_TYPE_MAX_PS = 0x02, + WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03, +}; + +/* WMI_PS_DEV_PROFILE_CFG_READ_CMDID */ +struct wmi_ps_dev_profile_cfg_read_cmd { + /* reserved */ + __le32 reserved; +} __packed; + +/* WMI_PS_DEV_PROFILE_CFG_READ_EVENTID */ +struct wmi_ps_dev_profile_cfg_read_event { + /* wmi_ps_profile_type_e */ + u8 ps_profile; + u8 reserved[3]; +} __packed; + +/* WMI_PS_DEV_PROFILE_CFG_CMDID + * + * Power save profile to be used by the device + * + * Returned event: + * - WMI_PS_DEV_PROFILE_CFG_EVENTID + */ +struct wmi_ps_dev_profile_cfg_cmd { + /* wmi_ps_profile_type_e */ + u8 ps_profile; + u8 reserved[3]; +} __packed; + +/* WMI_PS_DEV_PROFILE_CFG_EVENTID */ +struct wmi_ps_dev_profile_cfg_event { + /* wmi_ps_cfg_cmd_status_e */ + __le32 status; +} __packed; + +enum wmi_ps_level { + WMI_PS_LEVEL_DEEP_SLEEP = 0x00, + WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01, + /* awake = all PS mechanisms are disabled */ + WMI_PS_LEVEL_AWAKE = 0x02, +}; + +enum wmi_ps_deep_sleep_clk_level { + /* 33k */ + WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00, + /* 10k */ + WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01, + /* @RTC Low latency */ + WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02, + WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03, + WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04, + /* Not Applicable */ + WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF, +}; + +/* Response by the FW to a D3 entry request */ +enum wmi_ps_d3_resp_policy { + WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00, + /* debug -D3 req is always denied */ + WMI_PS_D3_RESP_POLICY_DENIED = 0x01, + /* debug -D3 req is always approved */ + WMI_PS_D3_RESP_POLICY_APPROVED = 0x02, +}; + +#define WMI_AOA_MAX_DATA_SIZE (128) + +enum wmi_aoa_meas_status { + WMI_AOA_MEAS_SUCCESS = 0x00, + WMI_AOA_MEAS_PEER_INCAPABLE = 0x01, + WMI_AOA_MEAS_FAILURE = 0x02, +}; + +/* WMI_AOA_MEAS_EVENTID */ +struct wmi_aoa_meas_event { + u8 mac_addr[WMI_MAC_LEN]; + /* channels IDs: + * 0 - 58320 MHz + * 1 - 60480 MHz + * 2 - 62640 MHz + */ + u8 channel; + /* enum wmi_aoa_meas_type */ + u8 aoa_meas_type; + /* Measurments are from RFs, defined by the mask */ + __le32 meas_rf_mask; + /* enum wmi_aoa_meas_status */ + u8 meas_status; + u8 reserved; + /* 
Length of meas_data in bytes */ + __le16 length; + u8 meas_data[WMI_AOA_MAX_DATA_SIZE]; +} __packed; + +/* WMI_SET_MGMT_RETRY_LIMIT_EVENTID */ +struct wmi_set_mgmt_retry_limit_event { + /* enum wmi_fw_status */ + u8 status; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +/* WMI_GET_MGMT_RETRY_LIMIT_EVENTID */ +struct wmi_get_mgmt_retry_limit_event { + /* MAC retransmit limit for mgmt frames */ + u8 mgmt_retry_limit; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +/* WMI_TOF_GET_CAPABILITIES_EVENTID */ +struct wmi_tof_get_capabilities_event { + u8 ftm_capability; + /* maximum supported number of destination to start TOF */ + u8 max_num_of_dest; + /* maximum supported number of measurements per burst */ + u8 max_num_of_meas_per_burst; + u8 reserved; + /* maximum supported multi bursts */ + __le16 max_multi_bursts_sessions; + /* maximum supported FTM burst duration , wmi_tof_burst_duration_e */ + __le16 max_ftm_burst_duration; + /* AOA supported types */ + __le32 aoa_supported_types; +} __packed; + +/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */ +struct wmi_set_thermal_throttling_cfg_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */ +struct wmi_get_thermal_throttling_cfg_event { + /* Status data */ + struct wmi_tt_data tt_data; +} __packed; + +enum wmi_tof_session_end_status { + WMI_TOF_SESSION_END_NO_ERROR = 0x00, + WMI_TOF_SESSION_END_FAIL = 0x01, + WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02, + WMI_TOF_SESSION_END_ABORTED = 0x03, +}; + +/* WMI_TOF_SESSION_END_EVENTID */ +struct wmi_tof_session_end_event { + /* FTM session ID */ + __le32 session_id; + /* wmi_tof_session_end_status_e */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TOF_SET_LCI_EVENTID */ +struct wmi_tof_set_lci_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TOF_SET_LCR_EVENTID */ +struct wmi_tof_set_lcr_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* Responder FTM Results */ +struct wmi_responder_ftm_res { + u8 t1[6]; + u8 t2[6]; + u8 t3[6]; + u8 t4[6]; + __le16 tod_err; + __le16 toa_err; + __le16 tod_err_initiator; + __le16 toa_err_initiator; +} __packed; + +enum wmi_tof_ftm_per_dest_res_status { + WMI_PER_DEST_RES_NO_ERROR = 0x00, + WMI_PER_DEST_RES_TX_RX_FAIL = 0x01, + WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02, +}; + +enum wmi_tof_ftm_per_dest_res_flags { + WMI_PER_DEST_RES_REQ_START = 0x01, + WMI_PER_DEST_RES_BURST_REPORT_END = 0x02, + WMI_PER_DEST_RES_REQ_END = 0x04, + WMI_PER_DEST_RES_PARAM_UPDATE = 0x08, +}; + +/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */ +struct wmi_tof_ftm_per_dest_res_event { + /* FTM session ID */ + __le32 session_id; + /* destination MAC address */ + u8 dst_mac[WMI_MAC_LEN]; + /* wmi_tof_ftm_per_dest_res_flags_e */ + u8 flags; + /* wmi_tof_ftm_per_dest_res_status_e */ + u8 status; + /* responder ASAP */ + u8 responder_asap; + /* responder number of FTM per burst */ + u8 responder_num_ftm_per_burst; + /* responder number of FTM burst exponent */ + u8 responder_num_ftm_bursts_exp; + /* responder burst duration ,wmi_tof_burst_duration_e */ + u8 responder_burst_duration; + /* responder burst period, indicate interval between two consecutive + * burst instances, in units of 100 ms + */ + __le16 responder_burst_period; + /* receive burst counter */ + __le16 bursts_cnt; + /* tsf of responder start burst */ + __le32 tsf_sync; + /* actual received ftm per burst */ + u8 actual_ftm_per_burst; + /* Measurments are from RFs, defined by the mask 
*/ + __le32 meas_rf_mask; + u8 reserved0[3]; + struct wmi_responder_ftm_res responder_ftm_res[0]; +} __packed; + +/* WMI_TOF_CFG_RESPONDER_EVENTID */ +struct wmi_tof_cfg_responder_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +enum wmi_tof_channel_info_type { + WMI_TOF_CHANNEL_INFO_AOA = 0x00, + WMI_TOF_CHANNEL_INFO_LCI = 0x01, + WMI_TOF_CHANNEL_INFO_LCR = 0x02, + WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03, + WMI_TOF_CHANNEL_INFO_CIR = 0x04, + WMI_TOF_CHANNEL_INFO_RSSI = 0x05, + WMI_TOF_CHANNEL_INFO_SNR = 0x06, + WMI_TOF_CHANNEL_INFO_DEBUG = 0x07, +}; + +/* WMI_TOF_CHANNEL_INFO_EVENTID */ +struct wmi_tof_channel_info_event { + /* FTM session ID */ + __le32 session_id; + /* destination MAC address */ + u8 dst_mac[WMI_MAC_LEN]; + /* wmi_tof_channel_info_type_e */ + u8 type; + /* data report length */ + u8 len; + /* data report payload */ + u8 report[0]; +} __packed; + +/* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */ +struct wmi_tof_set_tx_rx_offset_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TOF_GET_TX_RX_OFFSET_EVENTID */ +struct wmi_tof_get_tx_rx_offset_event { + /* enum wmi_fw_status */ + u8 status; + /* RF index used to read the offsets */ + u8 rf_index; + u8 reserved1[2]; + /* TX delay offset */ + __le32 tx_offset; + /* RX delay offset */ + __le32 rx_offset; + /* Offset to strongest tap of CIR */ + __le32 precursor; +} __packed; + +/* Result status codes for WMI commands */ +enum wmi_rf_sector_status { + WMI_RF_SECTOR_STATUS_SUCCESS = 0x00, + WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR = 0x01, + WMI_RF_SECTOR_STATUS_BUSY_ERROR = 0x02, + WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR = 0x03, +}; + +/* Types of the RF sector (TX,RX) */ +enum wmi_rf_sector_type { + WMI_RF_SECTOR_TYPE_RX = 0x00, + WMI_RF_SECTOR_TYPE_TX = 0x01, +}; + +/* Content of RF Sector (six 32-bits registers) */ +struct wmi_rf_sector_info { + /* Phase values for RF Chains[15-0] (2bits per RF chain) */ + __le32 psh_hi; + /* Phase values for RF Chains[31-16] (2bits per RF chain) */ + __le32 psh_lo; + /* ETYPE Bit0 for all RF chains[31-0] - bit0 of Edge amplifier gain + * index + */ + __le32 etype0; + /* ETYPE Bit1 for all RF chains[31-0] - bit1 of Edge amplifier gain + * index + */ + __le32 etype1; + /* ETYPE Bit2 for all RF chains[31-0] - bit2 of Edge amplifier gain + * index + */ + __le32 etype2; + /* D-Type values (3bits each) for 8 Distribution amplifiers + X16 + * switch bits + */ + __le32 dtype_swch_off; +} __packed; + +#define WMI_INVALID_RF_SECTOR_INDEX (0xFFFF) +#define WMI_MAX_RF_MODULES_NUM (8) + +/* WMI_GET_RF_SECTOR_PARAMS_CMD */ +struct wmi_get_rf_sector_params_cmd { + /* Sector number to be retrieved */ + __le16 sector_idx; + /* enum wmi_rf_sector_type - type of requested RF sector */ + u8 sector_type; + /* bitmask vector specifying destination RF modules */ + u8 rf_modules_vec; +} __packed; + +/* \WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT */ +struct wmi_get_rf_sector_params_done_event { + /* result status of WMI_GET_RF_SECTOR_PARAMS_CMD (enum + * wmi_rf_sector_status) + */ + u8 status; + /* align next field to U64 boundary */ + u8 reserved[7]; + /* TSF timestamp when RF sectors where retrieved */ + __le64 tsf; + /* Content of RF sector retrieved from each RF module */ + struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM]; +} __packed; + +/* WMI_SET_RF_SECTOR_PARAMS_CMD */ +struct wmi_set_rf_sector_params_cmd { + /* Sector number to be retrieved */ + __le16 sector_idx; + /* enum wmi_rf_sector_type - type of requested RF sector */ + u8 
sector_type; + /* bitmask vector specifying destination RF modules */ + u8 rf_modules_vec; + /* Content of RF sector to be written to each RF module */ + struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM]; +} __packed; + +/* \WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT */ +struct wmi_set_rf_sector_params_done_event { + /* result status of WMI_SET_RF_SECTOR_PARAMS_CMD (enum + * wmi_rf_sector_status) + */ + u8 status; +} __packed; + +/* WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD - Get RF sector index selected by + * TXSS/BRP for communication with specified CID + */ +struct wmi_get_selected_rf_sector_index_cmd { + /* Connection/Station ID in [0:7] range */ + u8 cid; + /* type of requested RF sector (enum wmi_rf_sector_type) */ + u8 sector_type; + /* align to U32 boundary */ + u8 reserved[2]; +} __packed; + +/* \WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Returns retrieved RF sector + * index selected by TXSS/BRP for communication with specified CID + */ +struct wmi_get_selected_rf_sector_index_done_event { + /* Retrieved sector index selected in TXSS (for TX sector request) or + * BRP (for RX sector request) + */ + __le16 sector_idx; + /* result status of WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD (enum + * wmi_rf_sector_status) + */ + u8 status; + /* align next field to U64 boundary */ + u8 reserved[5]; + /* TSF timestamp when result was retrieved */ + __le64 tsf; +} __packed; + +/* WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD - Force RF sector index for + * communication with specified CID. Assumes that TXSS/BRP is disabled by + * other command + */ +struct wmi_set_selected_rf_sector_index_cmd { + /* Connection/Station ID in [0:7] range */ + u8 cid; + /* type of requested RF sector (enum wmi_rf_sector_type) */ + u8 sector_type; + /* Forced sector index */ + __le16 sector_idx; +} __packed; + +/* \WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Success/Fail status for + * WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD + */ +struct wmi_set_selected_rf_sector_index_done_event { + /* result status of WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD (enum + * wmi_rf_sector_status) + */ + u8 status; + /* align to U32 boundary */ + u8 reserved[3]; +} __packed; + +/* WMI_SET_RF_SECTOR_ON_CMD - Activates specified sector for specified rf + * modules + */ +struct wmi_set_rf_sector_on_cmd { + /* Sector index to be activated */ + __le16 sector_idx; + /* type of requested RF sector (enum wmi_rf_sector_type) */ + u8 sector_type; + /* bitmask vector specifying destination RF modules */ + u8 rf_modules_vec; +} __packed; + +/* \WMI_SET_RF_SECTOR_ON_DONE_EVENT - Success/Fail status for + * WMI_SET_RF_SECTOR_ON_CMD + */ +struct wmi_set_rf_sector_on_done_event { + /* result status of WMI_SET_RF_SECTOR_ON_CMD (enum + * wmi_rf_sector_status) + */ + u8 status; + /* align to U32 boundary */ + u8 reserved[3]; +} __packed; + +enum wmi_sector_sweep_type { + WMI_SECTOR_SWEEP_TYPE_TXSS = 0x00, + WMI_SECTOR_SWEEP_TYPE_BCON = 0x01, + WMI_SECTOR_SWEEP_TYPE_TXSS_AND_BCON = 0x02, + WMI_SECTOR_SWEEP_TYPE_NUM = 0x03, +}; + +/* WMI_PRIO_TX_SECTORS_ORDER_CMDID + * + * Set the order of TX sectors in TXSS and/or Beacon(AP). 
+ * + * Returned event: + * - WMI_PRIO_TX_SECTORS_ORDER_EVENTID + */ +struct wmi_prio_tx_sectors_order_cmd { + /* tx sectors order to be applied, 0xFF for end of array */ + u8 tx_sectors_priority_array[MAX_NUM_OF_SECTORS]; + /* enum wmi_sector_sweep_type, TXSS and/or Beacon */ + u8 sector_sweep_type; + /* needed only for TXSS configuration */ + u8 cid; + /* alignment to 32b */ + u8 reserved[2]; +} __packed; + +/* completion status codes */ +enum wmi_prio_tx_sectors_cmd_status { + WMI_PRIO_TX_SECT_CMD_STATUS_SUCCESS = 0x00, + WMI_PRIO_TX_SECT_CMD_STATUS_BAD_PARAM = 0x01, + /* other error */ + WMI_PRIO_TX_SECT_CMD_STATUS_ERROR = 0x02, +}; + +/* WMI_PRIO_TX_SECTORS_ORDER_EVENTID */ +struct wmi_prio_tx_sectors_order_event { + /* enum wmi_prio_tx_sectors_cmd_status */ + u8 status; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +struct wmi_prio_tx_sectors_num_cmd { + /* [0-128], 0 = No changes */ + u8 beacon_number_of_sectors; + /* [0-128], 0 = No changes */ + u8 txss_number_of_sectors; + /* [0-8] needed only for TXSS configuration */ + u8 cid; +} __packed; + +/* WMI_PRIO_TX_SECTORS_NUMBER_CMDID + * + * Set the number of active sectors in TXSS and/or Beacon. + * + * Returned event: + * - WMI_PRIO_TX_SECTORS_NUMBER_EVENTID + */ +struct wmi_prio_tx_sectors_number_cmd { + struct wmi_prio_tx_sectors_num_cmd active_sectors_num; + /* alignment to 32b */ + u8 reserved; +} __packed; + +/* WMI_PRIO_TX_SECTORS_NUMBER_EVENTID */ +struct wmi_prio_tx_sectors_number_event { + /* enum wmi_prio_tx_sectors_cmd_status */ + u8 status; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID + * + * Set default sectors order and number (hard coded in board file) + * in TXSS and/or Beacon. + * + * Returned event: + * - WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID + */ +struct wmi_prio_tx_sectors_set_default_cfg_cmd { + /* enum wmi_sector_sweep_type, TXSS and/or Beacon */ + u8 sector_sweep_type; + /* needed only for TXSS configuration */ + u8 cid; + /* alignment to 32b */ + u8 reserved[2]; +} __packed; + +/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID */ +struct wmi_prio_tx_sectors_set_default_cfg_event { + /* enum wmi_prio_tx_sectors_cmd_status */ + u8 status; + /* alignment to 32b */ + u8 reserved[3]; +} __packed; + +/* WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID */ +struct wmi_set_silent_rssi_table_done_event { + /* enum wmi_silent_rssi_status */ + __le32 status; + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + +/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +struct wmi_command_not_supported_event { + /* device id */ + u8 mid; + u8 reserved0; + __le16 command_id; + /* for UT command only, otherwise reserved */ + __le16 command_subtype; + __le16 reserved1; +} __packed; + #endif /* __WILOCITY_WMI_H__ */
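
As an illustrative aside, here is a minimal sketch (not part of the patch) of how a host could walk the trailing responder_ftm_res[] records carried by WMI_TOF_FTM_PER_DEST_RES_EVENTID. Only the struct layouts come from the header above; the function name and the payload-length argument are assumptions about how the transport hands over the raw event buffer.

/* assumes the wmi.h definitions above are in scope */
#include <linux/kernel.h>

static void example_walk_ftm_results(const struct wmi_tof_ftm_per_dest_res_event *evt,
				     size_t payload_len)
{
	size_t hdr = offsetof(struct wmi_tof_ftm_per_dest_res_event,
			      responder_ftm_res);
	size_t i, n;

	if (payload_len < hdr)
		return;		/* truncated event, nothing to parse */

	/* number of complete wmi_responder_ftm_res records that follow */
	n = (payload_len - hdr) / sizeof(struct wmi_responder_ftm_res);

	for (i = 0; i < n; i++) {
		const struct wmi_responder_ftm_res *r = &evt->responder_ftm_res[i];

		/* tod_err/toa_err are little-endian on the wire */
		pr_debug("ftm res %zu: tod_err %u toa_err %u\n",
			 i, le16_to_cpu(r->tod_err), le16_to_cpu(r->toa_err));
	}
}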
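A second minimal sketch, this time filling struct wmi_led_cfg_cmd for the WMI_LED_CFG_CMDID command declared earlier in this header. The example_wmi_send() helper and the millisecond reading of the blink fields are assumptions; the point is that the multi-byte members are little-endian on the wire, so host-order values go through cpu_to_le32() before the command is queued.

/* hypothetical transport hook; a real driver would use its own WMI queue */
int example_wmi_send(u16 cmd_id, const void *buf, u16 len);

static int example_configure_led(void)
{
	struct wmi_led_cfg_cmd cmd = {
		.led_mode	= LED_ENABLE,
		.id		= WMI_LED_WLAN,
		.slow_blink_cfg = {
			.blink_on	= cpu_to_le32(500),	/* units assumed: msec */
			.blink_off	= cpu_to_le32(500),
		},
		.medium_blink_cfg = {
			.blink_on	= cpu_to_le32(250),
			.blink_off	= cpu_to_le32(250),
		},
		.fast_blink_cfg = {
			.blink_on	= cpu_to_le32(100),
			.blink_off	= cpu_to_le32(100),
		},
		.led_polarity	= LED_POLARITY_HIGH_ACTIVE,
	};

	/* completion is reported asynchronously via WMI_LED_CFG_DONE_EVENTID */
	return example_wmi_send(WMI_LED_CFG_CMDID, &cmd, sizeof(cmd));
}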
