Diffstat (limited to 'drivers/staging/wfx')
-rw-r--r--  drivers/staging/wfx/Makefile   |   2
-rw-r--r--  drivers/staging/wfx/bh.c       |   2
-rw-r--r--  drivers/staging/wfx/data_tx.c  | 783
-rw-r--r--  drivers/staging/wfx/data_tx.h  |  93
-rw-r--r--  drivers/staging/wfx/hif_rx.c   |  37
-rw-r--r--  drivers/staging/wfx/hif_tx.c   |   1
-rw-r--r--  drivers/staging/wfx/main.c     |   4
-rw-r--r--  drivers/staging/wfx/queue.c    | 526
-rw-r--r--  drivers/staging/wfx/queue.h    |  59
-rw-r--r--  drivers/staging/wfx/sta.c      | 135
-rw-r--r--  drivers/staging/wfx/sta.h      |   8
-rw-r--r--  drivers/staging/wfx/traces.h   |  74
-rw-r--r--  drivers/staging/wfx/wfx.h      |  58
13 files changed, 1782 insertions, 0 deletions
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
index e158589468a3..d5ac9fafd1f1 100644
--- a/drivers/staging/wfx/Makefile
+++ b/drivers/staging/wfx/Makefile
@@ -9,6 +9,8 @@ wfx-y := \
fwio.o \
hif_tx.o \
hif_rx.o \
+ queue.o \
+ data_tx.o \
main.o \
sta.o \
debug.o
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index d321fd312d55..ed81c3924d98 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -220,6 +220,8 @@ static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
hif = wdev->hif_cmd.buf_send;
+ } else {
+ hif = wfx_tx_queues_get(wdev);
}
}
if (!hif)
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
new file mode 100644
index 000000000000..217d3c270706
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+#define WFX_INVALID_RATE_ID (0xFF)
+#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return -1;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate->idx > 7) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return rate->idx + 14;
+ }
+ // WFx only supports 2GHz; otherwise, the band information should be retrieved
+ // from ieee80211_tx_info
+ return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
+
+static void tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates)
+{
+ int i;
+ size_t count;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ BUG_ON(rates[0].idx < 0);
+ memset(policy, 0, sizeof(*policy));
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ if (rates[i].idx < 0)
+ break;
+ count = i;
+
+ /* HACK!!! Device has problems (at least) switching from
+ * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+ * of time (100-200 ms), leading to a considerable throughput drop.
+ * As a workaround, additional g-rates are injected into the
+ * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Fallback to 1 Mbps is a really bad thing,
+ * so let's try to increase the probability of
+ * a successful transmission on the lowest g-rate
+ * even more
+ */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ int rateid;
+ uint8_t count;
+
+ if (rates[i].idx < 0)
+ break;
+ WARN_ON(rates[i].count > 15);
+ rateid = wfx_get_hw_rate(wdev, &rates[i]);
+ // Pack two values in each byte of policy->rates
+ count = rates[i].count;
+ if (rateid % 2)
+ count <<= 4;
+ policy->rates[rateid / 2] |= count;
+ }
+}
+
+static bool tx_policy_is_equal(const struct tx_policy *a, const struct tx_policy *b)
+{
+ return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int tx_policy_find(struct tx_policy_cache *cache, struct tx_policy *wanted)
+{
+ struct tx_policy *it;
+
+ list_for_each_entry(it, &cache->used, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ list_for_each_entry(it, &cache->free, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ return -1;
+}
+
+static void tx_policy_use(struct tx_policy_cache *cache, struct tx_policy *entry)
+{
+ ++entry->usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static int tx_policy_release(struct tx_policy_cache *cache, struct tx_policy *entry)
+{
+ int ret = --entry->usage_count;
+
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+static int tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates,
+ bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct tx_policy wanted;
+
+ tx_policy_build(wvif, &wanted, rates);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON_ONCE(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return WFX_INVALID_RATE_ID;
+ }
+ idx = tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ *renew = false;
+ } else {
+ struct tx_policy *entry;
+ *renew = true;
+ /* If the policy is not found, create a new one
+ * using the oldest entry in the "free" list
+ */
+ entry = list_entry(cache->free.prev, struct tx_policy, link);
+ memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+ entry->uploaded = 0;
+ entry->usage_count = 0;
+ idx = entry - cache->cache;
+ }
+ tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free)) {
+ /* Lock TX queues. */
+ wfx_tx_queues_lock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage) {
+ /* Unlock TX queues. */
+ wfx_tx_queues_unlock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+static int tx_policy_upload(struct wfx_vif *wvif)
+{
+ int i;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct hif_mib_set_tx_rate_retry_policy *arg =
+ kzalloc(struct_size(arg, tx_rate_retry_policy, HIF_MIB_NUM_TX_RATE_RETRY_POLICIES), GFP_KERNEL);
+ struct hif_mib_tx_rate_retry_policy *dst;
+
+ spin_lock_bh(&cache->lock);
+ /* Upload only modified entries. */
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
+ struct tx_policy *src = &cache->cache[i];
+
+ if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
+ dst = arg->tx_rate_retry_policy + arg->num_tx_rate_policies;
+
+ dst->policy_index = i;
+ dst->short_retry_count = 255;
+ dst->long_retry_count = 255;
+ dst->first_rate_sel = 1;
+ dst->terminate = 1;
+ dst->count_init = 1;
+ memcpy(&dst->rates, src->rates, sizeof(src->rates));
+ src->uploaded = 1;
+ arg->num_tx_rate_policies++;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ hif_set_tx_rate_retry_policy(wvif, arg);
+ kfree(arg);
+ return 0;
+}
+
+static void tx_policy_upload_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+ tx_policy_upload(wvif);
+
+ wfx_tx_unlock(wvif->wdev);
+ wfx_tx_queues_unlock(wvif->wdev);
+}
+
+void tx_policy_init(struct wfx_vif *wvif)
+{
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+ INIT_WORK(&wvif->tx_policy_upload_work, tx_policy_upload_work);
+
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Link ID related functions */
+
+static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (!wvif->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
+ !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
+ unsigned long inactivity =
+ now - wvif->link_id_db[i].timestamp;
+
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+
+ if (ret) {
+ struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
+
+ entry->status = WFX_LINK_RESERVE;
+ ether_addr_copy(entry->mac, mac);
+ memset(&entry->buffered, 0, WFX_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wfx_tx_lock(wvif->wdev);
+
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ dev_info(wvif->wdev->dev, "no more link-id available\n");
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
+ wvif->link_id_db[i].status) {
+ wvif->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+static int wfx_map_link(struct wfx_vif *wvif, struct wfx_link_entry *link_entry, int sta_id)
+{
+ int ret;
+
+ ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
+
+ if (ret == 0)
+ /* Save the MAC address currently associated with the peer
+ * for future unmap request
+ */
+ ether_addr_copy(link_entry->old_mac, link_entry->mac);
+
+ return ret;
+}
+
+int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
+{
+ u8 *mac_addr = NULL;
+
+ if (sta_id)
+ mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
+
+ return hif_map_link(wvif, mac_addr, 1, sta_id);
+}
+
+void wfx_link_id_gc_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_gc_work.work);
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ u32 mask;
+ int i;
+
+ wfx_tx_lock_flush(wvif->wdev);
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ bool need_reset = false;
+
+ mask = BIT(i + 1);
+ if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
+ (wvif->link_id_db[i].status == WFX_LINK_HARD &&
+ !(wvif->link_id_map & mask))) {
+ if (wvif->link_id_map & mask) {
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ wvif->link_id_map |= mask;
+ if (wvif->link_id_db[i].status != WFX_LINK_HARD)
+ wvif->link_id_db[i].status = WFX_LINK_SOFT;
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (need_reset)
+ wfx_unmap_link(wvif, i + 1);
+ wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
+ next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
+ ttl = wvif->link_id_db[i].timestamp - now +
+ WFX_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ wvif->link_id_db[i].status = WFX_LINK_OFF;
+ wvif->link_id_map &= ~mask;
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ spin_unlock_bh(&wvif->ps_state_lock);
+ wfx_unmap_link(wvif, i + 1);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+ }
+ if (need_reset)
+ skb_queue_purge(&wvif->link_id_db[i].rx_queue);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (next_gc != -1)
+ schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_link_id_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_work);
+
+ wfx_tx_flush(wvif->wdev);
+ wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+/* Tx implementation */
+
+static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) hdr;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+ return true;
+}
+
+static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
+ struct wfx_tx_priv *tx_priv, struct ieee80211_sta *sta)
+{
+ u32 mask = ~BIT(tx_priv->raw_link_id);
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (ieee80211_is_auth(hdr->frame_control)) {
+ wvif->sta_asleep_mask &= mask;
+ wvif->pspoll_mask &= mask;
+ }
+
+ if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM && !wvif->mcast_buffered) {
+ wvif->mcast_buffered = true;
+ if (wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ }
+
+ if (tx_priv->raw_link_id) {
+ wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
+ if (tx_priv->tid < WFX_MAX_TID)
+ wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+}
+
+static uint8_t wfx_tx_get_raw_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr)
+{
+ struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
+ const u8 *da = ieee80211_get_DA(hdr);
+ int ret;
+
+ if (sta_priv && sta_priv->link_id)
+ return sta_priv->link_id;
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (is_multicast_ether_addr(da))
+ return 0;
+ ret = wfx_find_link_id(wvif, da);
+ if (!ret)
+ ret = wfx_alloc_link_id(wvif, da);
+ if (!ret) {
+ dev_err(wvif->wdev->dev, "no more link-id available\n");
+ return -ENOENT;
+ }
+ return ret;
+}
+
+static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+{
+ int i;
+ bool finished;
+
+ // Firmware is not able to mix rates with different flags
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+ }
+
+ // Sort rates and remove duplicates
+ do {
+ finished = true;
+ for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+ if (rates[i + 1].idx == rates[i].idx && rates[i].idx != -1) {
+ rates[i].count = max_t(int, rates[i].count, rates[i + 1].count);
+ rates[i + 1].idx = -1;
+ rates[i + 1].count = 0;
+
+ finished = false;
+ }
+ if (rates[i + 1].idx > rates[i].idx) {
+ swap(rates[i + 1], rates[i]);
+ finished = false;
+ }
+ }
+ } while (!finished);
+ // All retries use long GI
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+}
+
+static uint8_t wfx_tx_get_rate_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+{
+ bool tx_policy_renew = false;
+ uint8_t rate_id;
+
+ rate_id = tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
+ WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+
+ if (tx_policy_renew) {
+ /* FIXME: It's not so optimal to stop TX queues every now and
+ * then. Better to reimplement task scheduling with a counter.
+ */
+ wfx_tx_lock(wvif->wdev);
+ wfx_tx_queues_lock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work)) {
+ wfx_tx_queues_unlock(wvif->wdev);
+ wfx_tx_unlock(wvif->wdev);
+ }
+ }
+ return rate_id;
+}
+
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
+ struct hif_ht_tx_parameters ret = { };
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ ret.frame_format = HIF_FRAME_FORMAT_NON_HT;
+ else if (!(rate->flags & IEEE80211_TX_RC_GREEN_FIELD))
+ ret.frame_format = HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ else
+ ret.frame_format = HIF_FRAME_FORMAT_GF_HT_11N;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ ret.short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ ret.stbc = 0; // FIXME: Not yet supported by firmware?
+ return ret;
+}
+
+static uint8_t wfx_tx_get_tid(struct ieee80211_hdr *hdr)
+{
+ // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
+ if (!ieee80211_is_data(hdr->frame_control))
+ return WFX_MAX_TID;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ return ieee80211_get_tid(hdr);
+ else
+ return 0;
+}
+
+static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
+{
+ int mic_space;
+
+ if (!hw_key)
+ return 0;
+ mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
+ return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ struct hif_msg *hif_msg;
+ struct hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int queue_id = tx_info->hw_queue;
+ size_t offset = (size_t) skb->data & 3;
+ int wmsg_len = sizeof(struct hif_msg) + sizeof(struct hif_req_tx) + offset;
+
+ WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+ wfx_tx_fixup_rates(tx_info->driver_rates);
+
+ // From this point on, tx_info->control is unusable
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ // Fill tx_priv
+ tx_priv = (struct wfx_tx_priv *) tx_info->rate_driver_data;
+ tx_priv->tid = wfx_tx_get_tid(hdr);
+ tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
+ tx_priv->link_id = tx_priv->raw_link_id;
+ if (ieee80211_has_protected(hdr->frame_control))
+ tx_priv->hw_key = hw_key;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
+ if (sta && (sta->uapsd_queues & BIT(queue_id)))
+ tx_priv->link_id = WFX_LINK_ID_UAPSD;
+
+ // Fill hif_msg
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+ skb_put(skb, wfx_tx_get_icv_len(tx_priv->hw_key));
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct hif_msg *) skb->data;
+ hif_msg->len = cpu_to_le16(skb->len);
+ hif_msg->id = cpu_to_le16(HIF_REQ_ID_TX);
+ hif_msg->interface = wvif->id;
+ if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
+ dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
+ skb_pull(skb, wmsg_len);
+ return -EIO;
+ }
+
+ // Fill tx request
+ req = (struct hif_req_tx *) hif_msg->body;
+ req->packet_id = queue_id << 16 | IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ req->data_flags.fc_offset = offset;
+ req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+ // Queue indices are inverted between firmware and Linux
+ req->queue_id.queue_id = 3 - queue_id;
+ req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
+ req->tx_flags.retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+
+ // Auxiliary operations
+ wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
+ wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_bh_request_tx(wvif->wdev);
+ return 0;
+}
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+ struct ieee80211_sta *sta = control ? control->sta : NULL;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info, rate_driver_data);
+
+ compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
+ "struct tx_priv is too large");
+ WARN(skb->next || skb->prev, "skb is already member of a list");
+ // control.vif can be NULL for injected frames
+ if (tx_info->control.vif)
+ wvif = (struct wfx_vif *) tx_info->control.vif->drv_priv;
+ else
+ wvif = wvif_iterate(wdev, NULL);
+ if (WARN_ON(!wvif))
+ goto drop;
+ // FIXME: why?
+ if (ieee80211_is_action_back(hdr)) {
+ dev_info(wdev->dev, "drop BA action\n");
+ goto drop;
+ }
+ if (wfx_tx_inner(wvif, sta, skb))
+ goto drop;
+
+ return;
+
+drop:
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+{
+ int i;
+ int tx_count;
+ struct sk_buff *skb;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev, "received unknown packet_id (%#.8x) from chip\n", arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+ // You can touch tx_priv here, but do not touch tx_info->status yet.
+ tx_count = arg->ack_failures;
+ if (!arg->status || arg->ack_failures)
+ tx_count += 1; // Also report success
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0)
+ break;
+ if (tx_count < rate->count && arg->status && arg->ack_failures)
+ dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n",
+ rate->count, tx_count);
+ if (tx_count <= rate->count && tx_count && arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
+ dev_dbg(wvif->wdev->dev, "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate, wfx_get_hw_rate(wvif->wdev, rate));
+ if (tx_count > rate->count) {
+ tx_count -= rate->count;
+ } else if (!tx_count) {
+ rate->count = 0;
+ rate->idx = -1;
+ } else {
+ rate->count = tx_count;
+ tx_count = 0;
+ }
+ }
+ if (tx_count)
+ dev_dbg(wvif->wdev->dev, "%d more retries than expected\n", tx_count);
+ skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
+
+ // From this point on, you can touch tx_info->status, but do not touch
+ // tx_priv anymore
+ // FIXME: use ieee80211_tx_info_clear_status()
+ memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
+ memset(tx_info->pad, 0, sizeof(tx_info->pad));
+
+ if (!arg->status) {
+ tx_info->status.tx_time = arg->media_delay - arg->tx_queue_delay;
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ } else if (arg->status == HIF_REQUEUE) {
+ WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ }
+ wfx_pending_remove(wvif->wdev, skb);
+}
+
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct hif_req_tx *req)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int tid = wfx_tx_get_tid(hdr);
+ int raw_link_id = req->queue_id.peer_sta_id;
+ u8 *buffered;
+
+ if (raw_link_id && tid < WFX_MAX_TID) {
+ buffered = wvif->link_id_db[raw_link_id - 1].buffered;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ WARN(!buffered[tid], "inconsistent notification");
+ buffered[tid]--;
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (!buffered[tid]) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+}
+
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *) skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ unsigned int offset = sizeof(struct hif_req_tx) + sizeof(struct hif_msg) + req->data_flags.fc_offset;
+
+ WARN_ON(!wvif);
+ skb_pull(skb, offset);
+ wfx_notify_buffered_tx(wvif, skb, req);
+ tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
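A note on the retry-policy encoding built by tx_policy_build() above: policy->rates[] is 12 bytes, i.e. 24 nibbles, holding one 4-bit retry count per hardware rate index, with even rate ids in the low nibble and odd ids in the high nibble of each byte. The standalone sketch below (not part of the patch; the rate ids and counts are made up) reproduces just that packing step.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* 12 bytes = 24 nibbles, one per hardware rate index */
		uint8_t packed[12] = { 0 };
		/* hypothetical (hardware rate id, retry count) pairs */
		const struct { int rateid; uint8_t count; } policy[] = {
			{ 21, 3 }, { 14, 2 }, { 4, 1 }, { 0, 1 },
		};
		int i;

		for (i = 0; i < 4; i++) {
			uint8_t count = policy[i].count; /* must fit in 4 bits */

			if (policy[i].rateid % 2)
				count <<= 4;	/* odd rate id -> high nibble */
			packed[policy[i].rateid / 2] |= count;
		}
		for (i = 0; i < 12; i++)
			printf("%02x ", packed[i]);
		printf("\n");
		return 0;
	}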
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
new file mode 100644
index 000000000000..f59a259bb744
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+// FIXME: use IEEE80211_NUM_TIDS
+#define WFX_MAX_TID 8
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_link_status {
+ WFX_LINK_OFF,
+ WFX_LINK_RESERVE,
+ WFX_LINK_SOFT,
+ WFX_LINK_HARD,
+};
+
+struct wfx_link_entry {
+ unsigned long timestamp;
+ enum wfx_link_status status;
+ uint8_t mac[ETH_ALEN];
+ uint8_t old_mac[ETH_ALEN];
+ uint8_t buffered[WFX_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct tx_policy {
+ struct list_head link;
+ uint8_t rates[12];
+ uint8_t usage_count;
+ uint8_t uploaded;
+};
+
+struct tx_policy_cache {
+ struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+ // FIXME: use a tree and drop the hash from tx_policy
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
+struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
+ struct ieee80211_key_conf *hw_key;
+ uint8_t link_id;
+ uint8_t raw_link_id;
+ uint8_t tid;
+} __packed;
+
+void tx_policy_init(struct wfx_vif *wvif);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+
+int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
+void wfx_link_id_work(struct work_struct *work);
+void wfx_link_id_gc_work(struct work_struct *work);
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return NULL;
+ tx_info = IEEE80211_SKB_CB(skb);
+ return (struct wfx_tx_priv *) tx_info->rate_driver_data;
+}
+
+static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *) skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+
+ return req;
+}
+
+#endif /* WFX_DATA_TX_H */
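Note that struct wfx_tx_priv above is never allocated on its own: wfx_tx() overlays it on the rate_driver_data scratch area of struct ieee80211_tx_info and verifies at compile time that it fits (see the compiletime_assert in data_tx.c). A minimal user-space sketch of the same pattern follows; the scratch size and struct layout are illustrative, not the real mac80211 ones.

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define SCRATCH_SIZE 24	/* stand-in for rate_driver_data, not the real size */

	struct pkt_priv {
		int64_t xmit_timestamp;
		void *hw_key;
		uint8_t link_id;
		uint8_t raw_link_id;
		uint8_t tid;
	};

	/* same idea as the compiletime_assert() in wfx_tx() */
	_Static_assert(sizeof(struct pkt_priv) <= SCRATCH_SIZE,
		       "struct pkt_priv is too large");

	int main(void)
	{
		_Alignas(struct pkt_priv) uint8_t scratch[SCRATCH_SIZE];
		struct pkt_priv *priv = (struct pkt_priv *)scratch;

		memset(scratch, 0, sizeof(scratch));
		priv->tid = 6;
		priv->link_id = 1;
		printf("tid=%d link=%d\n", priv->tid, priv->link_id);
		return 0;
	}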
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index c93bae1b6acf..97c4c2f082fb 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -53,6 +53,39 @@ static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *
return status;
}
+static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+{
+ struct hif_cnf_tx *body = buf;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ wfx_tx_confirm_cb(wvif, body);
+ return 0;
+}
+
+static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+{
+ struct hif_cnf_multi_transmit *body = buf;
+ struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ int count = body->num_tx_confs;
+ int i;
+
+ WARN(count <= 0, "corrupted message");
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ for (i = 0; i < count; ++i) {
+ wfx_tx_confirm_cb(wvif, buf_loc);
+ buf_loc++;
+ }
+ return 0;
+}
+
static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
{
struct hif_ind_startup *body = buf;
@@ -174,6 +207,10 @@ static const struct {
int msg_id;
int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
} hif_handlers[] = {
+ /* Confirmations */
+ { HIF_CNF_ID_TX, hif_tx_confirm },
+ { HIF_CNF_ID_MULTI_TRANSMIT, hif_multi_tx_confirm },
+ /* Indications */
{ HIF_IND_ID_STARTUP, hif_startup_indication },
{ HIF_IND_ID_WAKEUP, hif_wakeup_indication },
{ HIF_IND_ID_JOIN_COMPLETE, hif_join_complete_indication },
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index f8ab871aa188..157ab177b73f 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -88,6 +88,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply, siz
}
if (!ret) {
dev_err(wdev->dev, "chip did not answer\n");
+ wfx_pending_dump_old_frames(wdev, 3000);
wdev->chip_frozen = 1;
reinit_completion(&wdev->hif_cmd.done);
ret = -ETIMEDOUT;
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 2e71f446d4d4..cce4e30dd94a 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -28,6 +28,7 @@
#include "bh.h"
#include "sta.h"
#include "debug.h"
+#include "data_tx.h"
#include "secure_link.h"
#include "hif_tx_mib.h"
#include "hif_api_cmd.h"
@@ -53,6 +54,7 @@ static const struct ieee80211_ops wfx_ops = {
.stop = wfx_stop,
.add_interface = wfx_add_interface,
.remove_interface = wfx_remove_interface,
+ .tx = wfx_tx,
};
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
@@ -215,6 +217,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
mutex_init(&wdev->rx_stats_lock);
init_completion(&wdev->firmware_ready);
wfx_init_hif_cmd(&wdev->hif_cmd);
+ wfx_tx_queues_init(wdev);
return wdev;
}
@@ -222,6 +225,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
void wfx_free_common(struct wfx_dev *wdev)
{
mutex_destroy(&wdev->rx_stats_lock);
+ wfx_tx_queues_deinit(wdev);
ieee80211_free_hw(wdev->hw);
}
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
new file mode 100644
index 000000000000..aa438be21d37
--- /dev/null
+++ b/drivers/staging/wfx/queue.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+ atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+ int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+ WARN(tx_lock < 0, "inconsistent tx_lock value");
+ if (!tx_lock)
+ wfx_bh_request_tx(wdev);
+}
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+ int ret;
+
+ WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return;
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
+ !wdev->hif.tx_buffers_used,
+ msecs_to_jiffies(3000));
+ if (!ret) {
+ dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n", wdev->hif.tx_buffers_used);
+ wfx_pending_dump_old_frames(wdev, 3000);
+ // FIXME: drop pending frames here
+ wdev->chip_frozen = 1;
+ }
+ mutex_unlock(&wdev->hif_cmd.lock);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+ wfx_tx_lock(wdev);
+ wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_lock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ if (queue->tx_locked_cnt++ == 0)
+ ieee80211_stop_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ BUG_ON(!queue->tx_locked_cnt);
+ if (--queue->tx_locked_cnt == 0)
+ ieee80211_wake_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+/* If successful, LOCKS the TX queue! */
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+{
+ int i;
+ bool done;
+ struct wfx_queue *queue;
+ struct sk_buff *item;
+ struct wfx_dev *wdev = wvif->wdev;
+ struct hif_msg *hif;
+
+ if (wvif->wdev->chip_frozen) {
+ wfx_tx_lock_flush(wdev);
+ wfx_tx_queues_clear(wdev);
+ return;
+ }
+
+ do {
+ done = true;
+ wfx_tx_lock_flush(wdev);
+ for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ hif = (struct hif_msg *) item->data;
+ if (hif->interface == wvif->id)
+ done = false;
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ }
+ if (!done) {
+ wfx_tx_unlock(wdev);
+ msleep(20);
+ }
+ } while (!done);
+}
+
+static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue, struct sk_buff_head *gc_list)
+{
+ int i;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&queue->queue.lock);
+ while ((item = __skb_dequeue(&queue->queue)) != NULL)
+ skb_queue_head(gc_list, item);
+ spin_lock_bh(&stats->pending.lock);
+ for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+void wfx_tx_queues_clear(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff *item;
+ struct sk_buff_head gc_list;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ skb_queue_head_init(&gc_list);
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
+ wake_up(&stats->wait_link_id_empty);
+ while ((item = skb_dequeue(&gc_list)) != NULL)
+ wfx_skb_dtor(wdev, item);
+}
+
+void wfx_tx_queues_init(struct wfx_dev *wdev)
+{
+ int i;
+
+ memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
+ memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
+ skb_queue_head_init(&wdev->tx_queue_stats.pending);
+ init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ wdev->tx_queue[i].queue_id = i;
+ skb_queue_head_init(&wdev->tx_queue[i].queue);
+ }
+}
+
+void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+{
+ WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
+ wfx_tx_queues_clear(wdev);
+}
+
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->queue.lock);
+ if (link_id_map == (u32)-1) {
+ ret = skb_queue_len(&queue->queue);
+ } else {
+ ret = 0;
+ for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache); ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ return ret;
+}
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
+ spin_lock_bh(&queue->queue.lock);
+ __skb_queue_tail(&queue->queue, skb);
+
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev, struct wfx_queue *queue, u32 link_id_map)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ tx_priv = wfx_skb_tx_priv(item);
+ if (link_id_map & BIT(tx_priv->link_id)) {
+ skb = item;
+ break;
+ }
+ }
+ WARN_ON(!skb);
+ if (skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ __skb_unlink(skb, &queue->queue);
+ --queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_queue_tail(&stats->pending, skb);
+ if (!--stats->link_map_cache[tx_priv->link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->pending.lock);
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return skb;
+}
+
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ spin_lock_bh(&queue->queue.lock);
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ __skb_queue_tail(&queue->queue, skb);
+ spin_unlock_bh(&queue->queue.lock);
+ return 0;
+}
+
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ wfx_skb_dtor(wdev, skb);
+
+ return 0;
+}
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+{
+ struct sk_buff *skb;
+ struct hif_req_tx *req;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ req = wfx_skb_txreq(skb);
+ if (req->packet_id == packet_id) {
+ spin_unlock_bh(&stats->pending.lock);
+ return skb;
+ }
+ }
+ WARN_ON(1);
+ spin_unlock_bh(&stats->pending.lock);
+ return NULL;
+}
+
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv;
+ struct hif_req_tx *req;
+ struct sk_buff *skb;
+ bool first = true;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ req = wfx_skb_txreq(skb);
+ if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
+ if (first) {
+ dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
+ limit_ms);
+ first = false;
+ }
+ dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+ req->packet_id,
+ ktime_ms_delta(now, tx_priv->xmit_timestamp));
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff_head *queue;
+ bool ret = true;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ queue = &wdev->tx_queue[i].queue;
+ spin_lock_bh(&queue->lock);
+ if (!skb_queue_empty(queue))
+ ret = false;
+ spin_unlock_bh(&queue->lock);
+ }
+ return ret;
+}
+
+static int wfx_get_prio_queue(struct wfx_vif *wvif,
+ u32 tx_allowed_mask, int *total)
+{
+ static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
+ BIT(WFX_LINK_ID_UAPSD);
+ struct hif_req_edca_queue_params *edca;
+ unsigned int score, best = -1;
+ int winner = -1;
+ int i;
+
+ /* search for a winner using edca params */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ int queued;
+
+ edca = &wvif->edca.params[i];
+ queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ tx_allowed_mask);
+ if (!queued)
+ continue;
+ *total += queued;
+ score = ((edca->aifsn + edca->cw_min) << 16) +
+ ((edca->cw_max - edca->cw_min) *
+ (get_random_int() & 0xFFFF));
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
+ winner != wvif->wdev->tx_burst_idx &&
+ !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner], tx_allowed_mask & urgent) &&
+ wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
+ winner = wvif->wdev->tx_burst_idx;
+
+ return winner;
+}
+
+static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
+ struct wfx_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (wvif->mcast_tx) {
+ tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
+ tx_allowed_mask = ~wvif->sta_asleep_mask;
+ tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
+ if (wvif->sta_asleep_mask) {
+ tx_allowed_mask |= wvif->pspoll_mask;
+ tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
+ }
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &wvif->wdev->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif = NULL;
+ struct hif_req_tx *req = NULL;
+ struct wfx_queue *queue = NULL;
+ struct wfx_queue *vif_queue = NULL;
+ u32 tx_allowed_mask = 0;
+ u32 vif_tx_allowed_mask = 0;
+ const struct wfx_tx_priv *tx_priv = NULL;
+ struct wfx_vif *wvif;
+ /* More is used only for broadcasts. */
+ bool more = false;
+ bool vif_more = false;
+ int not_found;
+ int burst;
+
+ for (;;) {
+ int ret = -ENOENT;
+ int queue_num;
+ struct ieee80211_hdr *hdr;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ spin_lock_bh(&wvif->ps_state_lock);
+
+ not_found = wfx_tx_queue_mask_get(wvif, &vif_queue, &vif_tx_allowed_mask, &vif_more);
+
+ if (wvif->mcast_buffered && (not_found || !vif_more) &&
+ (wvif->mcast_tx || !wvif->sta_asleep_mask)) {
+ wvif->mcast_buffered = false;
+ if (wvif->mcast_tx) {
+ wvif->mcast_tx = false;
+ schedule_work(&wvif->mcast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (vif_more) {
+ more = true;
+ tx_allowed_mask = vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ break;
+ } else if (!not_found) {
+ if (queue && queue != vif_queue)
+ dev_info(wdev->dev, "vifs disagree about queue priority\n");
+ tx_allowed_mask |= vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ return NULL;
+
+ queue_num = queue - wdev->tx_queue;
+
+ skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ if (!skb)
+ continue;
+ tx_priv = wfx_skb_tx_priv(skb);
+ hif = (struct hif_msg *) skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ WARN_ON(!wvif);
+
+ wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
+
+ /* allow bursting if txop is set */
+ if (wvif->edca.params[queue_num].tx_op_limit)
+ burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ else
+ burst = 1;
+
+ /* store index of bursting queue */
+ if (burst > 1)
+ wdev->tx_burst_idx = queue_num;
+ else
+ wdev->tx_burst_idx = -1;
+
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs
+ */
+ if (more) {
+ req = (struct hif_req_tx *) hif->body;
+ hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+ return hif;
+ }
+}
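On the queue selection in wfx_get_prio_queue() above: each AC is scored as ((aifsn + cw_min) << 16) + (cw_max - cw_min) * rand16, which is in effect a 16.16 fixed-point random draw spread over [aifsn + cw_min, aifsn + cw_max], and the lowest draw wins. The standalone sketch below evaluates that score for the default EDCA parameters added in sta.c, with a fixed value standing in for get_random_int(); it is an illustration only, not driver code.

	#include <stdio.h>

	int main(void)
	{
		/* defaults from wfx_add_interface() in sta.c */
		const struct { const char *ac; unsigned aifsn, cw_min, cw_max; } p[] = {
			{ "VO", 2, 3, 7 },
			{ "VI", 2, 7, 15 },
			{ "BE", 3, 15, 1023 },
			{ "BK", 7, 15, 1023 },
		};
		unsigned rnd = 0x8000;	/* stand-in for get_random_int() & 0xFFFF */
		int i;

		for (i = 0; i < 4; i++) {
			unsigned score = ((p[i].aifsn + p[i].cw_min) << 16) +
					 (p[i].cw_max - p[i].cw_min) * rnd;

			/* lower score wins, cf. "score < best" in wfx_get_prio_queue() */
			printf("%s: score = %u (~slot %u.%04u)\n", p[i].ac, score,
			       score >> 16, (score & 0xFFFF) * 10000 / 65536);
		}
		return 0;
	}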
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
new file mode 100644
index 000000000000..938dbf3469e7
--- /dev/null
+++ b/drivers/staging/wfx/queue.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_QUEUE_H
+#define WFX_QUEUE_H
+
+#include <linux/skbuff.h>
+
+#include "hif_api_cmd.h"
+
+#define WFX_MAX_STA_IN_AP_MODE 14
+#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
+#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
+#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_queue {
+ struct sk_buff_head queue;
+ int tx_locked_cnt;
+ int link_map_cache[WFX_LINK_ID_MAX];
+ u8 queue_id;
+};
+
+struct wfx_queue_stats {
+ int link_map_cache[WFX_LINK_ID_MAX];
+ struct sk_buff_head pending;
+ wait_queue_head_t wait_link_id_empty;
+};
+
+void wfx_tx_lock(struct wfx_dev *wdev);
+void wfx_tx_unlock(struct wfx_dev *wdev);
+void wfx_tx_flush(struct wfx_dev *wdev);
+void wfx_tx_lock_flush(struct wfx_dev *wdev);
+
+void wfx_tx_queues_init(struct wfx_dev *wdev);
+void wfx_tx_queues_deinit(struct wfx_dev *wdev);
+void wfx_tx_queues_lock(struct wfx_dev *wdev);
+void wfx_tx_queues_unlock(struct wfx_dev *wdev);
+void wfx_tx_queues_clear(struct wfx_dev *wdev);
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue, struct sk_buff *skb);
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
+
+#endif /* WFX_QUEUE_H */
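The link-id defines above fix the bitmap convention used throughout the TX path: link id 0 is the catch-all/unassociated entry, stations occupy ids 1..14 (slot i of link_id_db[] maps to bit i + 1), and the two pseudo links WFX_LINK_ID_AFTER_DTIM and WFX_LINK_ID_UAPSD take ids 15 and 16. A short sketch of how a tx_allowed_mask is formed from them, mirroring the "someone is asleep" branch of wfx_tx_queue_mask_get() in queue.c; the station states are hypothetical.

	#include <stdio.h>
	#include <stdint.h>

	#define WFX_MAX_STA_IN_AP_MODE 14
	#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
	#define WFX_LINK_ID_UAPSD      (WFX_MAX_STA_IN_AP_MODE + 2)
	#define BIT(n) (1U << (n))

	int main(void)
	{
		/* hypothetical: stations in slots 0 and 2 are asleep -> link ids 1 and 3 */
		uint32_t sta_asleep_mask = BIT(1) | BIT(3);
		uint32_t pspoll_mask = BIT(3);	/* station with link id 3 sent a PS-Poll */
		uint32_t tx_allowed_mask;

		/* skip sleeping stations, always allow U-APSD traffic, allow the
		 * PS-Poll responder, and hold back after-DTIM (multicast) traffic
		 * while at least one station is asleep
		 */
		tx_allowed_mask = ~sta_asleep_mask;
		tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
		tx_allowed_mask |= pspoll_mask;
		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);

		printf("tx_allowed_mask = 0x%08x\n", tx_allowed_mask);
		return 0;
	}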
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index fe3ff6536a87..5714aba1432c 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -10,11 +10,123 @@
#include "sta.h"
#include "wfx.h"
+#define TXOP_UNIT 32
+
+static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+ u16 tim_offset, tim_length;
+ u8 *tim_ptr;
+
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
+ &tim_offset, &tim_length);
+ if (!skb)
+ return -ENOENT;
+ tim_ptr = skb->data + tim_offset;
+
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally.
+ */
+ tim_ptr[2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ tim_ptr[4] |= 1;
+ else
+ tim_ptr[4] &= ~1;
+ }
+
+ hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void wfx_mcast_start_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, mcast_start_work);
+
+ cancel_work_sync(&wvif->mcast_stop_work);
+ if (!wvif->aid0_bit_set) {
+ wfx_tx_lock_flush(wvif->wdev);
+ wfx_set_tim_impl(wvif, true);
+ wvif->aid0_bit_set = true;
+ mod_timer(&wvif->mcast_timeout, TU_TO_JIFFIES(1000));
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_stop_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, mcast_stop_work);
+
+ if (wvif->aid0_bit_set) {
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->aid0_bit_set = false;
+ wfx_set_tim_impl(wvif, false);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_timeout(struct timer_list *t)
+{
+ struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
+
+ dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
+ spin_lock_bh(&wvif->ps_state_lock);
+ wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
+ if (wvif->mcast_tx)
+ wfx_bh_request_tx(wvif->wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int i;
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ // FIXME: parameters are set by the kernel just after interface_add.
+ // Keep struct hif_req_edca_queue_params blank?
+ struct hif_req_edca_queue_params default_edca_params[] = {
+ [IEEE80211_AC_VO] = {
+ .queue_id = HIF_QUEUE_ID_VOICE,
+ .aifsn = 2,
+ .cw_min = 3,
+ .cw_max = 7,
+ .tx_op_limit = TXOP_UNIT * 47,
+ },
+ [IEEE80211_AC_VI] = {
+ .queue_id = HIF_QUEUE_ID_VIDEO,
+ .aifsn = 2,
+ .cw_min = 7,
+ .cw_max = 15,
+ .tx_op_limit = TXOP_UNIT * 94,
+ },
+ [IEEE80211_AC_BE] = {
+ .queue_id = HIF_QUEUE_ID_BESTEFFORT,
+ .aifsn = 3,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ [IEEE80211_AC_BK] = {
+ .queue_id = HIF_QUEUE_ID_BACKGROUND,
+ .aifsn = 7,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ };
+
+ if (wfx_api_older_than(wdev, 2, 0)) {
+ default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
+ default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
+ }
for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
if (!wdev->vif[i]) {
@@ -28,12 +140,29 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->vif = vif;
wvif->wdev = wdev;
+ INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
+ INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
+
+ spin_lock_init(&wvif->ps_state_lock);
+
+ INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
+ INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
+ timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+ BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ memcpy(&wvif->edca.params[i], &default_edca_params[i], sizeof(default_edca_params[i]));
+ tx_policy_init(wvif);
return 0;
}
void wfx_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+
+ wfx_tx_queues_wait_empty_vif(wvif);
+ cancel_delayed_work_sync(&wvif->link_id_gc_work);
+ del_timer_sync(&wvif->mcast_timeout);
}
int wfx_start(struct ieee80211_hw *hw)
@@ -43,4 +172,10 @@ int wfx_start(struct ieee80211_hw *hw)
void wfx_stop(struct ieee80211_hw *hw)
{
+ struct wfx_dev *wdev = hw->priv;
+
+ wfx_tx_lock_flush(wdev);
+ wfx_tx_queues_clear(wdev);
+ wfx_tx_unlock(wdev);
+ WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
}
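On the TXOP_UNIT define used above: the 802.11 EDCA TXOP limit is specified in units of 32 microseconds, so, assuming the tx_op_limit field of struct hif_req_edca_queue_params takes microseconds, TXOP_UNIT * 47 converts the usual OFDM-PHY default of 47 units for AC_VO into 1504 us and TXOP_UNIT * 94 gives 3008 us for AC_VI, with no TXOP bursting for best-effort and background. A quick standalone check of that arithmetic:

	#include <stdio.h>

	#define TXOP_UNIT 32	/* microseconds per 802.11 TXOP-limit unit */

	int main(void)
	{
		const struct { const char *ac; unsigned tx_op_limit; } p[] = {
			{ "VO", TXOP_UNIT * 47 },
			{ "VI", TXOP_UNIT * 94 },
			{ "BE", TXOP_UNIT * 0 },
			{ "BK", TXOP_UNIT * 0 },
		};
		int i;

		for (i = 0; i < 4; i++)
			printf("%s: tx_op_limit = %u us (%.3f ms)\n",
			       p[i].ac, p[i].tx_op_limit, p[i].tx_op_limit / 1000.0);
		return 0;
	}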
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index f17b4d1511d7..f36d94f907c7 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -10,6 +10,14 @@
#include <net/mac80211.h>
+#include "hif_api_cmd.h"
+
+struct wfx_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
+ bool uapsd_enable[IEEE80211_NUM_ACS];
+};
+
struct wfx_sta_priv {
int link_id;
int vif_id;
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index e7b03b940535..67457cda133b 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -12,6 +12,7 @@
#define _WFX_TRACE_H
#include <linux/tracepoint.h>
+#include <net/mac80211.h>
#include "bus.h"
#include "hif_api_cmd.h"
@@ -349,6 +350,79 @@ TRACE_EVENT(bh_stats,
);
#define _trace_bh_stats(ind, req, cnf, busy, release) trace_bh_stats(ind, req, cnf, busy, release)
+TRACE_EVENT(tx_stats,
+ TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_ARGS(tx_cnf, skb, delay),
+ TP_STRUCT__entry(
+ __field(int, pkt_id)
+ __field(int, delay_media)
+ __field(int, delay_queue)
+ __field(int, delay_fw)
+ __field(int, ack_failures)
+ __field(int, flags)
+ __array(int, rate, 4)
+ __array(int, tx_count, 4)
+ ),
+ TP_fast_assign(
+ // Keep sync with wfx_rates definition in main.c
+ static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13 };
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ int i;
+
+ __entry->pkt_id = tx_cnf->packet_id;
+ __entry->delay_media = tx_cnf->media_delay;
+ __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_fw = delay;
+ __entry->ack_failures = tx_cnf->ack_failures;
+ if (!tx_cnf->status || __entry->ack_failures)
+ __entry->ack_failures += 1;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->rate[i] = rates[i].idx;
+ else
+ __entry->rate[i] = hw_rate[rates[i].idx];
+ __entry->tx_count[i] = rates[i].count;
+ }
+ __entry->flags = 0;
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->flags |= 0x01;
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ __entry->flags |= 0x02;
+ if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ __entry->flags |= 0x04;
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ __entry->flags |= 0x08;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ __entry->flags |= 0x10;
+ if (tx_cnf->status)
+ __entry->flags |= 0x20;
+ if (tx_cnf->status == HIF_REQUEUE)
+ __entry->flags |= 0x40;
+ ),
+ TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
+ __entry->pkt_id,
+ __print_flags(__entry->flags, NULL,
+ { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" },
+ { 0x08, "R" }, { 0x10, "D" }, { 0x20, "F" },
+ { 0x40, "Q" }),
+ __entry->rate[0],
+ __entry->tx_count[0],
+ __entry->rate[1],
+ __entry->tx_count[1],
+ __entry->rate[2],
+ __entry->tx_count[2],
+ __entry->rate[3],
+ __entry->tx_count[3],
+ __entry->ack_failures,
+ __entry->delay_media,
+ __entry->delay_queue,
+ __entry->delay_fw
+ )
+);
+#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay)
+
#endif
/* This part must be outside protection */
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 49b776a07515..11775b1e06ef 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -14,8 +14,11 @@
#include <net/mac80211.h>
#include "bh.h"
+#include "data_tx.h"
#include "main.h"
+#include "queue.h"
#include "secure_link.h"
+#include "sta.h"
#include "hif_tx.h"
#include "hif_api_general.h"
@@ -38,6 +41,10 @@ struct wfx_dev {
int chip_frozen;
struct wfx_hif_cmd hif_cmd;
+ struct wfx_queue tx_queue[4];
+ struct wfx_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+ atomic_t tx_lock;
struct hif_rx_stats rx_stats;
struct mutex rx_stats_lock;
@@ -47,6 +54,28 @@ struct wfx_vif {
struct wfx_dev *wdev;
struct ieee80211_vif *vif;
int id;
+
+
+ u32 link_id_map;
+ struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
+ struct delayed_work link_id_gc_work;
+ struct work_struct link_id_work;
+
+ bool aid0_bit_set;
+ bool mcast_tx;
+ bool mcast_buffered;
+ struct timer_list mcast_timeout;
+ struct work_struct mcast_start_work;
+ struct work_struct mcast_stop_work;
+
+
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ spinlock_t ps_state_lock;
+
+ struct wfx_edca_params edca;
};
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
@@ -62,4 +91,33 @@ static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
return (struct wfx_vif *) wdev->vif[vif_id]->drv_priv;
}
+static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev, struct wfx_vif *cur)
+{
+ int i;
+ int mark = 0;
+ struct wfx_vif *tmp;
+
+ if (!cur)
+ mark = 1;
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ tmp = wdev_to_wvif(wdev, i);
+ if (mark && tmp)
+ return tmp;
+ if (tmp == cur)
+ mark = 1;
+ }
+ return NULL;
+}
+
+static inline int memzcmp(void *src, unsigned int size)
+{
+ uint8_t *buf = src;
+
+ if (!size)
+ return 0;
+ if (*buf)
+ return 1;
+ return memcmp(buf, buf + 1, size - 1);
+}
+
#endif /* WFX_H */
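The memzcmp() helper added above tests whether a buffer is entirely zero without a temporary: once the first byte is known to be zero, memcmp(buf, buf + 1, size - 1) compares the buffer against itself shifted by one byte, and that comparison succeeds only when every byte equals its successor, i.e. all bytes are zero. A small self-check of that property (the helper body is copied from wfx.h; the test buffers are made up):

	#include <assert.h>
	#include <string.h>
	#include <stdint.h>

	static int memzcmp(void *src, unsigned int size)
	{
		uint8_t *buf = src;

		if (!size)
			return 0;
		if (*buf)
			return 1;
		return memcmp(buf, buf + 1, size - 1);
	}

	int main(void)
	{
		uint8_t zero[8] = { 0 };
		uint8_t mixed[8] = { 0, 0, 0, 1, 0, 0, 0, 0 };

		assert(memzcmp(zero, sizeof(zero)) == 0);	/* all-zero buffer */
		assert(memzcmp(mixed, sizeof(mixed)) != 0);	/* non-zero byte found */
		return 0;
	}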