1 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
2 Date: Fri, 2 Sep 2016 16:00:30 +0200
3 Subject: [PATCH] ath9k: Switch to using mac80211 intermediate software
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This switches ath9k over to using the mac80211 intermediate software
10 queueing mechanism for data packets. It removes the queueing inside the
11 driver, except for the retry queue, and instead pulls from mac80211 when
12 a packet is needed. The retry queue is used to store a packet that was
13 pulled but can't be sent immediately.
15 The old code path in ath_tx_start that would queue packets has been
16 removed completely, as have the qlen limit tunables (since there's no
17 longer a queue in the driver to limit).
19 Based on Tim's original patch set, but reworked quite thoroughly.
21 Cc: Tim Shepard <shep@alum.mit.edu>
22 Cc: Felix Fietkau <nbd@nbd.name>
23 Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
26 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
27 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
28 @@ -91,7 +91,6 @@ int ath_descdma_setup(struct ath_softc *
31 #define ATH_TXBUF_RESERVE 5
32 -#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
33 #define ATH_TXMAXTRY 13
34 #define ATH_MAX_SW_RETRIES 30
36 @@ -145,7 +144,7 @@ int ath_descdma_setup(struct ath_softc *
37 #define BAW_WITHIN(_start, _bawsz, _seqno) \
38 ((((_seqno) - (_start)) & 4095) < (_bawsz))
40 -#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
41 +#define ATH_AN_2_TID(_an, _tidno) ath_node_to_tid(_an, _tidno)
43 #define IS_HT_RATE(rate) (rate & 0x80)
44 #define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
45 @@ -164,7 +163,6 @@ struct ath_txq {
50 bool axq_tx_inprogress;
51 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
53 @@ -232,7 +230,6 @@ struct ath_buf {
56 struct list_head list;
57 - struct sk_buff_head buf_q;
58 struct sk_buff_head retry_q;
61 @@ -247,13 +244,13 @@ struct ath_atx_tid {
70 struct ieee80211_sta *sta; /* station struct we're part of */
71 struct ieee80211_vif *vif; /* interface with which we're associated */
72 - struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
76 @@ -276,7 +273,6 @@ struct ath_tx_control {
78 struct ieee80211_sta *sta;
84 @@ -293,7 +289,6 @@ struct ath_tx {
85 struct ath_descdma txdma;
86 struct ath_txq *txq_map[IEEE80211_NUM_ACS];
87 struct ath_txq *uapsdq;
88 - u32 txq_max_pending[IEEE80211_NUM_ACS];
89 u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
92 @@ -421,6 +416,22 @@ struct ath_offchannel {
96 +static inline struct ath_atx_tid *
97 +ath_node_to_tid(struct ath_node *an, u8 tidno)
99 + struct ieee80211_sta *sta = an->sta;
100 + struct ieee80211_vif *vif = an->vif;
101 + struct ieee80211_txq *txq;
105 + txq = sta->txq[tidno % ARRAY_SIZE(sta->txq)];
109 + return (struct ath_atx_tid *) txq->drv_priv;
112 #define case_rtn_string(val) case val: return #val
114 #define ath_for_each_chanctx(_sc, _ctx) \
115 @@ -575,7 +586,6 @@ void ath_tx_edma_tasklet(struct ath_soft
116 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
118 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
119 -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
121 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
122 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
123 @@ -585,6 +595,7 @@ void ath9k_release_buffered_frames(struc
124 u16 tids, int nframes,
125 enum ieee80211_frame_release_type reason,
127 +void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);
131 --- a/drivers/net/wireless/ath/ath9k/channel.c
132 +++ b/drivers/net/wireless/ath/ath9k/channel.c
133 @@ -1010,7 +1010,6 @@ static void ath_scan_send_probe(struct a
136 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
137 - txctl.force_channel = true;
138 if (ath_tx_start(sc->hw, skb, &txctl))
141 @@ -1133,7 +1132,6 @@ ath_chanctx_send_vif_ps_frame(struct ath
142 memset(&txctl, 0, sizeof(txctl));
143 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
145 - txctl.force_channel = true;
146 if (ath_tx_start(sc->hw, skb, &txctl)) {
147 ieee80211_free_txskb(sc->hw, skb);
149 --- a/drivers/net/wireless/ath/ath9k/debug.c
150 +++ b/drivers/net/wireless/ath/ath9k/debug.c
151 @@ -600,7 +600,6 @@ static int read_file_xmit(struct seq_fil
152 PR("MPDUs XRetried: ", xretries);
153 PR("Aggregates: ", a_aggr);
154 PR("AMPDUs Queued HW:", a_queued_hw);
155 - PR("AMPDUs Queued SW:", a_queued_sw);
156 PR("AMPDUs Completed:", a_completed);
157 PR("AMPDUs Retried: ", a_retries);
158 PR("AMPDUs XRetried: ", a_xretries);
159 @@ -629,8 +628,7 @@ static void print_queue(struct ath_softc
160 seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
161 seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
162 seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
163 - seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
164 - seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
165 + seq_printf(file, "%s: %3d\n", "pending", txq->pending_frames);
167 ath_txq_unlock(sc, txq);
169 @@ -1208,7 +1206,6 @@ static const char ath9k_gstrings_stats[]
170 AMKSTR(d_tx_mpdu_xretries),
171 AMKSTR(d_tx_aggregates),
172 AMKSTR(d_tx_ampdus_queued_hw),
173 - AMKSTR(d_tx_ampdus_queued_sw),
174 AMKSTR(d_tx_ampdus_completed),
175 AMKSTR(d_tx_ampdu_retries),
176 AMKSTR(d_tx_ampdu_xretries),
177 @@ -1288,7 +1285,6 @@ void ath9k_get_et_stats(struct ieee80211
181 - AWDATA(a_queued_sw);
185 @@ -1346,14 +1342,6 @@ int ath9k_init_debug(struct ath_hw *ah)
187 debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
189 - debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
190 - &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
191 - debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
192 - &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
193 - debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
194 - &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
195 - debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
196 - &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
197 debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
199 debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
200 --- a/drivers/net/wireless/ath/ath9k/debug.h
201 +++ b/drivers/net/wireless/ath/ath9k/debug.h
202 @@ -147,7 +147,6 @@ struct ath_interrupt_stats {
203 * @completed: Total MPDUs (non-aggr) completed
204 * @a_aggr: Total no. of aggregates queued
205 * @a_queued_hw: Total AMPDUs queued to hardware
206 - * @a_queued_sw: Total AMPDUs queued to software queues
207 * @a_completed: Total AMPDUs completed
208 * @a_retries: No. of AMPDUs retried (SW)
209 * @a_xretries: No. of AMPDUs dropped due to xretries
210 @@ -174,7 +173,6 @@ struct ath_tx_stats {
218 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c
219 +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
220 @@ -52,8 +52,8 @@ static ssize_t read_file_node_aggr(struc
221 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
222 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
224 - for (tidno = 0, tid = &an->tid[tidno];
225 - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
226 + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
227 + tid = ath_node_to_tid(an, tidno);
229 ath_txq_lock(sc, txq);
231 --- a/drivers/net/wireless/ath/ath9k/init.c
232 +++ b/drivers/net/wireless/ath/ath9k/init.c
233 @@ -358,7 +358,6 @@ static int ath9k_init_queues(struct ath_
234 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
235 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
236 sc->tx.txq_map[i]->mac80211_qnum = i;
237 - sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
241 @@ -877,6 +876,7 @@ static void ath9k_set_hw_capab(struct at
242 hw->max_rate_tries = 10;
243 hw->sta_data_size = sizeof(struct ath_node);
244 hw->vif_data_size = sizeof(struct ath_vif);
245 + hw->txq_data_size = sizeof(struct ath_atx_tid);
246 hw->extra_tx_headroom = 4;
248 hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
249 --- a/drivers/net/wireless/ath/ath9k/main.c
250 +++ b/drivers/net/wireless/ath/ath9k/main.c
251 @@ -1902,9 +1902,11 @@ static int ath9k_ampdu_action(struct iee
254 struct ieee80211_sta *sta = params->sta;
255 + struct ath_node *an = (struct ath_node *)sta->drv_priv;
256 enum ieee80211_ampdu_mlme_action action = params->action;
257 u16 tid = params->tid;
258 u16 *ssn = ¶ms->ssn;
259 + struct ath_atx_tid *atid;
261 mutex_lock(&sc->mutex);
263 @@ -1937,9 +1939,9 @@ static int ath9k_ampdu_action(struct iee
264 ath9k_ps_restore(sc);
266 case IEEE80211_AMPDU_TX_OPERATIONAL:
267 - ath9k_ps_wakeup(sc);
268 - ath_tx_aggr_resume(sc, sta, tid);
269 - ath9k_ps_restore(sc);
270 + atid = ath_node_to_tid(an, tid);
271 + atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
272 + sta->ht_cap.ampdu_factor;
275 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
276 @@ -2701,4 +2703,5 @@ struct ieee80211_ops ath9k_ops = {
277 .sw_scan_start = ath9k_sw_scan_start,
278 .sw_scan_complete = ath9k_sw_scan_complete,
279 .get_txpower = ath9k_get_txpower,
280 + .wake_tx_queue = ath9k_wake_tx_queue,
282 --- a/drivers/net/wireless/ath/ath9k/xmit.c
283 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
284 @@ -67,6 +67,8 @@ static struct ath_buf *ath_tx_setup_buff
286 struct ath_atx_tid *tid,
287 struct sk_buff *skb);
288 +static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
289 + struct ath_tx_control *txctl);
293 @@ -137,6 +139,26 @@ static void ath_tx_queue_tid(struct ath_
294 list_add_tail(&tid->list, list);
297 +void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
299 + struct ath_softc *sc = hw->priv;
300 + struct ath_common *common = ath9k_hw_common(sc->sc_ah);
301 + struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
302 + struct ath_txq *txq = tid->txq;
304 + ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
305 + queue->sta ? queue->sta->addr : queue->vif->addr,
308 + ath_txq_lock(sc, txq);
310 + tid->has_queued = true;
311 + ath_tx_queue_tid(sc, txq, tid);
312 + ath_txq_schedule(sc, txq);
314 + ath_txq_unlock(sc, txq);
317 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
319 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
320 @@ -164,7 +186,6 @@ static void ath_set_rates(struct ieee802
321 static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
324 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
325 struct ath_frame_info *fi = get_frame_info(skb);
328 @@ -175,14 +196,6 @@ static void ath_txq_skb_done(struct ath_
329 if (WARN_ON(--txq->pending_frames < 0))
330 txq->pending_frames = 0;
332 - if (txq->stopped &&
333 - txq->pending_frames < sc->tx.txq_max_pending[q]) {
334 - if (ath9k_is_chanctx_enabled())
335 - ieee80211_wake_queue(sc->hw, info->hw_queue);
337 - ieee80211_wake_queue(sc->hw, q);
338 - txq->stopped = false;
342 static struct ath_atx_tid *
343 @@ -192,9 +205,48 @@ ath_get_skb_tid(struct ath_softc *sc, st
344 return ATH_AN_2_TID(an, tidno);
347 +static struct sk_buff *
348 +ath_tid_pull(struct ath_atx_tid *tid)
350 + struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv);
351 + struct ath_softc *sc = tid->an->sc;
352 + struct ieee80211_hw *hw = sc->hw;
353 + struct ath_tx_control txctl = {
355 + .sta = tid->an->sta,
357 + struct sk_buff *skb;
358 + struct ath_frame_info *fi;
361 + if (!tid->has_queued)
364 + skb = ieee80211_tx_dequeue(hw, txq);
366 + tid->has_queued = false;
370 + if (ath_tx_prepare(hw, skb, &txctl)) {
371 + ieee80211_free_txskb(hw, skb);
375 + q = skb_get_queue_mapping(skb);
376 + if (tid->txq == sc->tx.txq_map[q]) {
377 + fi = get_frame_info(skb);
379 + ++tid->txq->pending_frames;
386 static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
388 - return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
389 + return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
392 static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
393 @@ -203,46 +255,11 @@ static struct sk_buff *ath_tid_dequeue(s
395 skb = __skb_dequeue(&tid->retry_q);
397 - skb = __skb_dequeue(&tid->buf_q);
398 + skb = ath_tid_pull(tid);
404 - * ath_tx_tid_change_state:
405 - * - clears a-mpdu flag of previous session
406 - * - force sequence number allocation to fix next BlockAck Window
409 -ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
411 - struct ath_txq *txq = tid->txq;
412 - struct ieee80211_tx_info *tx_info;
413 - struct sk_buff *skb, *tskb;
414 - struct ath_buf *bf;
415 - struct ath_frame_info *fi;
417 - skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
418 - fi = get_frame_info(skb);
421 - tx_info = IEEE80211_SKB_CB(skb);
422 - tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
427 - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
429 - __skb_unlink(skb, &tid->buf_q);
430 - ath_txq_skb_done(sc, txq, skb);
431 - ieee80211_free_txskb(sc->hw, skb);
438 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
440 struct ath_txq *txq = tid->txq;
441 @@ -883,20 +900,16 @@ static int ath_compute_num_delims(struct
443 static struct ath_buf *
444 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
445 - struct ath_atx_tid *tid, struct sk_buff_head **q)
446 + struct ath_atx_tid *tid)
448 struct ieee80211_tx_info *tx_info;
449 struct ath_frame_info *fi;
450 - struct sk_buff *skb;
451 + struct sk_buff *skb, *first_skb = NULL;
456 - *q = &tid->retry_q;
457 - if (skb_queue_empty(*q))
460 - skb = skb_peek(*q);
461 + skb = ath_tid_dequeue(tid);
465 @@ -908,7 +921,6 @@ ath_tx_get_tid_subframe(struct ath_softc
466 bf->bf_state.stale = false;
469 - __skb_unlink(skb, *q);
470 ath_txq_skb_done(sc, txq, skb);
471 ieee80211_free_txskb(sc->hw, skb);
473 @@ -937,8 +949,20 @@ ath_tx_get_tid_subframe(struct ath_softc
474 seqno = bf->bf_state.seqno;
476 /* do not step over block-ack window */
477 - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
478 + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
479 + __skb_queue_tail(&tid->retry_q, skb);
481 + /* If there are other skbs in the retry q, they are
482 + * probably within the BAW, so loop immediately to get
483 + * one of them. Otherwise the queue can get stuck. */
484 + if (!skb_queue_is_first(&tid->retry_q, skb) &&
485 + !WARN_ON(skb == first_skb)) {
486 + if(!first_skb) /* infinite loop prevention */
493 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
494 struct ath_tx_status ts = {};
495 @@ -946,7 +970,6 @@ ath_tx_get_tid_subframe(struct ath_softc
497 INIT_LIST_HEAD(&bf_head);
498 list_add(&bf->list, &bf_head);
499 - __skb_unlink(skb, *q);
500 ath_tx_update_baw(sc, tid, seqno);
501 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
503 @@ -958,11 +981,10 @@ ath_tx_get_tid_subframe(struct ath_softc
509 ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
510 struct ath_atx_tid *tid, struct list_head *bf_q,
511 - struct ath_buf *bf_first, struct sk_buff_head *tid_q,
513 + struct ath_buf *bf_first)
515 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
516 struct ath_buf *bf = bf_first, *bf_prev = NULL;
517 @@ -972,12 +994,13 @@ ath_tx_form_aggr(struct ath_softc *sc, s
518 struct ieee80211_tx_info *tx_info;
519 struct ath_frame_info *fi;
521 - bool closed = false;
525 aggr_limit = ath_lookup_rate(sc, bf, tid);
531 fi = get_frame_info(skb);
533 @@ -986,12 +1009,12 @@ ath_tx_form_aggr(struct ath_softc *sc, s
535 if (aggr_limit < al + bpad + al_delta ||
536 ath_lookup_legacy(bf) || nframes >= h_baw)
540 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
541 if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
542 !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
547 /* add padding for previous frame to aggregation length */
548 @@ -1013,20 +1036,18 @@ ath_tx_form_aggr(struct ath_softc *sc, s
549 ath_tx_addto_baw(sc, tid, bf);
550 bf->bf_state.ndelim = ndelim;
552 - __skb_unlink(skb, tid_q);
553 list_add_tail(&bf->list, bf_q);
555 bf_prev->bf_next = bf;
559 - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
564 - } while (ath_tid_has_buffered(tid));
566 + bf = ath_tx_get_tid_subframe(sc, txq, tid);
570 + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
573 bf->bf_lastbf = bf_prev;
575 @@ -1037,9 +1058,7 @@ ath_tx_form_aggr(struct ath_softc *sc, s
576 TX_STAT_INC(txq->axq_qnum, a_aggr);
586 @@ -1416,18 +1435,15 @@ static void ath_tx_fill_desc(struct ath_
588 ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
589 struct ath_atx_tid *tid, struct list_head *bf_q,
590 - struct ath_buf *bf_first, struct sk_buff_head *tid_q)
591 + struct ath_buf *bf_first)
593 struct ath_buf *bf = bf_first, *bf_prev = NULL;
594 - struct sk_buff *skb;
598 struct ieee80211_tx_info *tx_info;
602 - __skb_unlink(skb, tid_q);
603 list_add_tail(&bf->list, bf_q);
605 bf_prev->bf_next = bf;
606 @@ -1436,13 +1452,15 @@ ath_tx_form_burst(struct ath_softc *sc,
610 - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
611 + bf = ath_tx_get_tid_subframe(sc, txq, tid);
615 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
616 - if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
617 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
618 + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
622 ath_set_rates(tid->an->vif, tid->an->sta, bf);
624 @@ -1453,34 +1471,33 @@ static bool ath_tx_sched_aggr(struct ath
627 struct ieee80211_tx_info *tx_info;
628 - struct sk_buff_head *tid_q;
629 struct list_head bf_q;
631 - bool aggr, last = true;
634 if (!ath_tid_has_buffered(tid))
637 INIT_LIST_HEAD(&bf_q);
639 - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
640 + bf = ath_tx_get_tid_subframe(sc, txq, tid);
644 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
645 aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
646 if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
647 - (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
648 + (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
649 + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
654 ath_set_rates(tid->an->vif, tid->an->sta, bf);
656 - last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
658 + aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
660 - ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
661 + ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
663 if (list_empty(&bf_q))
665 @@ -1523,9 +1540,6 @@ int ath_tx_aggr_start(struct ath_softc *
666 an->mpdudensity = density;
669 - /* force sequence number allocation for pending frames */
670 - ath_tx_tid_change_state(sc, txtid);
672 txtid->active = true;
673 *ssn = txtid->seq_start = txtid->seq_next;
674 txtid->bar_index = -1;
675 @@ -1550,7 +1564,6 @@ void ath_tx_aggr_stop(struct ath_softc *
676 ath_txq_lock(sc, txq);
677 txtid->active = false;
678 ath_tx_flush_tid(sc, txtid);
679 - ath_tx_tid_change_state(sc, txtid);
680 ath_txq_unlock_complete(sc, txq);
683 @@ -1560,14 +1573,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
684 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
685 struct ath_atx_tid *tid;
690 ath_dbg(common, XMIT, "%s called\n", __func__);
692 - for (tidno = 0, tid = &an->tid[tidno];
693 - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
695 + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
696 + tid = ath_node_to_tid(an, tidno);
699 ath_txq_lock(sc, txq);
700 @@ -1577,13 +1588,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
704 - buffered = ath_tid_has_buffered(tid);
705 + if (!skb_queue_empty(&tid->retry_q))
706 + ieee80211_sta_set_buffered(sta, tid->tidno, true);
708 list_del_init(&tid->list);
710 ath_txq_unlock(sc, txq);
712 - ieee80211_sta_set_buffered(sta, tidno, buffered);
716 @@ -1596,49 +1606,20 @@ void ath_tx_aggr_wakeup(struct ath_softc
718 ath_dbg(common, XMIT, "%s called\n", __func__);
720 - for (tidno = 0, tid = &an->tid[tidno];
721 - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
723 + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
724 + tid = ath_node_to_tid(an, tidno);
727 ath_txq_lock(sc, txq);
728 tid->clear_ps_filter = true;
730 if (ath_tid_has_buffered(tid)) {
731 ath_tx_queue_tid(sc, txq, tid);
732 ath_txq_schedule(sc, txq);
735 ath_txq_unlock_complete(sc, txq);
739 -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
742 - struct ath_common *common = ath9k_hw_common(sc->sc_ah);
743 - struct ath_atx_tid *tid;
744 - struct ath_node *an;
745 - struct ath_txq *txq;
747 - ath_dbg(common, XMIT, "%s called\n", __func__);
749 - an = (struct ath_node *)sta->drv_priv;
750 - tid = ATH_AN_2_TID(an, tidno);
753 - ath_txq_lock(sc, txq);
755 - tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
757 - if (ath_tid_has_buffered(tid)) {
758 - ath_tx_queue_tid(sc, txq, tid);
759 - ath_txq_schedule(sc, txq);
762 - ath_txq_unlock_complete(sc, txq);
765 void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
766 struct ieee80211_sta *sta,
767 u16 tids, int nframes,
768 @@ -1651,7 +1632,6 @@ void ath9k_release_buffered_frames(struc
769 struct ieee80211_tx_info *info;
770 struct list_head bf_q;
771 struct ath_buf *bf_tail = NULL, *bf;
772 - struct sk_buff_head *tid_q;
776 @@ -1666,11 +1646,10 @@ void ath9k_release_buffered_frames(struc
778 ath_txq_lock(sc, tid->txq);
779 while (nframes > 0) {
780 - bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
781 + bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
785 - __skb_unlink(bf->bf_mpdu, tid_q);
786 list_add_tail(&bf->list, &bf_q);
787 ath_set_rates(tid->an->vif, tid->an->sta, bf);
788 if (bf_isampdu(bf)) {
789 @@ -1685,7 +1664,7 @@ void ath9k_release_buffered_frames(struc
791 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
793 - if (an->sta && !ath_tid_has_buffered(tid))
794 + if (an->sta && skb_queue_empty(&tid->retry_q))
795 ieee80211_sta_set_buffered(an->sta, i, false);
797 ath_txq_unlock_complete(sc, tid->txq);
798 @@ -1914,13 +1893,7 @@ bool ath_drain_all_txq(struct ath_softc
799 if (!ATH_TXQ_SETUP(sc, i))
803 - * The caller will resume queues with ieee80211_wake_queues.
804 - * Mark the queue as not stopped to prevent ath_tx_complete
805 - * from waking the queue too early.
807 txq = &sc->tx.txq[i];
808 - txq->stopped = false;
809 ath_draintxq(sc, txq);
812 @@ -2319,16 +2292,14 @@ int ath_tx_start(struct ieee80211_hw *hw
813 struct ath_softc *sc = hw->priv;
814 struct ath_txq *txq = txctl->txq;
815 struct ath_atx_tid *tid = NULL;
816 + struct ath_node *an = NULL;
818 - bool queue, skip_uapsd = false, ps_resp;
823 avp = (void *)vif->drv_priv;
825 - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
826 - txctl->force_channel = true;
828 ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
830 ret = ath_tx_prepare(hw, skb, txctl);
831 @@ -2343,63 +2314,18 @@ int ath_tx_start(struct ieee80211_hw *hw
833 q = skb_get_queue_mapping(skb);
835 - ath_txq_lock(sc, txq);
836 - if (txq == sc->tx.txq_map[q]) {
838 - if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
840 - if (ath9k_is_chanctx_enabled())
841 - ieee80211_stop_queue(sc->hw, info->hw_queue);
843 - ieee80211_stop_queue(sc->hw, q);
844 - txq->stopped = true;
848 - queue = ieee80211_is_data_present(hdr->frame_control);
850 - /* If chanctx, queue all null frames while NOA could be there */
851 - if (ath9k_is_chanctx_enabled() &&
852 - ieee80211_is_nullfunc(hdr->frame_control) &&
853 - !txctl->force_channel)
856 - /* Force queueing of all frames that belong to a virtual interface on
857 - * a different channel context, to ensure that they are sent on the
860 - if (((avp && avp->chanctx != sc->cur_chan) ||
861 - sc->cur_chan->stopped) && !txctl->force_channel) {
863 - txctl->an = &avp->mcast_node;
868 - if (txctl->an && queue)
869 - tid = ath_get_skb_tid(sc, txctl->an, skb);
871 - if (!skip_uapsd && ps_resp) {
872 - ath_txq_unlock(sc, txq);
875 - ath_txq_lock(sc, txq);
876 - } else if (txctl->an && queue) {
877 - WARN_ON(tid->txq != txctl->txq);
879 - if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
880 - tid->clear_ps_filter = true;
883 - * Add this frame to software queue for scheduling later
886 - TX_STAT_INC(txq->axq_qnum, a_queued_sw);
887 - __skb_queue_tail(&tid->buf_q, skb);
888 - if (!txctl->an->sleeping)
889 - ath_tx_queue_tid(sc, txq, tid);
891 + an = (struct ath_node *) sta->drv_priv;
892 + tid = ath_get_skb_tid(sc, an, skb);
895 - ath_txq_schedule(sc, txq);
897 + ath_txq_lock(sc, txq);
898 + if (txq == sc->tx.txq_map[q]) {
900 + ++txq->pending_frames;
903 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
904 @@ -2892,9 +2818,8 @@ void ath_tx_node_init(struct ath_softc *
905 struct ath_atx_tid *tid;
908 - for (tidno = 0, tid = &an->tid[tidno];
909 - tidno < IEEE80211_NUM_TIDS;
911 + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
912 + tid = ath_node_to_tid(an, tidno);
915 tid->seq_start = tid->seq_next = 0;
916 @@ -2902,11 +2827,14 @@ void ath_tx_node_init(struct ath_softc *
917 tid->baw_head = tid->baw_tail = 0;
919 tid->clear_ps_filter = true;
920 - __skb_queue_head_init(&tid->buf_q);
921 + tid->has_queued = false;
922 __skb_queue_head_init(&tid->retry_q);
923 INIT_LIST_HEAD(&tid->list);
924 acno = TID_TO_WME_AC(tidno);
925 tid->txq = sc->tx.txq_map[acno];
928 + break; /* just one multicast ath_atx_tid */
932 @@ -2916,9 +2844,8 @@ void ath_tx_node_cleanup(struct ath_soft
936 - for (tidno = 0, tid = &an->tid[tidno];
937 - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
939 + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
940 + tid = ath_node_to_tid(an, tidno);
943 ath_txq_lock(sc, txq);
944 @@ -2930,6 +2857,9 @@ void ath_tx_node_cleanup(struct ath_soft
947 ath_txq_unlock(sc, txq);
950 + break; /* just one multicast ath_atx_tid */