From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
Date: Wed, 6 Jul 2016 21:34:17 +0200
Subject: [PATCH] ath9k: Switch to using mac80211 intermediate software queues.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This switches ath9k over to using the mac80211 intermediate software
queueing mechanism for data packets. It removes the queueing inside the
driver, except for the retry queue, and instead pulls from mac80211 when
a packet is needed. The retry queue is used to store a packet that was
pulled but can't be sent immediately.
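
Concretely, mac80211 now signals pending packets through the
wake_tx_queue callback, and the driver fetches them on demand. A
condensed sketch of both sides of that contract (adapted from the
ath9k_wake_tx_queue() and ath_tid_dequeue() bodies added below; the
debug print is omitted here):

    /* Called by mac80211 when new frames are queued on the TID's txq */
    void ath9k_wake_tx_queue(struct ieee80211_hw *hw,
                             struct ieee80211_txq *queue)
    {
            struct ath_softc *sc = hw->priv;
            struct ath_atx_tid *tid = (struct ath_atx_tid *)queue->drv_priv;
            struct ath_txq *txq = tid->txq;

            ath_txq_lock(sc, txq);
            tid->has_queued = true;         /* remember there is data to pull */
            ath_tx_queue_tid(sc, txq, tid); /* put the TID on the schedule list */
            ath_txq_schedule(sc, txq);
            ath_txq_unlock(sc, txq);
    }

    /* When the driver needs a frame, it drains the retry queue first and
     * only then pulls from mac80211; ath_tid_pull() wraps
     * ieee80211_tx_dequeue() and clears has_queued when the txq runs dry */
    static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
    {
            struct sk_buff *skb;

            skb = __skb_dequeue(&tid->retry_q);
            if (!skb)
                    skb = ath_tid_pull(tid);

            return skb;
    }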

The old code path in ath_tx_start that would queue packets has been
removed completely, as have the qlen limit tunables (since there's no
longer a queue in the driver to limit).

Based on Tim's original patch set, but reworked quite thoroughly.
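
The mac80211 hookup itself is small: each ieee80211_txq gets an
ath_atx_tid in its driver-private area, and the wake callback is
registered in ieee80211_ops (see the init.c and main.c hunks below):

    hw->txq_data_size = sizeof(struct ath_atx_tid);

    struct ieee80211_ops ath9k_ops = {
            /* ...existing callbacks... */
            .wake_tx_queue = ath9k_wake_tx_queue,
    };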

Cc: Tim Shepard <shep@alum.mit.edu>
Cc: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---

--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -91,7 +91,6 @@ int ath_descdma_setup(struct ath_softc *
#define ATH_RXBUF 512
#define ATH_TXBUF 512
#define ATH_TXBUF_RESERVE 5
-#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
#define ATH_MAX_SW_RETRIES 30

@@ -145,7 +144,9 @@ int ath_descdma_setup(struct ath_softc *
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))

-#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
+#define ATH_STA_2_TID(_sta, _tidno) ((struct ath_atx_tid *)(_sta)->txq[_tidno]->drv_priv)
+#define ATH_VIF_2_TID(_vif) ((struct ath_atx_tid *)(_vif)->txq->drv_priv)
+#define ATH_AN_2_TID(_an, _tidno) ((_an)->sta ? ATH_STA_2_TID((_an)->sta, _tidno) : ATH_VIF_2_TID((_an)->vif))

#define IS_HT_RATE(rate) (rate & 0x80)
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
@@ -164,7 +165,6 @@ struct ath_txq {
spinlock_t axq_lock;
u32 axq_depth;
u32 axq_ampdu_depth;
- bool stopped;
bool axq_tx_inprogress;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
u8 txq_headidx;
@@ -232,7 +232,6 @@ struct ath_buf {

struct ath_atx_tid {
struct list_head list;
- struct sk_buff_head buf_q;
struct sk_buff_head retry_q;
struct ath_node *an;
struct ath_txq *txq;
@@ -247,13 +246,13 @@ struct ath_atx_tid {
s8 bar_index;
bool active;
bool clear_ps_filter;
+ bool has_queued;
};

struct ath_node {
struct ath_softc *sc;
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
- struct ath_atx_tid tid[IEEE80211_NUM_TIDS];

u16 maxampdu;
u8 mpdudensity;
@@ -276,7 +275,6 @@ struct ath_tx_control {
struct ath_node *an;
struct ieee80211_sta *sta;
u8 paprd;
- bool force_channel;
};


@@ -293,7 +291,6 @@ struct ath_tx {
struct ath_descdma txdma;
struct ath_txq *txq_map[IEEE80211_NUM_ACS];
struct ath_txq *uapsdq;
- u32 txq_max_pending[IEEE80211_NUM_ACS];
u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
};

@@ -585,6 +582,7 @@ void ath9k_release_buffered_frames(struc
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);

/********/
/* VIFs */
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1007,7 +1007,6 @@ static void ath_scan_send_probe(struct a
goto error;

txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- txctl.force_channel = true;
if (ath_tx_start(sc->hw, skb, &txctl))
goto error;

@@ -1130,7 +1129,6 @@ ath_chanctx_send_vif_ps_frame(struct ath
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
txctl.sta = sta;
- txctl.force_channel = true;
if (ath_tx_start(sc->hw, skb, &txctl)) {
ieee80211_free_txskb(sc->hw, skb);
return false;
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -600,7 +600,6 @@ static int read_file_xmit(struct seq_fil
PR("MPDUs XRetried: ", xretries);
PR("Aggregates: ", a_aggr);
PR("AMPDUs Queued HW:", a_queued_hw);
- PR("AMPDUs Queued SW:", a_queued_sw);
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
@@ -629,8 +628,7 @@ static void print_queue(struct ath_softc
seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
- seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
- seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
+ seq_printf(file, "%s: %3d\n", "pending", txq->pending_frames);

ath_txq_unlock(sc, txq);
}
@@ -1208,7 +1206,6 @@ static const char ath9k_gstrings_stats[]
AMKSTR(d_tx_mpdu_xretries),
AMKSTR(d_tx_aggregates),
AMKSTR(d_tx_ampdus_queued_hw),
- AMKSTR(d_tx_ampdus_queued_sw),
AMKSTR(d_tx_ampdus_completed),
AMKSTR(d_tx_ampdu_retries),
AMKSTR(d_tx_ampdu_xretries),
@@ -1288,7 +1285,6 @@ void ath9k_get_et_stats(struct ieee80211
AWDATA(xretries);
AWDATA(a_aggr);
AWDATA(a_queued_hw);
- AWDATA(a_queued_sw);
AWDATA(a_completed);
AWDATA(a_retries);
AWDATA(a_xretries);
@@ -1346,14 +1342,6 @@ int ath9k_init_debug(struct ath_hw *ah)
read_file_xmit);
debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
read_file_queues);
- debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
- debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
- debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
- debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
read_file_misc);
debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -147,7 +147,6 @@ struct ath_interrupt_stats {
* @completed: Total MPDUs (non-aggr) completed
* @a_aggr: Total no. of aggregates queued
* @a_queued_hw: Total AMPDUs queued to hardware
- * @a_queued_sw: Total AMPDUs queued to software queues
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
@@ -174,7 +173,6 @@ struct ath_tx_stats {
u32 xretries;
u32 a_aggr;
u32 a_queued_hw;
- u32 a_queued_sw;
u32 a_completed;
u32 a_retries;
u32 a_xretries;
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -52,8 +52,8 @@ static ssize_t read_file_node_aggr(struc
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ATH_STA_2_TID(an->sta, tidno);
txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->active) {
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -358,7 +358,6 @@ static int ath9k_init_queues(struct ath_
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
- sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
}
return 0;
}
@@ -873,6 +872,7 @@ static void ath9k_set_hw_capab(struct at
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
hw->vif_data_size = sizeof(struct ath_vif);
+ hw->txq_data_size = sizeof(struct ath_atx_tid);
hw->extra_tx_headroom = 4;

hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2695,4 +2695,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.get_txpower = ath9k_get_txpower,
+ .wake_tx_queue = ath9k_wake_tx_queue,
};
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -65,6 +65,8 @@ static struct ath_buf *ath_tx_setup_buff
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct sk_buff *skb);
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl);

enum {
MCS_HT20,
@@ -118,6 +120,26 @@ static void ath_tx_queue_tid(struct ath_
list_add_tail(&tid->list, list);
}

+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
+ struct ath_txq *txq = tid->txq;
+
+ ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
+ queue->sta ? queue->sta->addr : queue->vif->addr,
+ tid->tidno);
+
+ ath_txq_lock(sc, txq);
+
+ tid->has_queued = true;
+ ath_tx_queue_tid(sc, txq, tid);
+ ath_txq_schedule(sc, txq);
+
+ ath_txq_unlock(sc, txq);
+}
+
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -160,7 +182,6 @@ static void ath_set_rates(struct ieee802
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
int q = fi->txq;

@@ -171,14 +192,6 @@ static void ath_txq_skb_done(struct ath_
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;

- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_wake_queue(sc->hw, info->hw_queue);
- else
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
}

static struct ath_atx_tid *
@@ -188,9 +201,47 @@ ath_get_skb_tid(struct ath_softc *sc, st
return ATH_AN_2_TID(an, tidno);
}

+static struct sk_buff *
+ath_tid_pull(struct ath_atx_tid *tid)
+{
+ struct ath_softc *sc = tid->an->sc;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_tx_control txctl = {
+ .txq = tid->txq,
+ .sta = tid->an->sta,
+ };
+ struct sk_buff *skb;
+ struct ath_frame_info *fi;
+ int q;
+
+ if (!tid->has_queued)
+ return NULL;
+
+ skb = ieee80211_tx_dequeue(hw, container_of((void*)tid, struct ieee80211_txq, drv_priv));
+ if (!skb) {
+ tid->has_queued = false;
+ return NULL;
+ }
+
+ if (ath_tx_prepare(hw, skb, &txctl)) {
+ ieee80211_free_txskb(hw, skb);
+ return NULL;
+ }
+
+ q = skb_get_queue_mapping(skb);
+ if (tid->txq == sc->tx.txq_map[q]) {
+ fi = get_frame_info(skb);
+ fi->txq = q;
+ ++tid->txq->pending_frames;
+ }
+
+ return skb;
+ }
+
+
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
- return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+ return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
}

static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
@@ -199,46 +250,11 @@ static struct sk_buff *ath_tid_dequeue(s

skb = __skb_dequeue(&tid->retry_q);
if (!skb)
- skb = __skb_dequeue(&tid->buf_q);
+ skb = ath_tid_pull(tid);

return skb;
}

-/*
- * ath_tx_tid_change_state:
- * - clears a-mpdu flag of previous session
- * - force sequence number allocation to fix next BlockAck Window
- */
-static void
-ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_txq *txq = tid->txq;
- struct ieee80211_tx_info *tx_info;
- struct sk_buff *skb, *tskb;
- struct ath_buf *bf;
- struct ath_frame_info *fi;
-
- skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
- fi = get_frame_info(skb);
- bf = fi->bf;
-
- tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
- if (bf)
- continue;
-
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
- ath_txq_skb_done(sc, txq, skb);
- ieee80211_free_txskb(sc->hw, skb);
- continue;
- }
- }
-
-}
-
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->txq;
@@ -873,20 +889,16 @@ static int ath_compute_num_delims(struct

static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, struct sk_buff_head **q)
+ struct ath_atx_tid *tid)
{
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *first_skb = NULL;
struct ath_buf *bf;
u16 seqno;

while (1) {
- *q = &tid->retry_q;
- if (skb_queue_empty(*q))
- *q = &tid->buf_q;
-
- skb = skb_peek(*q);
+ skb = ath_tid_dequeue(tid);
if (!skb)
break;

@@ -898,7 +910,6 @@ ath_tx_get_tid_subframe(struct ath_softc
bf->bf_state.stale = false;

if (!bf) {
- __skb_unlink(skb, *q);
ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
@@ -927,8 +938,19 @@ ath_tx_get_tid_subframe(struct ath_softc
seqno = bf->bf_state.seqno;

/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ __skb_queue_tail(&tid->retry_q, skb);
+
+ /* If there are other skbs in the retry q, they are
+ * probably within the BAW, so loop immediately to get
+ * one of them. Otherwise the queue can get stuck. */
+ if (!skb_queue_is_first(&tid->retry_q, skb) && skb != first_skb) {
+ if(!first_skb) /* infinite loop prevention */
+ first_skb = skb;
+ continue;
+ }
break;
+ }

if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
struct ath_tx_status ts = {};
@@ -936,7 +958,6 @@ ath_tx_get_tid_subframe(struct ath_softc

INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- __skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
continue;
@@ -948,11 +969,10 @@ ath_tx_get_tid_subframe(struct ath_softc
return NULL;
}

-static bool
+static int
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q,
- int *aggr_len)
+ struct ath_buf *bf_first)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf = bf_first, *bf_prev = NULL;
@@ -962,12 +982,13 @@ ath_tx_form_aggr(struct ath_softc *sc, s
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
- bool closed = false;
+

bf = bf_first;
aggr_limit = ath_lookup_rate(sc, bf, tid);

- do {
+ while (bf)
+ {
skb = bf->bf_mpdu;
fi = get_frame_info(skb);

@@ -976,12 +997,12 @@ ath_tx_form_aggr(struct ath_softc *sc, s
if (nframes) {
if (aggr_limit < al + bpad + al_delta ||
ath_lookup_legacy(bf) || nframes >= h_baw)
- break;
+ goto stop;

tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
!(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
- break;
+ goto stop;
}

/* add padding for previous frame to aggregation length */
@@ -1003,20 +1024,18 @@ ath_tx_form_aggr(struct ath_softc *sc, s
ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;

- __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;

bf_prev = bf;

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
- if (!bf) {
- closed = true;
- break;
- }
- } while (ath_tid_has_buffered(tid));
-
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ }
+ goto finish;
+stop:
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
+finish:
bf = bf_first;
bf->bf_lastbf = bf_prev;

@@ -1027,9 +1046,7 @@ ath_tx_form_aggr(struct ath_softc *sc, s
TX_STAT_INC(txq->axq_qnum, a_aggr);
}

- *aggr_len = al;
-
- return closed;
+ return al;
#undef PADBYTES
}

@@ -1406,18 +1423,15 @@ static void ath_tx_fill_desc(struct ath_
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q)
+ struct ath_buf *bf_first)
{
struct ath_buf *bf = bf_first, *bf_prev = NULL;
- struct sk_buff *skb;
int nframes = 0;

do {
struct ieee80211_tx_info *tx_info;
- skb = bf->bf_mpdu;

nframes++;
- __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
@@ -1426,13 +1440,15 @@ ath_tx_form_burst(struct ath_softc *sc,
if (nframes >= 2)
break;

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
if (!bf)
break;

tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
break;
+ }

ath_set_rates(tid->an->vif, tid->an->sta, bf, false);
} while (1);
@@ -1443,34 +1459,33 @@ static bool ath_tx_sched_aggr(struct ath
{
struct ath_buf *bf;
struct ieee80211_tx_info *tx_info;
- struct sk_buff_head *tid_q;
struct list_head bf_q;
int aggr_len = 0;
- bool aggr, last = true;
+ bool aggr;

if (!ath_tid_has_buffered(tid))
return false;

INIT_LIST_HEAD(&bf_q);

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
if (!bf)
return false;

tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
- (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
*stop = true;
return false;
}

ath_set_rates(tid->an->vif, tid->an->sta, bf, false);
if (aggr)
- last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
- tid_q, &aggr_len);
+ aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
else
- ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf);

if (list_empty(&bf_q))
return false;
@@ -1513,9 +1528,6 @@ int ath_tx_aggr_start(struct ath_softc *
an->mpdudensity = density;
}

- /* force sequence number allocation for pending frames */
- ath_tx_tid_change_state(sc, txtid);
-
txtid->active = true;
*ssn = txtid->seq_start = txtid->seq_next;
txtid->bar_index = -1;
@@ -1540,7 +1552,6 @@ void ath_tx_aggr_stop(struct ath_softc *
ath_txq_lock(sc, txq);
txtid->active = false;
ath_tx_flush_tid(sc, txtid);
- ath_tx_tid_change_state(sc, txtid);
ath_txq_unlock_complete(sc, txq);
}

@@ -1550,14 +1561,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_tid *tid;
struct ath_txq *txq;
- bool buffered;
int tidno;

ath_dbg(common, XMIT, "%s called\n", __func__);

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ATH_AN_2_TID(an, tidno);
txq = tid->txq;

ath_txq_lock(sc, txq);
@@ -1567,13 +1576,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
continue;
}

- buffered = ath_tid_has_buffered(tid);
+ if (!skb_queue_empty(&tid->retry_q))
+ ieee80211_sta_set_buffered(sta, tid->tidno, true);

list_del_init(&tid->list);

ath_txq_unlock(sc, txq);
-
- ieee80211_sta_set_buffered(sta, tidno, buffered);
}
}

@@ -1586,19 +1594,16 @@ void ath_tx_aggr_wakeup(struct ath_softc

ath_dbg(common, XMIT, "%s called\n", __func__);

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ATH_AN_2_TID(an, tidno);
txq = tid->txq;

ath_txq_lock(sc, txq);
tid->clear_ps_filter = true;
-
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
-
ath_txq_unlock_complete(sc, txq);
}
}
@@ -1621,11 +1626,6 @@ void ath_tx_aggr_resume(struct ath_softc

tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

- if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(sc, txq, tid);
- ath_txq_schedule(sc, txq);
- }
-
ath_txq_unlock_complete(sc, txq);
}

@@ -1641,7 +1641,6 @@ void ath9k_release_buffered_frames(struc
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
- struct sk_buff_head *tid_q;
int sent = 0;
int i;

@@ -1656,11 +1655,10 @@ void ath9k_release_buffered_frames(struc

ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
if (!bf)
break;

- __skb_unlink(bf->bf_mpdu, tid_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf, true);
if (bf_isampdu(bf)) {
@@ -1675,7 +1673,7 @@ void ath9k_release_buffered_frames(struc
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);

- if (an->sta && !ath_tid_has_buffered(tid))
+ if (an->sta && skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->txq);
@@ -1902,13 +1900,7 @@ bool ath_drain_all_txq(struct ath_softc
if (!ATH_TXQ_SETUP(sc, i))
continue;

- /*
- * The caller will resume queues with ieee80211_wake_queues.
- * Mark the queue as not stopped to prevent ath_tx_complete
- * from waking the queue too early.
- */
txq = &sc->tx.txq[i];
- txq->stopped = false;
ath_draintxq(sc, txq);
}

@@ -2308,15 +2300,12 @@ int ath_tx_start(struct ieee80211_hw *hw
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
- bool queue, ps_resp;
+ bool ps_resp;
int q, ret;

if (vif)
avp = (void *)vif->drv_priv;

- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txctl->force_channel = true;
-
ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);

ret = ath_tx_prepare(hw, skb, txctl);
@@ -2331,63 +2320,13 @@ int ath_tx_start(struct ieee80211_hw *hw

q = skb_get_queue_mapping(skb);

+ if (ps_resp)
+ txq = sc->tx.uapsdq;
+
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q]) {
fi->txq = q;
- if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
- !txq->stopped) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_stop_queue(sc->hw, info->hw_queue);
- else
- ieee80211_stop_queue(sc->hw, q);
- txq->stopped = true;
- }
- }
-
- queue = ieee80211_is_data_present(hdr->frame_control);
-
- /* If chanctx, queue all null frames while NOA could be there */
- if (ath9k_is_chanctx_enabled() &&
- ieee80211_is_nullfunc(hdr->frame_control) &&
- !txctl->force_channel)
- queue = true;
-
- /* Force queueing of all frames that belong to a virtual interface on
- * a different channel context, to ensure that they are sent on the
- * correct channel.
- */
- if (((avp && avp->chanctx != sc->cur_chan) ||
- sc->cur_chan->stopped) && !txctl->force_channel) {
- if (!txctl->an)
- txctl->an = &avp->mcast_node;
- queue = true;
- ps_resp = false;
- }
-
- if (txctl->an && queue)
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
- if (ps_resp) {
- ath_txq_unlock(sc, txq);
- txq = sc->tx.uapsdq;
- ath_txq_lock(sc, txq);
- } else if (txctl->an && queue) {
- WARN_ON(tid->txq != txctl->txq);
-
- if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- tid->clear_ps_filter = true;
-
- /*
- * Add this frame to software queue for scheduling later
- * for aggregation.
- */
- TX_STAT_INC(txq->axq_qnum, a_queued_sw);
- __skb_queue_tail(&tid->buf_q, skb);
- if (!txctl->an->sleeping)
- ath_tx_queue_tid(sc, txq, tid);
-
- ath_txq_schedule(sc, txq);
- goto out;
+ ++txq->pending_frames;
}

bf = ath_tx_setup_buffer(sc, txq, tid, skb);
@@ -2871,9 +2810,8 @@ void ath_tx_node_init(struct ath_softc *
struct ath_atx_tid *tid;
int tidno, acno;

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS;
- tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ATH_AN_2_TID(an, tidno);
tid->an = an;
tid->tidno = tidno;
tid->seq_start = tid->seq_next = 0;
@@ -2881,11 +2819,14 @@ void ath_tx_node_init(struct ath_softc *
tid->baw_head = tid->baw_tail = 0;
tid->active = false;
tid->clear_ps_filter = true;
- __skb_queue_head_init(&tid->buf_q);
+ tid->has_queued = false;
__skb_queue_head_init(&tid->retry_q);
INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
tid->txq = sc->tx.txq_map[acno];
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
}
}

@@ -2895,9 +2836,8 @@ void ath_tx_node_cleanup(struct ath_soft
struct ath_atx_tid *tid;
struct ath_txq *txq;
int tidno;

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ATH_AN_2_TID(an, tidno);
txq = tid->txq;

ath_txq_lock(sc, txq);
@@ -2909,6 +2849,9 @@ void ath_tx_node_cleanup(struct ath_soft
tid->active = false;

ath_txq_unlock(sc, txq);
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
}
}