package/kernel/mac80211/patches/337-ath9k-Switch-to-using-mac80211-intermediate-software.patch
From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
Date: Wed, 6 Jul 2016 21:34:17 +0200
Subject: [PATCH] ath9k: Switch to using mac80211 intermediate software queues.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This switches ath9k over to using the mac80211 intermediate software
queueing mechanism for data packets. It removes the queueing inside the
driver, except for the retry queue, and instead pulls from mac80211 when
a packet is needed. The retry queue is used to store a packet that was
pulled but can't be sent immediately.

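As an illustration, the resulting dequeue order is retry queue first,
then the mac80211 txq; roughly (mirroring ath_tid_dequeue() below, where
ath_tid_pull() wraps ieee80211_tx_dequeue()):

	skb = __skb_dequeue(&tid->retry_q);
	if (!skb)
		skb = ath_tid_pull(tid);
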
The old code path in ath_tx_start that would queue packets has been
removed completely, as have the qlen limit tunables (since there's no
longer a queue in the driver to limit).

Based on Tim's original patch set, but reworked quite thoroughly.

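The driver-side pull model this switches to can be sketched as below;
hw_has_room() and hw_transmit() are made-up placeholders for the
driver's own scheduling, ieee80211_tx_dequeue() is the real mac80211
call, and the actual entry points added here are ath9k_wake_tx_queue()
and ath_tid_pull():

	/* mac80211 invokes the wake_tx_queue op when a previously empty
	 * txq gets its first frame; the driver then pulls frames at its
	 * own pace instead of buffering them internally. */
	static void drv_wake_tx_queue(struct ieee80211_hw *hw,
				      struct ieee80211_txq *txq)
	{
		struct sk_buff *skb;

		while (hw_has_room(hw) &&
		       (skb = ieee80211_tx_dequeue(hw, txq)))
			hw_transmit(hw, skb);
	}
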
Cc: Tim Shepard <shep@alum.mit.edu>
Cc: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---

--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -91,7 +91,6 @@ int ath_descdma_setup(struct ath_softc *
 #define ATH_RXBUF 512
 #define ATH_TXBUF 512
 #define ATH_TXBUF_RESERVE 5
-#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
 #define ATH_TXMAXTRY 13
 #define ATH_MAX_SW_RETRIES 30

@@ -145,7 +144,7 @@ int ath_descdma_setup(struct ath_softc *
 #define BAW_WITHIN(_start, _bawsz, _seqno) \
 ((((_seqno) - (_start)) & 4095) < (_bawsz))

-#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
+#define ATH_AN_2_TID(_an, _tidno) ath_node_to_tid(_an, _tidno)

 #define IS_HT_RATE(rate) (rate & 0x80)
 #define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
@@ -164,7 +163,6 @@ struct ath_txq {
 spinlock_t axq_lock;
 u32 axq_depth;
 u32 axq_ampdu_depth;
- bool stopped;
 bool axq_tx_inprogress;
 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
 u8 txq_headidx;
@@ -232,7 +230,6 @@ struct ath_buf {

 struct ath_atx_tid {
 struct list_head list;
- struct sk_buff_head buf_q;
 struct sk_buff_head retry_q;
 struct ath_node *an;
 struct ath_txq *txq;
@@ -247,13 +244,13 @@ struct ath_atx_tid {
 s8 bar_index;
 bool active;
 bool clear_ps_filter;
+ bool has_queued;
 };

 struct ath_node {
 struct ath_softc *sc;
 struct ieee80211_sta *sta; /* station struct we're part of */
 struct ieee80211_vif *vif; /* interface with which we're associated */
- struct ath_atx_tid tid[IEEE80211_NUM_TIDS];

 u16 maxampdu;
 u8 mpdudensity;
@@ -276,7 +273,6 @@ struct ath_tx_control {
 struct ath_node *an;
 struct ieee80211_sta *sta;
 u8 paprd;
- bool force_channel;
 };


@@ -293,7 +289,6 @@ struct ath_tx {
 struct ath_descdma txdma;
 struct ath_txq *txq_map[IEEE80211_NUM_ACS];
 struct ath_txq *uapsdq;
- u32 txq_max_pending[IEEE80211_NUM_ACS];
 u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
 };

@@ -421,6 +416,22 @@ struct ath_offchannel {
 int duration;
 };

+static inline struct ath_atx_tid *
+ath_node_to_tid(struct ath_node *an, u8 tidno)
+{
+ struct ieee80211_sta *sta = an->sta;
+ struct ieee80211_vif *vif = an->vif;
+ struct ieee80211_txq *txq;
+
+ BUG_ON(!vif);
+ if (sta)
+ txq = sta->txq[tidno % ARRAY_SIZE(sta->txq)];
+ else
+ txq = vif->txq;
+
+ return (struct ath_atx_tid *) txq->drv_priv;
+}
+
 #define case_rtn_string(val) case val: return #val

 #define ath_for_each_chanctx(_sc, _ctx) \
@@ -575,7 +586,6 @@ void ath_tx_edma_tasklet(struct ath_soft
 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 u16 tid, u16 *ssn);
 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
-void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);

 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -585,6 +595,7 @@ void ath9k_release_buffered_frames(struc
 u16 tids, int nframes,
 enum ieee80211_frame_release_type reason,
 bool more_data);
+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);

 /********/
 /* VIFs */
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1007,7 +1007,6 @@ static void ath_scan_send_probe(struct a
 goto error;

 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- txctl.force_channel = true;
 if (ath_tx_start(sc->hw, skb, &txctl))
 goto error;

@@ -1130,7 +1129,6 @@ ath_chanctx_send_vif_ps_frame(struct ath
 memset(&txctl, 0, sizeof(txctl));
 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
 txctl.sta = sta;
- txctl.force_channel = true;
 if (ath_tx_start(sc->hw, skb, &txctl)) {
 ieee80211_free_txskb(sc->hw, skb);
 return false;
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -600,7 +600,6 @@ static int read_file_xmit(struct seq_fil
 PR("MPDUs XRetried: ", xretries);
 PR("Aggregates: ", a_aggr);
 PR("AMPDUs Queued HW:", a_queued_hw);
- PR("AMPDUs Queued SW:", a_queued_sw);
 PR("AMPDUs Completed:", a_completed);
 PR("AMPDUs Retried: ", a_retries);
 PR("AMPDUs XRetried: ", a_xretries);
@@ -629,8 +628,7 @@ static void print_queue(struct ath_softc
 seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
 seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
 seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
- seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
- seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
+ seq_printf(file, "%s: %3d\n", "pending", txq->pending_frames);

 ath_txq_unlock(sc, txq);
 }
@@ -1208,7 +1206,6 @@ static const char ath9k_gstrings_stats[]
 AMKSTR(d_tx_mpdu_xretries),
 AMKSTR(d_tx_aggregates),
 AMKSTR(d_tx_ampdus_queued_hw),
- AMKSTR(d_tx_ampdus_queued_sw),
 AMKSTR(d_tx_ampdus_completed),
 AMKSTR(d_tx_ampdu_retries),
 AMKSTR(d_tx_ampdu_xretries),
@@ -1288,7 +1285,6 @@ void ath9k_get_et_stats(struct ieee80211
 AWDATA(xretries);
 AWDATA(a_aggr);
 AWDATA(a_queued_hw);
- AWDATA(a_queued_sw);
 AWDATA(a_completed);
 AWDATA(a_retries);
 AWDATA(a_xretries);
@@ -1346,14 +1342,6 @@ int ath9k_init_debug(struct ath_hw *ah)
 read_file_xmit);
 debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
 read_file_queues);
- debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
- debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
- debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
- debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
 debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
 read_file_misc);
 debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -147,7 +147,6 @@ struct ath_interrupt_stats {
 * @completed: Total MPDUs (non-aggr) completed
 * @a_aggr: Total no. of aggregates queued
 * @a_queued_hw: Total AMPDUs queued to hardware
- * @a_queued_sw: Total AMPDUs queued to software queues
 * @a_completed: Total AMPDUs completed
 * @a_retries: No. of AMPDUs retried (SW)
 * @a_xretries: No. of AMPDUs dropped due to xretries
@@ -174,7 +173,6 @@ struct ath_tx_stats {
 u32 xretries;
 u32 a_aggr;
 u32 a_queued_hw;
- u32 a_queued_sw;
 u32 a_completed;
 u32 a_retries;
 u32 a_xretries;
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -52,8 +52,8 @@ static ssize_t read_file_node_aggr(struc
 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
 txq = tid->txq;
 ath_txq_lock(sc, txq);
 if (tid->active) {
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -358,7 +358,6 @@ static int ath9k_init_queues(struct ath_
 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
 sc->tx.txq_map[i]->mac80211_qnum = i;
- sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
 }
 return 0;
 }
@@ -873,6 +872,7 @@ static void ath9k_set_hw_capab(struct at
 hw->max_rate_tries = 10;
 hw->sta_data_size = sizeof(struct ath_node);
 hw->vif_data_size = sizeof(struct ath_vif);
+ hw->txq_data_size = sizeof(struct ath_atx_tid);
 hw->extra_tx_headroom = 4;

 hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1897,9 +1897,11 @@ static int ath9k_ampdu_action(struct iee
 bool flush = false;
 int ret = 0;
 struct ieee80211_sta *sta = params->sta;
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
 enum ieee80211_ampdu_mlme_action action = params->action;
 u16 tid = params->tid;
 u16 *ssn = &params->ssn;
+ struct ath_atx_tid *atid;

 mutex_lock(&sc->mutex);

@@ -1932,9 +1934,9 @@ static int ath9k_ampdu_action(struct iee
 ath9k_ps_restore(sc);
 break;
 case IEEE80211_AMPDU_TX_OPERATIONAL:
- ath9k_ps_wakeup(sc);
- ath_tx_aggr_resume(sc, sta, tid);
- ath9k_ps_restore(sc);
+ atid = ath_node_to_tid(an, tid);
+ atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
+ sta->ht_cap.ampdu_factor;
 break;
 default:
 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
@@ -2696,4 +2698,5 @@ struct ieee80211_ops ath9k_ops = {
 .sw_scan_start = ath9k_sw_scan_start,
 .sw_scan_complete = ath9k_sw_scan_complete,
 .get_txpower = ath9k_get_txpower,
+ .wake_tx_queue = ath9k_wake_tx_queue,
 };
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -67,6 +67,8 @@ static struct ath_buf *ath_tx_setup_buff
 struct ath_txq *txq,
 struct ath_atx_tid *tid,
 struct sk_buff *skb);
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl);

 enum {
 MCS_HT20,
@@ -137,6 +139,26 @@ static void ath_tx_queue_tid(struct ath_
 list_add_tail(&tid->list, list);
 }

+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
+ struct ath_txq *txq = tid->txq;
+
+ ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
+ queue->sta ? queue->sta->addr : queue->vif->addr,
+ tid->tidno);
+
+ ath_txq_lock(sc, txq);
+
+ tid->has_queued = true;
+ ath_tx_queue_tid(sc, txq, tid);
+ ath_txq_schedule(sc, txq);
+
+ ath_txq_unlock(sc, txq);
+}
+
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
 {
 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -179,7 +201,6 @@ static void ath_set_rates(struct ieee802
 static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
 struct sk_buff *skb)
 {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 struct ath_frame_info *fi = get_frame_info(skb);
 int q = fi->txq;

@@ -190,14 +211,6 @@ static void ath_txq_skb_done(struct ath_
 if (WARN_ON(--txq->pending_frames < 0))
 txq->pending_frames = 0;

- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_wake_queue(sc->hw, info->hw_queue);
- else
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
 }

 static struct ath_atx_tid *
@@ -207,9 +220,48 @@ ath_get_skb_tid(struct ath_softc *sc, st
 return ATH_AN_2_TID(an, tidno);
 }

+static struct sk_buff *
+ath_tid_pull(struct ath_atx_tid *tid)
+{
+ struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv);
+ struct ath_softc *sc = tid->an->sc;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_tx_control txctl = {
+ .txq = tid->txq,
+ .sta = tid->an->sta,
+ };
+ struct sk_buff *skb;
+ struct ath_frame_info *fi;
+ int q;
+
+ if (!tid->has_queued)
+ return NULL;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ tid->has_queued = false;
+ return NULL;
+ }
+
+ if (ath_tx_prepare(hw, skb, &txctl)) {
+ ieee80211_free_txskb(hw, skb);
+ return NULL;
+ }
+
+ q = skb_get_queue_mapping(skb);
+ if (tid->txq == sc->tx.txq_map[q]) {
+ fi = get_frame_info(skb);
+ fi->txq = q;
+ ++tid->txq->pending_frames;
+ }
+
+ return skb;
+ }
+
+
 static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
 {
- return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+ return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
 }

 static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
@@ -218,46 +270,11 @@ static struct sk_buff *ath_tid_dequeue(s

 skb = __skb_dequeue(&tid->retry_q);
 if (!skb)
- skb = __skb_dequeue(&tid->buf_q);
+ skb = ath_tid_pull(tid);

 return skb;
 }

-/*
- * ath_tx_tid_change_state:
- * - clears a-mpdu flag of previous session
- * - force sequence number allocation to fix next BlockAck Window
- */
-static void
-ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_txq *txq = tid->txq;
- struct ieee80211_tx_info *tx_info;
- struct sk_buff *skb, *tskb;
- struct ath_buf *bf;
- struct ath_frame_info *fi;
-
- skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
- fi = get_frame_info(skb);
- bf = fi->bf;
-
- tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
- if (bf)
- continue;
-
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
- ath_txq_skb_done(sc, txq, skb);
- ieee80211_free_txskb(sc->hw, skb);
- continue;
- }
- }
-
-}
-
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
 struct ath_txq *txq = tid->txq;
@@ -898,20 +915,16 @@ static int ath_compute_num_delims(struct

 static struct ath_buf *
 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, struct sk_buff_head **q)
+ struct ath_atx_tid *tid)
 {
 struct ieee80211_tx_info *tx_info;
 struct ath_frame_info *fi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *first_skb = NULL;
 struct ath_buf *bf;
 u16 seqno;

 while (1) {
- *q = &tid->retry_q;
- if (skb_queue_empty(*q))
- *q = &tid->buf_q;
-
- skb = skb_peek(*q);
+ skb = ath_tid_dequeue(tid);
 if (!skb)
 break;

@@ -923,7 +936,6 @@ ath_tx_get_tid_subframe(struct ath_softc
 bf->bf_state.stale = false;

 if (!bf) {
- __skb_unlink(skb, *q);
 ath_txq_skb_done(sc, txq, skb);
 ieee80211_free_txskb(sc->hw, skb);
 continue;
@@ -952,8 +964,19 @@ ath_tx_get_tid_subframe(struct ath_softc
 seqno = bf->bf_state.seqno;

 /* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ __skb_queue_tail(&tid->retry_q, skb);
+
+ /* If there are other skbs in the retry q, they are
+ * probably within the BAW, so loop immediately to get
+ * one of them. Otherwise the queue can get stuck. */
+ if (!skb_queue_is_first(&tid->retry_q, skb) && skb != first_skb) {
+ if(!first_skb) /* infinite loop prevention */
+ first_skb = skb;
+ continue;
+ }
 break;
+ }

 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
 struct ath_tx_status ts = {};
@@ -961,7 +984,6 @@ ath_tx_get_tid_subframe(struct ath_softc

 INIT_LIST_HEAD(&bf_head);
 list_add(&bf->list, &bf_head);
- __skb_unlink(skb, *q);
 ath_tx_update_baw(sc, tid, seqno);
 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
 continue;
@@ -973,11 +995,10 @@ ath_tx_get_tid_subframe(struct ath_softc
 return NULL;
 }

-static bool
+static int
 ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
 struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q,
- int *aggr_len)
+ struct ath_buf *bf_first)
 {
 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
 struct ath_buf *bf = bf_first, *bf_prev = NULL;
@@ -987,12 +1008,13 @@ ath_tx_form_aggr(struct ath_softc *sc, s
 struct ieee80211_tx_info *tx_info;
 struct ath_frame_info *fi;
 struct sk_buff *skb;
- bool closed = false;
+

 bf = bf_first;
 aggr_limit = ath_lookup_rate(sc, bf, tid);

- do {
+ while (bf)
+ {
 skb = bf->bf_mpdu;
 fi = get_frame_info(skb);

@@ -1001,12 +1023,12 @@ ath_tx_form_aggr(struct ath_softc *sc, s
 if (nframes) {
 if (aggr_limit < al + bpad + al_delta ||
 ath_lookup_legacy(bf) || nframes >= h_baw)
- break;
+ goto stop;

 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
 if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
 !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
- break;
+ goto stop;
 }

 /* add padding for previous frame to aggregation length */
@@ -1028,20 +1050,18 @@ ath_tx_form_aggr(struct ath_softc *sc, s
 ath_tx_addto_baw(sc, tid, bf);
 bf->bf_state.ndelim = ndelim;

- __skb_unlink(skb, tid_q);
 list_add_tail(&bf->list, bf_q);
 if (bf_prev)
 bf_prev->bf_next = bf;

 bf_prev = bf;

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
- if (!bf) {
- closed = true;
- break;
- }
- } while (ath_tid_has_buffered(tid));
-
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ }
+ goto finish;
+stop:
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
+finish:
 bf = bf_first;
 bf->bf_lastbf = bf_prev;

@@ -1052,9 +1072,7 @@ ath_tx_form_aggr(struct ath_softc *sc, s
 TX_STAT_INC(txq->axq_qnum, a_aggr);
 }

- *aggr_len = al;
-
- return closed;
+ return al;
 #undef PADBYTES
 }

@@ -1431,18 +1449,15 @@ static void ath_tx_fill_desc(struct ath_
 static void
 ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
 struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q)
+ struct ath_buf *bf_first)
 {
 struct ath_buf *bf = bf_first, *bf_prev = NULL;
- struct sk_buff *skb;
 int nframes = 0;

 do {
 struct ieee80211_tx_info *tx_info;
- skb = bf->bf_mpdu;

 nframes++;
- __skb_unlink(skb, tid_q);
 list_add_tail(&bf->list, bf_q);
 if (bf_prev)
 bf_prev->bf_next = bf;
@@ -1451,13 +1466,15 @@ ath_tx_form_burst(struct ath_softc *sc,
 if (nframes >= 2)
 break;

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
 if (!bf)
 break;

 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
 break;
+ }

 ath_set_rates(tid->an->vif, tid->an->sta, bf, false);
 } while (1);
@@ -1468,34 +1485,33 @@ static bool ath_tx_sched_aggr(struct ath
 {
 struct ath_buf *bf;
 struct ieee80211_tx_info *tx_info;
- struct sk_buff_head *tid_q;
 struct list_head bf_q;
 int aggr_len = 0;
- bool aggr, last = true;
+ bool aggr;

 if (!ath_tid_has_buffered(tid))
 return false;

 INIT_LIST_HEAD(&bf_q);

- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
 if (!bf)
 return false;

 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
 aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
 if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
- (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
 *stop = true;
 return false;
 }

 ath_set_rates(tid->an->vif, tid->an->sta, bf, false);
 if (aggr)
- last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
- tid_q, &aggr_len);
+ aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
 else
- ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf);

 if (list_empty(&bf_q))
 return false;
@@ -1538,9 +1554,6 @@ int ath_tx_aggr_start(struct ath_softc *
 an->mpdudensity = density;
 }

- /* force sequence number allocation for pending frames */
- ath_tx_tid_change_state(sc, txtid);
-
 txtid->active = true;
 *ssn = txtid->seq_start = txtid->seq_next;
 txtid->bar_index = -1;
@@ -1565,7 +1578,6 @@ void ath_tx_aggr_stop(struct ath_softc *
 ath_txq_lock(sc, txq);
 txtid->active = false;
 ath_tx_flush_tid(sc, txtid);
- ath_tx_tid_change_state(sc, txtid);
 ath_txq_unlock_complete(sc, txq);
 }

@@ -1575,14 +1587,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 struct ath_atx_tid *tid;
 struct ath_txq *txq;
- bool buffered;
 int tidno;

 ath_dbg(common, XMIT, "%s called\n", __func__);

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
 txq = tid->txq;

 ath_txq_lock(sc, txq);
@@ -1592,13 +1602,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
 continue;
 }

- buffered = ath_tid_has_buffered(tid);
+ if (!skb_queue_empty(&tid->retry_q))
+ ieee80211_sta_set_buffered(sta, tid->tidno, true);

 list_del_init(&tid->list);

 ath_txq_unlock(sc, txq);
-
- ieee80211_sta_set_buffered(sta, tidno, buffered);
 }
 }

@@ -1611,49 +1620,20 @@ void ath_tx_aggr_wakeup(struct ath_softc

 ath_dbg(common, XMIT, "%s called\n", __func__);

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
 txq = tid->txq;

 ath_txq_lock(sc, txq);
 tid->clear_ps_filter = true;
-
 if (ath_tid_has_buffered(tid)) {
 ath_tx_queue_tid(sc, txq, tid);
 ath_txq_schedule(sc, txq);
 }
-
 ath_txq_unlock_complete(sc, txq);
 }
 }

-void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
- u16 tidno)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_atx_tid *tid;
- struct ath_node *an;
- struct ath_txq *txq;
-
- ath_dbg(common, XMIT, "%s called\n", __func__);
-
- an = (struct ath_node *)sta->drv_priv;
- tid = ATH_AN_2_TID(an, tidno);
- txq = tid->txq;
-
- ath_txq_lock(sc, txq);
-
- tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-
- if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(sc, txq, tid);
- ath_txq_schedule(sc, txq);
- }
-
- ath_txq_unlock_complete(sc, txq);
-}
-
 void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 struct ieee80211_sta *sta,
 u16 tids, int nframes,
@@ -1666,7 +1646,6 @@ void ath9k_release_buffered_frames(struc
 struct ieee80211_tx_info *info;
 struct list_head bf_q;
 struct ath_buf *bf_tail = NULL, *bf;
- struct sk_buff_head *tid_q;
 int sent = 0;
 int i;

@@ -1681,11 +1660,10 @@ void ath9k_release_buffered_frames(struc

 ath_txq_lock(sc, tid->txq);
 while (nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
 if (!bf)
 break;

- __skb_unlink(bf->bf_mpdu, tid_q);
 list_add_tail(&bf->list, &bf_q);
 ath_set_rates(tid->an->vif, tid->an->sta, bf, true);
 if (bf_isampdu(bf)) {
@@ -1700,7 +1678,7 @@ void ath9k_release_buffered_frames(struc
 sent++;
 TX_STAT_INC(txq->axq_qnum, a_queued_hw);

- if (an->sta && !ath_tid_has_buffered(tid))
+ if (an->sta && skb_queue_empty(&tid->retry_q))
 ieee80211_sta_set_buffered(an->sta, i, false);
 }
 ath_txq_unlock_complete(sc, tid->txq);
@@ -1929,13 +1907,7 @@ bool ath_drain_all_txq(struct ath_softc
 if (!ATH_TXQ_SETUP(sc, i))
 continue;

- /*
- * The caller will resume queues with ieee80211_wake_queues.
- * Mark the queue as not stopped to prevent ath_tx_complete
- * from waking the queue too early.
- */
 txq = &sc->tx.txq[i];
- txq->stopped = false;
 ath_draintxq(sc, txq);
 }

@@ -2334,16 +2306,14 @@ int ath_tx_start(struct ieee80211_hw *hw
 struct ath_softc *sc = hw->priv;
 struct ath_txq *txq = txctl->txq;
 struct ath_atx_tid *tid = NULL;
+ struct ath_node *an = NULL;
 struct ath_buf *bf;
- bool queue, ps_resp;
+ bool ps_resp;
 int q, ret;

 if (vif)
 avp = (void *)vif->drv_priv;

- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txctl->force_channel = true;
-
 ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);

 ret = ath_tx_prepare(hw, skb, txctl);
@@ -2358,63 +2328,18 @@ int ath_tx_start(struct ieee80211_hw *hw

 q = skb_get_queue_mapping(skb);

- ath_txq_lock(sc, txq);
- if (txq == sc->tx.txq_map[q]) {
- fi->txq = q;
- if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
- !txq->stopped) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_stop_queue(sc->hw, info->hw_queue);
- else
- ieee80211_stop_queue(sc->hw, q);
- txq->stopped = true;
- }
- }
-
- queue = ieee80211_is_data_present(hdr->frame_control);
-
- /* If chanctx, queue all null frames while NOA could be there */
- if (ath9k_is_chanctx_enabled() &&
- ieee80211_is_nullfunc(hdr->frame_control) &&
- !txctl->force_channel)
- queue = true;
-
- /* Force queueing of all frames that belong to a virtual interface on
- * a different channel context, to ensure that they are sent on the
- * correct channel.
- */
- if (((avp && avp->chanctx != sc->cur_chan) ||
- sc->cur_chan->stopped) && !txctl->force_channel) {
- if (!txctl->an)
- txctl->an = &avp->mcast_node;
- queue = true;
- ps_resp = false;
- }
-
- if (txctl->an && queue)
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
- if (ps_resp) {
- ath_txq_unlock(sc, txq);
+ if (ps_resp)
 txq = sc->tx.uapsdq;
- ath_txq_lock(sc, txq);
- } else if (txctl->an && queue) {
- WARN_ON(tid->txq != txctl->txq);
-
- if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- tid->clear_ps_filter = true;

- /*
- * Add this frame to software queue for scheduling later
- * for aggregation.
- */
- TX_STAT_INC(txq->axq_qnum, a_queued_sw);
- __skb_queue_tail(&tid->buf_q, skb);
- if (!txctl->an->sleeping)
- ath_tx_queue_tid(sc, txq, tid);
+ if (txctl->sta) {
+ an = (struct ath_node *) sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, skb);
+ }

- ath_txq_schedule(sc, txq);
- goto out;
+ ath_txq_lock(sc, txq);
+ if (txq == sc->tx.txq_map[q]) {
+ fi->txq = q;
+ ++txq->pending_frames;
 }

 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
@@ -2907,9 +2832,8 @@ void ath_tx_node_init(struct ath_softc *
 struct ath_atx_tid *tid;
 int tidno, acno;

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS;
- tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
 tid->an = an;
 tid->tidno = tidno;
 tid->seq_start = tid->seq_next = 0;
@@ -2917,11 +2841,14 @@ void ath_tx_node_init(struct ath_softc *
 tid->baw_head = tid->baw_tail = 0;
 tid->active = false;
 tid->clear_ps_filter = true;
- __skb_queue_head_init(&tid->buf_q);
+ tid->has_queued = false;
 __skb_queue_head_init(&tid->retry_q);
 INIT_LIST_HEAD(&tid->list);
 acno = TID_TO_WME_AC(tidno);
 tid->txq = sc->tx.txq_map[acno];
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
 }
 }

@@ -2931,9 +2858,8 @@ void ath_tx_node_cleanup(struct ath_soft
 struct ath_atx_tid *tid;
 struct ath_txq *txq;
 int tidno;

- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
 txq = tid->txq;

 ath_txq_lock(sc, txq);
@@ -2945,6 +2871,9 @@ void ath_tx_node_cleanup(struct ath_soft
 tid->active = false;

 ath_txq_unlock(sc, txq);
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
 }
 }