ath9k: fix an rx descriptor processing race condition
[openwrt/svn-archive/archive.git] / package / mac80211 / patches / 560-ath9k_tx_queueing_rework.patch
1 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
2 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
3 @@ -133,7 +133,8 @@ int ath_descdma_setup(struct ath_softc *
4 #define ATH_AGGR_ENCRYPTDELIM 10
5 /* minimum h/w qdepth to be sustained to maximize aggregation */
6 #define ATH_AGGR_MIN_QDEPTH 2
7 -#define ATH_AMPDU_SUBFRAME_DEFAULT 32
8 +/* minimum h/w qdepth for non-aggregated traffic */
9 +#define ATH_NON_AGGR_MIN_QDEPTH 8
10
11 #define IEEE80211_SEQ_SEQ_SHIFT 4
12 #define IEEE80211_SEQ_MAX 4096
13 @@ -170,12 +171,6 @@ int ath_descdma_setup(struct ath_softc *
14
15 #define ATH_TX_COMPLETE_POLL_INT 1000
16
17 -enum ATH_AGGR_STATUS {
18 - ATH_AGGR_DONE,
19 - ATH_AGGR_BAW_CLOSED,
20 - ATH_AGGR_LIMITED,
21 -};
22 -
23 #define ATH_TXFIFO_DEPTH 8
24 struct ath_txq {
25 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
26 @@ -208,8 +203,9 @@ struct ath_frame_info {
27 int framelen;
28 enum ath9k_key_type keytype;
29 u8 keyix;
30 - u8 retries;
31 u8 rtscts_rate;
32 + u8 retries : 7;
33 + u8 baw_tracked : 1;
34 };
35
36 struct ath_buf_state {
37 @@ -237,6 +233,7 @@ struct ath_buf {
38 struct ath_atx_tid {
39 struct list_head list;
40 struct sk_buff_head buf_q;
41 + struct sk_buff_head retry_q;
42 struct ath_node *an;
43 struct ath_atx_ac *ac;
44 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
45 @@ -264,6 +261,7 @@ struct ath_node {
46 u8 mpdudensity;
47
48 bool sleeping;
49 + bool no_ps_filter;
50
51 #if defined(CPTCFG_MAC80211_DEBUGFS) && defined(CPTCFG_ATH9K_DEBUGFS)
52 struct dentry *node_stat;
53 @@ -364,6 +362,7 @@ void ath9k_release_buffered_frames(struc
54 /********/
55
56 struct ath_vif {
57 + struct ath_node mcast_node;
58 int av_bslot;
59 bool primary_sta_vif;
60 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
61 --- a/drivers/net/wireless/ath/ath9k/debug.c
62 +++ b/drivers/net/wireless/ath/ath9k/debug.c
63 @@ -607,6 +607,28 @@ static ssize_t read_file_xmit(struct fil
64 return retval;
65 }
66
67 +static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
68 + char *buf, ssize_t size)
69 +{
70 + ssize_t len = 0;
71 +
72 + ath_txq_lock(sc, txq);
73 +
74 + len += snprintf(buf + len, size - len, "%s: %d ",
75 + "qnum", txq->axq_qnum);
76 + len += snprintf(buf + len, size - len, "%s: %2d ",
77 + "qdepth", txq->axq_depth);
78 + len += snprintf(buf + len, size - len, "%s: %2d ",
79 + "ampdu-depth", txq->axq_ampdu_depth);
80 + len += snprintf(buf + len, size - len, "%s: %3d ",
81 + "pending", txq->pending_frames);
82 + len += snprintf(buf + len, size - len, "%s: %d\n",
83 + "stopped", txq->stopped);
84 +
85 + ath_txq_unlock(sc, txq);
86 + return len;
87 +}
88 +
89 static ssize_t read_file_queues(struct file *file, char __user *user_buf,
90 size_t count, loff_t *ppos)
91 {
92 @@ -624,24 +646,13 @@ static ssize_t read_file_queues(struct f
93
94 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
95 txq = sc->tx.txq_map[i];
96 - len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
97 -
98 - ath_txq_lock(sc, txq);
99 -
100 - len += snprintf(buf + len, size - len, "%s: %d ",
101 - "qnum", txq->axq_qnum);
102 - len += snprintf(buf + len, size - len, "%s: %2d ",
103 - "qdepth", txq->axq_depth);
104 - len += snprintf(buf + len, size - len, "%s: %2d ",
105 - "ampdu-depth", txq->axq_ampdu_depth);
106 - len += snprintf(buf + len, size - len, "%s: %3d ",
107 - "pending", txq->pending_frames);
108 - len += snprintf(buf + len, size - len, "%s: %d\n",
109 - "stopped", txq->stopped);
110 -
111 - ath_txq_unlock(sc, txq);
112 + len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
113 + len += print_queue(sc, txq, buf + len, size - len);
114 }
115
116 + len += snprintf(buf + len, size - len, "(CAB): ");
117 + len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
118 +
119 if (len > size)
120 len = size;
121
122 --- a/drivers/net/wireless/ath/ath9k/main.c
123 +++ b/drivers/net/wireless/ath/ath9k/main.c
124 @@ -971,6 +971,8 @@ static int ath9k_add_interface(struct ie
125 struct ath_softc *sc = hw->priv;
126 struct ath_hw *ah = sc->sc_ah;
127 struct ath_common *common = ath9k_hw_common(ah);
128 + struct ath_vif *avp = (void *)vif->drv_priv;
129 + struct ath_node *an = &avp->mcast_node;
130
131 mutex_lock(&sc->mutex);
132
133 @@ -984,6 +986,12 @@ static int ath9k_add_interface(struct ie
134 if (ath9k_uses_beacons(vif->type))
135 ath9k_beacon_assign_slot(sc, vif);
136
137 + an->sc = sc;
138 + an->sta = NULL;
139 + an->vif = vif;
140 + an->no_ps_filter = true;
141 + ath_tx_node_init(sc, an);
142 +
143 mutex_unlock(&sc->mutex);
144 return 0;
145 }
146 @@ -1021,6 +1029,7 @@ static void ath9k_remove_interface(struc
147 {
148 struct ath_softc *sc = hw->priv;
149 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
150 + struct ath_vif *avp = (void *)vif->drv_priv;
151
152 ath_dbg(common, CONFIG, "Detach Interface\n");
153
154 @@ -1035,6 +1044,8 @@ static void ath9k_remove_interface(struc
155 ath9k_calculate_summary_state(hw, NULL);
156 ath9k_ps_restore(sc);
157
158 + ath_tx_node_cleanup(sc, &avp->mcast_node);
159 +
160 mutex_unlock(&sc->mutex);
161 }
162
163 @@ -1403,9 +1414,6 @@ static void ath9k_sta_notify(struct ieee
164 struct ath_softc *sc = hw->priv;
165 struct ath_node *an = (struct ath_node *) sta->drv_priv;
166
167 - if (!sta->ht_cap.ht_supported)
168 - return;
169 -
170 switch (cmd) {
171 case STA_NOTIFY_SLEEP:
172 an->sleeping = true;
173 --- a/drivers/net/wireless/ath/ath9k/xmit.c
174 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
175 @@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_
176
177 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
178 {
179 + if (!tid->an->sta)
180 + return;
181 +
182 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
183 seqno << IEEE80211_SEQ_SEQ_SHIFT);
184 }
185 @@ -168,6 +171,71 @@ static void ath_txq_skb_done(struct ath_
186 }
187 }
188
189 +static struct ath_atx_tid *
190 +ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
191 +{
192 + struct ieee80211_hdr *hdr;
193 + u8 tidno = 0;
194 +
195 + hdr = (struct ieee80211_hdr *) skb->data;
196 + if (ieee80211_is_data_qos(hdr->frame_control))
197 + tidno = ieee80211_get_qos_ctl(hdr)[0];
198 +
199 + tidno &= IEEE80211_QOS_CTL_TID_MASK;
200 + return ATH_AN_2_TID(an, tidno);
201 +}
202 +
203 +static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
204 +{
205 + return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
206 +}
207 +
208 +static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
209 +{
210 + struct sk_buff *skb;
211 +
212 + skb = __skb_dequeue(&tid->retry_q);
213 + if (!skb)
214 + skb = __skb_dequeue(&tid->buf_q);
215 +
216 + return skb;
217 +}
218 +
219 +/*
220 + * ath_tx_tid_change_state:
221 + * - clears a-mpdu flag of previous session
222 + * - forces sequence number allocation to fix next BlockAck Window
223 + */
224 +static void
225 +ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
226 +{
227 + struct ath_txq *txq = tid->ac->txq;
228 + struct ieee80211_tx_info *tx_info;
229 + struct sk_buff *skb, *tskb;
230 + struct ath_buf *bf;
231 + struct ath_frame_info *fi;
232 +
233 + skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
234 + fi = get_frame_info(skb);
235 + bf = fi->bf;
236 +
237 + tx_info = IEEE80211_SKB_CB(skb);
238 + tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
239 +
240 + if (bf)
241 + continue;
242 +
243 + bf = ath_tx_setup_buffer(sc, txq, tid, skb);
244 + if (!bf) {
245 + __skb_unlink(skb, &tid->buf_q);
246 + ath_txq_skb_done(sc, txq, skb);
247 + ieee80211_free_txskb(sc->hw, skb);
248 + continue;
249 + }
250 + }
251 +
252 +}
253 +
254 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
255 {
256 struct ath_txq *txq = tid->ac->txq;
257 @@ -182,28 +250,22 @@ static void ath_tx_flush_tid(struct ath_
258
259 memset(&ts, 0, sizeof(ts));
260
261 - while ((skb = __skb_dequeue(&tid->buf_q))) {
262 + while ((skb = __skb_dequeue(&tid->retry_q))) {
263 fi = get_frame_info(skb);
264 bf = fi->bf;
265 -
266 if (!bf) {
267 - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
268 - if (!bf) {
269 - ath_txq_skb_done(sc, txq, skb);
270 - ieee80211_free_txskb(sc->hw, skb);
271 - continue;
272 - }
273 + ath_txq_skb_done(sc, txq, skb);
274 + ieee80211_free_txskb(sc->hw, skb);
275 + continue;
276 }
277
278 - if (fi->retries) {
279 - list_add_tail(&bf->list, &bf_head);
280 + if (fi->baw_tracked) {
281 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
282 - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
283 sendbar = true;
284 - } else {
285 - ath_set_rates(tid->an->vif, tid->an->sta, bf);
286 - ath_tx_send_normal(sc, txq, NULL, skb);
287 }
288 +
289 + list_add_tail(&bf->list, &bf_head);
290 + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
291 }
292
293 if (sendbar) {
294 @@ -232,13 +294,16 @@ static void ath_tx_update_baw(struct ath
295 }
296
297 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
298 - u16 seqno)
299 + struct ath_buf *bf)
300 {
301 + struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
302 + u16 seqno = bf->bf_state.seqno;
303 int index, cindex;
304
305 index = ATH_BA_INDEX(tid->seq_start, seqno);
306 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
307 __set_bit(cindex, tid->tx_buf);
308 + fi->baw_tracked = 1;
309
310 if (index >= ((tid->baw_tail - tid->baw_head) &
311 (ATH_TID_MAX_BUFS - 1))) {
312 @@ -266,7 +331,7 @@ static void ath_tid_drain(struct ath_sof
313 memset(&ts, 0, sizeof(ts));
314 INIT_LIST_HEAD(&bf_head);
315
316 - while ((skb = __skb_dequeue(&tid->buf_q))) {
317 + while ((skb = ath_tid_dequeue(tid))) {
318 fi = get_frame_info(skb);
319 bf = fi->bf;
320
321 @@ -403,7 +468,6 @@ static void ath_tx_complete_aggr(struct
322 struct ieee80211_tx_rate rates[4];
323 struct ath_frame_info *fi;
324 int nframes;
325 - u8 tidno;
326 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
327 int i, retries;
328 int bar_index = -1;
329 @@ -440,8 +504,7 @@ static void ath_tx_complete_aggr(struct
330 }
331
332 an = (struct ath_node *)sta->drv_priv;
333 - tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
334 - tid = ATH_AN_2_TID(an, tidno);
335 + tid = ath_get_skb_tid(sc, an, skb);
336 seq_first = tid->seq_start;
337 isba = ts->ts_flags & ATH9K_TX_BA;
338
339 @@ -453,7 +516,7 @@ static void ath_tx_complete_aggr(struct
340 * Only BlockAcks have a TID and therefore normal Acks cannot be
341 * checked
342 */
343 - if (isba && tidno != ts->tid)
344 + if (isba && tid->tidno != ts->tid)
345 txok = false;
346
347 isaggr = bf_isaggr(bf);
348 @@ -489,7 +552,8 @@ static void ath_tx_complete_aggr(struct
349 tx_info = IEEE80211_SKB_CB(skb);
350 fi = get_frame_info(skb);
351
352 - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
353 + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
354 + !tid->active) {
355 /*
356 * Outside of the current BlockAck window,
357 * maybe part of a previous session
358 @@ -583,7 +647,7 @@ static void ath_tx_complete_aggr(struct
359 if (an->sleeping)
360 ieee80211_sta_set_buffered(sta, tid->tidno, true);
361
362 - skb_queue_splice(&bf_pending, &tid->buf_q);
363 + skb_queue_splice_tail(&bf_pending, &tid->retry_q);
364 if (!an->sleeping) {
365 ath_tx_queue_tid(txq, tid);
366
367 @@ -641,7 +705,7 @@ static void ath_tx_process_buffer(struct
368 } else
369 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
370
371 - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
372 + if (!flush)
373 ath_txq_schedule(sc, txq);
374 }
375
376 @@ -815,15 +879,20 @@ static int ath_compute_num_delims(struct
377
378 static struct ath_buf *
379 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
380 - struct ath_atx_tid *tid)
381 + struct ath_atx_tid *tid, struct sk_buff_head **q)
382 {
383 + struct ieee80211_tx_info *tx_info;
384 struct ath_frame_info *fi;
385 struct sk_buff *skb;
386 struct ath_buf *bf;
387 u16 seqno;
388
389 while (1) {
390 - skb = skb_peek(&tid->buf_q);
391 + *q = &tid->retry_q;
392 + if (skb_queue_empty(*q))
393 + *q = &tid->buf_q;
394 +
395 + skb = skb_peek(*q);
396 if (!skb)
397 break;
398
399 @@ -833,12 +902,22 @@ ath_tx_get_tid_subframe(struct ath_softc
400 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
401
402 if (!bf) {
403 - __skb_unlink(skb, &tid->buf_q);
404 + __skb_unlink(skb, *q);
405 ath_txq_skb_done(sc, txq, skb);
406 ieee80211_free_txskb(sc->hw, skb);
407 continue;
408 }
409
410 + bf->bf_next = NULL;
411 + bf->bf_lastbf = bf;
412 +
413 + tx_info = IEEE80211_SKB_CB(skb);
414 + tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
415 + if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
416 + bf->bf_state.bf_type = 0;
417 + return bf;
418 + }
419 +
420 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
421 seqno = bf->bf_state.seqno;
422
423 @@ -852,73 +931,52 @@ ath_tx_get_tid_subframe(struct ath_softc
424
425 INIT_LIST_HEAD(&bf_head);
426 list_add(&bf->list, &bf_head);
427 - __skb_unlink(skb, &tid->buf_q);
428 + __skb_unlink(skb, *q);
429 ath_tx_update_baw(sc, tid, seqno);
430 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
431 continue;
432 }
433
434 - bf->bf_next = NULL;
435 - bf->bf_lastbf = bf;
436 return bf;
437 }
438
439 return NULL;
440 }
441
442 -static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
443 - struct ath_txq *txq,
444 - struct ath_atx_tid *tid,
445 - struct list_head *bf_q,
446 - int *aggr_len)
447 +static bool
448 +ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
449 + struct ath_atx_tid *tid, struct list_head *bf_q,
450 + struct ath_buf *bf_first, struct sk_buff_head *tid_q,
451 + int *aggr_len)
452 {
453 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
454 - struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
455 - int rl = 0, nframes = 0, ndelim, prev_al = 0;
456 + struct ath_buf *bf = bf_first, *bf_prev = NULL;
457 + int nframes = 0, ndelim;
458 u16 aggr_limit = 0, al = 0, bpad = 0,
459 - al_delta, h_baw = tid->baw_size / 2;
460 - enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
461 + al_delta, h_baw = tid->baw_size / 2;
462 struct ieee80211_tx_info *tx_info;
463 struct ath_frame_info *fi;
464 struct sk_buff *skb;
465 + bool closed = false;
466
467 - do {
468 - bf = ath_tx_get_tid_subframe(sc, txq, tid);
469 - if (!bf) {
470 - status = ATH_AGGR_BAW_CLOSED;
471 - break;
472 - }
473 + bf = bf_first;
474 + aggr_limit = ath_lookup_rate(sc, bf, tid);
475
476 + do {
477 skb = bf->bf_mpdu;
478 fi = get_frame_info(skb);
479
480 - if (!bf_first)
481 - bf_first = bf;
482 -
483 - if (!rl) {
484 - ath_set_rates(tid->an->vif, tid->an->sta, bf);
485 - aggr_limit = ath_lookup_rate(sc, bf, tid);
486 - rl = 1;
487 - }
488 -
489 /* do not exceed aggregation limit */
490 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
491 + if (nframes) {
492 + if (aggr_limit < al + bpad + al_delta ||
493 + ath_lookup_legacy(bf) || nframes >= h_baw)
494 + break;
495
496 - if (nframes &&
497 - ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
498 - ath_lookup_legacy(bf))) {
499 - status = ATH_AGGR_LIMITED;
500 - break;
501 - }
502 -
503 - tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
504 - if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
505 - break;
506 -
507 - /* do not exceed subframe limit */
508 - if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
509 - status = ATH_AGGR_LIMITED;
510 - break;
511 + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
512 + if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
513 + !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
514 + break;
515 }
516
517 /* add padding for previous frame to aggregation length */
518 @@ -936,22 +994,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_
519 bf->bf_next = NULL;
520
521 /* link buffers of this frame to the aggregate */
522 - if (!fi->retries)
523 - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
524 + if (!fi->baw_tracked)
525 + ath_tx_addto_baw(sc, tid, bf);
526 bf->bf_state.ndelim = ndelim;
527
528 - __skb_unlink(skb, &tid->buf_q);
529 + __skb_unlink(skb, tid_q);
530 list_add_tail(&bf->list, bf_q);
531 if (bf_prev)
532 bf_prev->bf_next = bf;
533
534 bf_prev = bf;
535
536 - } while (!skb_queue_empty(&tid->buf_q));
537 + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
538 + if (!bf) {
539 + closed = true;
540 + break;
541 + }
542 + } while (ath_tid_has_buffered(tid));
543 +
544 + bf = bf_first;
545 + bf->bf_lastbf = bf_prev;
546 +
547 + if (bf == bf_prev) {
548 + al = get_frame_info(bf->bf_mpdu)->framelen;
549 + bf->bf_state.bf_type = BUF_AMPDU;
550 + } else {
551 + TX_STAT_INC(txq->axq_qnum, a_aggr);
552 + }
553
554 *aggr_len = al;
555
556 - return status;
557 + return closed;
558 #undef PADBYTES
559 }
560
561 @@ -1212,53 +1285,86 @@ static void ath_tx_fill_desc(struct ath_
562 }
563 }
564
565 -static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
566 - struct ath_atx_tid *tid)
567 +static void
568 +ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
569 + struct ath_atx_tid *tid, struct list_head *bf_q,
570 + struct ath_buf *bf_first, struct sk_buff_head *tid_q)
571 {
572 - struct ath_buf *bf;
573 - enum ATH_AGGR_STATUS status;
574 - struct ieee80211_tx_info *tx_info;
575 - struct list_head bf_q;
576 - int aggr_len;
577 + struct ath_buf *bf = bf_first, *bf_prev = NULL;
578 + struct sk_buff *skb;
579 + int nframes = 0;
580
581 do {
582 - if (skb_queue_empty(&tid->buf_q))
583 - return;
584 + struct ieee80211_tx_info *tx_info;
585 + skb = bf->bf_mpdu;
586
587 - INIT_LIST_HEAD(&bf_q);
588 + nframes++;
589 + __skb_unlink(skb, tid_q);
590 + list_add_tail(&bf->list, bf_q);
591 + if (bf_prev)
592 + bf_prev->bf_next = bf;
593 + bf_prev = bf;
594
595 - status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
596 + if (nframes >= 2)
597 + break;
598
599 - /*
600 - * no frames picked up to be aggregated;
601 - * block-ack window is not open.
602 - */
603 - if (list_empty(&bf_q))
604 + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
605 + if (!bf)
606 break;
607
608 - bf = list_first_entry(&bf_q, struct ath_buf, list);
609 - bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
610 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
611 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
612 + break;
613
614 - if (tid->ac->clear_ps_filter) {
615 - tid->ac->clear_ps_filter = false;
616 - tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
617 - } else {
618 - tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
619 - }
620 + ath_set_rates(tid->an->vif, tid->an->sta, bf);
621 + } while (1);
622 +}
623
624 - /* if only one frame, send as non-aggregate */
625 - if (bf == bf->bf_lastbf) {
626 - aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
627 - bf->bf_state.bf_type = BUF_AMPDU;
628 - } else {
629 - TX_STAT_INC(txq->axq_qnum, a_aggr);
630 - }
631 +static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
632 + struct ath_atx_tid *tid, bool *stop)
633 +{
634 + struct ath_buf *bf;
635 + struct ieee80211_tx_info *tx_info;
636 + struct sk_buff_head *tid_q;
637 + struct list_head bf_q;
638 + int aggr_len = 0;
639 + bool aggr, last = true;
640 +
641 + if (!ath_tid_has_buffered(tid))
642 + return false;
643 +
644 + INIT_LIST_HEAD(&bf_q);
645
646 - ath_tx_fill_desc(sc, bf, txq, aggr_len);
647 - ath_tx_txqaddbuf(sc, txq, &bf_q, false);
648 - } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
649 - status != ATH_AGGR_BAW_CLOSED);
650 + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
651 + if (!bf)
652 + return false;
653 +
654 + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
655 + aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
656 + if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
657 + (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
658 + *stop = true;
659 + return false;
660 + }
661 +
662 + ath_set_rates(tid->an->vif, tid->an->sta, bf);
663 + if (aggr)
664 + last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
665 + tid_q, &aggr_len);
666 + else
667 + ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
668 +
669 + if (list_empty(&bf_q))
670 + return false;
671 +
672 + if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
673 + tid->ac->clear_ps_filter = false;
674 + tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
675 + }
676 +
677 + ath_tx_fill_desc(sc, bf, txq, aggr_len);
678 + ath_tx_txqaddbuf(sc, txq, &bf_q, false);
679 + return true;
680 }
681
682 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
683 @@ -1282,6 +1388,9 @@ int ath_tx_aggr_start(struct ath_softc *
684 an->mpdudensity = density;
685 }
686
687 + /* force sequence number allocation for pending frames */
688 + ath_tx_tid_change_state(sc, txtid);
689 +
690 txtid->active = true;
691 txtid->paused = true;
692 *ssn = txtid->seq_start = txtid->seq_next;
693 @@ -1301,8 +1410,9 @@ void ath_tx_aggr_stop(struct ath_softc *
694
695 ath_txq_lock(sc, txq);
696 txtid->active = false;
697 - txtid->paused = true;
698 + txtid->paused = false;
699 ath_tx_flush_tid(sc, txtid);
700 + ath_tx_tid_change_state(sc, txtid);
701 ath_txq_unlock_complete(sc, txq);
702 }
703
704 @@ -1326,7 +1436,7 @@ void ath_tx_aggr_sleep(struct ieee80211_
705
706 ath_txq_lock(sc, txq);
707
708 - buffered = !skb_queue_empty(&tid->buf_q);
709 + buffered = ath_tid_has_buffered(tid);
710
711 tid->sched = false;
712 list_del(&tid->list);
713 @@ -1358,7 +1468,7 @@ void ath_tx_aggr_wakeup(struct ath_softc
714 ath_txq_lock(sc, txq);
715 ac->clear_ps_filter = true;
716
717 - if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
718 + if (!tid->paused && ath_tid_has_buffered(tid)) {
719 ath_tx_queue_tid(txq, tid);
720 ath_txq_schedule(sc, txq);
721 }
722 @@ -1383,7 +1493,7 @@ void ath_tx_aggr_resume(struct ath_softc
723 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
724 tid->paused = false;
725
726 - if (!skb_queue_empty(&tid->buf_q)) {
727 + if (ath_tid_has_buffered(tid)) {
728 ath_tx_queue_tid(txq, tid);
729 ath_txq_schedule(sc, txq);
730 }
731 @@ -1403,6 +1513,7 @@ void ath9k_release_buffered_frames(struc
732 struct ieee80211_tx_info *info;
733 struct list_head bf_q;
734 struct ath_buf *bf_tail = NULL, *bf;
735 + struct sk_buff_head *tid_q;
736 int sent = 0;
737 int i;
738
739 @@ -1418,15 +1529,15 @@ void ath9k_release_buffered_frames(struc
740 continue;
741
742 ath_txq_lock(sc, tid->ac->txq);
743 - while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
744 - bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
745 + while (nframes > 0) {
746 + bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
747 if (!bf)
748 break;
749
750 - __skb_unlink(bf->bf_mpdu, &tid->buf_q);
751 + __skb_unlink(bf->bf_mpdu, tid_q);
752 list_add_tail(&bf->list, &bf_q);
753 ath_set_rates(tid->an->vif, tid->an->sta, bf);
754 - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
755 + ath_tx_addto_baw(sc, tid, bf);
756 bf->bf_state.bf_type &= ~BUF_AGGR;
757 if (bf_tail)
758 bf_tail->bf_next = bf;
759 @@ -1436,7 +1547,7 @@ void ath9k_release_buffered_frames(struc
760 sent++;
761 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
762
763 - if (skb_queue_empty(&tid->buf_q))
764 + if (an->sta && !ath_tid_has_buffered(tid))
765 ieee80211_sta_set_buffered(an->sta, i, false);
766 }
767 ath_txq_unlock_complete(sc, tid->ac->txq);
768 @@ -1689,25 +1800,27 @@ void ath_tx_cleanupq(struct ath_softc *s
769 */
770 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
771 {
772 - struct ath_atx_ac *ac, *ac_tmp, *last_ac;
773 + struct ath_atx_ac *ac, *last_ac;
774 struct ath_atx_tid *tid, *last_tid;
775 + bool sent = false;
776
777 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
778 - list_empty(&txq->axq_acq) ||
779 - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
780 + list_empty(&txq->axq_acq))
781 return;
782
783 rcu_read_lock();
784
785 - ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
786 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
787 + while (!list_empty(&txq->axq_acq)) {
788 + bool stop = false;
789
790 - list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
791 + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
792 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
793 list_del(&ac->list);
794 ac->sched = false;
795
796 while (!list_empty(&ac->tid_q)) {
797 +
798 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
799 list);
800 list_del(&tid->list);
801 @@ -1716,17 +1829,17 @@ void ath_txq_schedule(struct ath_softc *
802 if (tid->paused)
803 continue;
804
805 - ath_tx_sched_aggr(sc, txq, tid);
806 + if (ath_tx_sched_aggr(sc, txq, tid, &stop))
807 + sent = true;
808
809 /*
810 * add tid to round-robin queue if more frames
811 * are pending for the tid
812 */
813 - if (!skb_queue_empty(&tid->buf_q))
814 + if (ath_tid_has_buffered(tid))
815 ath_tx_queue_tid(txq, tid);
816
817 - if (tid == last_tid ||
818 - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
819 + if (stop || tid == last_tid)
820 break;
821 }
822
823 @@ -1735,9 +1848,17 @@ void ath_txq_schedule(struct ath_softc *
824 list_add_tail(&ac->list, &txq->axq_acq);
825 }
826
827 - if (ac == last_ac ||
828 - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
829 + if (stop)
830 break;
831 +
832 + if (ac == last_ac) {
833 + if (!sent)
834 + break;
835 +
836 + sent = false;
837 + last_ac = list_entry(txq->axq_acq.prev,
838 + struct ath_atx_ac, list);
839 + }
840 }
841
842 rcu_read_unlock();
843 @@ -1816,58 +1937,6 @@ static void ath_tx_txqaddbuf(struct ath_
844 }
845 }
846
847 -static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
848 - struct ath_atx_tid *tid, struct sk_buff *skb,
849 - struct ath_tx_control *txctl)
850 -{
851 - struct ath_frame_info *fi = get_frame_info(skb);
852 - struct list_head bf_head;
853 - struct ath_buf *bf;
854 -
855 - /*
856 - * Do not queue to h/w when any of the following conditions is true:
857 - * - there are pending frames in software queue
858 - * - the TID is currently paused for ADDBA/BAR request
859 - * - seqno is not within block-ack window
860 - * - h/w queue depth exceeds low water mark
861 - */
862 - if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
863 - !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
864 - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
865 - txq != sc->tx.uapsdq) {
866 - /*
867 - * Add this frame to software queue for scheduling later
868 - * for aggregation.
869 - */
870 - TX_STAT_INC(txq->axq_qnum, a_queued_sw);
871 - __skb_queue_tail(&tid->buf_q, skb);
872 - if (!txctl->an || !txctl->an->sleeping)
873 - ath_tx_queue_tid(txq, tid);
874 - return;
875 - }
876 -
877 - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
878 - if (!bf) {
879 - ath_txq_skb_done(sc, txq, skb);
880 - ieee80211_free_txskb(sc->hw, skb);
881 - return;
882 - }
883 -
884 - ath_set_rates(tid->an->vif, tid->an->sta, bf);
885 - bf->bf_state.bf_type = BUF_AMPDU;
886 - INIT_LIST_HEAD(&bf_head);
887 - list_add(&bf->list, &bf_head);
888 -
889 - /* Add sub-frame to BAW */
890 - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
891 -
892 - /* Queue to h/w without aggregation */
893 - TX_STAT_INC(txq->axq_qnum, a_queued_hw);
894 - bf->bf_lastbf = bf;
895 - ath_tx_fill_desc(sc, bf, txq, fi->framelen);
896 - ath_tx_txqaddbuf(sc, txq, &bf_head, false);
897 -}
898 -
899 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
900 struct ath_atx_tid *tid, struct sk_buff *skb)
901 {
902 @@ -2010,6 +2079,7 @@ static int ath_tx_prepare(struct ieee802
903 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
904 struct ieee80211_sta *sta = txctl->sta;
905 struct ieee80211_vif *vif = info->control.vif;
906 + struct ath_vif *avp;
907 struct ath_softc *sc = hw->priv;
908 int frmlen = skb->len + FCS_LEN;
909 int padpos, padsize;
910 @@ -2017,6 +2087,10 @@ static int ath_tx_prepare(struct ieee802
911 /* NOTE: sta can be NULL according to net/mac80211.h */
912 if (sta)
913 txctl->an = (struct ath_node *)sta->drv_priv;
914 + else if (vif && ieee80211_is_data(hdr->frame_control)) {
915 + avp = (void *)vif->drv_priv;
916 + txctl->an = &avp->mcast_node;
917 + }
918
919 if (info->control.hw_key)
920 frmlen += info->control.hw_key->icv_len;
921 @@ -2066,7 +2140,6 @@ int ath_tx_start(struct ieee80211_hw *hw
922 struct ath_txq *txq = txctl->txq;
923 struct ath_atx_tid *tid = NULL;
924 struct ath_buf *bf;
925 - u8 tidno;
926 int q;
927 int ret;
928
929 @@ -2094,22 +2167,25 @@ int ath_tx_start(struct ieee80211_hw *hw
930 ath_txq_unlock(sc, txq);
931 txq = sc->tx.uapsdq;
932 ath_txq_lock(sc, txq);
933 - }
934 -
935 - if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
936 - tidno = ieee80211_get_qos_ctl(hdr)[0] &
937 - IEEE80211_QOS_CTL_TID_MASK;
938 - tid = ATH_AN_2_TID(txctl->an, tidno);
939 + } else if (txctl->an &&
940 + ieee80211_is_data_present(hdr->frame_control)) {
941 + tid = ath_get_skb_tid(sc, txctl->an, skb);
942
943 WARN_ON(tid->ac->txq != txctl->txq);
944 - }
945
946 - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
947 + if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
948 + tid->ac->clear_ps_filter = true;
949 +
950 /*
951 - * Try aggregation if it's a unicast data frame
952 - * and the destination is HT capable.
953 + * Add this frame to software queue for scheduling later
954 + * for aggregation.
955 */
956 - ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
957 + TX_STAT_INC(txq->axq_qnum, a_queued_sw);
958 + __skb_queue_tail(&tid->buf_q, skb);
959 + if (!txctl->an->sleeping)
960 + ath_tx_queue_tid(txq, tid);
961 +
962 + ath_txq_schedule(sc, txq);
963 goto out;
964 }
965
966 @@ -2372,8 +2448,7 @@ static void ath_tx_processq(struct ath_s
967
968 if (list_empty(&txq->axq_q)) {
969 txq->axq_link = NULL;
970 - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
971 - ath_txq_schedule(sc, txq);
972 + ath_txq_schedule(sc, txq);
973 break;
974 }
975 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
976 @@ -2595,6 +2670,7 @@ void ath_tx_node_init(struct ath_softc *
977 tid->paused = false;
978 tid->active = false;
979 __skb_queue_head_init(&tid->buf_q);
980 + __skb_queue_head_init(&tid->retry_q);
981 acno = TID_TO_WME_AC(tidno);
982 tid->ac = &an->ac[acno];
983 }
984 @@ -2602,6 +2678,7 @@ void ath_tx_node_init(struct ath_softc *
985 for (acno = 0, ac = &an->ac[acno];
986 acno < IEEE80211_NUM_ACS; acno++, ac++) {
987 ac->sched = false;
988 + ac->clear_ps_filter = true;
989 ac->txq = sc->tx.txq_map[acno];
990 INIT_LIST_HEAD(&ac->tid_q);
991 }