154d9114576e668016e5ba747ac9c62c41fe6807
[openwrt/openwrt.git] / package / kernel / mac80211 / patches / 300-mac80211-add-an-intermediate-software-queue-implemen.patch
1 From: Felix Fietkau <nbd@openwrt.org>
2 Date: Tue, 18 Nov 2014 23:58:51 +0100
3 Subject: [PATCH] mac80211: add an intermediate software queue implementation
4
5 This allows drivers to request per-vif and per-sta-tid queues from which
6 they can pull frames. This makes it easier to keep the hardware queues
7 short, and to improve fairness between clients and vifs.
8
9 The task of scheduling packet transmission is left up to the driver -
10 queueing is controlled by mac80211. Drivers can only dequeue packets by
11 calling ieee80211_tx_dequeue. This makes it possible to add active queue
12 management later without changing drivers using this code.
13
14 This can also be used as a starting point to implement A-MSDU
15 aggregation in a way that does not add artificially induced latency.
16
17 Signed-off-by: Felix Fietkau <nbd@openwrt.org>
18 ---
19
20 --- a/include/net/mac80211.h
21 +++ b/include/net/mac80211.h
22 @@ -84,6 +84,34 @@
23 *
24 */
25
26 +/**
27 + * DOC: mac80211 software tx queueing
28 + *
29 + * mac80211 provides an optional intermediate queueing implementation designed
30 + * to allow the driver to keep hardware queues short and provide some fairness
31 + * between different stations/interfaces.
32 + * In this model, the driver pulls data frames from the mac80211 queue instead
33 + * of letting mac80211 push them via drv_tx().
34 + * Other frames (e.g. control or management) are still pushed using drv_tx().
35 + *
36 + * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
37 + * single per-vif queue for multicast data frames.
38 + *
39 + * The driver is expected to initialize its private per-queue data for stations
40 + * and interfaces in the .add_interface and .sta_add ops.
41 + *
42 + * The driver can not access the queue directly. To dequeue a frame, it calls
43 + * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
44 + * calls the .wake_tx_queue driver op.
45 + *
46 + * For AP powersave TIM handling, the driver only needs to indicate if it has
47 + * buffered packets in the driver specific data structures by calling
48 + * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq
49 + * struct, mac80211 sets TIM and calls .release_buffered_frames().
50 + * The driver is expected to release its own buffered frames and also call
51 + * ieee80211_tx_dequeue() within that callback.
52 + */
53 +
54 struct device;
55
56 /**
57 @@ -1257,6 +1285,8 @@ struct ieee80211_vif {
58 u8 cab_queue;
59 u8 hw_queue[IEEE80211_NUM_ACS];
60
61 + struct ieee80211_txq *txq;
62 +
63 struct ieee80211_chanctx_conf __rcu *chanctx_conf;
64
65 u32 driver_flags;
66 @@ -1519,6 +1549,8 @@ struct ieee80211_sta {
67 bool tdls_initiator;
68 bool mfp;
69
70 + struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
71 +
72 /* must be last */
73 u8 drv_priv[0] __aligned(sizeof(void *));
74 };
75 @@ -1547,6 +1579,27 @@ struct ieee80211_tx_control {
76 };
77
78 /**
79 + * struct ieee80211_txq - Software intermediate tx queue
80 + *
81 + * @vif: &struct ieee80211_vif pointer from the add_interface callback.
82 + * @sta: station table entry, may be NULL for per-vif queue
83 + * @tid: the TID for this queue (unused for per-vif queue)
84 + * @ac: the AC for this queue
85 + *
86 + * The driver can obtain packets from this queue by calling
87 + * ieee80211_tx_dequeue().
88 + */
89 +struct ieee80211_txq {
90 + struct ieee80211_vif *vif;
91 + struct ieee80211_sta *sta;
92 + u8 tid;
93 + u8 ac;
94 +
95 + /* must be last */
96 + u8 drv_priv[0] __aligned(sizeof(void *));
97 +};
98 +
99 +/**
100 * enum ieee80211_hw_flags - hardware flags
101 *
102 * These flags are used to indicate hardware capabilities to
103 @@ -1770,6 +1823,8 @@ enum ieee80211_hw_flags {
104 * within &struct ieee80211_sta.
105 * @chanctx_data_size: size (in bytes) of the drv_priv data area
106 * within &struct ieee80211_chanctx_conf.
107 + * @txq_data_size: size (in bytes) of the drv_priv data area
108 + *	within &struct ieee80211_txq.
109 *
110 * @max_rates: maximum number of alternate rate retry stages the hw
111 * can handle.
112 @@ -1818,6 +1873,9 @@ enum ieee80211_hw_flags {
113 * @n_cipher_schemes: a size of an array of cipher schemes definitions.
114 * @cipher_schemes: a pointer to an array of cipher scheme definitions
115 * supported by HW.
116 + *
117 + * @txq_ac_max_pending: maximum number of frames per AC pending in all txq
118 + * entries for a vif.
119 */
120 struct ieee80211_hw {
121 struct ieee80211_conf conf;
122 @@ -1830,6 +1888,7 @@ struct ieee80211_hw {
123 int vif_data_size;
124 int sta_data_size;
125 int chanctx_data_size;
126 + int txq_data_size;
127 u16 queues;
128 u16 max_listen_interval;
129 s8 max_signal;
130 @@ -1846,6 +1905,7 @@ struct ieee80211_hw {
131 u8 uapsd_max_sp_len;
132 u8 n_cipher_schemes;
133 const struct ieee80211_cipher_scheme *cipher_schemes;
134 + int txq_ac_max_pending;
135 };
136
137 /**
138 @@ -3007,6 +3067,8 @@ enum ieee80211_reconfig_type {
139 * response template is provided, together with the location of the
140 * switch-timing IE within the template. The skb can only be used within
141 * the function call.
142 + *
143 + * @wake_tx_queue: Called when new packets have been added to the queue.
144 */
145 struct ieee80211_ops {
146 void (*tx)(struct ieee80211_hw *hw,
147 @@ -3238,6 +3300,9 @@ struct ieee80211_ops {
148 void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
149 struct ieee80211_vif *vif,
150 struct ieee80211_tdls_ch_sw_params *params);
151 +
152 + void (*wake_tx_queue)(struct ieee80211_hw *hw,
153 + struct ieee80211_txq *txq);
154 };
155
156 /**
157 @@ -5249,4 +5314,17 @@ void ieee80211_unreserve_tid(struct ieee
158 */
159 size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
160 const u8 *ids, int n_ids, size_t offset);
161 +
162 +/**
163 + * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
164 + *
165 + * @hw: pointer as obtained from ieee80211_alloc_hw()
166 + * @txq: pointer obtained from station or virtual interface
167 + *
168 + * Returns the skb if successful, NULL if no frame was available.
169 + */
170 +struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
171 + struct ieee80211_txq *txq);
172 +
173 +
174 #endif /* MAC80211_H */
175 --- a/net/mac80211/driver-ops.h
176 +++ b/net/mac80211/driver-ops.h
177 @@ -1367,4 +1367,16 @@ drv_tdls_recv_channel_switch(struct ieee
178 trace_drv_return_void(local);
179 }
180
181 +static inline void drv_wake_tx_queue(struct ieee80211_local *local,
182 + struct txq_info *txq)
183 +{
184 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
185 +
186 + if (!check_sdata_in_driver(sdata))
187 + return;
188 +
189 + trace_drv_wake_tx_queue(local, sdata, txq->txq.sta, txq->txq.tid);
190 + local->ops->wake_tx_queue(&local->hw, &txq->txq);
191 +}
192 +
193 #endif /* __MAC80211_DRIVER_OPS */
194 --- a/net/mac80211/ieee80211_i.h
195 +++ b/net/mac80211/ieee80211_i.h
196 @@ -809,6 +809,19 @@ struct mac80211_qos_map {
197 struct rcu_head rcu_head;
198 };
199
200 +enum txq_info_flags {
201 + IEEE80211_TXQ_STOP,
202 + IEEE80211_TXQ_AMPDU,
203 +};
204 +
205 +struct txq_info {
206 + struct sk_buff_head queue;
207 + unsigned long flags;
208 +
209 + /* keep last! */
210 + struct ieee80211_txq txq;
211 +};
212 +
213 struct ieee80211_sub_if_data {
214 struct list_head list;
215
216 @@ -853,6 +866,7 @@ struct ieee80211_sub_if_data {
217 bool control_port_no_encrypt;
218 int encrypt_headroom;
219
220 + atomic_t txqs_len[IEEE80211_NUM_ACS];
221 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
222 struct mac80211_qos_map __rcu *qos_map;
223
224 @@ -1453,6 +1467,10 @@ static inline struct ieee80211_local *hw
225 return container_of(hw, struct ieee80211_local, hw);
226 }
227
228 +static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
229 +{
230 + return container_of(txq, struct txq_info, txq);
231 +}
232
233 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
234 {
235 @@ -1905,6 +1923,12 @@ static inline bool ieee80211_can_run_wor
236 return true;
237 }
238
239 +void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
240 + struct sta_info *sta,
241 + struct txq_info *txq, int tid);
242 +void ieee80211_flush_tx_queue(struct ieee80211_local *local,
243 + struct ieee80211_txq *txq);
244 +
245 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
246 u16 transaction, u16 auth_alg, u16 status,
247 const u8 *extra, size_t extra_len, const u8 *bssid,
248 --- a/net/mac80211/iface.c
249 +++ b/net/mac80211/iface.c
250 @@ -969,6 +969,9 @@ static void ieee80211_do_stop(struct iee
251 }
252 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
253
254 + if (sdata->vif.txq)
255 + ieee80211_flush_tx_queue(local, sdata->vif.txq);
256 +
257 if (local->open_count == 0)
258 ieee80211_clear_tx_pending(local);
259
260 @@ -1773,6 +1776,15 @@ int ieee80211_if_add(struct ieee80211_lo
261 ieee80211_setup_sdata(sdata, type);
262
263 if (ndev) {
264 + struct txq_info *txqi = NULL;
265 +
266 + if (local->ops->wake_tx_queue) {
267 + txqi = kzalloc(sizeof(*txqi) +
268 + local->hw.txq_data_size, GFP_KERNEL);
269 + if (txqi)
270 + ieee80211_init_tx_queue(sdata, NULL, txqi, 0);
271 + }
272 +
273 if (params) {
274 ndev->ieee80211_ptr->use_4addr = params->use_4addr;
275 if (type == NL80211_IFTYPE_STATION)
276 @@ -1785,6 +1797,7 @@ int ieee80211_if_add(struct ieee80211_lo
277
278 ret = register_netdevice(ndev);
279 if (ret) {
280 + kfree(txqi);
281 free_netdev(ndev);
282 return ret;
283 }
284 @@ -1810,6 +1823,9 @@ void ieee80211_if_remove(struct ieee8021
285
286 synchronize_rcu();
287
288 + if (sdata->vif.txq)
289 + kfree(to_txq_info(sdata->vif.txq));
290 +
291 if (sdata->dev) {
292 unregister_netdevice(sdata->dev);
293 } else {
294 @@ -1851,6 +1867,9 @@ void ieee80211_remove_interfaces(struct
295 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
296 list_del(&sdata->list);
297
298 + if (sdata->vif.txq)
299 + kfree(to_txq_info(sdata->vif.txq));
300 +
301 if (sdata->dev)
302 unregister_netdevice_queue(sdata->dev, &unreg_list);
303 else
304 --- a/net/mac80211/main.c
305 +++ b/net/mac80211/main.c
306 @@ -1019,6 +1019,9 @@ int ieee80211_register_hw(struct ieee802
307
308 local->dynamic_ps_forced_timeout = -1;
309
310 + if (!local->hw.txq_ac_max_pending)
311 + local->hw.txq_ac_max_pending = 64;
312 +
313 result = ieee80211_wep_init(local);
314 if (result < 0)
315 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
316 --- a/net/mac80211/sta_info.c
317 +++ b/net/mac80211/sta_info.c
318 @@ -118,6 +118,11 @@ static void __cleanup_single_sta(struct
319 atomic_dec(&ps->num_sta_ps);
320 }
321
322 + if (sta->sta.txq[0]) {
323 + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++)
324 + ieee80211_flush_tx_queue(local, sta->sta.txq[i]);
325 + }
326 +
327 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
328 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
329 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
330 @@ -234,6 +239,8 @@ void sta_info_free(struct ieee80211_loca
331
332 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
333
334 + if (sta->sta.txq[0])
335 + kfree(to_txq_info(sta->sta.txq[0]));
336 kfree(rcu_dereference_raw(sta->sta.rates));
337 kfree(sta);
338 }
339 @@ -285,11 +292,12 @@ struct sta_info *sta_info_alloc(struct i
340 const u8 *addr, gfp_t gfp)
341 {
342 struct ieee80211_local *local = sdata->local;
343 + struct ieee80211_hw *hw = &local->hw;
344 struct sta_info *sta;
345 struct timespec uptime;
346 int i;
347
348 - sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
349 + sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
350 if (!sta)
351 return NULL;
352
353 @@ -321,11 +329,24 @@ struct sta_info *sta_info_alloc(struct i
354 for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
355 ewma_init(&sta->chain_signal_avg[i], 1024, 8);
356
357 - if (sta_prepare_rate_control(local, sta, gfp)) {
358 - kfree(sta);
359 - return NULL;
360 + if (local->ops->wake_tx_queue) {
361 + void *txq_data;
362 + int size = sizeof(struct txq_info) +
363 + ALIGN(hw->txq_data_size, sizeof(void *));
364 +
365 + txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
366 + if (!txq_data)
367 + goto free;
368 +
369 + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
370 + struct txq_info *txq = txq_data + i * size;
371 + ieee80211_init_tx_queue(sdata, sta, txq, i);
372 + }
373 }
374
375 + if (sta_prepare_rate_control(local, sta, gfp))
376 + goto free_txq;
377 +
378 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
379 /*
380 * timer_to_tid must be initialized with identity mapping
381 @@ -346,7 +367,7 @@ struct sta_info *sta_info_alloc(struct i
382 if (sdata->vif.type == NL80211_IFTYPE_AP ||
383 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
384 struct ieee80211_supported_band *sband =
385 - local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
386 + hw->wiphy->bands[ieee80211_get_sdata_band(sdata)];
387 u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
388 IEEE80211_HT_CAP_SM_PS_SHIFT;
389 /*
390 @@ -371,6 +392,13 @@ struct sta_info *sta_info_alloc(struct i
391 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
392
393 return sta;
394 +
395 +free_txq:
396 + if (sta->sta.txq[0])
397 + kfree(to_txq_info(sta->sta.txq[0]));
398 +free:
399 + kfree(sta);
400 + return NULL;
401 }
402
403 static int sta_info_insert_check(struct sta_info *sta)
404 @@ -640,6 +668,8 @@ static void __sta_info_recalc_tim(struct
405
406 indicate_tim |=
407 sta->driver_buffered_tids & tids;
408 + indicate_tim |=
409 + sta->txq_buffered_tids & tids;
410 }
411
412 done:
413 @@ -1071,7 +1101,7 @@ void ieee80211_sta_ps_deliver_wakeup(str
414 struct ieee80211_sub_if_data *sdata = sta->sdata;
415 struct ieee80211_local *local = sdata->local;
416 struct sk_buff_head pending;
417 - int filtered = 0, buffered = 0, ac;
418 + int filtered = 0, buffered = 0, ac, i;
419 unsigned long flags;
420 struct ps_data *ps;
421
422 @@ -1090,10 +1120,22 @@ void ieee80211_sta_ps_deliver_wakeup(str
423
424 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
425 sta->driver_buffered_tids = 0;
426 + sta->txq_buffered_tids = 0;
427
428 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
429 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
430
431 + if (sta->sta.txq[0]) {
432 + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
433 + struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
434 +
435 + if (!skb_queue_len(&txqi->queue))
436 + continue;
437 +
438 + drv_wake_tx_queue(local, txqi);
439 + }
440 + }
441 +
442 skb_queue_head_init(&pending);
443
444 /* sync with ieee80211_tx_h_unicast_ps_buf */
445 @@ -1254,7 +1296,7 @@ ieee80211_sta_ps_deliver_response(struct
446 struct ieee80211_sub_if_data *sdata = sta->sdata;
447 struct ieee80211_local *local = sdata->local;
448 bool more_data = false;
449 - int ac;
450 + int ac, tid;
451 unsigned long driver_release_tids = 0;
452 struct sk_buff_head frames;
453
454 @@ -1275,8 +1317,10 @@ ieee80211_sta_ps_deliver_response(struct
455 /* if we already have frames from software, then we can't also
456 * release from hardware queues
457 */
458 - if (skb_queue_empty(&frames))
459 + if (skb_queue_empty(&frames)) {
460 driver_release_tids |= sta->driver_buffered_tids & tids;
461 + driver_release_tids |= sta->txq_buffered_tids & tids;
462 + }
463
464 if (driver_release_tids) {
465 /* If the driver has data on more than one TID then
466 @@ -1447,6 +1491,8 @@ ieee80211_sta_ps_deliver_response(struct
467
468 sta_info_recalc_tim(sta);
469 } else {
470 + unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
471 +
472 /*
473 * We need to release a frame that is buffered somewhere in the
474 * driver ... it'll have to handle that.
475 @@ -1466,8 +1512,22 @@ ieee80211_sta_ps_deliver_response(struct
476 * that the TID(s) became empty before returning here from the
477 * release function.
478 * Either way, however, when the driver tells us that the TID(s)
479 - * became empty we'll do the TIM recalculation.
480 + * became empty or we find that a txq became empty, we'll do the
481 + * TIM recalculation.
482 */
483 +
484 + if (!sta->sta.txq[0])
485 + return;
486 +
487 + for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
488 + struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
489 +
490 + if (!(tids & BIT(tid)) || skb_queue_len(&txqi->queue))
491 + continue;
492 +
493 + sta_info_recalc_tim(sta);
494 + break;
495 + }
496 }
497 }
498
499 --- a/net/mac80211/sta_info.h
500 +++ b/net/mac80211/sta_info.h
501 @@ -274,6 +274,7 @@ struct sta_ampdu_mlme {
502 * entered power saving state, these are also delivered to
503 * the station when it leaves powersave or polls for frames
504 * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
505 + * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on
506 * @rx_packets: Number of MSDUs received from this STA
507 * @rx_bytes: Number of bytes received from this STA
508 * @last_rx: time (in jiffies) when last frame was received from this STA
509 @@ -368,6 +369,7 @@ struct sta_info {
510 struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
511 struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
512 unsigned long driver_buffered_tids;
513 + unsigned long txq_buffered_tids;
514
515 /* Updated from RX path only, no locking requirements */
516 unsigned long rx_packets;
517 --- a/net/mac80211/trace.h
518 +++ b/net/mac80211/trace.h
519 @@ -2312,6 +2312,34 @@ TRACE_EVENT(drv_tdls_recv_channel_switch
520 )
521 );
522
523 +TRACE_EVENT(drv_wake_tx_queue,
524 + TP_PROTO(struct ieee80211_local *local,
525 + struct ieee80211_sub_if_data *sdata,
526 + struct ieee80211_sta *sta,
527 + u8 tid),
528 +
529 + TP_ARGS(local, sdata, sta, tid),
530 +
531 + TP_STRUCT__entry(
532 + LOCAL_ENTRY
533 + VIF_ENTRY
534 + STA_ENTRY
535 + __field(u8, tid)
536 + ),
537 +
538 + TP_fast_assign(
539 + LOCAL_ASSIGN;
540 + VIF_ASSIGN;
541 + STA_ASSIGN;
542 + __entry->tid = tid;
543 + ),
544 +
545 + TP_printk(
546 + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " tid: 0x%x",
547 + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->tid
548 + )
549 +);
550 +
551 #ifdef CPTCFG_MAC80211_MESSAGE_TRACING
552 #undef TRACE_SYSTEM
553 #define TRACE_SYSTEM mac80211_msg
554 --- a/net/mac80211/tx.c
555 +++ b/net/mac80211/tx.c
556 @@ -776,12 +776,23 @@ ieee80211_tx_h_rate_ctrl(struct ieee8021
557 return TX_CONTINUE;
558 }
559
560 +static u16
561 +ieee80211_tx_next_seq(struct sta_info *sta, int tid)
562 +{
563 + u16 *seq = &sta->tid_seq[tid];
564 + u16 ret = cpu_to_le16(*seq);
565 +
566 + /* Increase the sequence number. */
567 + *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
568 +
569 + return ret;
570 +}
571 +
572 static ieee80211_tx_result debug_noinline
573 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
574 {
575 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
576 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
577 - u16 *seq;
578 u8 *qc;
579 int tid;
580
581 @@ -832,13 +843,10 @@ ieee80211_tx_h_sequence(struct ieee80211
582
583 qc = ieee80211_get_qos_ctl(hdr);
584 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
585 - seq = &tx->sta->tid_seq[tid];
586 tx->sta->tx_msdu[tid]++;
587
588 - hdr->seq_ctrl = cpu_to_le16(*seq);
589 -
590 - /* Increase the sequence number. */
591 - *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
592 + if (!tx->sta->sta.txq[0])
593 + hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
594
595 return TX_CONTINUE;
596 }
597 @@ -1067,7 +1075,7 @@ static bool ieee80211_tx_prep_agg(struct
598 * nothing -- this aggregation session is being started
599 * but that might still fail with the driver
600 */
601 - } else {
602 + } else if (!tx->sta->sta.txq[tid]) {
603 spin_lock(&tx->sta->lock);
604 /*
605 * Need to re-check now, because we may get here
606 @@ -1201,13 +1209,101 @@ ieee80211_tx_prepare(struct ieee80211_su
607 return TX_CONTINUE;
608 }
609
610 +static void ieee80211_drv_tx(struct ieee80211_local *local,
611 + struct ieee80211_vif *vif,
612 + struct ieee80211_sta *pubsta,
613 + struct sk_buff *skb)
614 +{
615 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
616 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
617 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
618 + struct ieee80211_tx_control control = {
619 + .sta = pubsta
620 + };
621 + struct ieee80211_txq *txq = NULL;
622 + struct txq_info *txqi;
623 + u8 ac;
624 +
625 + if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
626 + goto tx_normal;
627 +
628 + if (!ieee80211_is_data(hdr->frame_control))
629 + goto tx_normal;
630 +
631 + if (pubsta) {
632 + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
633 + txq = pubsta->txq[tid];
634 + } else if (vif) {
635 + txq = vif->txq;
636 + }
637 +
638 + if (!txq)
639 + goto tx_normal;
640 +
641 + ac = txq->ac;
642 + txqi = container_of(txq, struct txq_info, txq);
643 + atomic_inc(&sdata->txqs_len[ac]);
644 + if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
645 + netif_stop_subqueue(sdata->dev, ac);
646 +
647 + skb_queue_tail(&txqi->queue, skb);
648 + drv_wake_tx_queue(local, txqi);
649 +
650 + return;
651 +
652 +tx_normal:
653 + drv_tx(local, &control, skb);
654 +}
655 +
656 +struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
657 + struct ieee80211_txq *txq)
658 +{
659 + struct ieee80211_local *local = hw_to_local(hw);
660 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
661 + struct txq_info *txqi = container_of(txq, struct txq_info, txq);
662 + struct ieee80211_hdr *hdr;
663 + struct sk_buff *skb = NULL;
664 + u8 ac = txq->ac;
665 +
666 + spin_lock_bh(&txqi->queue.lock);
667 +
668 + if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
669 + goto out;
670 +
671 + skb = __skb_dequeue(&txqi->queue);
672 + if (!skb)
673 + goto out;
674 +
675 + atomic_dec(&sdata->txqs_len[ac]);
676 + if (__netif_subqueue_stopped(sdata->dev, ac))
677 + ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
678 +
679 + hdr = (struct ieee80211_hdr *)skb->data;
680 + if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
681 + struct sta_info *sta = container_of(txq->sta, struct sta_info,
682 + sta);
683 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
684 +
685 + hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
686 + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
687 + info->flags |= IEEE80211_TX_CTL_AMPDU;
688 + else
689 + info->flags &= ~IEEE80211_TX_CTL_AMPDU;
690 + }
691 +
692 +out:
693 + spin_unlock_bh(&txqi->queue.lock);
694 +
695 + return skb;
696 +}
697 +EXPORT_SYMBOL(ieee80211_tx_dequeue);
698 +
699 static bool ieee80211_tx_frags(struct ieee80211_local *local,
700 struct ieee80211_vif *vif,
701 struct ieee80211_sta *sta,
702 struct sk_buff_head *skbs,
703 bool txpending)
704 {
705 - struct ieee80211_tx_control control;
706 struct sk_buff *skb, *tmp;
707 unsigned long flags;
708
709 @@ -1265,10 +1361,9 @@ static bool ieee80211_tx_frags(struct ie
710 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
711
712 info->control.vif = vif;
713 - control.sta = sta;
714
715 __skb_unlink(skb, skbs);
716 - drv_tx(local, &control, skb);
717 + ieee80211_drv_tx(local, vif, sta, skb);
718 }
719
720 return true;
721 --- a/net/mac80211/util.c
722 +++ b/net/mac80211/util.c
723 @@ -308,6 +308,11 @@ void ieee80211_propagate_queue_wake(stru
724 for (ac = 0; ac < n_acs; ac++) {
725 int ac_queue = sdata->vif.hw_queue[ac];
726
727 + if (local->ops->wake_tx_queue &&
728 + (atomic_read(&sdata->txqs_len[ac]) >
729 + local->hw.txq_ac_max_pending))
730 + continue;
731 +
732 if (ac_queue == queue ||
733 (sdata->vif.cab_queue == queue &&
734 local->queue_stop_reasons[ac_queue] == 0 &&
735 @@ -3307,3 +3312,36 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u
736
737 return buf;
738 }
739 +
740 +void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
741 + struct sta_info *sta,
742 + struct txq_info *txqi, int tid)
743 +{
744 + skb_queue_head_init(&txqi->queue);
745 + txqi->txq.vif = &sdata->vif;
746 +
747 + if (sta) {
748 + txqi->txq.sta = &sta->sta;
749 + sta->sta.txq[tid] = &txqi->txq;
750 + txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
751 + } else {
752 + sdata->vif.txq = &txqi->txq;
753 + txqi->txq.ac = IEEE80211_AC_BE;
754 + }
755 +}
756 +
757 +void ieee80211_flush_tx_queue(struct ieee80211_local *local,
758 + struct ieee80211_txq *txq)
759 +{
760 + struct txq_info *txqi = container_of(txq, struct txq_info, txq);
761 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
762 + struct sk_buff *skb;
763 + int n = 0;
764 +
765 + while ((skb = skb_dequeue(&txqi->queue)) != NULL) {
766 + n++;
767 + ieee80211_free_txskb(&local->hw, skb);
768 + }
769 +
770 + atomic_sub(n, &sdata->txqs_len[txq->ac]);
771 +}
772 --- a/net/mac80211/rx.c
773 +++ b/net/mac80211/rx.c
774 @@ -1176,6 +1176,7 @@ static void sta_ps_start(struct sta_info
775 struct ieee80211_sub_if_data *sdata = sta->sdata;
776 struct ieee80211_local *local = sdata->local;
777 struct ps_data *ps;
778 + int tid;
779
780 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
781 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
782 @@ -1189,6 +1190,18 @@ static void sta_ps_start(struct sta_info
783 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
784 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
785 sta->sta.addr, sta->sta.aid);
786 +
787 + if (!sta->sta.txq[0])
788 + return;
789 +
790 + for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
791 + struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
792 +
793 + if (!skb_queue_len(&txqi->queue))
794 + set_bit(tid, &sta->txq_buffered_tids);
795 + else
796 + clear_bit(tid, &sta->txq_buffered_tids);
797 + }
798 }
799
800 static void sta_ps_end(struct sta_info *sta)
801 --- a/net/mac80211/agg-tx.c
802 +++ b/net/mac80211/agg-tx.c
803 @@ -188,6 +188,41 @@ ieee80211_wake_queue_agg(struct ieee8021
804 __release(agg_queue);
805 }
806
807 +static void
808 +ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
809 +{
810 + struct ieee80211_txq *txq = sta->sta.txq[tid];
811 + struct txq_info *txqi;
812 +
813 + if (!txq)
814 + return;
815 +
816 + txqi = to_txq_info(txq);
817 + spin_lock_bh(&txqi->queue.lock);
818 + set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
819 + spin_unlock_bh(&txqi->queue.lock);
820 +}
821 +
822 +static void
823 +ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
824 +{
825 + struct ieee80211_txq *txq = sta->sta.txq[tid];
826 + struct txq_info *txqi;
827 +
828 + if (!txq)
829 + return;
830 +
831 + txqi = to_txq_info(txq);
832 +
833 + if (enable)
834 + set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
835 + else
836 + clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
837 +
838 + clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
839 + drv_wake_tx_queue(sta->sdata->local, txqi);
840 +}
841 +
842 /*
843 * splice packets from the STA's pending to the local pending,
844 * requires a call to ieee80211_agg_splice_finish later
845 @@ -247,6 +282,7 @@ static void ieee80211_remove_tid_tx(stru
846 ieee80211_assign_tid_tx(sta, tid, NULL);
847
848 ieee80211_agg_splice_finish(sta->sdata, tid);
849 + ieee80211_agg_start_txq(sta, tid, false);
850
851 kfree_rcu(tid_tx, rcu_head);
852 }
853 @@ -418,6 +454,8 @@ void ieee80211_tx_ba_session_handle_star
854 */
855 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
856
857 + ieee80211_agg_stop_txq(sta, tid);
858 +
859 /*
860 * Make sure no packets are being processed. This ensures that
861 * we have a valid starting sequence number and that in-flight
862 @@ -440,6 +478,8 @@ void ieee80211_tx_ba_session_handle_star
863 ieee80211_agg_splice_finish(sdata, tid);
864 spin_unlock_bh(&sta->lock);
865
866 + ieee80211_agg_start_txq(sta, tid, false);
867 +
868 kfree_rcu(tid_tx, rcu_head);
869 return;
870 }
871 @@ -666,6 +706,8 @@ static void ieee80211_agg_tx_operational
872 ieee80211_agg_splice_finish(sta->sdata, tid);
873
874 spin_unlock_bh(&sta->lock);
875 +
876 + ieee80211_agg_start_txq(sta, tid, true);
877 }
878
879 void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)