71088be87ae8ac79a703de53d6f87a597546029a
[openwrt/openwrt.git] / package / kernel / mac80211 / patches / subsys / 320-mac80211-Add-TXQ-scheduling-API.patch
1 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
2 Date: Tue, 18 Dec 2018 17:02:06 -0800
3 Subject: [PATCH] mac80211: Add TXQ scheduling API
4 MIME-Version: 1.0
5 Content-Type: text/plain; charset=UTF-8
6 Content-Transfer-Encoding: 8bit
7
8 This adds an API to mac80211 to handle scheduling of TXQs. The interface
9 between driver and mac80211 for TXQ handling is changed by adding two new
10 functions: ieee80211_next_txq(), which will return the next TXQ to schedule
11 in the current round-robin rotation, and ieee80211_return_txq(), which the
12 driver uses to indicate that it has finished scheduling a TXQ (which will
13 then be put back in the scheduling rotation if it isn't empty).
14
15 The driver must call ieee80211_txq_schedule_start() at the start of each
16 scheduling session, and ieee80211_txq_schedule_end() at the end. The API
17 then guarantees that the same TXQ is not returned twice in the same
18 session (so a driver can loop on ieee80211_next_txq() without worrying
19 about breaking the loop).
20
21 Usage of the new API is optional, so drivers can be ported one at a time.
22 In this patch, the actual scheduling performed by mac80211 is simple
23 round-robin, but a subsequent commit adds airtime fairness awareness to the
24 scheduler.
25
26 Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
27 [minor kernel-doc fix, propagate sparse locking checks out]
28 Signed-off-by: Johannes Berg <johannes.berg@intel.com>
29 ---
30
31 --- a/include/net/mac80211.h
32 +++ b/include/net/mac80211.h
33 @@ -107,9 +107,15 @@
34 * The driver is expected to initialize its private per-queue data for stations
35 * and interfaces in the .add_interface and .sta_add ops.
36 *
37 - * The driver can't access the queue directly. To dequeue a frame, it calls
38 - * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
39 - * calls the .wake_tx_queue driver op.
40 + * The driver can't access the queue directly. To dequeue a frame from a
41 + * txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
42 + * queue, it calls the .wake_tx_queue driver op.
43 + *
44 + * Drivers can optionally delegate responsibility for scheduling queues to
45 + * mac80211, to take advantage of airtime fairness accounting. In this case, to
46 + * obtain the next queue to pull frames from, the driver calls
47 + * ieee80211_next_txq(). The driver is then expected to return the txq using
48 + * ieee80211_return_txq().
49 *
50 * For AP powersave TIM handling, the driver only needs to indicate if it has
51 * buffered packets in the driver specific data structures by calling
52 @@ -5979,7 +5985,8 @@ void ieee80211_unreserve_tid(struct ieee
53 * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
54 *
55 * @hw: pointer as obtained from ieee80211_alloc_hw()
56 - * @txq: pointer obtained from station or virtual interface
57 + * @txq: pointer obtained from station or virtual interface, or from
58 + * ieee80211_next_txq()
59 *
60 * Returns the skb if successful, %NULL if no frame was available.
61 */
62 @@ -5987,6 +5994,54 @@ struct sk_buff *ieee80211_tx_dequeue(str
63 struct ieee80211_txq *txq);
64
65 /**
66 + * ieee80211_next_txq - get next tx queue to pull packets from
67 + *
68 + * @hw: pointer as obtained from ieee80211_alloc_hw()
69 + * @ac: AC number to return packets from.
70 + *
71 + * Should only be called between calls to ieee80211_txq_schedule_start()
72 + * and ieee80211_txq_schedule_end().
73 + * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
74 + * is returned, it should be returned with ieee80211_return_txq() after the
75 + * driver has finished scheduling it.
76 + */
77 +struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
78 +
79 +/**
80 + * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
81 + *
82 + * @hw: pointer as obtained from ieee80211_alloc_hw()
83 + * @txq: pointer obtained from station or virtual interface
84 + *
85 + * Should only be called between calls to ieee80211_txq_schedule_start()
86 + * and ieee80211_txq_schedule_end().
87 + */
88 +void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
89 +
90 +/**
91 + * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
92 + *
93 + * @hw: pointer as obtained from ieee80211_alloc_hw()
94 + * @ac: AC number to acquire locks for
95 + *
96 + * Acquire locks needed to schedule TXQs from the given AC. Should be called
97 + * before ieee80211_next_txq() or ieee80211_return_txq().
98 + */
99 +void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
100 + __acquires(txq_lock);
101 +
102 +/**
103 + * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
104 + *
105 + * @hw: pointer as obtained from ieee80211_alloc_hw()
106 + * @ac: AC number to release locks for
107 + *
108 + * Release locks previously acquired by ieee80211_txq_schedule_start().
109 + */
110 +void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
111 + __releases(txq_lock);
112 +
113 +/**
114 * ieee80211_txq_get_depth - get pending frame/byte count of given txq
115 *
116 * The values are not guaranteed to be coherent with regard to each other, i.e.
117 --- a/net/mac80211/agg-tx.c
118 +++ b/net/mac80211/agg-tx.c
119 @@ -229,7 +229,7 @@ ieee80211_agg_start_txq(struct sta_info
120 clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
121 local_bh_disable();
122 rcu_read_lock();
123 - drv_wake_tx_queue(sta->sdata->local, txqi);
124 + schedule_and_wake_txq(sta->sdata->local, txqi);
125 rcu_read_unlock();
126 local_bh_enable();
127 }
128 --- a/net/mac80211/driver-ops.h
129 +++ b/net/mac80211/driver-ops.h
130 @@ -1176,6 +1176,15 @@ static inline void drv_wake_tx_queue(str
131 local->ops->wake_tx_queue(&local->hw, &txq->txq);
132 }
133
134 +static inline void schedule_and_wake_txq(struct ieee80211_local *local,
135 + struct txq_info *txqi)
136 +{
137 + spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
138 + ieee80211_return_txq(&local->hw, &txqi->txq);
139 + spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
140 + drv_wake_tx_queue(local, txqi);
141 +}
142 +
143 static inline int drv_start_nan(struct ieee80211_local *local,
144 struct ieee80211_sub_if_data *sdata,
145 struct cfg80211_nan_conf *conf)
146 --- a/net/mac80211/ieee80211_i.h
147 +++ b/net/mac80211/ieee80211_i.h
148 @@ -829,6 +829,8 @@ enum txq_info_flags {
149 * a fq_flow which is already owned by a different tin
150 * @def_cvars: codel vars for @def_flow
151 * @frags: used to keep fragments created after dequeue
152 + * @schedule_order: used with ieee80211_local->active_txqs
153 + * @schedule_round: counter to prevent infinite loops on TXQ scheduling
154 */
155 struct txq_info {
156 struct fq_tin tin;
157 @@ -836,6 +838,8 @@ struct txq_info {
158 struct codel_vars def_cvars;
159 struct codel_stats cstats;
160 struct sk_buff_head frags;
161 + struct list_head schedule_order;
162 + u16 schedule_round;
163 unsigned long flags;
164
165 /* keep last! */
166 @@ -1127,6 +1131,11 @@ struct ieee80211_local {
167 struct codel_vars *cvars;
168 struct codel_params cparams;
169
170 + /* protects active_txqs and txqi->schedule_order */
171 + spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
172 + struct list_head active_txqs[IEEE80211_NUM_ACS];
173 + u16 schedule_round[IEEE80211_NUM_ACS];
174 +
175 const struct ieee80211_ops *ops;
176
177 /*
178 --- a/net/mac80211/main.c
179 +++ b/net/mac80211/main.c
180 @@ -652,6 +652,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_
181 spin_lock_init(&local->rx_path_lock);
182 spin_lock_init(&local->queue_stop_reason_lock);
183
184 + for (i = 0; i < IEEE80211_NUM_ACS; i++) {
185 + INIT_LIST_HEAD(&local->active_txqs[i]);
186 + spin_lock_init(&local->active_txq_lock[i]);
187 + }
188 +
189 INIT_LIST_HEAD(&local->chanctx_list);
190 mutex_init(&local->chanctx_mtx);
191
192 --- a/net/mac80211/sta_info.c
193 +++ b/net/mac80211/sta_info.c
194 @@ -1261,7 +1261,7 @@ void ieee80211_sta_ps_deliver_wakeup(str
195 if (!txq_has_queue(sta->sta.txq[i]))
196 continue;
197
198 - drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
199 + schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
200 }
201 }
202
203 --- a/net/mac80211/tx.c
204 +++ b/net/mac80211/tx.c
205 @@ -1441,6 +1441,7 @@ void ieee80211_txq_init(struct ieee80211
206 codel_vars_init(&txqi->def_cvars);
207 codel_stats_init(&txqi->cstats);
208 __skb_queue_head_init(&txqi->frags);
209 + INIT_LIST_HEAD(&txqi->schedule_order);
210
211 txqi->txq.vif = &sdata->vif;
212
213 @@ -1464,6 +1465,9 @@ void ieee80211_txq_purge(struct ieee8021
214
215 fq_tin_reset(fq, tin, fq_skb_free_func);
216 ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
217 + spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
218 + list_del_init(&txqi->schedule_order);
219 + spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
220 }
221
222 void ieee80211_txq_set_params(struct ieee80211_local *local)
223 @@ -1580,7 +1584,7 @@ static bool ieee80211_queue_skb(struct i
224 ieee80211_txq_enqueue(local, txqi, skb);
225 spin_unlock_bh(&fq->lock);
226
227 - drv_wake_tx_queue(local, txqi);
228 + schedule_and_wake_txq(local, txqi);
229
230 return true;
231 }
232 @@ -3631,6 +3635,60 @@ out:
233 }
234 EXPORT_SYMBOL(ieee80211_tx_dequeue);
235
236 +struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
237 +{
238 + struct ieee80211_local *local = hw_to_local(hw);
239 + struct txq_info *txqi = NULL;
240 +
241 + lockdep_assert_held(&local->active_txq_lock[ac]);
242 +
243 + txqi = list_first_entry_or_null(&local->active_txqs[ac],
244 + struct txq_info,
245 + schedule_order);
246 +
247 + if (!txqi || txqi->schedule_round == local->schedule_round[ac])
248 + return NULL;
249 +
250 + list_del_init(&txqi->schedule_order);
251 + txqi->schedule_round = local->schedule_round[ac];
252 + return &txqi->txq;
253 +}
254 +EXPORT_SYMBOL(ieee80211_next_txq);
255 +
256 +void ieee80211_return_txq(struct ieee80211_hw *hw,
257 + struct ieee80211_txq *txq)
258 +{
259 + struct ieee80211_local *local = hw_to_local(hw);
260 + struct txq_info *txqi = to_txq_info(txq);
261 +
262 + lockdep_assert_held(&local->active_txq_lock[txq->ac]);
263 +
264 + if (list_empty(&txqi->schedule_order) &&
265 + (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets))
266 + list_add_tail(&txqi->schedule_order,
267 + &local->active_txqs[txq->ac]);
268 +}
269 +EXPORT_SYMBOL(ieee80211_return_txq);
270 +
271 +void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
272 + __acquires(txq_lock)
273 +{
274 + struct ieee80211_local *local = hw_to_local(hw);
275 +
276 + spin_lock_bh(&local->active_txq_lock[ac]);
277 + local->schedule_round[ac]++;
278 +}
279 +EXPORT_SYMBOL(ieee80211_txq_schedule_start);
280 +
281 +void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
282 + __releases(txq_lock)
283 +{
284 + struct ieee80211_local *local = hw_to_local(hw);
285 +
286 + spin_unlock_bh(&local->active_txq_lock[ac]);
287 +}
288 +EXPORT_SYMBOL(ieee80211_txq_schedule_end);
289 +
290 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
291 struct net_device *dev,
292 u32 info_flags,