mac80211: backport upstream fixes for FragAttacks
[openwrt/staging/ynezz.git] / package / kernel / mac80211 / patches / subsys / 385-mac80211-add-fragment-cache-to-sta_info.patch
1 From: Johannes Berg <johannes.berg@intel.com>
2 Date: Tue, 11 May 2021 20:02:47 +0200
3 Subject: [PATCH] mac80211: add fragment cache to sta_info
4
5 Prior patches protected against fragmentation cache attacks
6 by coloring keys, but this shows that it can lead to issues
7 when multiple stations use the same sequence number. Add a
8 fragment cache to struct sta_info (in addition to the one in
9 the interface) to separate fragments for different stations
10 properly.
11
12 This then automatically clears most of the fragment cache when a
13 station disconnects (or reassociates) from an AP, or when client
14 interfaces disconnect from the network, etc.
15
16 On the way, also fix the comment there since this brings us in line
17 with the recommendation in 802.11-2016 ("An AP should support ...").
18 Additionally, remove a useless condition (since there's no problem
19 purging an already empty list).
20
21 Cc: stable@vger.kernel.org
22 Signed-off-by: Johannes Berg <johannes.berg@intel.com>
23 ---
24
25 --- a/net/mac80211/ieee80211_i.h
26 +++ b/net/mac80211/ieee80211_i.h
27 @@ -50,12 +50,6 @@ struct ieee80211_local;
28 #define IEEE80211_ENCRYPT_HEADROOM 8
29 #define IEEE80211_ENCRYPT_TAILROOM 18
30
31 -/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
32 - * reception of at least three fragmented frames. This limit can be increased
33 - * by changing this define, at the cost of slower frame reassembly and
34 - * increased memory use (about 2 kB of RAM per entry). */
35 -#define IEEE80211_FRAGMENT_MAX 4
36 -
37 /* power level hasn't been configured (or set to automatic) */
38 #define IEEE80211_UNSET_POWER_LEVEL INT_MIN
39
40 @@ -88,19 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask
41
42 #define IEEE80211_MAX_NAN_INSTANCE_ID 255
43
44 -struct ieee80211_fragment_entry {
45 - struct sk_buff_head skb_list;
46 - unsigned long first_frag_time;
47 - u16 seq;
48 - u16 extra_len;
49 - u16 last_frag;
50 - u8 rx_queue;
51 - bool check_sequential_pn; /* needed for CCMP/GCMP */
52 - u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
53 - unsigned int key_color;
54 -};
55 -
56 -
57 struct ieee80211_bss {
58 u32 device_ts_beacon, device_ts_presp;
59
60 @@ -912,9 +893,7 @@ struct ieee80211_sub_if_data {
61
62 char name[IFNAMSIZ];
63
64 - /* Fragment table for host-based reassembly */
65 - struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
66 - unsigned int fragment_next;
67 + struct ieee80211_fragment_cache frags;
68
69 /* TID bitmap for NoAck policy */
70 u16 noack_map;
71 @@ -2329,4 +2308,7 @@ u32 ieee80211_calc_expected_tx_airtime(s
72 #define debug_noinline
73 #endif
74
75 +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
76 +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
77 +
78 #endif /* IEEE80211_I_H */
79 --- a/net/mac80211/iface.c
80 +++ b/net/mac80211/iface.c
81 @@ -8,7 +8,7 @@
82 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
83 * Copyright 2013-2014 Intel Mobile Communications GmbH
84 * Copyright (c) 2016 Intel Deutschland GmbH
85 - * Copyright (C) 2018-2020 Intel Corporation
86 + * Copyright (C) 2018-2021 Intel Corporation
87 */
88 #include <linux/slab.h>
89 #include <linux/kernel.h>
90 @@ -679,16 +679,12 @@ static void ieee80211_set_multicast_list
91 */
92 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
93 {
94 - int i;
95 -
96 /* free extra data */
97 ieee80211_free_keys(sdata, false);
98
99 ieee80211_debugfs_remove_netdev(sdata);
100
101 - for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
102 - __skb_queue_purge(&sdata->fragments[i].skb_list);
103 - sdata->fragment_next = 0;
104 + ieee80211_destroy_frag_cache(&sdata->frags);
105
106 if (ieee80211_vif_is_mesh(&sdata->vif))
107 ieee80211_mesh_teardown_sdata(sdata);
108 @@ -2038,8 +2034,7 @@ int ieee80211_if_add(struct ieee80211_lo
109 sdata->wdev.wiphy = local->hw.wiphy;
110 sdata->local = local;
111
112 - for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
113 - skb_queue_head_init(&sdata->fragments[i].skb_list);
114 + ieee80211_init_frag_cache(&sdata->frags);
115
116 INIT_LIST_HEAD(&sdata->key_list);
117
118 --- a/net/mac80211/rx.c
119 +++ b/net/mac80211/rx.c
120 @@ -2133,19 +2133,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_
121 return result;
122 }
123
124 +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
125 +{
126 + int i;
127 +
128 + for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
129 + skb_queue_head_init(&cache->entries[i].skb_list);
130 +}
131 +
132 +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
133 +{
134 + int i;
135 +
136 + for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
137 + __skb_queue_purge(&cache->entries[i].skb_list);
138 +}
139 +
140 static inline struct ieee80211_fragment_entry *
141 -ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
142 +ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
143 unsigned int frag, unsigned int seq, int rx_queue,
144 struct sk_buff **skb)
145 {
146 struct ieee80211_fragment_entry *entry;
147
148 - entry = &sdata->fragments[sdata->fragment_next++];
149 - if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
150 - sdata->fragment_next = 0;
151 + entry = &cache->entries[cache->next++];
152 + if (cache->next >= IEEE80211_FRAGMENT_MAX)
153 + cache->next = 0;
154
155 - if (!skb_queue_empty(&entry->skb_list))
156 - __skb_queue_purge(&entry->skb_list);
157 + __skb_queue_purge(&entry->skb_list);
158
159 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
160 *skb = NULL;
161 @@ -2160,14 +2175,14 @@ ieee80211_reassemble_add(struct ieee8021
162 }
163
164 static inline struct ieee80211_fragment_entry *
165 -ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
166 +ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
167 unsigned int frag, unsigned int seq,
168 int rx_queue, struct ieee80211_hdr *hdr)
169 {
170 struct ieee80211_fragment_entry *entry;
171 int i, idx;
172
173 - idx = sdata->fragment_next;
174 + idx = cache->next;
175 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
176 struct ieee80211_hdr *f_hdr;
177 struct sk_buff *f_skb;
178 @@ -2176,7 +2191,7 @@ ieee80211_reassemble_find(struct ieee802
179 if (idx < 0)
180 idx = IEEE80211_FRAGMENT_MAX - 1;
181
182 - entry = &sdata->fragments[idx];
183 + entry = &cache->entries[idx];
184 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
185 entry->rx_queue != rx_queue ||
186 entry->last_frag + 1 != frag)
187 @@ -2217,6 +2232,7 @@ static bool requires_sequential_pn(struc
188 static ieee80211_rx_result debug_noinline
189 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
190 {
191 + struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
192 struct ieee80211_hdr *hdr;
193 u16 sc;
194 __le16 fc;
195 @@ -2238,6 +2254,9 @@ ieee80211_rx_h_defragment(struct ieee802
196 goto out_no_led;
197 }
198
199 + if (rx->sta)
200 + cache = &rx->sta->frags;
201 +
202 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
203 goto out;
204
205 @@ -2256,7 +2275,7 @@ ieee80211_rx_h_defragment(struct ieee802
206
207 if (frag == 0) {
208 /* This is the first fragment of a new frame. */
209 - entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
210 + entry = ieee80211_reassemble_add(cache, frag, seq,
211 rx->seqno_idx, &(rx->skb));
212 if (requires_sequential_pn(rx, fc)) {
213 int queue = rx->security_idx;
214 @@ -2284,7 +2303,7 @@ ieee80211_rx_h_defragment(struct ieee802
215 /* This is a fragment for a frame that should already be pending in
216 * fragment cache. Add this fragment to the end of the pending entry.
217 */
218 - entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
219 + entry = ieee80211_reassemble_find(cache, frag, seq,
220 rx->seqno_idx, hdr);
221 if (!entry) {
222 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
223 --- a/net/mac80211/sta_info.c
224 +++ b/net/mac80211/sta_info.c
225 @@ -4,7 +4,7 @@
226 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
227 * Copyright 2013-2014 Intel Mobile Communications GmbH
228 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
229 - * Copyright (C) 2018-2020 Intel Corporation
230 + * Copyright (C) 2018-2021 Intel Corporation
231 */
232
233 #include <linux/module.h>
234 @@ -393,6 +393,8 @@ struct sta_info *sta_info_alloc(struct i
235
236 u64_stats_init(&sta->rx_stats.syncp);
237
238 + ieee80211_init_frag_cache(&sta->frags);
239 +
240 sta->sta_state = IEEE80211_STA_NONE;
241
242 /* Mark TID as unreserved */
243 @@ -1103,6 +1105,8 @@ static void __sta_info_destroy_part2(str
244
245 ieee80211_sta_debugfs_remove(sta);
246
247 + ieee80211_destroy_frag_cache(&sta->frags);
248 +
249 cleanup_single_sta(sta);
250 }
251
252 --- a/net/mac80211/sta_info.h
253 +++ b/net/mac80211/sta_info.h
254 @@ -3,7 +3,7 @@
255 * Copyright 2002-2005, Devicescape Software, Inc.
256 * Copyright 2013-2014 Intel Mobile Communications GmbH
257 * Copyright(c) 2015-2017 Intel Deutschland GmbH
258 - * Copyright(c) 2020 Intel Corporation
259 + * Copyright(c) 2020-2021 Intel Corporation
260 */
261
262 #ifndef STA_INFO_H
263 @@ -439,6 +439,33 @@ struct ieee80211_sta_rx_stats {
264 };
265
266 /*
267 + * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
268 + * reception of at least one MSDU per access category per associated STA"
269 + * on APs, or "at least one MSDU per access category" on other interface types.
270 + *
271 + * This limit can be increased by changing this define, at the cost of slower
272 + * frame reassembly and increased memory use while fragments are pending.
273 + */
274 +#define IEEE80211_FRAGMENT_MAX 4
275 +
276 +struct ieee80211_fragment_entry {
277 + struct sk_buff_head skb_list;
278 + unsigned long first_frag_time;
279 + u16 seq;
280 + u16 extra_len;
281 + u16 last_frag;
282 + u8 rx_queue;
283 + bool check_sequential_pn; /* needed for CCMP/GCMP */
284 + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
285 + unsigned int key_color;
286 +};
287 +
288 +struct ieee80211_fragment_cache {
289 + struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
290 + unsigned int next;
291 +};
292 +
293 +/*
294 * The bandwidth threshold below which the per-station CoDel parameters will be
295 * scaled to be more lenient (to prevent starvation of slow stations). This
296 * value will be scaled by the number of active stations when it is being
297 @@ -531,6 +558,7 @@ struct ieee80211_sta_rx_stats {
298 * @status_stats.last_ack_signal: last ACK signal
299 * @status_stats.ack_signal_filled: last ACK signal validity
300 * @status_stats.avg_ack_signal: average ACK signal
301 + * @frags: fragment cache
302 */
303 struct sta_info {
304 /* General information, mostly static */
305 @@ -639,6 +667,8 @@ struct sta_info {
306
307 struct cfg80211_chan_def tdls_chandef;
308
309 + struct ieee80211_fragment_cache frags;
310 +
311 /* keep last! */
312 struct ieee80211_sta sta;
313 };