1 From: Johannes Berg <johannes.berg@intel.com>
2 Date: Tue, 11 May 2021 20:02:47 +0200
3 Subject: [PATCH] mac80211: add fragment cache to sta_info
5 Prior patches protected against fragmentation cache attacks
6 by coloring keys, but this shows that it can lead to issues
7 when multiple stations use the same sequence number. Add a
8 fragment cache to struct sta_info (in addition to the one in
9 the interface) to separate fragments for different stations.
12 This then automatically clears most of the fragment cache when a
13 station disconnects (or reassociates) from an AP, or when client
14 interfaces disconnect from the network, etc.
16 On the way, also fix the comment there since this brings us in line
17 with the recommendation in 802.11-2016 ("An AP should support ...").
18 Additionally, remove a useless condition (since there's no problem
19 purging an already empty list).
21 Cc: stable@vger.kernel.org
22 Signed-off-by: Johannes Berg <johannes.berg@intel.com>
25 --- a/net/mac80211/ieee80211_i.h
26 +++ b/net/mac80211/ieee80211_i.h
27 @@ -50,12 +50,6 @@ struct ieee80211_local;
28 #define IEEE80211_ENCRYPT_HEADROOM 8
29 #define IEEE80211_ENCRYPT_TAILROOM 18
31 -/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
32 - * reception of at least three fragmented frames. This limit can be increased
33 - * by changing this define, at the cost of slower frame reassembly and
34 - * increased memory use (about 2 kB of RAM per entry). */
35 -#define IEEE80211_FRAGMENT_MAX 4
37 /* power level hasn't been configured (or set to automatic) */
38 #define IEEE80211_UNSET_POWER_LEVEL INT_MIN
40 @@ -88,19 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask
42 #define IEEE80211_MAX_NAN_INSTANCE_ID 255
44 -struct ieee80211_fragment_entry {
45 - struct sk_buff_head skb_list;
46 - unsigned long first_frag_time;
51 - bool check_sequential_pn; /* needed for CCMP/GCMP */
52 - u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
53 - unsigned int key_color;
57 struct ieee80211_bss {
58 u32 device_ts_beacon, device_ts_presp;
60 @@ -912,9 +893,7 @@ struct ieee80211_sub_if_data {
64 - /* Fragment table for host-based reassembly */
65 - struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
66 - unsigned int fragment_next;
67 + struct ieee80211_fragment_cache frags;
69 /* TID bitmap for NoAck policy */
71 @@ -2329,4 +2308,7 @@ u32 ieee80211_calc_expected_tx_airtime(s
72 #define debug_noinline
75 +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
76 +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
78 #endif /* IEEE80211_I_H */
79 --- a/net/mac80211/iface.c
80 +++ b/net/mac80211/iface.c
82 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
83 * Copyright 2013-2014 Intel Mobile Communications GmbH
84 * Copyright (c) 2016 Intel Deutschland GmbH
85 - * Copyright (C) 2018-2020 Intel Corporation
86 + * Copyright (C) 2018-2021 Intel Corporation
88 #include <linux/slab.h>
89 #include <linux/kernel.h>
90 @@ -679,16 +679,12 @@ static void ieee80211_set_multicast_list
92 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
97 ieee80211_free_keys(sdata, false);
99 ieee80211_debugfs_remove_netdev(sdata);
101 - for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
102 - __skb_queue_purge(&sdata->fragments[i].skb_list);
103 - sdata->fragment_next = 0;
104 + ieee80211_destroy_frag_cache(&sdata->frags);
106 if (ieee80211_vif_is_mesh(&sdata->vif))
107 ieee80211_mesh_teardown_sdata(sdata);
108 @@ -2038,8 +2034,7 @@ int ieee80211_if_add(struct ieee80211_lo
109 sdata->wdev.wiphy = local->hw.wiphy;
110 sdata->local = local;
112 - for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
113 - skb_queue_head_init(&sdata->fragments[i].skb_list);
114 + ieee80211_init_frag_cache(&sdata->frags);
116 INIT_LIST_HEAD(&sdata->key_list);
118 --- a/net/mac80211/rx.c
119 +++ b/net/mac80211/rx.c
120 @@ -2133,19 +2133,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_
124 +void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
128 + for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
129 + skb_queue_head_init(&cache->entries[i].skb_list);
132 +void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
136 + for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
137 + __skb_queue_purge(&cache->entries[i].skb_list);
140 static inline struct ieee80211_fragment_entry *
141 -ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
142 +ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
143 unsigned int frag, unsigned int seq, int rx_queue,
144 struct sk_buff **skb)
146 struct ieee80211_fragment_entry *entry;
148 - entry = &sdata->fragments[sdata->fragment_next++];
149 - if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
150 - sdata->fragment_next = 0;
151 + entry = &cache->entries[cache->next++];
152 + if (cache->next >= IEEE80211_FRAGMENT_MAX)
155 - if (!skb_queue_empty(&entry->skb_list))
156 - __skb_queue_purge(&entry->skb_list);
157 + __skb_queue_purge(&entry->skb_list);
159 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
161 @@ -2160,14 +2175,14 @@ ieee80211_reassemble_add(struct ieee8021
164 static inline struct ieee80211_fragment_entry *
165 -ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
166 +ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
167 unsigned int frag, unsigned int seq,
168 int rx_queue, struct ieee80211_hdr *hdr)
170 struct ieee80211_fragment_entry *entry;
173 - idx = sdata->fragment_next;
175 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
176 struct ieee80211_hdr *f_hdr;
177 struct sk_buff *f_skb;
178 @@ -2176,7 +2191,7 @@ ieee80211_reassemble_find(struct ieee802
180 idx = IEEE80211_FRAGMENT_MAX - 1;
182 - entry = &sdata->fragments[idx];
183 + entry = &cache->entries[idx];
184 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
185 entry->rx_queue != rx_queue ||
186 entry->last_frag + 1 != frag)
187 @@ -2217,6 +2232,7 @@ static bool requires_sequential_pn(struc
188 static ieee80211_rx_result debug_noinline
189 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
191 + struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
192 struct ieee80211_hdr *hdr;
195 @@ -2238,6 +2254,9 @@ ieee80211_rx_h_defragment(struct ieee802
200 + cache = &rx->sta->frags;
202 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
205 @@ -2256,7 +2275,7 @@ ieee80211_rx_h_defragment(struct ieee802
208 /* This is the first fragment of a new frame. */
209 - entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
210 + entry = ieee80211_reassemble_add(cache, frag, seq,
211 rx->seqno_idx, &(rx->skb));
212 if (requires_sequential_pn(rx, fc)) {
213 int queue = rx->security_idx;
214 @@ -2284,7 +2303,7 @@ ieee80211_rx_h_defragment(struct ieee802
215 /* This is a fragment for a frame that should already be pending in
216 * fragment cache. Add this fragment to the end of the pending entry.
218 - entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
219 + entry = ieee80211_reassemble_find(cache, frag, seq,
222 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
223 --- a/net/mac80211/sta_info.c
224 +++ b/net/mac80211/sta_info.c
226 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
227 * Copyright 2013-2014 Intel Mobile Communications GmbH
228 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
229 - * Copyright (C) 2018-2020 Intel Corporation
230 + * Copyright (C) 2018-2021 Intel Corporation
233 #include <linux/module.h>
234 @@ -393,6 +393,8 @@ struct sta_info *sta_info_alloc(struct i
236 u64_stats_init(&sta->rx_stats.syncp);
238 + ieee80211_init_frag_cache(&sta->frags);
240 sta->sta_state = IEEE80211_STA_NONE;
242 /* Mark TID as unreserved */
243 @@ -1103,6 +1105,8 @@ static void __sta_info_destroy_part2(str
245 ieee80211_sta_debugfs_remove(sta);
247 + ieee80211_destroy_frag_cache(&sta->frags);
249 cleanup_single_sta(sta);
252 --- a/net/mac80211/sta_info.h
253 +++ b/net/mac80211/sta_info.h
255 * Copyright 2002-2005, Devicescape Software, Inc.
256 * Copyright 2013-2014 Intel Mobile Communications GmbH
257 * Copyright(c) 2015-2017 Intel Deutschland GmbH
258 - * Copyright(c) 2020 Intel Corporation
259 + * Copyright(c) 2020-2021 Intel Corporation
263 @@ -439,6 +439,33 @@ struct ieee80211_sta_rx_stats {
267 + * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
268 + * reception of at least one MSDU per access category per associated STA"
269 + * on APs, or "at least one MSDU per access category" on other interface types.
271 + * This limit can be increased by changing this define, at the cost of slower
272 + * frame reassembly and increased memory use while fragments are pending.
274 +#define IEEE80211_FRAGMENT_MAX 4
276 +struct ieee80211_fragment_entry {
277 + struct sk_buff_head skb_list;
278 + unsigned long first_frag_time;
283 + bool check_sequential_pn; /* needed for CCMP/GCMP */
284 + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
285 + unsigned int key_color;
288 +struct ieee80211_fragment_cache {
289 + struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
294 * The bandwidth threshold below which the per-station CoDel parameters will be
295 * scaled to be more lenient (to prevent starvation of slow stations). This
296 * value will be scaled by the number of active stations when it is being
297 @@ -531,6 +558,7 @@ struct ieee80211_sta_rx_stats {
298 * @status_stats.last_ack_signal: last ACK signal
299 * @status_stats.ack_signal_filled: last ACK signal validity
300 * @status_stats.avg_ack_signal: average ACK signal
301 + * @frags: fragment cache
304 /* General information, mostly static */
305 @@ -639,6 +667,8 @@ struct sta_info {
307 struct cfg80211_chan_def tdls_chandef;
309 + struct ieee80211_fragment_cache frags;
312 struct ieee80211_sta sta;