Upgrade b43 and mac80211.
[openwrt/staging/lynxis/omap.git] package/mac80211/src/net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/rcupdate.h>
17 #include <net/mac80211.h>
18 #include <net/ieee80211_radiotap.h>
19
20 #include "ieee80211_i.h"
21 #include "ieee80211_led.h"
22 #include "wep.h"
23 #include "wpa.h"
24 #include "tkip.h"
25 #include "wme.h"
26
27 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
28 struct tid_ampdu_rx *tid_agg_rx,
29 struct sk_buff *skb, u16 mpdu_seq_num,
30 int bar_req);
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb,
39 int rtap_len)
40 {
41 skb_pull(skb, rtap_len);
42
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN))
45 skb_trim(skb, skb->len - FCS_LEN);
46 else {
47 /* driver bug */
48 WARN_ON(1);
49 dev_kfree_skb(skb);
50 skb = NULL;
51 }
52 }
53
54 return skb;
55 }
56
57 static inline int should_drop_frame(struct ieee80211_rx_status *status,
58 struct sk_buff *skb,
59 int present_fcs_len,
60 int radiotap_len)
61 {
62 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
63
64 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
65 return 1;
66 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
67 return 1;
68 if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
69 cpu_to_le16(IEEE80211_FTYPE_CTL)) &&
70 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
71 cpu_to_le16(IEEE80211_STYPE_PSPOLL)) &&
72 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
73 cpu_to_le16(IEEE80211_STYPE_BACK_REQ)))
74 return 1;
75 return 0;
76 }
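/*
 * Illustrative note (added, not in the original source): with the standard
 * frame_control masks, the last test above keeps only PS-Poll and
 * BlockAckReq among control frames.  For example, a PS-Poll frame carries
 * frame_control 0x00a4 little-endian (type 0x04 = control, subtype 0xa =
 * PS-Poll), so it passes; any other control frame is dropped for the
 * monitor path.
 */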
77
78 /*
79 * This function copies a received frame to all monitor interfaces and
80 * returns a cleaned-up SKB that no longer includes the FCS nor the
81 * radiotap header the driver might have added.
82 */
83 static struct sk_buff *
84 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
85 struct ieee80211_rx_status *status,
86 struct ieee80211_rate *rate)
87 {
88 struct ieee80211_sub_if_data *sdata;
89 int needed_headroom = 0;
90 struct ieee80211_radiotap_header *rthdr;
91 __le64 *rttsft = NULL;
92 struct ieee80211_rtap_fixed_data {
93 u8 flags;
94 u8 rate;
95 __le16 chan_freq;
96 __le16 chan_flags;
97 u8 antsignal;
98 u8 padding_for_rxflags;
99 __le16 rx_flags;
100 } __attribute__ ((packed)) *rtfixed;
101 struct sk_buff *skb, *skb2;
102 struct net_device *prev_dev = NULL;
103 int present_fcs_len = 0;
104 int rtap_len = 0;
105
106 /*
107 * First, we may need to make a copy of the skb because
108 * (1) we need to modify it for radiotap (if not present), and
109 * (2) the other RX handlers will modify the skb we got.
110 *
111 * We don't need to, of course, if we aren't going to return
112 * the SKB because it has a bad FCS/PLCP checksum.
113 */
114 if (status->flag & RX_FLAG_RADIOTAP)
115 rtap_len = ieee80211_get_radiotap_len(origskb->data);
116 else
117 /* room for radiotap header, always present fields and TSFT */
118 needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8;
119
120 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
121 present_fcs_len = FCS_LEN;
122
123 if (!local->monitors) {
124 if (should_drop_frame(status, origskb, present_fcs_len,
125 rtap_len)) {
126 dev_kfree_skb(origskb);
127 return NULL;
128 }
129
130 return remove_monitor_info(local, origskb, rtap_len);
131 }
132
133 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
134 /* only need to expand headroom if necessary */
135 skb = origskb;
136 origskb = NULL;
137
138 /*
139 * This shouldn't trigger often because most devices have an
140 * RX header they pull before we get here, and that should
141 * be big enough for our radiotap information. We should
142 * probably export the length to drivers so that we can have
143 * them allocate enough headroom to start with.
144 */
145 if (skb_headroom(skb) < needed_headroom &&
146 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
147 dev_kfree_skb(skb);
148 return NULL;
149 }
150 } else {
151 /*
152 * Need to make a copy and possibly remove radiotap header
153 * and FCS from the original.
154 */
155 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
156
157 origskb = remove_monitor_info(local, origskb, rtap_len);
158
159 if (!skb)
160 return origskb;
161 }
162
163 /* if necessary, prepend radiotap information */
164 if (!(status->flag & RX_FLAG_RADIOTAP)) {
165 rtfixed = (void *) skb_push(skb, sizeof(*rtfixed));
166 rtap_len = sizeof(*rthdr) + sizeof(*rtfixed);
167 if (status->flag & RX_FLAG_TSFT) {
168 rttsft = (void *) skb_push(skb, sizeof(*rttsft));
169 rtap_len += 8;
170 }
171 rthdr = (void *) skb_push(skb, sizeof(*rthdr));
172 memset(rthdr, 0, sizeof(*rthdr));
173 memset(rtfixed, 0, sizeof(*rtfixed));
174 rthdr->it_present =
175 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
176 (1 << IEEE80211_RADIOTAP_RATE) |
177 (1 << IEEE80211_RADIOTAP_CHANNEL) |
178 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |
179 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
180 rtfixed->flags = 0;
181 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
182 rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS;
183
184 if (rttsft) {
185 *rttsft = cpu_to_le64(status->mactime);
186 rthdr->it_present |=
187 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
188 }
189
190 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
191 rtfixed->rx_flags = 0;
192 if (status->flag &
193 (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
194 rtfixed->rx_flags |=
195 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
196
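/*
 * Unit conversion note (added comment): mac80211 keeps bitrates in units
 * of 100 kb/s while the radiotap rate field uses 500 kb/s units, hence the
 * division by five below; e.g. 54 Mbit/s is stored as 540 and becomes 108.
 */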
197 rtfixed->rate = rate->bitrate / 5;
198
199 rtfixed->chan_freq = cpu_to_le16(status->freq);
200
201 if (status->band == IEEE80211_BAND_5GHZ)
202 rtfixed->chan_flags =
203 cpu_to_le16(IEEE80211_CHAN_OFDM |
204 IEEE80211_CHAN_5GHZ);
205 else
206 rtfixed->chan_flags =
207 cpu_to_le16(IEEE80211_CHAN_DYN |
208 IEEE80211_CHAN_2GHZ);
209
210 rtfixed->antsignal = status->ssi;
211 rthdr->it_len = cpu_to_le16(rtap_len);
212 }
213
214 skb_reset_mac_header(skb);
215 skb->ip_summed = CHECKSUM_UNNECESSARY;
216 skb->pkt_type = PACKET_OTHERHOST;
217 skb->protocol = htons(ETH_P_802_2);
218
219 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
220 if (!netif_running(sdata->dev))
221 continue;
222
223 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
224 continue;
225
226 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
227 continue;
228
229 if (prev_dev) {
230 skb2 = skb_clone(skb, GFP_ATOMIC);
231 if (skb2) {
232 skb2->dev = prev_dev;
233 netif_rx(skb2);
234 }
235 }
236
237 prev_dev = sdata->dev;
238 sdata->dev->stats.rx_packets++;
239 sdata->dev->stats.rx_bytes += skb->len;
240 }
241
242 if (prev_dev) {
243 skb->dev = prev_dev;
244 netif_rx(skb);
245 } else
246 dev_kfree_skb(skb);
247
248 return origskb;
249 }
250
251
252 static void ieee80211_parse_qos(struct ieee80211_txrx_data *rx)
253 {
254 u8 *data = rx->skb->data;
255 int tid;
256
257 /* does the frame have a qos control field? */
258 if (WLAN_FC_IS_QOS_DATA(rx->fc)) {
259 u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN;
260 /* frame has qos control */
261 tid = qc[0] & QOS_CONTROL_TID_MASK;
262 if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
263 rx->flags |= IEEE80211_TXRXD_RX_AMSDU;
264 else
265 rx->flags &= ~IEEE80211_TXRXD_RX_AMSDU;
266 } else {
267 if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) {
268 /* Separate TID for management frames */
269 tid = NUM_RX_DATA_QUEUES - 1;
270 } else {
271 /* no qos control present */
272 tid = 0; /* 802.1d - Best Effort */
273 }
274 }
275
276 I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
277 /* only a debug counter, sta might not be assigned properly yet */
278 if (rx->sta)
279 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
280
281 rx->u.rx.queue = tid;
282 /* Set skb->priority to the 802.1d tag if the highest-order bit of the TID is not set.
283 * For now, set skb->priority to 0 for other cases. */
284 rx->skb->priority = (tid > 7) ? 0 : tid;
285 }
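/*
 * Worked example (illustrative, not from the original source): for a QoS
 * data frame the QoS Control field is the last two bytes of the 802.11
 * header.  Assuming QOS_CONTROL_TID_MASK covers the low four bits and
 * IEEE80211_QOS_CONTROL_A_MSDU_PRESENT is bit 7 of the first octet, a
 * first octet of 0x85 selects TID 5 and marks the frame as an A-MSDU, so
 * ieee80211_parse_qos() above would set rx->u.rx.queue = 5,
 * skb->priority = 5 and the IEEE80211_TXRXD_RX_AMSDU flag.
 */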
286
287 static void ieee80211_verify_ip_alignment(struct ieee80211_txrx_data *rx)
288 {
289 #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
290 int hdrlen;
291
292 if (!WLAN_FC_DATA_PRESENT(rx->fc))
293 return;
294
295 /*
296 * Drivers are required to align the payload data in a way that
297 * guarantees that the contained IP header is aligned to a four-
298 * byte boundary. In the case of regular frames, this simply means
299 * aligning the payload to a four-byte boundary (because either
300 * the IP header is directly contained, or IV/RFC1042 headers that
301 * have a length divisible by four are in front of it.
302 *
303 * With A-MSDU frames, however, the payload data address must
304 * yield two modulo four because there are 14-byte 802.3 headers
305 * within the A-MSDU frames that push the IP header further back
306 * to a multiple of four again. Thankfully, the specs were sane
307 * enough this time around to require padding each A-MSDU subframe
308 * to a length that is a multiple of four.
309 *
310 * Padding like atheros hardware adds which is inbetween the 802.11
311 * header and the payload is not supported, the driver is required
312 * to move the 802.11 header further back in that case.
313 */
314 hdrlen = ieee80211_get_hdrlen(rx->fc);
315 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU)
316 hdrlen += ETH_HLEN;
317 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
318 #endif
319 }
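/*
 * Alignment arithmetic, spelled out (illustrative example, added comment):
 * a QoS data frame has a 26-byte 802.11 header, so for a normal frame the
 * driver must hand over an skb where skb->data + 26 is a multiple of four
 * (IP header aligned).  For an A-MSDU the check above adds ETH_HLEN, i.e.
 * it tests skb->data + 40, because the IP header sits behind a 14-byte
 * 802.3 subframe header; the payload itself then starts at an address that
 * is 2 modulo 4, exactly as described in the comment above.
 */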
320
321
322 static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
323 struct sk_buff *skb,
324 struct ieee80211_rx_status *status,
325 struct ieee80211_rate *rate)
326 {
327 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
328 u32 load = 0, hdrtime;
329
330 /* Estimate total channel use caused by this frame */
331
332 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
333 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
334
335 if (status->band == IEEE80211_BAND_5GHZ ||
336 (status->band == IEEE80211_BAND_2GHZ &&
337 rate->flags & IEEE80211_RATE_ERP_G))
338 hdrtime = CHAN_UTIL_HDR_SHORT;
339 else
340 hdrtime = CHAN_UTIL_HDR_LONG;
341
342 load = hdrtime;
343 if (!is_multicast_ether_addr(hdr->addr1))
344 load += hdrtime;
345
346 /* TODO: optimise again */
347 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
348
349 /* Divide channel_use by 8 to avoid wrapping around the counter */
350 load >>= CHAN_UTIL_SHIFT;
351
352 return load;
353 }
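/*
 * Shape of the computation above (symbolic sketch only, added comment; the
 * CHAN_UTIL_* constants are defined in ieee80211_i.h and not restated here):
 *
 *   load  = hdrtime                               header, plus a second
 *                                                 hdrtime for unicast (ACK)
 *   load += len * CHAN_UTIL_RATE_LCM / bitrate    payload airtime estimate
 *   load >>= CHAN_UTIL_SHIFT                      scale to channel_use units
 *
 * so a long frame at a low bitrate contributes a proportionally larger
 * channel_use value than a short frame at a high bitrate.
 */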
354
355 /* rx handlers */
356
357 static ieee80211_rx_result
358 ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx)
359 {
360 if (rx->sta)
361 rx->sta->channel_use_raw += rx->u.rx.load;
362 rx->sdata->channel_use_raw += rx->u.rx.load;
363 return RX_CONTINUE;
364 }
365
366 static ieee80211_rx_result
367 ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx)
368 {
369 struct ieee80211_local *local = rx->local;
370 struct sk_buff *skb = rx->skb;
371
372 if (unlikely(local->sta_hw_scanning))
373 return ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status);
374
375 if (unlikely(local->sta_sw_scanning)) {
376 /* drop all the other packets during a software scan anyway */
377 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status)
378 != RX_QUEUED)
379 dev_kfree_skb(skb);
380 return RX_QUEUED;
381 }
382
383 if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) {
384 /* scanning finished during invoking of handlers */
385 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
386 return RX_DROP_UNUSABLE;
387 }
388
389 return RX_CONTINUE;
390 }
391
392 static ieee80211_rx_result
393 ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
394 {
395 struct ieee80211_hdr *hdr;
396 hdr = (struct ieee80211_hdr *) rx->skb->data;
397
398 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
399 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
400 if (unlikely(rx->fc & IEEE80211_FCTL_RETRY &&
401 rx->sta->last_seq_ctrl[rx->u.rx.queue] ==
402 hdr->seq_ctrl)) {
403 if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) {
404 rx->local->dot11FrameDuplicateCount++;
405 rx->sta->num_duplicates++;
406 }
407 return RX_DROP_MONITOR;
408 } else
409 rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl;
410 }
411
412 if (unlikely(rx->skb->len < 16)) {
413 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
414 return RX_DROP_MONITOR;
415 }
416
417 /* Drop disallowed frame classes based on STA auth/assoc state;
418 * IEEE 802.11, Chap 5.5.
419 *
420 * 80211.o does filtering only based on association state, i.e., it
421 * drops Class 3 frames from not associated stations. hostapd sends
422 * deauth/disassoc frames when needed. In addition, hostapd is
423 * responsible for filtering on both auth and assoc states.
424 */
425 if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA ||
426 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL &&
427 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
428 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
429 (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) {
430 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) &&
431 !(rx->fc & IEEE80211_FCTL_TODS) &&
432 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
433 || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
434 /* Drop IBSS frames and frames for other hosts
435 * silently. */
436 return RX_DROP_MONITOR;
437 }
438
439 return RX_DROP_MONITOR;
440 }
441
442 return RX_CONTINUE;
443 }
444
445
446 static ieee80211_rx_result
447 ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
448 {
449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
450 int keyidx;
451 int hdrlen;
452 ieee80211_rx_result result = RX_DROP_UNUSABLE;
453 struct ieee80211_key *stakey = NULL;
454
455 /*
456 * Key selection 101
457 *
458 * There are three types of keys:
459 * - GTK (group keys)
460 * - PTK (pairwise keys)
461 * - STK (station-to-station pairwise keys)
462 *
463 * When selecting a key, we have to distinguish between multicast
464 * (including broadcast) and unicast frames, the latter can only
465 * use PTKs and STKs while the former always use GTKs. Unless, of
466 * course, actual WEP keys ("pre-RSNA") are used, then unicast
467 * frames can also use key indices like GTKs. Hence, if we don't
468 * have a PTK/STK we check the key index for a WEP key.
469 *
470 * Note that in a regular BSS, multicast frames are sent by the
471 * AP only, associated stations unicast the frame to the AP first
472 * which then multicasts it on their behalf.
473 *
474 * There is also a slight problem in IBSS mode: GTKs are negotiated
475 * with each station; that is something we don't currently handle.
476 * The spec seems to expect that one negotiates the same key with
477 * every station but there's no such requirement; VLANs could be
478 * possible.
479 */
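/*
 * Condensed decision order of the code below (summary added for clarity,
 * not part of the original comment): a unicast frame with a pairwise key
 * on the station uses that PTK/STK; otherwise, if the IV is present, the
 * key index is read from the fourth IV octet and the matching per-sdata
 * key is used; finally, an indexed non-WEP key is rejected for unicast
 * frames, since RSNA unicast traffic must use pairwise keys.
 */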
480
481 if (!(rx->fc & IEEE80211_FCTL_PROTECTED))
482 return RX_CONTINUE;
483
484 /*
485 * No point in finding a key and decrypting if the frame is neither
486 * addressed to us nor a multicast frame.
487 */
488 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
489 return RX_CONTINUE;
490
491 if (rx->sta)
492 stakey = rcu_dereference(rx->sta->key);
493
494 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
495 rx->key = stakey;
496 } else {
497 /*
498 * The device doesn't give us the IV so we won't be
499 * able to look up the key. That's ok though, we
500 * don't need to decrypt the frame, we just won't
501 * be able to keep statistics accurate.
502 * Except for key threshold notifications, should
503 * we somehow allow the driver to tell us which key
504 * the hardware used if this flag is set?
505 */
506 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) &&
507 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED))
508 return RX_CONTINUE;
509
510 hdrlen = ieee80211_get_hdrlen(rx->fc);
511
512 if (rx->skb->len < 8 + hdrlen)
513 return RX_DROP_UNUSABLE; /* TODO: count this? */
514
515 /*
516 * no need to call ieee80211_wep_get_keyidx,
517 * it verifies a bunch of things we've done already
518 */
519 keyidx = rx->skb->data[hdrlen + 3] >> 6;
520
521 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
522
523 /*
524 * RSNA-protected unicast frames should always be sent with
525 * pairwise or station-to-station keys, but for WEP we allow
526 * using a key index as well.
527 */
528 if (rx->key && rx->key->conf.alg != ALG_WEP &&
529 !is_multicast_ether_addr(hdr->addr1))
530 rx->key = NULL;
531 }
532
533 if (rx->key) {
534 rx->key->tx_rx_count++;
535 /* TODO: add threshold stuff again */
536 } else {
537 #ifdef CONFIG_MAC80211_DEBUG
538 if (net_ratelimit())
539 printk(KERN_DEBUG "%s: RX protected frame,"
540 " but have no key\n", rx->dev->name);
541 #endif /* CONFIG_MAC80211_DEBUG */
542 return RX_DROP_MONITOR;
543 }
544
545 /* Check for weak IVs if possible */
546 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
547 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
548 (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) ||
549 !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) &&
550 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
551 rx->sta->wep_weak_iv_count++;
552
553 switch (rx->key->conf.alg) {
554 case ALG_WEP:
555 result = ieee80211_crypto_wep_decrypt(rx);
556 break;
557 case ALG_TKIP:
558 result = ieee80211_crypto_tkip_decrypt(rx);
559 break;
560 case ALG_CCMP:
561 result = ieee80211_crypto_ccmp_decrypt(rx);
562 break;
563 }
564
565 /* either the frame has been decrypted or will be dropped */
566 rx->u.rx.status->flag |= RX_FLAG_DECRYPTED;
567
568 return result;
569 }
570
571 static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
572 {
573 struct ieee80211_sub_if_data *sdata;
574 DECLARE_MAC_BUF(mac);
575
576 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
577
578 if (sdata->bss)
579 atomic_inc(&sdata->bss->num_sta_ps);
580 sta->flags |= WLAN_STA_PS;
581 sta->pspoll = 0;
582 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
583 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
584 dev->name, print_mac(mac, sta->addr), sta->aid);
585 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
586 }
587
588 static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
589 {
590 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
591 struct sk_buff *skb;
592 int sent = 0;
593 struct ieee80211_sub_if_data *sdata;
594 struct ieee80211_tx_packet_data *pkt_data;
595 DECLARE_MAC_BUF(mac);
596
597 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
598 if (sdata->bss)
599 atomic_dec(&sdata->bss->num_sta_ps);
600 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM);
601 sta->pspoll = 0;
602 if (!skb_queue_empty(&sta->ps_tx_buf)) {
603 if (local->ops->set_tim)
604 local->ops->set_tim(local_to_hw(local), sta->aid, 0);
605 if (sdata->bss)
606 bss_tim_clear(local, sdata->bss, sta->aid);
607 }
608 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
609 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
610 dev->name, print_mac(mac, sta->addr), sta->aid);
611 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
612 /* Send all buffered frames to the station */
613 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
614 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
615 sent++;
616 pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
617 dev_queue_xmit(skb);
618 }
619 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
620 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
621 local->total_ps_buffered--;
622 sent++;
623 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
624 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
625 "since STA not sleeping anymore\n", dev->name,
626 print_mac(mac, sta->addr), sta->aid);
627 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
628 pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
629 dev_queue_xmit(skb);
630 }
631
632 return sent;
633 }
634
635 static ieee80211_rx_result
636 ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
637 {
638 struct sta_info *sta = rx->sta;
639 struct net_device *dev = rx->dev;
640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
641
642 if (!sta)
643 return RX_CONTINUE;
644
645 /* Update last_rx only for IBSS packets which are for the current
646 * BSSID to avoid keeping the current IBSS network alive in cases where
647 * other STAs are using a different BSSID. */
648 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
649 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
650 IEEE80211_IF_TYPE_IBSS);
651 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
652 sta->last_rx = jiffies;
653 } else
654 if (!is_multicast_ether_addr(hdr->addr1) ||
655 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) {
656 /* Update last_rx only for unicast frames in order to prevent
657 * the Probe Request frames (the only broadcast frames from a
658 * STA in infrastructure mode) from keeping a connection alive.
659 */
660 sta->last_rx = jiffies;
661 }
662
663 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
664 return RX_CONTINUE;
665
666 sta->rx_fragments++;
667 sta->rx_bytes += rx->skb->len;
668 sta->last_rssi = rx->u.rx.status->ssi;
669 sta->last_signal = rx->u.rx.status->signal;
670 sta->last_noise = rx->u.rx.status->noise;
671
672 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) {
673 /* Change STA power saving mode only in the end of a frame
674 * exchange sequence */
675 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM))
676 rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta);
677 else if (!(sta->flags & WLAN_STA_PS) &&
678 (rx->fc & IEEE80211_FCTL_PM))
679 ap_sta_ps_start(dev, sta);
680 }
681
682 /* Drop data::nullfunc frames silently, since they are used only to
683 * control station power saving mode. */
684 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
685 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) {
686 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
687 /* Update counter and free packet here to avoid counting this
688 * as a dropped packet. */
689 sta->rx_packets++;
690 dev_kfree_skb(rx->skb);
691 return RX_QUEUED;
692 }
693
694 return RX_CONTINUE;
695 } /* ieee80211_rx_h_sta_process */
696
697 static inline struct ieee80211_fragment_entry *
698 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
699 unsigned int frag, unsigned int seq, int rx_queue,
700 struct sk_buff **skb)
701 {
702 struct ieee80211_fragment_entry *entry;
703 int idx;
704
705 idx = sdata->fragment_next;
706 entry = &sdata->fragments[sdata->fragment_next++];
707 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
708 sdata->fragment_next = 0;
709
710 if (!skb_queue_empty(&entry->skb_list)) {
711 #ifdef CONFIG_MAC80211_DEBUG
712 struct ieee80211_hdr *hdr =
713 (struct ieee80211_hdr *) entry->skb_list.next->data;
714 DECLARE_MAC_BUF(mac);
715 DECLARE_MAC_BUF(mac2);
716 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
717 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
718 "addr1=%s addr2=%s\n",
719 sdata->dev->name, idx,
720 jiffies - entry->first_frag_time, entry->seq,
721 entry->last_frag, print_mac(mac, hdr->addr1),
722 print_mac(mac2, hdr->addr2));
723 #endif /* CONFIG_MAC80211_DEBUG */
724 __skb_queue_purge(&entry->skb_list);
725 }
726
727 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
728 *skb = NULL;
729 entry->first_frag_time = jiffies;
730 entry->seq = seq;
731 entry->rx_queue = rx_queue;
732 entry->last_frag = frag;
733 entry->ccmp = 0;
734 entry->extra_len = 0;
735
736 return entry;
737 }
738
739 static inline struct ieee80211_fragment_entry *
740 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
741 u16 fc, unsigned int frag, unsigned int seq,
742 int rx_queue, struct ieee80211_hdr *hdr)
743 {
744 struct ieee80211_fragment_entry *entry;
745 int i, idx;
746
747 idx = sdata->fragment_next;
748 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
749 struct ieee80211_hdr *f_hdr;
750 u16 f_fc;
751
752 idx--;
753 if (idx < 0)
754 idx = IEEE80211_FRAGMENT_MAX - 1;
755
756 entry = &sdata->fragments[idx];
757 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
758 entry->rx_queue != rx_queue ||
759 entry->last_frag + 1 != frag)
760 continue;
761
762 f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data;
763 f_fc = le16_to_cpu(f_hdr->frame_control);
764
765 if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) ||
766 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
767 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
768 continue;
769
770 if (entry->first_frag_time + 2 * HZ < jiffies) {
771 __skb_queue_purge(&entry->skb_list);
772 continue;
773 }
774 return entry;
775 }
776
777 return NULL;
778 }
779
780 static ieee80211_rx_result
781 ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
782 {
783 struct ieee80211_hdr *hdr;
784 u16 sc;
785 unsigned int frag, seq;
786 struct ieee80211_fragment_entry *entry;
787 struct sk_buff *skb;
788 DECLARE_MAC_BUF(mac);
789
790 hdr = (struct ieee80211_hdr *) rx->skb->data;
791 sc = le16_to_cpu(hdr->seq_ctrl);
792 frag = sc & IEEE80211_SCTL_FRAG;
793
794 if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) ||
795 (rx->skb)->len < 24 ||
796 is_multicast_ether_addr(hdr->addr1))) {
797 /* not fragmented */
798 goto out;
799 }
800 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
801
802 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
803
804 if (frag == 0) {
805 /* This is the first fragment of a new frame. */
806 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
807 rx->u.rx.queue, &(rx->skb));
808 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
809 (rx->fc & IEEE80211_FCTL_PROTECTED)) {
810 /* Store CCMP PN so that we can verify that the next
811 * fragment has a sequential PN value. */
812 entry->ccmp = 1;
813 memcpy(entry->last_pn,
814 rx->key->u.ccmp.rx_pn[rx->u.rx.queue],
815 CCMP_PN_LEN);
816 }
817 return RX_QUEUED;
818 }
819
820 /* This is a fragment for a frame that should already be pending in
821 * fragment cache. Add this fragment to the end of the pending entry.
822 */
823 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq,
824 rx->u.rx.queue, hdr);
825 if (!entry) {
826 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
827 return RX_DROP_MONITOR;
828 }
829
830 /* Verify that MPDUs within one MSDU have sequential PN values.
831 * (IEEE 802.11i, 8.3.3.4.5) */
832 if (entry->ccmp) {
833 int i;
834 u8 pn[CCMP_PN_LEN], *rpn;
835 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
836 return RX_DROP_UNUSABLE;
837 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
838 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
839 pn[i]++;
840 if (pn[i])
841 break;
842 }
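/*
 * Illustrative example (added comment): the loop above increments the
 * 48-bit PN big-endian byte-wise, e.g. a stored PN of 00:00:00:00:00:ff
 * becomes 00:00:00:00:01:00 -- pn[5] wraps to zero, so the carry
 * propagates into pn[4].
 */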
843 rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue];
844 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) {
845 if (net_ratelimit())
846 printk(KERN_DEBUG "%s: defrag: CCMP PN not "
847 "sequential A2=%s"
848 " PN=%02x%02x%02x%02x%02x%02x "
849 "(expected %02x%02x%02x%02x%02x%02x)\n",
850 rx->dev->name, print_mac(mac, hdr->addr2),
851 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
852 rpn[5], pn[0], pn[1], pn[2], pn[3],
853 pn[4], pn[5]);
854 return RX_DROP_UNUSABLE;
855 }
856 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
857 }
858
859 skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc));
860 __skb_queue_tail(&entry->skb_list, rx->skb);
861 entry->last_frag = frag;
862 entry->extra_len += rx->skb->len;
863 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) {
864 rx->skb = NULL;
865 return RX_QUEUED;
866 }
867
868 rx->skb = __skb_dequeue(&entry->skb_list);
869 if (skb_tailroom(rx->skb) < entry->extra_len) {
870 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
871 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
872 GFP_ATOMIC))) {
873 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
874 __skb_queue_purge(&entry->skb_list);
875 return RX_DROP_UNUSABLE;
876 }
877 }
878 while ((skb = __skb_dequeue(&entry->skb_list))) {
879 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
880 dev_kfree_skb(skb);
881 }
882
883 /* Complete frame has been reassembled - process it now */
884 rx->flags |= IEEE80211_TXRXD_FRAGMENTED;
885
886 out:
887 if (rx->sta)
888 rx->sta->rx_packets++;
889 if (is_multicast_ether_addr(hdr->addr1))
890 rx->local->dot11MulticastReceivedFrameCount++;
891 else
892 ieee80211_led_rx(rx->local);
893 return RX_CONTINUE;
894 }
895
896 static ieee80211_rx_result
897 ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
898 {
899 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
900 struct sk_buff *skb;
901 int no_pending_pkts;
902 DECLARE_MAC_BUF(mac);
903
904 if (likely(!rx->sta ||
905 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
906 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
907 !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)))
908 return RX_CONTINUE;
909
910 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
911 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
912 return RX_DROP_UNUSABLE;
913
914 skb = skb_dequeue(&rx->sta->tx_filtered);
915 if (!skb) {
916 skb = skb_dequeue(&rx->sta->ps_tx_buf);
917 if (skb)
918 rx->local->total_ps_buffered--;
919 }
920 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
921 skb_queue_empty(&rx->sta->ps_tx_buf);
922
923 if (skb) {
924 struct ieee80211_hdr *hdr =
925 (struct ieee80211_hdr *) skb->data;
926
927 /* tell TX path to send one frame even though the STA may
928 * still remain in PS mode after this frame exchange */
929 rx->sta->pspoll = 1;
930
931 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
932 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
933 print_mac(mac, rx->sta->addr), rx->sta->aid,
934 skb_queue_len(&rx->sta->ps_tx_buf));
935 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
936
937 /* Use MoreData flag to indicate whether there are more
938 * buffered frames for this STA */
939 if (no_pending_pkts) {
940 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
941 rx->sta->flags &= ~WLAN_STA_TIM;
942 } else
943 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
944
945 dev_queue_xmit(skb);
946
947 if (no_pending_pkts) {
948 if (rx->local->ops->set_tim)
949 rx->local->ops->set_tim(local_to_hw(rx->local),
950 rx->sta->aid, 0);
951 if (rx->sdata->bss)
952 bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid);
953 }
954 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
955 } else if (!rx->u.rx.sent_ps_buffered) {
956 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
957 "though there is no buffered frames for it\n",
958 rx->dev->name, print_mac(mac, rx->sta->addr));
959 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
960
961 }
962
963 /* Free PS Poll skb here instead of returning RX_DROP that would
964 * count as a dropped frame. */
965 dev_kfree_skb(rx->skb);
966
967 return RX_QUEUED;
968 }
969
970 static ieee80211_rx_result
971 ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
972 {
973 u16 fc = rx->fc;
974 u8 *data = rx->skb->data;
975 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;
976
977 if (!WLAN_FC_IS_QOS_DATA(fc))
978 return RX_CONTINUE;
979
980 /* remove the qos control field, update frame type and meta-data */
981 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
982 hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2);
983 /* change frame type to non QOS */
984 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
985 hdr->frame_control = cpu_to_le16(fc);
986
987 return RX_CONTINUE;
988 }
989
990 static int
991 ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx)
992 {
993 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) {
994 #ifdef CONFIG_MAC80211_DEBUG
995 if (net_ratelimit())
996 printk(KERN_DEBUG "%s: dropped frame "
997 "(unauthorized port)\n", rx->dev->name);
998 #endif /* CONFIG_MAC80211_DEBUG */
999 return -EACCES;
1000 }
1001
1002 return 0;
1003 }
1004
1005 static int
1006 ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx)
1007 {
1008 /*
1009 * Pass through unencrypted frames if the hardware has
1010 * decrypted them already.
1011 */
1012 if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED)
1013 return 0;
1014
1015 /* Drop unencrypted frames if key is set. */
1016 if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) &&
1017 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
1018 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
1019 (rx->key || rx->sdata->drop_unencrypted))) {
1020 if (net_ratelimit())
1021 printk(KERN_DEBUG "%s: RX non-WEP frame, but expected "
1022 "encryption\n", rx->dev->name);
1023 return -EACCES;
1024 }
1025 return 0;
1026 }
1027
1028 static int
1029 ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
1030 {
1031 struct net_device *dev = rx->dev;
1032 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1033 u16 fc, hdrlen, ethertype;
1034 u8 *payload;
1035 u8 dst[ETH_ALEN];
1036 u8 src[ETH_ALEN];
1037 struct sk_buff *skb = rx->skb;
1038 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1039 DECLARE_MAC_BUF(mac);
1040 DECLARE_MAC_BUF(mac2);
1041 DECLARE_MAC_BUF(mac3);
1042 DECLARE_MAC_BUF(mac4);
1043
1044 fc = rx->fc;
1045
1046 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1047 return -1;
1048
1049 hdrlen = ieee80211_get_hdrlen(fc);
1050
1051 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1052 * header
1053 * IEEE 802.11 address fields:
1054 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1055 * 0 0 DA SA BSSID n/a
1056 * 0 1 DA BSSID SA n/a
1057 * 1 0 BSSID SA DA n/a
1058 * 1 1 RA TA DA SA
1059 */
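/*
 * Example of the conversion (illustrative, added comment): a ToDS frame
 * from a client to an AP carries Addr1=BSSID, Addr2=SA, Addr3=DA, so the
 * code below builds the Ethernet header as dst=Addr3, src=Addr2.  If the
 * payload starts with an RFC 1042 SNAP header, the 802.11 header plus the
 * 6-byte LLC/SNAP prefix are stripped and the original EtherType is kept;
 * otherwise an 802.3 length-style header is synthesized.
 */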
1060
1061 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1062 case IEEE80211_FCTL_TODS:
1063 /* BSSID SA DA */
1064 memcpy(dst, hdr->addr3, ETH_ALEN);
1065 memcpy(src, hdr->addr2, ETH_ALEN);
1066
1067 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1068 sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) {
1069 if (net_ratelimit())
1070 printk(KERN_DEBUG "%s: dropped ToDS frame "
1071 "(BSSID=%s SA=%s DA=%s)\n",
1072 dev->name,
1073 print_mac(mac, hdr->addr1),
1074 print_mac(mac2, hdr->addr2),
1075 print_mac(mac3, hdr->addr3));
1076 return -1;
1077 }
1078 break;
1079 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1080 /* RA TA DA SA */
1081 memcpy(dst, hdr->addr3, ETH_ALEN);
1082 memcpy(src, hdr->addr4, ETH_ALEN);
1083
1084 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS)) {
1085 if (net_ratelimit())
1086 printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
1087 "frame (RA=%s TA=%s DA=%s SA=%s)\n",
1088 rx->dev->name,
1089 print_mac(mac, hdr->addr1),
1090 print_mac(mac2, hdr->addr2),
1091 print_mac(mac3, hdr->addr3),
1092 print_mac(mac4, hdr->addr4));
1093 return -1;
1094 }
1095 break;
1096 case IEEE80211_FCTL_FROMDS:
1097 /* DA BSSID SA */
1098 memcpy(dst, hdr->addr1, ETH_ALEN);
1099 memcpy(src, hdr->addr3, ETH_ALEN);
1100
1101 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1102 (is_multicast_ether_addr(dst) &&
1103 !compare_ether_addr(src, dev->dev_addr)))
1104 return -1;
1105 break;
1106 case 0:
1107 /* DA SA BSSID */
1108 memcpy(dst, hdr->addr1, ETH_ALEN);
1109 memcpy(src, hdr->addr2, ETH_ALEN);
1110
1111 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) {
1112 if (net_ratelimit()) {
1113 printk(KERN_DEBUG "%s: dropped IBSS frame "
1114 "(DA=%s SA=%s BSSID=%s)\n",
1115 dev->name,
1116 print_mac(mac, hdr->addr1),
1117 print_mac(mac2, hdr->addr2),
1118 print_mac(mac3, hdr->addr3));
1119 }
1120 return -1;
1121 }
1122 break;
1123 }
1124
1125 if (unlikely(skb->len - hdrlen < 8)) {
1126 if (net_ratelimit()) {
1127 printk(KERN_DEBUG "%s: RX too short data frame "
1128 "payload\n", dev->name);
1129 }
1130 return -1;
1131 }
1132
1133 payload = skb->data + hdrlen;
1134 ethertype = (payload[6] << 8) | payload[7];
1135
1136 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1137 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1138 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1139 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1140 * replace EtherType */
1141 skb_pull(skb, hdrlen + 6);
1142 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1143 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1144 } else {
1145 struct ethhdr *ehdr;
1146 __be16 len;
1147
1148 skb_pull(skb, hdrlen);
1149 len = htons(skb->len);
1150 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1151 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1152 memcpy(ehdr->h_source, src, ETH_ALEN);
1153 ehdr->h_proto = len;
1154 }
1155 return 0;
1156 }
1157
1158 /*
1159 * requires that rx->skb is a frame with ethernet header
1160 */
1161 static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx)
1162 {
1163 static const u8 pae_group_addr[ETH_ALEN]
1164 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1165 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1166
1167 /*
1168 * Allow EAPOL frames to us/the PAE group address regardless
1169 * of whether the frame was encrypted or not.
1170 */
1171 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1172 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1173 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1174 return true;
1175
1176 if (ieee80211_802_1x_port_control(rx) ||
1177 ieee80211_drop_unencrypted(rx))
1178 return false;
1179
1180 return true;
1181 }
1182
1183 /*
1184 * requires that rx->skb is a frame with ethernet header
1185 */
1186 static void
1187 ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1188 {
1189 struct net_device *dev = rx->dev;
1190 struct ieee80211_local *local = rx->local;
1191 struct sk_buff *skb, *xmit_skb;
1192 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1193 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1194 struct sta_info *dsta;
1195
1196 skb = rx->skb;
1197 xmit_skb = NULL;
1198
1199 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
1200 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
1201 (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
1202 if (is_multicast_ether_addr(ehdr->h_dest)) {
1203 /*
1204 * send multicast frames both to higher layers in
1205 * local net stack and back to the wireless medium
1206 */
1207 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1208 if (!xmit_skb && net_ratelimit())
1209 printk(KERN_DEBUG "%s: failed to clone "
1210 "multicast frame\n", dev->name);
1211 } else {
1212 dsta = sta_info_get(local, skb->data);
1213 if (dsta && dsta->dev == dev) {
1214 /*
1215 * The destination station is associated to
1216 * this AP (in this VLAN), so send the frame
1217 * directly to it and do not pass it to local
1218 * net stack.
1219 */
1220 xmit_skb = skb;
1221 skb = NULL;
1222 }
1223 if (dsta)
1224 sta_info_put(dsta);
1225 }
1226 }
1227
1228 if (skb) {
1229 /* deliver to local stack */
1230 skb->protocol = eth_type_trans(skb, dev);
1231 memset(skb->cb, 0, sizeof(skb->cb));
1232 netif_rx(skb);
1233 }
1234
1235 if (xmit_skb) {
1236 /* send to wireless media */
1237 xmit_skb->protocol = __constant_htons(ETH_P_802_3);
1238 skb_reset_network_header(xmit_skb);
1239 skb_reset_mac_header(xmit_skb);
1240 dev_queue_xmit(xmit_skb);
1241 }
1242 }
1243
1244 static ieee80211_rx_result
1245 ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1246 {
1247 struct net_device *dev = rx->dev;
1248 struct ieee80211_local *local = rx->local;
1249 u16 fc, ethertype;
1250 u8 *payload;
1251 struct sk_buff *skb = rx->skb, *frame = NULL;
1252 const struct ethhdr *eth;
1253 int remaining, err;
1254 u8 dst[ETH_ALEN];
1255 u8 src[ETH_ALEN];
1256 DECLARE_MAC_BUF(mac);
1257
1258 fc = rx->fc;
1259 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1260 return RX_CONTINUE;
1261
1262 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1263 return RX_DROP_MONITOR;
1264
1265 if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU))
1266 return RX_CONTINUE;
1267
1268 err = ieee80211_data_to_8023(rx);
1269 if (unlikely(err))
1270 return RX_DROP_UNUSABLE;
1271
1272 skb->dev = dev;
1273
1274 dev->stats.rx_packets++;
1275 dev->stats.rx_bytes += skb->len;
1276
1277 /* skip the wrapping header */
1278 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1279 if (!eth)
1280 return RX_DROP_UNUSABLE;
1281
1282 while (skb != frame) {
1283 u8 padding;
1284 __be16 len = eth->h_proto;
1285 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1286
1287 remaining = skb->len;
1288 memcpy(dst, eth->h_dest, ETH_ALEN);
1289 memcpy(src, eth->h_source, ETH_ALEN);
1290
1291 padding = ((4 - subframe_len) & 0x3);
1292 /* the last MSDU has no padding */
1293 if (subframe_len > remaining) {
1294 printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name);
1295 return RX_DROP_UNUSABLE;
1296 }
1297
1298 skb_pull(skb, sizeof(struct ethhdr));
1299 /* if last subframe reuse skb */
1300 if (remaining <= subframe_len + padding)
1301 frame = skb;
1302 else {
1303 frame = dev_alloc_skb(local->hw.extra_tx_headroom +
1304 subframe_len);
1305
1306 if (frame == NULL)
1307 return RX_DROP_UNUSABLE;
1308
1309 skb_reserve(frame, local->hw.extra_tx_headroom +
1310 sizeof(struct ethhdr));
1311 memcpy(skb_put(frame, ntohs(len)), skb->data,
1312 ntohs(len));
1313
1314 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1315 padding);
1316 if (!eth) {
1317 printk(KERN_DEBUG "%s: wrong buffer size\n",
1318 dev->name);
1319 dev_kfree_skb(frame);
1320 return RX_DROP_UNUSABLE;
1321 }
1322 }
1323
1324 skb_reset_network_header(frame);
1325 frame->dev = dev;
1326 frame->priority = skb->priority;
1327 rx->skb = frame;
1328
1329 payload = frame->data;
1330 ethertype = (payload[6] << 8) | payload[7];
1331
1332 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1333 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1334 compare_ether_addr(payload,
1335 bridge_tunnel_header) == 0)) {
1336 /* remove RFC1042 or Bridge-Tunnel
1337 * encapsulation and replace EtherType */
1338 skb_pull(frame, 6);
1339 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1340 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1341 } else {
1342 memcpy(skb_push(frame, sizeof(__be16)),
1343 &len, sizeof(__be16));
1344 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1345 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1346 }
1347
1348 if (!ieee80211_frame_allowed(rx)) {
1349 if (skb == frame) /* last frame */
1350 return RX_DROP_UNUSABLE;
1351 dev_kfree_skb(frame);
1352 continue;
1353 }
1354
1355 ieee80211_deliver_skb(rx);
1356 }
1357
1358 return RX_QUEUED;
1359 }
1360
1361 static ieee80211_rx_result
1362 ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1363 {
1364 struct net_device *dev = rx->dev;
1365 u16 fc;
1366 int err;
1367
1368 fc = rx->fc;
1369 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1370 return RX_CONTINUE;
1371
1372 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1373 return RX_DROP_MONITOR;
1374
1375 err = ieee80211_data_to_8023(rx);
1376 if (unlikely(err))
1377 return RX_DROP_UNUSABLE;
1378
1379 if (!ieee80211_frame_allowed(rx))
1380 return RX_DROP_MONITOR;
1381
1382 rx->skb->dev = dev;
1383
1384 dev->stats.rx_packets++;
1385 dev->stats.rx_bytes += rx->skb->len;
1386
1387 ieee80211_deliver_skb(rx);
1388
1389 return RX_QUEUED;
1390 }
1391
1392 static ieee80211_rx_result
1393 ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1394 {
1395 struct ieee80211_local *local = rx->local;
1396 struct ieee80211_hw *hw = &local->hw;
1397 struct sk_buff *skb = rx->skb;
1398 struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
1399 struct tid_ampdu_rx *tid_agg_rx;
1400 u16 start_seq_num;
1401 u16 tid;
1402
1403 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL))
1404 return RX_CONTINUE;
1405
1406 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) {
1407 if (!rx->sta)
1408 return RX_CONTINUE;
1409 tid = le16_to_cpu(bar->control) >> 12;
1410 tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]);
1411 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
1412 return RX_CONTINUE;
1413
1414 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1415
1416 /* reset session timer */
1417 if (tid_agg_rx->timeout) {
1418 unsigned long expires =
1419 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
1420 mod_timer(&tid_agg_rx->session_timer, expires);
1421 }
1422
1423 /* manage reordering buffer according to requested */
1424 /* sequence number */
1425 rcu_read_lock();
1426 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1427 start_seq_num, 1);
1428 rcu_read_unlock();
1429 return RX_DROP_UNUSABLE;
1430 }
1431
1432 return RX_CONTINUE;
1433 }
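/*
 * Field extraction in the BAR handling above, spelled out (illustrative,
 * added comment): the BAR Control field carries the TID in its top four
 * bits, hence the ">> 12"; the Starting Sequence Control field keeps the
 * fragment number in its low four bits, hence the ">> 4".  For example, a
 * start_seq_num field of 0x01a0 yields sequence number 0x1a (26).
 */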
1434
1435 static ieee80211_rx_result
1436 ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx)
1437 {
1438 struct ieee80211_sub_if_data *sdata;
1439
1440 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
1441 return RX_DROP_MONITOR;
1442
1443 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1444 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
1445 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
1446 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
1447 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status);
1448 else
1449 return RX_DROP_MONITOR;
1450
1451 return RX_QUEUED;
1452 }
1453
1454 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1455 struct ieee80211_hdr *hdr,
1456 struct ieee80211_txrx_data *rx)
1457 {
1458 int keyidx, hdrlen;
1459 DECLARE_MAC_BUF(mac);
1460 DECLARE_MAC_BUF(mac2);
1461
1462 hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb);
1463 if (rx->skb->len >= hdrlen + 4)
1464 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1465 else
1466 keyidx = -1;
1467
1468 if (net_ratelimit())
1469 printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC "
1470 "failure from %s to %s keyidx=%d\n",
1471 dev->name, print_mac(mac, hdr->addr2),
1472 print_mac(mac2, hdr->addr1), keyidx);
1473
1474 if (!rx->sta) {
1475 /*
1476 * Some hardware seems to generate incorrect Michael MIC
1477 * reports; ignore them to avoid triggering countermeasures.
1478 */
1479 if (net_ratelimit())
1480 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1481 "error for unknown address %s\n",
1482 dev->name, print_mac(mac, hdr->addr2));
1483 goto ignore;
1484 }
1485
1486 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) {
1487 if (net_ratelimit())
1488 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1489 "error for a frame with no PROTECTED flag (src "
1490 "%s)\n", dev->name, print_mac(mac, hdr->addr2));
1491 goto ignore;
1492 }
1493
1494 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
1495 /*
1496 * APs with pairwise keys should never receive Michael MIC
1497 * errors for non-zero keyidx because these are reserved for
1498 * group keys and only the AP is sending real multicast
1499 * frames in the BSS.
1500 */
1501 if (net_ratelimit())
1502 printk(KERN_DEBUG "%s: ignored Michael MIC error for "
1503 "a frame with non-zero keyidx (%d)"
1504 " (src %s)\n", dev->name, keyidx,
1505 print_mac(mac, hdr->addr2));
1506 goto ignore;
1507 }
1508
1509 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
1510 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
1511 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) {
1512 if (net_ratelimit())
1513 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1514 "error for a frame that cannot be encrypted "
1515 "(fc=0x%04x) (src %s)\n",
1516 dev->name, rx->fc, print_mac(mac, hdr->addr2));
1517 goto ignore;
1518 }
1519
1520 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr);
1521 ignore:
1522 dev_kfree_skb(rx->skb);
1523 rx->skb = NULL;
1524 }
1525
1526 static void ieee80211_rx_cooked_monitor(struct ieee80211_txrx_data *rx)
1527 {
1528 struct ieee80211_sub_if_data *sdata;
1529 struct ieee80211_local *local = rx->local;
1530 struct ieee80211_rtap_hdr {
1531 struct ieee80211_radiotap_header hdr;
1532 u8 flags;
1533 u8 rate;
1534 __le16 chan_freq;
1535 __le16 chan_flags;
1536 } __attribute__ ((packed)) *rthdr;
1537 struct sk_buff *skb = rx->skb, *skb2;
1538 struct net_device *prev_dev = NULL;
1539 struct ieee80211_rx_status *status = rx->u.rx.status;
1540
1541 if (rx->flags & IEEE80211_TXRXD_RX_CMNTR_REPORTED)
1542 goto out_free_skb;
1543
1544 if (skb_headroom(skb) < sizeof(*rthdr) &&
1545 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1546 goto out_free_skb;
1547
1548 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1549 memset(rthdr, 0, sizeof(*rthdr));
1550 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1551 rthdr->hdr.it_present =
1552 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1553 (1 << IEEE80211_RADIOTAP_RATE) |
1554 (1 << IEEE80211_RADIOTAP_CHANNEL));
1555
1556 rthdr->rate = rx->u.rx.rate->bitrate / 5;
1557 rthdr->chan_freq = cpu_to_le16(status->freq);
1558
1559 if (status->band == IEEE80211_BAND_5GHZ)
1560 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1561 IEEE80211_CHAN_5GHZ);
1562 else
1563 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1564 IEEE80211_CHAN_2GHZ);
1565
1566 skb_set_mac_header(skb, 0);
1567 skb->ip_summed = CHECKSUM_UNNECESSARY;
1568 skb->pkt_type = PACKET_OTHERHOST;
1569 skb->protocol = htons(ETH_P_802_2);
1570
1571 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1572 if (!netif_running(sdata->dev))
1573 continue;
1574
1575 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
1576 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1577 continue;
1578
1579 if (prev_dev) {
1580 skb2 = skb_clone(skb, GFP_ATOMIC);
1581 if (skb2) {
1582 skb2->dev = prev_dev;
1583 netif_rx(skb2);
1584 }
1585 }
1586
1587 prev_dev = sdata->dev;
1588 sdata->dev->stats.rx_packets++;
1589 sdata->dev->stats.rx_bytes += skb->len;
1590 }
1591
1592 if (prev_dev) {
1593 skb->dev = prev_dev;
1594 netif_rx(skb);
1595 skb = NULL;
1596 } else
1597 goto out_free_skb;
1598
1599 rx->flags |= IEEE80211_TXRXD_RX_CMNTR_REPORTED;
1600 return;
1601
1602 out_free_skb:
1603 dev_kfree_skb(skb);
1604 }
1605
1606 typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_txrx_data *);
1607 static ieee80211_rx_handler ieee80211_rx_handlers[] =
1608 {
1609 ieee80211_rx_h_if_stats,
1610 ieee80211_rx_h_passive_scan,
1611 ieee80211_rx_h_check,
1612 ieee80211_rx_h_decrypt,
1613 ieee80211_rx_h_sta_process,
1614 ieee80211_rx_h_defragment,
1615 ieee80211_rx_h_ps_poll,
1616 ieee80211_rx_h_michael_mic_verify,
1617 /* this must be after decryption - so the header is counted in the MPDU MIC;
1618 * it must be before the pae and data handlers, so QOS_DATA format frames
1619 * are not passed to user space by those functions
1620 */
1621 ieee80211_rx_h_remove_qos_control,
1622 ieee80211_rx_h_amsdu,
1623 ieee80211_rx_h_data,
1624 ieee80211_rx_h_ctrl,
1625 ieee80211_rx_h_mgmt,
1626 NULL
1627 };
1628
1629 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1630 struct ieee80211_txrx_data *rx,
1631 struct sk_buff *skb)
1632 {
1633 ieee80211_rx_handler *handler;
1634 ieee80211_rx_result res = RX_DROP_MONITOR;
1635
1636 rx->skb = skb;
1637 rx->sdata = sdata;
1638 rx->dev = sdata->dev;
1639
1640 for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) {
1641 res = (*handler)(rx);
1642
1643 switch (res) {
1644 case RX_CONTINUE:
1645 continue;
1646 case RX_DROP_UNUSABLE:
1647 case RX_DROP_MONITOR:
1648 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1649 if (rx->sta)
1650 rx->sta->rx_dropped++;
1651 break;
1652 case RX_QUEUED:
1653 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1654 break;
1655 }
1656 break;
1657 }
1658
1659 switch (res) {
1660 case RX_CONTINUE:
1661 case RX_DROP_MONITOR:
1662 ieee80211_rx_cooked_monitor(rx);
1663 break;
1664 case RX_DROP_UNUSABLE:
1665 dev_kfree_skb(rx->skb);
1666 break;
1667 }
1668 }
1669
1670 /* main receive path */
1671
1672 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1673 u8 *bssid, struct ieee80211_txrx_data *rx,
1674 struct ieee80211_hdr *hdr)
1675 {
1676 int multicast = is_multicast_ether_addr(hdr->addr1);
1677
1678 switch (sdata->vif.type) {
1679 case IEEE80211_IF_TYPE_STA:
1680 if (!bssid)
1681 return 0;
1682 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1683 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
1684 return 0;
1685 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
1686 } else if (!multicast &&
1687 compare_ether_addr(sdata->dev->dev_addr,
1688 hdr->addr1) != 0) {
1689 if (!(sdata->dev->flags & IFF_PROMISC))
1690 return 0;
1691 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
1692 }
1693 break;
1694 case IEEE80211_IF_TYPE_IBSS:
1695 if (!bssid)
1696 return 0;
1697 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1698 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
1699 return 0;
1700 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
1701 } else if (!multicast &&
1702 compare_ether_addr(sdata->dev->dev_addr,
1703 hdr->addr1) != 0) {
1704 if (!(sdata->dev->flags & IFF_PROMISC))
1705 return 0;
1706 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
1707 } else if (!rx->sta)
1708 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
1709 bssid, hdr->addr2);
1710 break;
1711 case IEEE80211_IF_TYPE_VLAN:
1712 case IEEE80211_IF_TYPE_AP:
1713 if (!bssid) {
1714 if (compare_ether_addr(sdata->dev->dev_addr,
1715 hdr->addr1))
1716 return 0;
1717 } else if (!ieee80211_bssid_match(bssid,
1718 sdata->dev->dev_addr)) {
1719 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
1720 return 0;
1721 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
1722 }
1723 if (sdata->dev == sdata->local->mdev &&
1724 !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
1725 /* do not receive anything via
1726 * master device when not scanning */
1727 return 0;
1728 break;
1729 case IEEE80211_IF_TYPE_WDS:
1730 if (bssid ||
1731 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
1732 return 0;
1733 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1734 return 0;
1735 break;
1736 case IEEE80211_IF_TYPE_MNTR:
1737 /* take everything */
1738 break;
1739 case IEEE80211_IF_TYPE_INVALID:
1740 /* should never get here */
1741 WARN_ON(1);
1742 break;
1743 }
1744
1745 return 1;
1746 }
1747
1748 /*
1749 * This is the actual Rx frame handler. As it belongs to the Rx path it
1750 * must be called with rcu_read_lock protection.
1751 */
1752 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1753 struct sk_buff *skb,
1754 struct ieee80211_rx_status *status,
1755 u32 load,
1756 struct ieee80211_rate *rate)
1757 {
1758 struct ieee80211_local *local = hw_to_local(hw);
1759 struct ieee80211_sub_if_data *sdata;
1760 struct ieee80211_hdr *hdr;
1761 struct ieee80211_txrx_data rx;
1762 u16 type;
1763 int prepares;
1764 struct ieee80211_sub_if_data *prev = NULL;
1765 struct sk_buff *skb_new;
1766 u8 *bssid;
1767
1768 hdr = (struct ieee80211_hdr *) skb->data;
1769 memset(&rx, 0, sizeof(rx));
1770 rx.skb = skb;
1771 rx.local = local;
1772
1773 rx.u.rx.status = status;
1774 rx.u.rx.load = load;
1775 rx.u.rx.rate = rate;
1776 rx.fc = le16_to_cpu(hdr->frame_control);
1777 type = rx.fc & IEEE80211_FCTL_FTYPE;
1778
1779 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
1780 local->dot11ReceivedFragmentCount++;
1781
1782 rx.sta = sta_info_get(local, hdr->addr2);
1783 if (rx.sta) {
1784 rx.dev = rx.sta->dev;
1785 rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev);
1786 }
1787
1788 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
1789 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
1790 goto end;
1791 }
1792
1793 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
1794 rx.flags |= IEEE80211_TXRXD_RXIN_SCAN;
1795
1796 ieee80211_parse_qos(&rx);
1797 ieee80211_verify_ip_alignment(&rx);
1798
1799 skb = rx.skb;
1800
1801 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1802 if (!netif_running(sdata->dev))
1803 continue;
1804
1805 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR)
1806 continue;
1807
1808 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1809 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH;
1810 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
1811
1812 if (!prepares)
1813 continue;
1814
1815 /*
1816 * frame is destined for this interface, but if it's not
1817 * also for the previous one we handle that after the
1818 * loop to avoid copying the SKB once too much
1819 */
1820
1821 if (!prev) {
1822 prev = sdata;
1823 continue;
1824 }
1825
1826 /*
1827 * frame was destined for the previous interface
1828 * so invoke RX handlers for it
1829 */
1830
1831 skb_new = skb_copy(skb, GFP_ATOMIC);
1832 if (!skb_new) {
1833 if (net_ratelimit())
1834 printk(KERN_DEBUG "%s: failed to copy "
1835 "multicast frame for %s",
1836 wiphy_name(local->hw.wiphy),
1837 prev->dev->name);
1838 continue;
1839 }
1840 rx.fc = le16_to_cpu(hdr->frame_control);
1841 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1842 prev = sdata;
1843 }
1844 if (prev) {
1845 rx.fc = le16_to_cpu(hdr->frame_control);
1846 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1847 } else
1848 dev_kfree_skb(skb);
1849
1850 end:
1851 if (rx.sta)
1852 sta_info_put(rx.sta);
1853 }
1854
1855 #define SEQ_MODULO 0x1000
1856 #define SEQ_MASK 0xfff
1857
1858 static inline int seq_less(u16 sq1, u16 sq2)
1859 {
1860 return (((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1));
1861 }
1862
1863 static inline u16 seq_inc(u16 sq)
1864 {
1865 return ((sq + 1) & SEQ_MASK);
1866 }
1867
1868 static inline u16 seq_sub(u16 sq1, u16 sq2)
1869 {
1870 return ((sq1 - sq2) & SEQ_MASK);
1871 }
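/*
 * Note on the three helpers above: 802.11 sequence numbers live in a
 * 12-bit space and wrap at SEQ_MODULO (4096).  seq_less() compares two
 * numbers on that ring: sq1 is considered "less" than sq2 when the
 * forward distance from sq2 to sq1 exceeds half the ring.  For example,
 * seq_less(4095, 3) is true, since ((4095 - 3) & SEQ_MASK) == 4092 is
 * greater than 2048, i.e. 4095 lies behind 3 across the wrap-around.
 */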
1872
1873
1874 /*
1875 * As this function belongs to the Rx path it must be called with
1876 * the proper rcu_read_lock protection for its flow.
1877 */
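/*
 * Return value, as relied upon by the callers below: 1 means the
 * reordering code consumed the skb (dropped it as old or duplicate,
 * buffered it, or already delivered it via __ieee80211_rx_handle_packet);
 * 0 means the frame is next in sequence and the caller should process
 * it itself.
 */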
1878 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1879 struct tid_ampdu_rx *tid_agg_rx,
1880 struct sk_buff *skb, u16 mpdu_seq_num,
1881 int bar_req)
1882 {
1883 struct ieee80211_local *local = hw_to_local(hw);
1884 struct ieee80211_rx_status status;
1885 u16 head_seq_num, buf_size;
1886 int index;
1887 u32 pkt_load;
1888 struct ieee80211_supported_band *sband;
1889 struct ieee80211_rate *rate;
1890
1891 buf_size = tid_agg_rx->buf_size;
1892 head_seq_num = tid_agg_rx->head_seq_num;
1893
1894 /* frame with out of date sequence number */
1895 if (seq_less(mpdu_seq_num, head_seq_num)) {
1896 dev_kfree_skb(skb);
1897 return 1;
1898 }
1899
1900 /* if the frame sequence number exceeds our buffering window size or a
1901 * Block Ack Request arrived - release stored frames */
1902 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
1903 /* new head to the ordering buffer */
1904 if (bar_req)
1905 head_seq_num = mpdu_seq_num;
1906 else
1907 head_seq_num =
1908 seq_inc(seq_sub(mpdu_seq_num, buf_size));
1909 /* release stored frames up to new head to stack */
1910 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1911 index = seq_sub(tid_agg_rx->head_seq_num,
1912 tid_agg_rx->ssn)
1913 % tid_agg_rx->buf_size;
1914
1915 if (tid_agg_rx->reorder_buf[index]) {
1916 /* release the reordered frames to stack */
1917 memcpy(&status,
1918 tid_agg_rx->reorder_buf[index]->cb,
1919 sizeof(status));
1920 sband = local->hw.wiphy->bands[status.band];
1921 rate = &sband->bitrates[status.rate_idx];
1922 pkt_load = ieee80211_rx_load_stats(local,
1923 tid_agg_rx->reorder_buf[index],
1924 &status, rate);
1925 __ieee80211_rx_handle_packet(hw,
1926 tid_agg_rx->reorder_buf[index],
1927 &status, pkt_load, rate);
1928 tid_agg_rx->stored_mpdu_num--;
1929 tid_agg_rx->reorder_buf[index] = NULL;
1930 }
1931 tid_agg_rx->head_seq_num =
1932 seq_inc(tid_agg_rx->head_seq_num);
1933 }
1934 if (bar_req)
1935 return 1;
1936 }
1937
1938 /* now the new frame is always in the range of the reordering
1939 * buffer window */
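/* the slot index is the frame's offset from the session start
 * sequence number (ssn), wrapped into the circular reorder_buf
 * of buf_size entries */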
1940 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
1941 % tid_agg_rx->buf_size;
1942 /* check if we already stored this frame */
1943 if (tid_agg_rx->reorder_buf[index]) {
1944 dev_kfree_skb(skb);
1945 return 1;
1946 }
1947
1948 /* if the arrived MPDU is in the right order and nothing else is stored,
1949 * release it immediately */
1950 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1951 tid_agg_rx->stored_mpdu_num == 0) {
1952 tid_agg_rx->head_seq_num =
1953 seq_inc(tid_agg_rx->head_seq_num);
1954 return 0;
1955 }
1956
1957 /* put the frame in the reordering buffer */
1958 tid_agg_rx->reorder_buf[index] = skb;
1959 tid_agg_rx->stored_mpdu_num++;
1960 /* release buffered frames up to the next missing frame */
1961 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
1962 % tid_agg_rx->buf_size;
1963 while (tid_agg_rx->reorder_buf[index]) {
1964 /* release the reordered frame back to stack */
1965 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
1966 sizeof(status));
1967 sband = local->hw.wiphy->bands[status.band];
1968 rate = &sband->bitrates[status.rate_idx];
1969 pkt_load = ieee80211_rx_load_stats(local,
1970 tid_agg_rx->reorder_buf[index],
1971 &status, rate);
1972 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
1973 &status, pkt_load, rate);
1974 tid_agg_rx->stored_mpdu_num--;
1975 tid_agg_rx->reorder_buf[index] = NULL;
1976 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
1977 index = seq_sub(tid_agg_rx->head_seq_num,
1978 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
1979 }
1980 return 1;
1981 }
1982
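/*
 * A-MPDU reorder entry point for the Rx path: returns 1 when the frame
 * was taken care of by the reordering code and must not be handed to
 * the regular Rx handlers by the caller, 0 otherwise.
 */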
1983 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
1984 struct sk_buff *skb)
1985 {
1986 struct ieee80211_hw *hw = &local->hw;
1987 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1988 struct sta_info *sta;
1989 struct tid_ampdu_rx *tid_agg_rx;
1990 u16 fc, sc;
1991 u16 mpdu_seq_num;
1992 u8 ret = 0, *qc;
1993 int tid;
1994
1995 sta = sta_info_get(local, hdr->addr2);
1996 if (!sta)
1997 return ret;
1998
1999 fc = le16_to_cpu(hdr->frame_control);
2000
2001 /* filter the QoS data rx stream according to
2002 * STA/TID and check if this STA/TID is on aggregation */
2003 if (!WLAN_FC_IS_QOS_DATA(fc))
2004 goto end_reorder;
2005
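/* the TID is carried in the low bits of the QoS Control field, which
 * is the last field of the QoS data header */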
2006 qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
2007 tid = qc[0] & QOS_CONTROL_TID_MASK;
2008 tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);
2009
2010 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
2011 goto end_reorder;
2012
2013 /* null data frames are excluded */
2014 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
2015 goto end_reorder;
2016
2017 /* new un-ordered ampdu frame - process it */
2018
2019 /* reset session timer */
2020 if (tid_agg_rx->timeout) {
2021 unsigned long expires =
2022 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
2023 mod_timer(&tid_agg_rx->session_timer, expires);
2024 }
2025
2026 /* if this mpdu is fragmented - terminate rx aggregation session */
2027 sc = le16_to_cpu(hdr->seq_ctrl);
2028 if (sc & IEEE80211_SCTL_FRAG) {
2029 ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr,
2030 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2031 ret = 1;
2032 goto end_reorder;
2033 }
2034
2035 /* hand the MPDU to the reordering buffer based on its sequence number */
2036 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2037 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2038 mpdu_seq_num, 0);
2039 end_reorder:
2040 if (sta)
2041 sta_info_put(sta);
2042 return ret;
2043 }
2044
2045 /*
2046 * This is the receive path handler. It is called by a low level driver when an
2047 * 802.11 MPDU is received from the hardware.
2048 */
2049 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2050 struct ieee80211_rx_status *status)
2051 {
2052 struct ieee80211_local *local = hw_to_local(hw);
2053 u32 pkt_load;
2054 struct ieee80211_rate *rate = NULL;
2055 struct ieee80211_supported_band *sband;
2056
2057 if (status->band < 0 ||
2058 status->band >= IEEE80211_NUM_BANDS) {
2059 WARN_ON(1);
2060 return;
2061 }
2062
2063 sband = local->hw.wiphy->bands[status->band];
2064
2065 if (!sband ||
2066 status->rate_idx < 0 ||
2067 status->rate_idx >= sband->n_bitrates) {
2068 WARN_ON(1);
2069 return;
2070 }
2071
2072 rate = &sband->bitrates[status->rate_idx];
2073
2074 /*
2075 * key references and virtual interfaces are protected using RCU
2076 * and this requires that we are in a read-side RCU section during
2077 * receive processing
2078 */
2079 rcu_read_lock();
2080
2081 /*
2082 * Frames with a failed FCS/PLCP checksum are not returned;
2083 * all other frames are returned without the radiotap header
2084 * if one was previously present.
2085 * Also, frames shorter than 16 bytes are dropped.
2086 */
2087 skb = ieee80211_rx_monitor(local, skb, status, rate);
2088 if (!skb) {
2089 rcu_read_unlock();
2090 return;
2091 }
2092
2093 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2094 local->channel_use_raw += pkt_load;
2095
2096 if (!ieee80211_rx_reorder_ampdu(local, skb))
2097 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate);
2098
2099 rcu_read_unlock();
2100 }
2101 EXPORT_SYMBOL(__ieee80211_rx);
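/*
 * Illustration only (not part of this file's interface): a driver would
 * typically fill in an ieee80211_rx_status and hand the frame over from
 * its receive path, roughly as in the hypothetical sketch below.  "hw",
 * "skb" and "hw_rate_index" stand for driver-private state and are
 * assumptions; band and rate_idx are the status fields validated above,
 * and rate_idx must index the bitrate table of the given band.
 *
 *	struct ieee80211_rx_status rx_status;
 *
 *	memset(&rx_status, 0, sizeof(rx_status));
 *	rx_status.band = IEEE80211_BAND_2GHZ;
 *	rx_status.rate_idx = hw_rate_index;
 *	ieee80211_rx_irqsafe(hw, skb, &rx_status);
 *
 * Code that is not running in hard interrupt context can call
 * __ieee80211_rx() directly instead of ieee80211_rx_irqsafe().
 */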
2102
2103 /* This is a version of the rx handler that can be called from hard irq
2104 * context. Post the skb on the queue and schedule the tasklet */
2105 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2106 struct ieee80211_rx_status *status)
2107 {
2108 struct ieee80211_local *local = hw_to_local(hw);
2109
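/* the rx status is stashed in skb->cb below, so it must fit there */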
2110 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2111
2112 skb->dev = local->mdev;
2113 /* copy status into skb->cb for use by tasklet */
2114 memcpy(skb->cb, status, sizeof(*status));
2115 skb->pkt_type = IEEE80211_RX_MSG;
2116 skb_queue_tail(&local->skb_queue, skb);
2117 tasklet_schedule(&local->tasklet);
2118 }
2119 EXPORT_SYMBOL(ieee80211_rx_irqsafe);