/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of receive path.
 */
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hal *ah = sc->sc_ah;

        ds->ds_link = 0;        /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        ds->ds_vdata = skb->data;

        /* setup rx descriptors */
        ath9k_hw_setuprxdesc(ah,
                             skb_tailroom(skb), /* buffer size */

        if (sc->sc_rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->sc_rxlink = bf->bf_daddr;

        sc->sc_rxlink = &ds->ds_link;
}
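
/*
 * Note on the chaining above: sc_rxlink behaves as a tail pointer into the
 * rx descriptor chain. For the first buffer it is NULL, so the DMA address
 * goes straight to the hardware via ath9k_hw_putrxbuf(); for every later
 * buffer the address is written into the previous descriptor's ds_link
 * field, roughly
 *
 *      putrxbuf(bf0) -> ds0.ds_link = bf1->bf_daddr -> ds1.ds_link = 0
 *
 * and sc_rxlink then advances to &ds->ds_link of the buffer just added, so
 * the chain always ends in a null link rather than a self-link.
 */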
/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
                      struct ath_node *an, struct sk_buff *skb)
{
        struct ieee80211_bar *bar;
        struct ath_arx_tid *rxtid;
        struct ath_recv_status *rx_status;
        int tidno, index, cindex;

        /* look at BAR contents */
        bar = (struct ieee80211_bar *)skb->data;
        tidno = (bar->control & IEEE80211_BAR_CTL_TID_M)
                >> IEEE80211_BAR_CTL_TID_S;
        seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

        /* process BAR - indicate all pending RX frames till the BAR seqno */
        rxtid = &an->an_aggr.rx.tid[tidno];

        spin_lock_bh(&rxtid->tidlock);

        /* get relative index */
        index = ATH_BA_INDEX(rxtid->seq_next, seqno);

        /* drop BAR if old sequence (index is too large) */
        if ((index > rxtid->baw_size) &&
            (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
                /* discard frame, ieee layer may not treat frame as a dup */
                goto unlock_and_free;

        /* complete receive processing for all pending frames up to BAR seqno */
        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        while ((rxtid->baw_head != rxtid->baw_tail) &&
               (rxtid->baw_head != cindex)) {
                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

                ath_rx_subframe(an, tskb, rx_status);

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /* ... and indicate rest of the frames in-order */
        while (rxtid->baw_head != rxtid->baw_tail &&
               rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

                ath_rx_subframe(an, tskb, rx_status);

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

unlock_and_free:
        spin_unlock_bh(&rxtid->tidlock);
        /* free bar itself */
        return IEEE80211_FTYPE_CTL;
}
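
/*
 * For reference: ATH_BA_INDEX() and INCR(), used throughout the re-order
 * logic above, are defined elsewhere in the driver headers, not in this
 * file. They are assumed to amount to modular sequence arithmetic roughly
 * like
 *
 *      ATH_BA_INDEX(st, seq)   -> ((seq) - (st)) & (IEEE80211_SEQ_MAX - 1)
 *      INCR(val, sz)           -> val = (val + 1) & ((sz) - 1)
 *
 * i.e. the index is the distance of a sequence number from the start of the
 * block-ack window, and INCR advances a ring index with wrap-around.
 */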
/* Function to handle a subframe of aggregation when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
                           struct ath_node *an, struct sk_buff *skb,
                           struct ath_recv_status *rx_status)
{
        struct ieee80211_hdr *hdr;
        struct ath_arx_tid *rxtid;
        struct ath_rxbuf *rxbuf;
        u_int8_t type, subtype;
        int tid = 0, index, cindex, rxdiff;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        /* collect stats of frames with non-zero version */
        if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {

        type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
        subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

        if (ieee80211_is_back_req(fc))
                return ath_bar_rx(sc, an, skb);

        /* special aggregate processing only for qos unicast data frames */
        if (!ieee80211_is_data(fc) ||
            !ieee80211_is_data_qos(fc) ||
            is_multicast_ether_addr(hdr->addr1))
                return ath_rx_subframe(an, skb, rx_status);

        /* lookup rx tid state */
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);

        if (sc->sc_opmode == HAL_M_STA) {
                /* Drop the frame not belonging to me. */
                if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {

        rxtid = &an->an_aggr.rx.tid[tid];

        spin_lock(&rxtid->tidlock);

        rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
                 (ATH_TID_MAX_BUFS - 1);

        /*
         * If the ADDBA exchange has not been completed by the source,
         * process via legacy path (i.e. no reordering buffer is needed)
         */
        if (!rxtid->addba_exchangecomplete) {
                spin_unlock(&rxtid->tidlock);
                return ath_rx_subframe(an, skb, rx_status);
        }

        /* extract sequence number from recvd frame */
        rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

        if (rxtid->seq_reset) {
                rxtid->seq_reset = 0;
                rxtid->seq_next = rxseq;
        }

        index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

        /* drop frame if old sequence (index is too large) */
        if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
                /* discard frame, ieee layer may not treat frame as a dup */
                spin_unlock(&rxtid->tidlock);
                return IEEE80211_FTYPE_DATA;
        }

        /* sequence number is beyond block-ack window */
        if (index >= rxtid->baw_size) {

                /* complete receive processing for all pending frames */
                while (index >= rxtid->baw_size) {

                        rxbuf = rxtid->rxbuf + rxtid->baw_head;

                        if (rxbuf->rx_wbuf != NULL) {
                                ath_rx_subframe(an, rxbuf->rx_wbuf,
                                                &rxbuf->rx_status);
                                rxbuf->rx_wbuf = NULL;
                        }

                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
                }
        }

        /* add buffer to the recv ba window */
        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        rxbuf = rxtid->rxbuf + cindex;

        if (rxbuf->rx_wbuf != NULL) {
                spin_unlock(&rxtid->tidlock);
                /* duplicate frame */
                return IEEE80211_FTYPE_DATA;
        }

        rxbuf->rx_wbuf = skb;
        rxbuf->rx_time = get_timestamp();
        rxbuf->rx_status = *rx_status;

        /* advance tail if sequence received is newer
         * than any received so far */
        if (index >= rxdiff) {
                rxtid->baw_tail = cindex;
                INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
        }

        /* indicate all in-order received frames */
        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;

                ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /*
         * start a timer to flush all received frames if there are pending
         */
        if (rxtid->baw_head != rxtid->baw_tail)
                mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
        else
                del_timer_sync(&rxtid->timer);

        spin_unlock(&rxtid->tidlock);
        return IEEE80211_FTYPE_DATA;
}
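
/*
 * Worked example for the bookkeeping above (illustrative numbers only): with
 * rxtid->seq_next = 100 and an incoming rxseq = 103, index = 3, so the frame
 * lands three slots past the window start and is stored at
 * cindex = (baw_head + 3) & (ATH_TID_MAX_BUFS - 1). It is only passed up by
 * the in-order loop once the slots in front of it have been filled, flushed
 * by ath_rx_timer(), or released by a BAR via ath_bar_rx().
 */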
/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
        struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
        struct ath_node *an = rxtid->an;
        struct ath_rxbuf *rxbuf;

        spin_lock_bh(&rxtid->tidlock);
        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;
                if (!rxbuf->rx_wbuf) {
                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
                        continue;
                }

                /*
                 * Stop if the next one is a very recent frame.
                 *
                 * Call get_timestamp in every iteration to protect against the
                 * case in which a new frame is received while we are executing
                 * this function. Using a timestamp obtained before entering
                 * the loop could lead to a very large time interval
                 * (a negative value typecast to unsigned), breaking the
                 * function's logic.
                 */
                if ((get_timestamp() - rxbuf->rx_time) <
                    (ATH_RX_TIMEOUT * HZ / 1000))
                        break;

                ath_rx_subframe(an, rxbuf->rx_wbuf,
                                &rxbuf->rx_status);
                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }

        /*
         * start a timer to flush all received frames if there are pending
         */
        if (rxtid->baw_head != rxtid->baw_tail)
                mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
        else
                nosched = 1;    /* no need to re-arm the timer again */

        spin_unlock_bh(&rxtid->tidlock);
}
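
/*
 * Illustrative numbers for the timestamp note above: had the timestamp been
 * sampled once before the loop (say at t = 1000 ms) and a frame been queued
 * at t = 1005 ms while the loop was still running, 1000 - 1005 would wrap to
 * a huge unsigned interval and the "very recent frame" check could never
 * trigger, so the fresh frame would be flushed prematurely. Re-reading
 * get_timestamp() on every iteration avoids that.
 */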
/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
                             struct ath_arx_tid *rxtid, int drop)
{
        struct ath_rxbuf *rxbuf;

        spin_lock_bh(&rxtid->tidlock);
        while (rxtid->baw_head != rxtid->baw_tail) {
                rxbuf = rxtid->rxbuf + rxtid->baw_head;
                if (!rxbuf->rx_wbuf) {
                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
                        continue;
                }

                if (drop)
                        dev_kfree_skb(rxbuf->rx_wbuf);
                else
                        ath_rx_subframe(rxtid->an,
                                        rxbuf->rx_wbuf,
                                        &rxbuf->rx_status);

                rxbuf->rx_wbuf = NULL;

                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
        }
        spin_unlock_bh(&rxtid->tidlock);
}
static u_int8_t ath_rx_detect_antenna(struct ath_softc *sc,
                                      struct ath_rx_status *rxstat)
{
#define ATH_RX_CHAINMASK_CLR(_chainmask, _chain) \
        ((_chainmask) &= ~(1 << (_chain)))
        u_int8_t rx_chainmask = sc->sc_rx_chainmask;
        int rssiRef, detectThresh, detectDelta;

        if (IS_CHAN_5GHZ(&sc->sc_curchan)) {
                detectThresh = sc->sc_rxchaindetect_thresh5GHz;
                detectDelta = sc->sc_rxchaindetect_delta5GHz;
        } else {
                detectThresh = sc->sc_rxchaindetect_thresh2GHz;
                detectDelta = sc->sc_rxchaindetect_delta2GHz;
        }

        switch (sc->sc_rxchaindetect_ref) {
                rssiRef = rxstat->rs_rssi;
                if (rssiRef < detectThresh)

                if (rssiRef - rxstat->rs_rssi_ctl1 > detectDelta)
                        ATH_RX_CHAINMASK_CLR(rx_chainmask, 1);

                if (rssiRef - rxstat->rs_rssi_ctl2 > detectDelta)
                        ATH_RX_CHAINMASK_CLR(rx_chainmask, 2);

                rssiRef = rxstat->rs_rssi_ctl1;
                if (rssiRef < detectThresh)

                if (rssiRef - rxstat->rs_rssi_ctl2 > detectDelta)
                        ATH_RX_CHAINMASK_CLR(rx_chainmask, 2);

                rssiRef = rxstat->rs_rssi_ctl2;
                if (rssiRef < detectThresh)

                if (rssiRef - rxstat->rs_rssi_ctl1 > detectDelta)
                        ATH_RX_CHAINMASK_CLR(rx_chainmask, 1);

#undef ATH_RX_CHAINMASK_CLR
}
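
/*
 * Example of the detection logic above (illustrative values): with the
 * reference chain reporting rssiRef = 40, detectDelta = 30 and chain 1
 * reporting an RSSI of 5, the difference (35) exceeds the delta and chain 1
 * is cleared from rx_chainmask; a chain whose RSSI tracks the reference
 * within the delta stays enabled.
 */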
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,

        /*
         * Cache-line-align. This is important (for the
         * 5210 at least) as not doing so causes bogus data
         */
        skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
        off = ((unsigned long) skb->data) % sc->sc_cachelsz;
        skb_reserve(skb, sc->sc_cachelsz - off);
        DPRINTF(sc, ATH_DEBUG_FATAL,
                "%s: skbuff alloc of size %u failed\n",
static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

        spin_lock_bh(&sc->sc_rxbuflock);
        if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                /*
                 * This buffer is still held for hw access.
                 * Mark it as free to be re-queued later.
                 */
                bf->bf_status |= ATH_BUFSTATUS_FREE;
        } else {
                /* XXX: we probably never enter here, remove after
                 * verification */
                list_add_tail(&bf->list, &sc->sc_rxbuf);
                ath_rx_buf_link(sc, bf);
        }
        spin_unlock_bh(&sc->sc_rxbuflock);
}
/*
 * The skb indicated to upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
                           struct sk_buff *skb,
                           struct ath_recv_status *status,
                           u_int16_t keyix)
{
        struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
        struct sk_buff *nskb;

        /* indicate frame to the stack, which will free the old skb. */
        type = ath__rx_indicate(sc, skb, status, keyix);

        /* allocate a new skb and queue it for H/W processing */
        nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);

        bf->bf_buf_addr = ath_skb_map_single(sc,
                nskb,
                PCI_DMA_FROMDEVICE,
                /* XXX: Remove get_dma_mem_context() */
                get_dma_mem_context(bf, bf_dmacontext));
        ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

        /* queue the new wbuf to H/W */
        ath_rx_requeue(sc, nskb);

        return type;
}
static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        u_int32_t rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, sc->sc_myaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;

        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
        DPRINTF(sc, ATH_DEBUG_RECV,
                "%s: RX filter 0x%x, MC filter %08x:%08x\n",
                __func__, rfilt, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        spin_lock_init(&sc->sc_rxflushlock);
        spin_lock_init(&sc->sc_rxbuflock);

        /*
         * Cisco's VPN software requires that drivers be able to
         * receive encapsulated frames that are larger than the MTU.
         * Since we can't be sure how large a frame we'll get, setup
         * to handle the largest one possible.
         */
        sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,

        DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
                __func__, sc->sc_cachelsz, sc->sc_rxbufsize);

        /* Initialize rx descriptors */

        error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
                DPRINTF(sc, ATH_DEBUG_FATAL,
                        "%s: failed to allocate rx descriptors: %d\n",

        /* Pre-allocate a wbuf for each rx buffer */

        list_for_each_entry(bf, &sc->sc_rxbuf, list) {
                skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);

                ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
                                   get_dma_mem_context(bf, bf_dmacontext));
                ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
        }
        sc->sc_rxlink = NULL;
/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
        list_for_each_entry(bf, &sc->sc_rxbuf, list) {

        /* cleanup rx descriptors */

        if (sc->sc_rxdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u_int32_t ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (HAL_RX_FILTER_PHYERR | HAL_RX_FILTER_PHYRADAR)

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST
                | HAL_RX_FILTER_MCAST;
        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_opmode != HAL_M_STA)
                rfilt |= HAL_RX_FILTER_PROBEREQ;

        /* Can't set HOSTAP into promiscuous mode */
        if (sc->sc_opmode == HAL_M_MONITOR) {
                rfilt |= HAL_RX_FILTER_PROM;
                /* ??? To prevent from sending ACK */
                rfilt &= ~HAL_RX_FILTER_UCAST;
        }

        if (sc->sc_opmode == HAL_M_STA || sc->sc_opmode == HAL_M_IBSS ||
            sc->sc_nostabeacons || sc->sc_scanning)
                rfilt |= HAL_RX_FILTER_BEACON;

        /* If in HOSTAP mode, want to enable reception of PSPOLL frames
           & beacon frames */
        if (sc->sc_opmode == HAL_M_HOSTAP)
                rfilt |= (HAL_RX_FILTER_BEACON | HAL_RX_FILTER_PSPOLL);

        return rfilt;
#undef RX_FILTER_PRESERVE
}
/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        spin_lock_bh(&sc->sc_rxbuflock);
        if (list_empty(&sc->sc_rxbuf))

        sc->sc_rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
                if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                        /* restarting h/w, no need for holding descriptors */
                        bf->bf_status &= ~ATH_BUFSTATUS_STALE;
                        /*
                         * Upper layer may not be done with the frame yet so
                         * we can't just re-queue it to hardware. Remove it
                         * from h/w queue. It'll be re-queued when upper layer
                         * returns the frame and ath_rx_requeue_mpdu is called.
                         */
                        if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {

                /* chain descriptors */
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->sc_rxbuf))

        bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);             /* enable recv descriptors */

        spin_unlock_bh(&sc->sc_rxbuflock);
        ath_opmode_init(sc);            /* set filters, etc. */
        ath9k_hw_startpcureceive(ah);   /* re-enable PCU/DMA engine */
/* Disable the receive h/w in preparation for a reset. */

enum hal_bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        enum hal_bool stopped;

        ath9k_hw_stoppcurecv(ah);       /* disable PCU */
        ath9k_hw_setrxfilter(ah, 0);    /* clear recv filter */
        stopped = ath9k_hw_stopdmarecv(ah);     /* disable DMA engine */
        udelay(3000);                   /* 3ms is long enough for 1 frame */
        tsf = ath9k_hw_gettsf64(ah);
        sc->sc_rxlink = NULL;           /* just in case */

        return stopped;
}
/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
        /*
         * ath_rx_tasklet may be used to handle rx interrupt and flush receive
         * queue at the same time. Use a lock to serialize the access of rx
         * queue.
         * ath_rx_tasklet cannot hold the spinlock while indicating packets.
         * Instead, do not claim the spinlock but check for a flush in
         * progress (see references to sc_rxflush)
         */
        spin_lock_bh(&sc->sc_rxflushlock);

        ath_rx_tasklet(sc, 1);

        spin_unlock_bh(&sc->sc_rxflushlock);
}
/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
                 struct ath_recv_status *rx_status,
                 enum ATH_RX_TYPE *status)
{
        if (is_ampdu && sc->sc_rxaggr) {
                *status = ATH_RX_CONSUMED;
                return ath_ampdu_input(sc, an, skb, rx_status);
        }

        *status = ATH_RX_NON_CONSUMED;
/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa) \
        ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
                             ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

        struct ath_buf *bf, *bf_held = NULL;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = NULL;
        struct ath_recv_status rx_status;
        struct ath_hal *ah = sc->sc_ah;
        int type, rx_processed = 0;
        u_int8_t rxchainmask, chainreset = 0;
        enum hal_status retval;

        DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);

        /* If handling rx interrupt and flush is in progress => exit */
        if (sc->sc_rxflush && (flush == 0))

        spin_lock_bh(&sc->sc_rxbuflock);
        if (list_empty(&sc->sc_rxbuf)) {
                sc->sc_rxlink = NULL;
                spin_unlock_bh(&sc->sc_rxbuflock);
        }

        bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
        /*
         * There is a race condition that a BH gets scheduled after sw
         * writes RxE and before hw re-loads the last descriptor to get
         * the newly chained one. Software must keep the last DONE
         * descriptor as a holding descriptor - software does so by
         * marking it with the STALE flag.
         */
        if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
                        /*
                         * The holding descriptor is the last
                         * descriptor in queue. It's safe to
                         * remove the last holding descriptor.
                         */
                        list_del(&bf_held->list);
                        bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
                        sc->sc_rxlink = NULL;

                        if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
                                list_add_tail(&bf_held->list,
                                              &sc->sc_rxbuf);
                                ath_rx_buf_link(sc, bf_held);
                        }
                        spin_unlock_bh(&sc->sc_rxbuflock);
                }

                bf = list_entry(bf->list.next, struct ath_buf, list);
        }
        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on. All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        retval = ath9k_hw_rxprocdesc(ah,
                                     PA2DESC(sc, ds->ds_link),
        if (HAL_EINPROGRESS == retval) {
                struct ath_desc *tds;

                if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
                        spin_unlock_bh(&sc->sc_rxbuflock);
                }

                tbf = list_entry(bf->list.next, struct ath_buf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */
                retval = ath9k_hw_rxprocdesc(ah,
                                             PA2DESC(sc, tds->ds_link), 0);
                if (HAL_EINPROGRESS == retval) {
                        spin_unlock_bh(&sc->sc_rxbuflock);
                }
        }

        /* XXX: we do not support frames spanning
         * multiple descriptors */
        bf->bf_status |= ATH_BUFSTATUS_DONE;

        if (skb == NULL) {              /* XXX ??? can this happen */
                spin_unlock_bh(&sc->sc_rxbuflock);
        }
        /*
         * Now we know it's a completed frame, we can indicate the
         * frame. Remove the previous holding descriptor and leave
         * this one in the queue as the new holding descriptor.
         */
        list_del(&bf_held->list);
        bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
        if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
                list_add_tail(&bf_held->list, &sc->sc_rxbuf);
                /* try to requeue this descriptor */
                ath_rx_buf_link(sc, bf_held);
        }

        bf->bf_status |= ATH_BUFSTATUS_STALE;

        /*
         * Release the lock here in case ieee80211_input() returns
         * the frame immediately by calling ath_rx_mpdu_requeue().
         */
        spin_unlock_bh(&sc->sc_rxbuflock);

        /*
         * If we're asked to flush receive queue, directly
         * chain it back at the queue without processing it.
         */
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;
        memzero(&rx_status, sizeof(struct ath_recv_status));

        if (ds->ds_rxstat.rs_more) {
                /*
                 * Frame spans multiple descriptors; this
                 * cannot happen yet as we don't support
                 * jumbograms. If not in monitor mode,
                 * discard the frame.
                 */

                /*
                 * Enable this if you want to see
                 * error frames in Monitor mode.
                 */
                if (sc->sc_opmode != HAL_M_MONITOR)

                /* fall thru for monitor mode handling... */
        } else if (ds->ds_rxstat.rs_status != 0) {
                if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
                        rx_status.flags |= ATH_RX_FCS_ERROR;
                if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
                        phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;

                if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
                        /*
                         * Decrypt error. We only mark packet status
                         * here and always push the frame up to let
                         * mac80211 handle the actual error case, be
                         * it no decryption key or real decryption
                         * error. This lets us keep statistics there.
                         */
                        rx_status.flags |= ATH_RX_DECRYPT_ERROR;
                } else if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) {
                        /*
                         * Demic error. We only mark frame status here
                         * and always push the frame up to let
                         * mac80211 handle the actual error case. This
                         * lets us keep statistics there. Hardware may
                         * post a false-positive MIC error.
                         */
                        if (ieee80211_is_ctl(fc))
                                /*
                                 * Sometimes, we get invalid
                                 * MIC failures on valid control frames.
                                 * Remove these mic errors.
                                 */
                                ds->ds_rxstat.rs_status &=
                                        ~HAL_RXERR_MIC;
                        else
                                rx_status.flags |= ATH_RX_MIC_ERROR;
                }
                /*
                 * Reject error frames with the exception of
                 * decryption and MIC failures. For monitor mode,
                 * we also ignore the CRC error.
                 */
                if (sc->sc_opmode == HAL_M_MONITOR) {
                        if (ds->ds_rxstat.rs_status &
                            ~(HAL_RXERR_DECRYPT | HAL_RXERR_MIC |
                        if (ds->ds_rxstat.rs_status &
                            ~(HAL_RXERR_DECRYPT | HAL_RXERR_MIC)) {
        /*
         * The status portion of the descriptor could get corrupted.
         */
        if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)

        /*
         * Sync and unmap the frame. At this point we're
         * committed to passing the sk_buff somewhere so
         * clear buf_skb; this means a new sk_buff must be
         * allocated when the rx descriptor is setup again
         * to receive another frame.
         */
        skb_put(skb, ds->ds_rxstat.rs_datalen);
        skb->protocol = ETH_P_CONTROL;
        rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
        rx_status.rateieee =
                sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
        rx_status.rateKbps =
                sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
        rx_status.ratecode = ds->ds_rxstat.rs_rate;

        if (rx_status.ratecode & 0x80) {
                /* TODO - add table to avoid division */
                if (ds->ds_rxstat.rs_flags & HAL_RX_2040) {
                        rx_status.flags |= ATH_RX_40MHZ;
                        rx_status.rateKbps =
                                (rx_status.rateKbps * 27) / 13;
                }

                if (ds->ds_rxstat.rs_flags & HAL_RX_GI)
                        rx_status.rateKbps =
                                (rx_status.rateKbps * 10) / 9;
                else
                        rx_status.flags |= ATH_RX_SHORT_GI;
        }
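
        /*
         * Rate math above, with illustrative numbers: a ratecode whose 20 MHz
         * table entry is 130000 kbps becomes 130000 * 27 / 13 = 270000 kbps
         * for an HT40 frame, and a further * 10 / 9 adjustment would take it
         * to 300000 kbps.
         */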
        /* sc->sc_noise_floor is only available when the station
           attaches to an AP, so we use a default value
           if we are not yet attached. */

        /* XXX we should use either sc->sc_noise_floor or
         * ath_hal_getChanNoise(ah, &sc->sc_curchan)
         * to calculate the noise floor.
         * However, the value returned by ath_hal_getChanNoise
         * seems to be incorrect (-31dBm on the last test),
         * so we will use a hard-coded value until we
         * figure out what is going on.
         */
        rx_status.abs_rssi =
                ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;

        pci_dma_sync_single_for_cpu(sc->pdev,
                                    PCI_DMA_FROMDEVICE);
        pci_unmap_single(sc->pdev,
                         PCI_DMA_FROMDEVICE);

        /* XXX: Ah! make me more readable, use a helper */
        if (sc->sc_hashtsupport) {
                if (ds->ds_rxstat.rs_moreaggr == 0) {
                        rx_status.rssictl[0] =
                                ds->ds_rxstat.rs_rssi_ctl0;
                        rx_status.rssictl[1] =
                                ds->ds_rxstat.rs_rssi_ctl1;
                        rx_status.rssictl[2] =
                                ds->ds_rxstat.rs_rssi_ctl2;
                        rx_status.rssi = ds->ds_rxstat.rs_rssi;
                        if (ds->ds_rxstat.rs_flags & HAL_RX_2040) {
                                rx_status.rssiextn[0] =
                                        ds->ds_rxstat.rs_rssi_ext0;
                                rx_status.rssiextn[1] =
                                        ds->ds_rxstat.rs_rssi_ext1;
                                rx_status.rssiextn[2] =
                                        ds->ds_rxstat.rs_rssi_ext2;
                                rx_status.flags |=
                                        ATH_RX_RSSI_EXTN_VALID;
                        }
                        rx_status.flags |= ATH_RX_RSSI_VALID |
                                ATH_RX_CHAIN_RSSI_VALID;
                }
        } else {
                /*
                 * Need to insert the "combined" rssi into the
                 * status structure for upper layer processing
                 */
                rx_status.rssi = ds->ds_rxstat.rs_rssi;
                rx_status.flags |= ATH_RX_RSSI_VALID;
        }
        /* Pass frames up to the stack. */

        type = ath_rx_indicate(sc, skb,
                               &rx_status, ds->ds_rxstat.rs_keyix);

        if (sc->sc_diversity) {
                /*
                 * When using hardware fast diversity, change the
                 * default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
                        if (++sc->sc_rxotherant >= 3)
                                ath_setdefantenna(sc,
                                        ds->ds_rxstat.rs_antenna);
                } else {
                        sc->sc_rxotherant = 0;
                }
        }

        /*
         * redo antenna detection for Lenovo devices
         */
        if (sc->sc_rx_chainmask_detect && sc->sc_rx_chainmask_start) {
                rxchainmask = ath_rx_detect_antenna(sc, &ds->ds_rxstat);
                sc->sc_rx_chainmask_detect = 0;
                sc->sc_rx_chainmask_start = 0;
                if (sc->sc_rx_chainmask != rxchainmask) {
                        sc->sc_rx_chainmask = rxchainmask;

                        /* we have to do a reset to
                         * change chain mask */

#ifdef CONFIG_SLOW_ANT_DIV
        if (sc->sc_slowAntDiv &&
            (rx_status.flags & ATH_RX_RSSI_VALID) &&
            ieee80211_is_beacon(fc)) {
                ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
        }
#endif

        /*
         * For frames successfully indicated, the buffer will be
         * returned to us by upper layers by calling
         * ath_rx_mpdu_requeue, either synchronously or asynchronously.
         * So we don't want to do it here in this loop.
         */

        bf->bf_status |= ATH_BUFSTATUS_FREE;

        DPRINTF(sc, ATH_DEBUG_CONFIG,
                "%s: Reset rx chain mask. "
                "Do internal reset\n", __func__);
        ath_internal_reset(sc);
/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
        struct ath_arx_tid *rxtid;
        struct ath_node *an;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_supported_band *sband;

        spin_lock_bh(&sc->node_lock);
        an = ath_node_find(sc, (u8 *) addr);
        spin_unlock_bh(&sc->node_lock);

        DPRINTF(sc, ATH_DEBUG_AGGR,
                "%s: Node not found to initialize RX aggregation\n",
                __func__);

        sband = hw->wiphy->bands[hw->conf.channel->band];
        buffersize = IEEE80211_MIN_AMPDU_BUF <<
                sband->ht_info.ampdu_factor;    /* FIXME */

        rxtid = &an->an_aggr.rx.tid[tid];

        spin_lock_bh(&rxtid->tidlock);
        if (sc->sc_rxaggr) {
                /* Allow aggregation reception
                 * Adjust rx BA window size. Peer might indicate a
                 * zero buffer size for a _dont_care_ condition.
                 */
                rxtid->baw_size = min(buffersize, rxtid->baw_size);

                /* set rx sequence number */
                rxtid->seq_next = *ssn;

                /* Allocate the receive buffers for this TID */
                DPRINTF(sc, ATH_DEBUG_AGGR,
                        "%s: Allocating rxbuffer for TID %d\n", __func__, tid);

                if (rxtid->rxbuf == NULL) {
                        /*
                         * If the rxbuff is not NULL at this point, we *probably*
                         * already allocated the buffer on a previous ADDBA,
                         * and this is a subsequent ADDBA that got through.
                         * Don't allocate, but use the value in the pointer,
                         * we zero it out when we de-allocate.
                         */
                        rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
                                sizeof(struct ath_rxbuf), GFP_ATOMIC);

                        if (rxtid->rxbuf == NULL) {
                                DPRINTF(sc, ATH_DEBUG_AGGR,
                                        "%s: Unable to allocate RX buffer, "
                                        "refusing ADDBA\n", __func__);
                        } else {
                                /* Ensure the memory is zeroed out (all internal
                                 * pointers are null) */
                                memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
                                        sizeof(struct ath_rxbuf));
                                DPRINTF(sc, ATH_DEBUG_AGGR,
                                        "%s: Allocated @%p\n", __func__, rxtid->rxbuf);

                                /* Allow aggregation reception */
                                rxtid->addba_exchangecomplete = 1;
                        }
                }
        }
        spin_unlock_bh(&rxtid->tidlock);
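
/*
 * Usage note (assumption, not stated in this file): ath_rx_aggr_start() is
 * expected to be called from the driver's mac80211 ampdu_action handler when
 * an IEEE80211_AMPDU_RX_START request arrives for a peer/TID, with
 * ath_rx_aggr_stop() below as the counterpart for IEEE80211_AMPDU_RX_STOP.
 */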
int ath_rx_aggr_stop(struct ath_softc *sc,
        struct ath_node *an;

        spin_lock_bh(&sc->node_lock);
        an = ath_node_find(sc, (u8 *) addr);
        spin_unlock_bh(&sc->node_lock);

        DPRINTF(sc, ATH_DEBUG_AGGR,
                "%s: RX aggr stop for non-existent node\n", __func__);

        ath_rx_aggr_teardown(sc, an, tid);
/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
                          struct ath_node *an, u_int8_t tid)
{
        struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

        if (!rxtid->addba_exchangecomplete)
                return;

        del_timer_sync(&rxtid->timer);
        ath_rx_flush_tid(sc, rxtid, 0);
        rxtid->addba_exchangecomplete = 0;

        /* De-allocate the receive buffer array allocated when addba started */

        DPRINTF(sc, ATH_DEBUG_AGGR,
                "%s: Deallocating TID %d rxbuff @%p\n",
                __func__, tid, rxtid->rxbuf);
        kfree(rxtid->rxbuf);

        /* Set pointer to null to avoid reuse */
        rxtid->rxbuf = NULL;
}
/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
        if (sc->sc_rxaggr) {
                struct ath_arx_tid *rxtid;

                /* Init per tid rx state */
                for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
                     tidno < WME_NUM_TID;
                     tidno++, rxtid++) {
                        rxtid->seq_reset = 1;
                        rxtid->seq_next = 0;
                        rxtid->baw_size = WME_MAX_BA;
                        rxtid->baw_head = rxtid->baw_tail = 0;

                        /*
                         * Ensure the buffer pointer is null at this point
                         * (needs to be allocated when addba is received)
                         */
                        rxtid->rxbuf = NULL;
                        setup_timer(&rxtid->timer, ath_rx_timer,
                                    (unsigned long)rxtid);
                        spin_lock_init(&rxtid->tidlock);

                        rxtid->addba_exchangecomplete = 0;
                }
        }
}
void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
        if (sc->sc_rxaggr) {
                struct ath_arx_tid *rxtid;

                /* Init per tid rx state */
                for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
                     tidno < WME_NUM_TID;
                     tidno++, rxtid++) {
                        if (!rxtid->addba_exchangecomplete)
                                continue;

                        /* must cancel timer first */
                        del_timer_sync(&rxtid->timer);

                        /* drop any pending sub-frames */
                        ath_rx_flush_tid(sc, rxtid, 1);

                        for (i = 0; i < ATH_TID_MAX_BUFS; i++)
                                ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

                        rxtid->addba_exchangecomplete = 0;
                }
        }
}
1368 void ath_rx_node_free(struct ath_softc
*sc
, struct ath_node
*an
)
1370 ath_rx_node_cleanup(sc
, an
);
dma_addr_t ath_skb_map_single(struct ath_softc *sc,
                              struct sk_buff *skb,
                              int direction,
                              dma_addr_t *pa)
{
        /*
         * NB: do NOT use skb->len, which is 0 on initialization.
         * Use skb's entire data area instead.
         */
        *pa = pci_map_single(sc->pdev, skb->data,
                             skb->end - skb->head, direction);
        return *pa;
}
void ath_skb_unmap_single(struct ath_softc *sc,
                          struct sk_buff *skb,
                          int direction,
                          dma_addr_t *pa)
{
        /* Unmap skb's entire data area */
        pci_unmap_single(sc->pdev, *pa, skb->end - skb->head, direction);
}