sync ath9k with latest git code
[openwrt/svn-archive/archive.git] / package/ath9k/src/drivers/net/wireless/ath9k/core.c
1 /*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19 #include "core.h"
20 #include "regd.h"
21
22 static int ath_outdoor; /* enable outdoor use */
23
24 static const u_int8_t ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27 static u_int32_t ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29 static u_int32_t ath_chainmask_sel_down_rssi_thres =
30 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
31 static u_int32_t ath_chainmask_sel_period =
32 ATH_CHAINMASK_SEL_TIMEOUT;
33
34 /* return bus cachesize in 4B word units */
35
36 static void bus_read_cachesize(struct ath_softc *sc, int *csz)
37 {
38 u_int8_t u8tmp;
39
40 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
41 *csz = (int)u8tmp;
42
43 /*
44 * This check was put in to avoid "unpleasant" consequences if
45 * the bootrom has not fully initialized all PCI devices.
46 * Sometimes the cache line size register is not set.
47 */
48
49 if (*csz == 0)
50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
51 }
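/*
 * Illustrative arithmetic (values assumed, not taken from this file):
 * with a DEFAULT_CACHELINE of 32 bytes the fallback yields
 * 32 >> 2 = 8 four-byte words; ath_init() later converts back to
 * bytes with sc_cachelsz = csz << 2.
 */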
52
53 /*
54 * Set current operating mode
55 *
56 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although
58 * they have been superseded by the ath_led module.
59 */
60
61 static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62 {
63 const struct hal_rate_table *rt;
64 int i;
65
66 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
67 rt = sc->sc_rates[mode];
68 BUG_ON(!rt);
69
70 for (i = 0; i < rt->rateCount; i++)
71 sc->sc_rixmap[rt->info[i].rateCode] = (u_int8_t) i;
72
73 memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
74 for (i = 0; i < 256; i++) {
75 u_int8_t ix = rt->rateCodeToIndex[i];
76
77 if (ix == 0xff)
78 continue;
79
80 sc->sc_hwmap[i].ieeerate =
81 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
82 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
83
84 if (rt->info[ix].shortPreamble ||
85 rt->info[ix].phy == PHY_OFDM) {
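/* XXX: intentionally empty in this snapshot; no short-preamble
 * tx flagging is done for these rates */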
86 }
87 /* NB: this uses the last entry if the rate isn't found */
88 * XXX beware of overflow
89 }
90 sc->sc_currates = rt;
91 sc->sc_curmode = mode;
92 /*
93 * All protection frames are transmitted at 2Mb/s for
94 * 11g, otherwise at 1Mb/s.
95 * XXX select protection rate index from rate table.
96 */
97 sc->sc_protrix = (mode == WIRELESS_MODE_11g ? 1 : 0);
98 /* rate index used to send mgt frames */
99 sc->sc_minrateix = 0;
100 }
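/*
 * Sketch of how the maps built above are consumed elsewhere (the
 * field names in this sketch are illustrative assumptions):
 *
 *	u_int8_t rix = sc->sc_rixmap[rate_code];      h/w code -> table index
 *	int kbps = sc->sc_hwmap[rate_code].rateKbps;  direct kbps lookup
 *
 * A rix of 0xff (the memset fill value) marks a hardware rate code
 * with no entry in the current table.
 */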
101
102 /*
103 * Select Rate Table
104 *
105 * Based on the wireless mode passed in, the rate table in the ATH object
106 * is set to the mode specific rate table. This also calls the callback
107 * function to set the rate in the protocol layer object.
108 */
109
110 static int ath_rate_setup(struct ath_softc *sc, enum wireless_mode mode)
111 {
112 struct ath_hal *ah = sc->sc_ah;
113 const struct hal_rate_table *rt;
114
115 switch (mode) {
116 case WIRELESS_MODE_11a:
117 sc->sc_rates[mode] =
118 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11A);
119 break;
120 case WIRELESS_MODE_11b:
121 sc->sc_rates[mode] =
122 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11B);
123 break;
124 case WIRELESS_MODE_11g:
125 sc->sc_rates[mode] =
126 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11G);
127 break;
128 case WIRELESS_MODE_11NA_HT20:
129 sc->sc_rates[mode] =
130 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT20);
131 break;
132 case WIRELESS_MODE_11NG_HT20:
133 sc->sc_rates[mode] =
134 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT20);
135 break;
136 case WIRELESS_MODE_11NA_HT40PLUS:
137 sc->sc_rates[mode] =
138 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT40PLUS);
139 break;
140 case WIRELESS_MODE_11NA_HT40MINUS:
141 sc->sc_rates[mode] =
142 ath9k_hw_getratetable(ah,
143 ATH9K_MODE_SEL_11NA_HT40MINUS);
144 break;
145 case WIRELESS_MODE_11NG_HT40PLUS:
146 sc->sc_rates[mode] =
147 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT40PLUS);
148 break;
149 case WIRELESS_MODE_11NG_HT40MINUS:
150 sc->sc_rates[mode] =
151 ath9k_hw_getratetable(ah,
152 ATH9K_MODE_SEL_11NG_HT40MINUS);
153 break;
154 default:
155 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid mode %u\n",
156 __func__, mode);
157 return 0;
158 }
159 rt = sc->sc_rates[mode];
160 if (rt == NULL)
161 return 0;
162
163 /* setup rate set in 802.11 protocol layer */
164 ath_setup_rate(sc, mode, NORMAL_RATE, rt);
165
166 return 1;
167 }
168
169 /*
170 * Set up channel list
171 *
172 * Determines the proper set of channelflags based on the selected mode,
173 * allocates a channel array, and passes it to the HAL for initialization.
174 * If successful, the list is passed to the upper layer, then de-allocated.
175 */
176
177 static int ath_getchannels(struct ath_softc *sc,
178 u_int cc,
179 bool outDoor,
180 bool xchanMode)
181 {
182 struct ath_hal *ah = sc->sc_ah;
183 struct hal_channel *chans;
184 int nchan;
185 u_int8_t regclassids[ATH_REGCLASSIDS_MAX];
186 u_int nregclass = 0;
187
188 chans = kmalloc(ATH_CHAN_MAX * sizeof(struct hal_channel), GFP_KERNEL);
189 if (chans == NULL) {
190 DPRINTF(sc, ATH_DBG_FATAL,
191 "%s: unable to allocate channel table\n", __func__);
192 return -ENOMEM;
193 }
194
195 if (!ath9k_regd_init_channels(ah,
196 chans,
197 ATH_CHAN_MAX,
198 (u_int *)&nchan,
199 regclassids,
200 ATH_REGCLASSIDS_MAX,
201 &nregclass,
202 cc,
203 ATH9K_MODE_SEL_ALL,
204 outDoor,
205 xchanMode)) {
206 u_int32_t rd = ah->ah_currentRD;
207
208 DPRINTF(sc, ATH_DBG_FATAL,
209 "%s: unable to collect channel list from hal; "
210 "regdomain likely %u country code %u\n",
211 __func__, rd, cc);
212 kfree(chans);
213 return -EINVAL;
214 }
215
216 ath_setup_channel_list(sc,
217 CLIST_UPDATE,
218 chans,
219 nchan,
220 regclassids,
221 nregclass,
222 CTRY_DEFAULT);
223
224 kfree(chans);
225 return 0;
226 }
227
228 /*
229 * Determine mode from channel flags
230 *
231 * This routine will provide the enumerated WIRELESS_MODE value based
232 * on the settings of the channel flags. If no valid set of flags
233 * exists, the lowest mode (11b) is selected.
234 */
235
236 static enum wireless_mode ath_chan2mode(struct hal_channel *chan)
237 {
238 if ((chan->channelFlags & CHANNEL_A) == CHANNEL_A)
239 return WIRELESS_MODE_11a;
240 else if ((chan->channelFlags & CHANNEL_G) == CHANNEL_G)
241 return WIRELESS_MODE_11g;
242 else if ((chan->channelFlags & CHANNEL_B) == CHANNEL_B)
243 return WIRELESS_MODE_11b;
244 else if ((chan->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20)
245 return WIRELESS_MODE_11NA_HT20;
246 else if ((chan->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20)
247 return WIRELESS_MODE_11NG_HT20;
248 else if ((chan->channelFlags & CHANNEL_A_HT40PLUS) ==
249 CHANNEL_A_HT40PLUS)
250 return WIRELESS_MODE_11NA_HT40PLUS;
251 else if ((chan->channelFlags & CHANNEL_A_HT40MINUS) ==
252 CHANNEL_A_HT40MINUS)
253 return WIRELESS_MODE_11NA_HT40MINUS;
254 else if ((chan->channelFlags & CHANNEL_G_HT40PLUS) ==
255 CHANNEL_G_HT40PLUS)
256 return WIRELESS_MODE_11NG_HT40PLUS;
257 else if ((chan->channelFlags & CHANNEL_G_HT40MINUS) ==
258 CHANNEL_G_HT40MINUS)
259 return WIRELESS_MODE_11NG_HT40MINUS;
260
261 /* NB: should not get here */
262 return WIRELESS_MODE_11b;
263 }
264
265 /*
266 * Change Channels
267 *
268 * Performs the actions to change the channel in the hardware, and set up
269 * the current operating mode for the new channel.
270 */
271
272 static void ath_chan_change(struct ath_softc *sc, struct hal_channel *chan)
273 {
274 enum wireless_mode mode;
275
276 mode = ath_chan2mode(chan);
277
278 ath_rate_setup(sc, mode);
279 ath_setcurmode(sc, mode);
280 }
281
282 /*
283 * Stop the device, grabbing the top-level lock to protect
284 * against concurrent entry through ath_init (which can happen
285 * if another thread does a system call and the thread doing the
286 * stop is preempted).
287 */
288
289 static int ath_stop(struct ath_softc *sc)
290 {
291 struct ath_hal *ah = sc->sc_ah;
292
293 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
294 __func__, sc->sc_invalid);
295
296 /*
297 * Shutdown the hardware and driver:
298 * stop output from above
299 * reset 802.11 state machine
300 * (sends station deassoc/deauth frames)
301 * turn off timers
302 * disable interrupts
303 * clear transmit machinery
304 * clear receive machinery
305 * turn off the radio
306 * reclaim beacon resources
307 *
308 * Note that some of this work is not possible if the
309 * hardware is gone (invalid).
310 */
311
312 if (!sc->sc_invalid)
313 ath9k_hw_set_interrupts(ah, 0);
314 ath_draintxq(sc, false);
315 if (!sc->sc_invalid) {
316 ath_stoprecv(sc);
317 ath9k_hw_phy_disable(ah);
318 } else
319 sc->sc_rxlink = NULL;
320
321 return 0;
322 }
323
324 /*
325 * Start Scan
326 *
327 * This function is called when starting a channel scan. It will perform
328 * power save wakeup processing, set the filter for the scan, and get the
329 * chip ready to send broadcast packets out during the scan.
330 */
331
332 void ath_scan_start(struct ath_softc *sc)
333 {
334 struct ath_hal *ah = sc->sc_ah;
335 u_int32_t rfilt;
336 u_int32_t now = (u_int32_t) jiffies_to_msecs(get_timestamp());
337
338 sc->sc_scanning = 1;
339 rfilt = ath_calcrxfilter(sc);
340 ath9k_hw_setrxfilter(ah, rfilt);
341 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
342
343 /* Restore previous power management state. */
344
345 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
346 now / 1000, now % 1000, __func__, rfilt);
347 }
348
349 /*
350 * Scan End
351 *
352 * This routine is called by the upper layer when the scan is completed. This
353 * will set the filters back to normal operating mode, set the BSSID to the
354 * correct value, and restore the power save state.
355 */
356
357 void ath_scan_end(struct ath_softc *sc)
358 {
359 struct ath_hal *ah = sc->sc_ah;
360 u_int32_t rfilt;
361 u_int32_t now = (u_int32_t) jiffies_to_msecs(get_timestamp());
362
363 sc->sc_scanning = 0;
364 /* Request for a full reset due to rx packet filter changes */
365 sc->sc_full_reset = 1;
366 rfilt = ath_calcrxfilter(sc);
367 ath9k_hw_setrxfilter(ah, rfilt);
368 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
369
370 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
371 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
372 }
373
374 /*
375 * Set the current channel
376 *
377 * Set/change channels. If the channel is really being changed, it's done
378 * by resetting the chip. To accomplish this we must first clean up any
379 * pending DMA, then restart things as in ath_init.
380 */
381 int ath_set_channel(struct ath_softc *sc, struct hal_channel *hchan)
382 {
383 struct ath_hal *ah = sc->sc_ah;
384 bool fastcc = true, stopped;
385 enum hal_ht_macmode ht_macmode;
386
387 if (sc->sc_invalid) /* if the device is invalid or removed */
388 return -EIO;
389
390 DPRINTF(sc, ATH_DBG_CONFIG,
391 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
392 __func__,
393 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
394 sc->sc_curchan.channelFlags),
395 sc->sc_curchan.channel,
396 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
397 hchan->channel, hchan->channelFlags);
398
399 ht_macmode = ath_cwm_macmode(sc);
400
401 if (hchan->channel != sc->sc_curchan.channel ||
402 hchan->channelFlags != sc->sc_curchan.channelFlags ||
403 sc->sc_update_chainmask || sc->sc_full_reset) {
404 enum hal_status status;
405 /*
406 * This is only performed if the channel settings have
407 * actually changed.
408 *
409 * To switch channels clear any pending DMA operations;
410 * wait long enough for the RX fifo to drain, reset the
411 * hardware at the new frequency, and then re-enable
412 * the relevant bits of the h/w.
413 */
414 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
415 ath_draintxq(sc, false); /* clear pending tx frames */
416 stopped = ath_stoprecv(sc); /* turn off frame recv */
417
418 /* XXX: do not flush receive queue here. We don't want
419 * to flush data frames already in queue because of
420 * changing channel. */
421
422 if (!stopped || sc->sc_full_reset)
423 fastcc = false;
424
425 spin_lock_bh(&sc->sc_resetlock);
426 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
427 ht_macmode, sc->sc_tx_chainmask,
428 sc->sc_rx_chainmask,
429 sc->sc_ht_extprotspacing,
430 fastcc, &status)) {
431 DPRINTF(sc, ATH_DBG_FATAL,
432 "%s: unable to reset channel %u (%uMhz) "
433 "flags 0x%x hal status %u\n", __func__,
434 ath9k_hw_mhz2ieee(ah, hchan->channel,
435 hchan->channelFlags),
436 hchan->channel, hchan->channelFlags, status);
437 spin_unlock_bh(&sc->sc_resetlock);
438 return -EIO;
439 }
440 spin_unlock_bh(&sc->sc_resetlock);
441
442 sc->sc_curchan = *hchan;
443 sc->sc_update_chainmask = 0;
444 sc->sc_full_reset = 0;
445
446 /* Re-enable rx framework */
447 if (ath_startrecv(sc) != 0) {
448 DPRINTF(sc, ATH_DBG_FATAL,
449 "%s: unable to restart recv logic\n", __func__);
450 return -EIO;
451 }
452 /*
453 * Change channels and update the h/w rate map
454 * if we're switching; e.g. 11a to 11b/g.
455 */
456 ath_chan_change(sc, hchan);
457 ath_update_txpow(sc); /* update tx power state */
458 /*
459 * Re-enable interrupts.
460 */
461 ath9k_hw_set_interrupts(ah, sc->sc_imask);
462 }
463 return 0;
464 }
465
466 /**********************/
467 /* Chainmask Handling */
468 /**********************/
469
470 static void ath_chainmask_sel_timertimeout(unsigned long data)
471 {
472 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
473 cm->switch_allowed = 1;
474 }
475
476 /* Start chainmask select timer */
477 static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
478 {
479 cm->switch_allowed = 0;
480 mod_timer(&cm->timer, jiffies + ath_chainmask_sel_period); /* expiry is absolute */
481 }
482
483 /* Stop chainmask select timer */
484 static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
485 {
486 cm->switch_allowed = 0;
487 del_timer_sync(&cm->timer);
488 }
489
490 static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
491 {
492 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
493
494 memzero(cm, sizeof(struct ath_chainmask_sel));
495
496 cm->cur_tx_mask = sc->sc_tx_chainmask;
497 cm->cur_rx_mask = sc->sc_rx_chainmask;
498 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
499 setup_timer(&cm->timer,
500 ath_chainmask_sel_timertimeout, (unsigned long) cm);
501 }
502
503 int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
504 {
505 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
506
507 /*
508 * Disable auto-switching in one of the following if conditions.
509 * sc_chainmask_auto_sel is used for internal global auto-switching
510 * enabled/disabled setting
511 */
512 if (sc->sc_ah->ah_caps.halTxChainMask != ATH_CHAINMASK_SEL_3X3) {
513 cm->cur_tx_mask = sc->sc_tx_chainmask;
514 return cm->cur_tx_mask;
515 }
516
517 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
518 return cm->cur_tx_mask;
519
520 if (cm->switch_allowed) {
521 /* Switch down from tx 3 to tx 2. */
522 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
523 ATH_RSSI_OUT(cm->tx_avgrssi) >=
524 ath_chainmask_sel_down_rssi_thres) {
525 cm->cur_tx_mask = sc->sc_tx_chainmask;
526
527 /* Don't let another switch happen until
528 * this timer expires */
529 ath_chainmask_sel_timerstart(cm);
530 }
531 /* Switch up from tx 2 to 3. */
532 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
533 ATH_RSSI_OUT(cm->tx_avgrssi) <=
534 ath_chainmask_sel_up_rssi_thres) {
535 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
536
537 /* Don't let another switch happen
538 * until this timer expires */
539 ath_chainmask_sel_timerstart(cm);
540 }
541 }
542
543 return cm->cur_tx_mask;
544 }
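/*
 * Net effect, in terms of the thresholds declared at the top of this
 * file: a strong link (avg RSSI >= ath_chainmask_sel_down_rssi_thres)
 * drops tx from ATH_CHAINMASK_SEL_3X3 back to the configured
 * sc_tx_chainmask, a weak link (avg RSSI <=
 * ath_chainmask_sel_up_rssi_thres) restores 3x3, and every switch arms
 * the holdoff timer so the next switch must wait out
 * ATH_CHAINMASK_SEL_TIMEOUT.
 */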
545
546 /*
547 * Update tx/rx chainmask. For legacy association,
548 * hard code chainmask to 1x1, for 11n association, use
549 * the chainmask configuration.
550 */
551
552 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
553 {
554 sc->sc_update_chainmask = 1;
555 if (is_ht) {
556 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.halTxChainMask;
557 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.halRxChainMask;
558 } else {
559 sc->sc_tx_chainmask = 1;
560 sc->sc_rx_chainmask = 1;
561 }
562
563 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
564 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
565 }
566
567 /******************/
568 /* VAP management */
569 /******************/
570
571 /*
572 * VAP in Listen mode
573 *
574 * This routine brings the VAP out of the down state into a "listen" state
575 * where it waits for association requests. This is used in AP and AdHoc
576 * modes.
577 */
578
579 int ath_vap_listen(struct ath_softc *sc, int if_id)
580 {
581 struct ath_hal *ah = sc->sc_ah;
582 struct ath_vap *avp;
583 u_int32_t rfilt = 0;
584 DECLARE_MAC_BUF(mac);
585
586 avp = sc->sc_vaps[if_id];
587 if (avp == NULL) {
588 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
589 __func__, if_id);
590 return -EINVAL;
591 }
592
593 #ifdef CONFIG_SLOW_ANT_DIV
594 ath_slow_ant_div_stop(&sc->sc_antdiv);
595 #endif
596
597 /* update ratectrl about the new state */
598 ath_rate_newstate(sc, avp, 0);
599
600 rfilt = ath_calcrxfilter(sc);
601 ath9k_hw_setrxfilter(ah, rfilt);
602
603 if (sc->sc_opmode == HAL_M_STA || sc->sc_opmode == HAL_M_IBSS) {
604 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
605 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
606 } else
607 sc->sc_curaid = 0;
608
609 DPRINTF(sc, ATH_DBG_CONFIG,
610 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
611 __func__, rfilt, print_mac(mac,
612 sc->sc_curbssid), sc->sc_curaid);
613
614 /*
615 * XXXX
616 * Disable BMISS interrupt when we're not associated
617 */
618 ath9k_hw_set_interrupts(ah,
619 sc->sc_imask & ~(HAL_INT_SWBA | HAL_INT_BMISS));
620 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
621 /* need to reconfigure the beacons when it moves to RUN */
622 sc->sc_beacons = 0;
623
624 return 0;
625 }
626
627 int ath_vap_attach(struct ath_softc *sc,
628 int if_id,
629 struct ieee80211_vif *if_data,
630 enum hal_opmode opmode)
631 {
632 struct ath_vap *avp;
633
634 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
635 DPRINTF(sc, ATH_DBG_FATAL,
636 "%s: Invalid interface id = %u\n", __func__, if_id);
637 return -EINVAL;
638 }
639
640 switch (opmode) {
641 case HAL_M_STA:
642 case HAL_M_IBSS:
643 case HAL_M_MONITOR:
644 break;
645 case HAL_M_HOSTAP:
646 /* XXX not right, beacon buffer is allocated on RUN trans */
647 if (list_empty(&sc->sc_bbuf))
648 return -ENOMEM;
649 break;
650 default:
651 return -EINVAL;
652 }
653
654 /* create ath_vap */
655 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
656 if (avp == NULL)
657 return -ENOMEM;
658
659 memzero(avp, sizeof(struct ath_vap));
660 avp->av_if_data = if_data;
661 /* Set the VAP opmode */
662 avp->av_opmode = opmode;
663 avp->av_bslot = -1;
664 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
665 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
666 spin_lock_init(&avp->av_mcastq.axq_lock);
667
668 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
669
670 sc->sc_vaps[if_id] = avp;
671 sc->sc_nvaps++;
672 /* Set the device opmode */
673 sc->sc_opmode = opmode;
674
675 /* default VAP configuration */
676 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
677 avp->av_config.av_fixed_retryset = 0x03030303;
678
679 return 0;
680 }
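/*
 * A plausible call sequence from the mac80211 glue (a sketch; the
 * actual callbacks live outside this file):
 *
 *	error = ath_vap_attach(sc, 0, vif, HAL_M_STA);
 *	if (!error)
 *		error = ath_vap_listen(sc, 0);
 */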
681
682 int ath_vap_detach(struct ath_softc *sc, int if_id)
683 {
684 struct ath_hal *ah = sc->sc_ah;
685 struct ath_vap *avp;
686
687 avp = sc->sc_vaps[if_id];
688 if (avp == NULL) {
689 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
690 __func__, if_id);
691 return -EINVAL;
692 }
693
694 /*
695 * Quiesce the hardware while we remove the vap. In
696 * particular we need to reclaim all references to the
697 * vap state by any frames pending on the tx queues.
698 *
699 * XXX can we do this w/o affecting other vap's?
700 */
701 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
702 ath_draintxq(sc, false); /* stop xmit side */
703 ath_stoprecv(sc); /* stop recv side */
704 ath_flushrecv(sc); /* flush recv queue */
705
706 /* Reclaim any pending mcast bufs on the vap. */
707 ath_tx_draintxq(sc, &avp->av_mcastq, false);
708
709 kfree(avp);
710 sc->sc_vaps[if_id] = NULL;
711 sc->sc_nvaps--;
712
713 return 0;
714 }
715
716 int ath_vap_config(struct ath_softc *sc,
717 int if_id, struct ath_vap_config *if_config)
718 {
719 struct ath_vap *avp;
720
721 if (if_id >= ATH_BCBUF) {
722 DPRINTF(sc, ATH_DBG_FATAL,
723 "%s: Invalid interface id = %u\n", __func__, if_id);
724 return -EINVAL;
725 }
726
727 avp = sc->sc_vaps[if_id];
728 ASSERT(avp != NULL);
729
730 if (avp)
731 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
732
733 return 0;
734 }
735
736 /********/
737 /* Core */
738 /********/
739
740 int ath_open(struct ath_softc *sc, struct hal_channel *initial_chan)
741 {
742 struct ath_hal *ah = sc->sc_ah;
743 enum hal_status status;
744 int error = 0;
745 enum hal_ht_macmode ht_macmode = ath_cwm_macmode(sc);
746
747 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
748
749 /*
750 * Stop anything previously setup. This is safe
751 * whether this is the first time through or not.
752 */
753 ath_stop(sc);
754
755 /* Initialize chainmask selection */
756 sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
757 sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
758
759 /* Reset SERDES registers */
760 ath9k_hw_configpcipowersave(ah, 0);
761
762 /*
763 * The basic interface to setting the hardware in a good
764 * state is ``reset''. On return the hardware is known to
765 * be powered up and with interrupts disabled. This must
766 * be followed by initialization of the appropriate bits
767 * and then setup of the interrupt mask.
768 */
769 sc->sc_curchan = *initial_chan;
770
771 spin_lock_bh(&sc->sc_resetlock);
772 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
773 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
774 sc->sc_ht_extprotspacing, false, &status)) {
775 DPRINTF(sc, ATH_DBG_FATAL,
776 "%s: unable to reset hardware; hal status %u "
777 "(freq %u flags 0x%x)\n", __func__, status,
778 sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
779 error = -EIO;
780 spin_unlock_bh(&sc->sc_resetlock);
781 goto done;
782 }
783 spin_unlock_bh(&sc->sc_resetlock);
784 /*
785 * This is needed only to setup initial state
786 * but it's best done after a reset.
787 */
788 ath_update_txpow(sc);
789
790 /*
791 * Setup the hardware after reset:
792 * The receive engine is set going.
793 * Frame transmit is handled entirely
794 * in the frame output path; there's nothing to do
795 * here except setup the interrupt mask.
796 */
797 if (ath_startrecv(sc) != 0) {
798 DPRINTF(sc, ATH_DBG_FATAL,
799 "%s: unable to start recv logic\n", __func__);
800 error = -EIO;
801 goto done;
802 }
803 /* Setup our intr mask. */
804 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
805 | HAL_INT_RXEOL | HAL_INT_RXORN
806 | HAL_INT_FATAL | HAL_INT_GLOBAL;
807
808 if (ah->ah_caps.halGTTSupport)
809 sc->sc_imask |= HAL_INT_GTT;
810
811 if (ah->ah_caps.halHTSupport)
812 sc->sc_imask |= HAL_INT_CST;
813
814 /*
815 * Enable MIB interrupts when there are hardware phy counters.
816 * Note we only do this (at the moment) for station mode.
817 */
818 if (ath9k_hw_phycounters(ah) &&
819 ((sc->sc_opmode == HAL_M_STA) || (sc->sc_opmode == HAL_M_IBSS)))
820 sc->sc_imask |= HAL_INT_MIB;
821 /*
822 * Some hardware processes the TIM IE and fires an
823 * interrupt when the TIM bit is set. For hardware
824 * that does, if not overridden by configuration,
825 * enable the TIM interrupt when operating as station.
826 */
827 if (ah->ah_caps.halEnhancedPmSupport && sc->sc_opmode == HAL_M_STA &&
828 !sc->sc_config.swBeaconProcess)
829 sc->sc_imask |= HAL_INT_TIM;
830 /*
831 * Don't enable interrupts here as we've not yet built our
832 * vap and node data structures, which will be needed as soon
833 * as we start receiving.
834 */
835 ath_chan_change(sc, initial_chan);
836
837 /* XXX: we must make sure h/w is ready and clear invalid flag
838 * before turning on interrupt. */
839 sc->sc_invalid = 0;
840 done:
841 return error;
842 }
843
844 /*
845 * Reset the hardware w/o losing operational state. This is
846 * basically a more efficient way of doing ath_stop, ath_init,
847 * followed by state transitions to the current 802.11
848 * operational state. Used to recover from rx overrun errors
849 * and to reset the hardware when rf gain settings must be reset.
850 */
851
852 static int ath_reset_start(struct ath_softc *sc, u_int32_t flag)
853 {
854 struct ath_hal *ah = sc->sc_ah;
855
856 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
857 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
858 ath_stoprecv(sc); /* stop recv side */
859 ath_flushrecv(sc); /* flush recv queue */
860
861 return 0;
862 }
863
864 static int ath_reset_end(struct ath_softc *sc, u_int32_t flag)
865 {
866 struct ath_hal *ah = sc->sc_ah;
867
868 if (ath_startrecv(sc) != 0) /* restart recv */
869 DPRINTF(sc, ATH_DBG_FATAL,
870 "%s: unable to start recv logic\n", __func__);
871
872 /*
873 * We may be doing a reset in response to a request
874 * that changes the channel so update any state that
875 * might change as a result.
876 */
877 ath_chan_change(sc, &sc->sc_curchan);
878
879 ath_update_txpow(sc); /* update tx power state */
880
881 if (sc->sc_beacons)
882 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
883 ath9k_hw_set_interrupts(ah, sc->sc_imask);
884
885 /* Restart the txq */
886 if (flag & RESET_RETRY_TXQ) {
887 int i;
888 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
889 if (ATH_TXQ_SETUP(sc, i)) {
890 spin_lock_bh(&sc->sc_txq[i].axq_lock);
891 ath_txq_schedule(sc, &sc->sc_txq[i]);
892 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
893 }
894 }
895 }
896 return 0;
897 }
898
899 int ath_reset(struct ath_softc *sc)
900 {
901 struct ath_hal *ah = sc->sc_ah;
902 enum hal_status status;
903 int error = 0;
904 enum hal_ht_macmode ht_macmode = ath_cwm_macmode(sc);
905
906 /* NB: indicate channel change so we do a full reset */
907 spin_lock_bh(&sc->sc_resetlock);
908 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
909 ht_macmode,
910 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
911 sc->sc_ht_extprotspacing, false, &status)) {
912 DPRINTF(sc, ATH_DBG_FATAL,
913 "%s: unable to reset hardware; hal status %u\n",
914 __func__, status);
915 error = -EIO;
916 }
917 spin_unlock_bh(&sc->sc_resetlock);
918
919 return error;
920 }
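/*
 * Note that ath_reset() only re-runs the chip reset itself; callers
 * that need the full quiesce/restart sequence use ath_internal_reset()
 * (see the Utilities section below), which brackets this call with
 * ath_reset_start() and ath_reset_end().
 */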
921
922 int ath_suspend(struct ath_softc *sc)
923 {
924 struct ath_hal *ah = sc->sc_ah;
925
926 /* No I/O if device has been surprise removed */
927 if (sc->sc_invalid)
928 return -EIO;
929
930 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
931 ath9k_hw_set_interrupts(ah, 0);
932
933 /* XXX: we must make sure h/w will not generate any interrupt
934 * before setting the invalid flag. */
935 sc->sc_invalid = 1;
936
937 /* disable HAL and put h/w to sleep */
938 ath9k_hw_disable(sc->sc_ah);
939
940 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
941
942 return 0;
943 }
944
945 /* Interrupt handler. Most of the actual processing is deferred.
946 * It's the caller's responsibility to ensure the chip is awake. */
947
948 irqreturn_t ath_isr(int irq, void *dev)
949 {
950 struct ath_softc *sc = dev;
951 struct ath_hal *ah = sc->sc_ah;
952 enum hal_int status;
953 bool sched = false;
954
955 do {
956 if (sc->sc_invalid) {
957 /*
958 * The hardware is not ready/present, don't
959 * touch anything. Note this can happen early
960 * on if the IRQ is shared.
961 */
962 return IRQ_NONE;
963 }
964 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
965 return IRQ_NONE;
966 }
967
968 /*
969 * Figure out the reason(s) for the interrupt. Note
970 * that the hal returns a pseudo-ISR that may include
971 * bits we haven't explicitly enabled so we mask the
972 * value to ensure we only process bits we requested.
973 */
974 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
975
976 status &= sc->sc_imask; /* discard unasked-for bits */
977
978 /*
979 * If there are no status bits set, then this interrupt was not
980 * for me (should have been caught above).
981 */
982
983 if (!status)
984 return IRQ_NONE;
985
986 sc->sc_intrstatus = status;
987
988 if (status & HAL_INT_FATAL) {
989 /* need a chip reset */
990 sched = true;
991 } else if (status & HAL_INT_RXORN) {
992 /* need a chip reset */
993 sched = true;
994 } else {
995 if (status & HAL_INT_SWBA) {
996 /* schedule a tasklet for beacon handling */
997 tasklet_schedule(&sc->bcon_tasklet);
998 }
999 if (status & HAL_INT_RXEOL) {
1000 /*
1001 * NB: the hardware should re-read the link when
1002 * RXE bit is written, but it doesn't work
1003 * at least on older hardware revs.
1004 */
1005 sched = true;
1006 }
1007
1008 if (status & HAL_INT_TXURN)
1009 /* bump tx trigger level */
1010 ath9k_hw_updatetxtriglevel(ah, true);
1011 /* XXX: optimize this */
1012 if (status & HAL_INT_RX)
1013 sched = true;
1014 if (status & HAL_INT_TX)
1015 sched = true;
1016 if (status & HAL_INT_BMISS)
1017 sched = true;
1018 /* carrier sense timeout */
1019 if (status & HAL_INT_CST)
1020 sched = true;
1021 if (status & HAL_INT_MIB) {
1022 /*
1023 * Disable interrupts until we service the MIB
1024 * interrupt; otherwise it will continue to
1025 * fire.
1026 */
1027 ath9k_hw_set_interrupts(ah, 0);
1028 /*
1029 * Let the hal handle the event. We assume
1030 * it will clear whatever condition caused
1031 * the interrupt.
1032 */
1033 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1034 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1035 }
1036 if (status & HAL_INT_TIM_TIMER) {
1037 if (!ah->ah_caps.halAutoSleepSupport) {
1038 /* Clear RxAbort bit so that we can
1039 * receive frames */
1040 ath9k_hw_setrxabort(ah, 0);
1041 sched = true;
1042 }
1043 }
1044 }
1045 } while (0);
1046
1047 if (sched) {
1048 /* turn off every interrupt except SWBA */
1049 ath9k_hw_set_interrupts(ah, (sc->sc_imask & HAL_INT_SWBA));
1050 tasklet_schedule(&sc->intr_tq);
1051 }
1052
1053 return IRQ_HANDLED;
1054 }
1055
1056 /* Deferred interrupt processing */
1057
1058 static void ath9k_tasklet(unsigned long data)
1059 {
1060 struct ath_softc *sc = (struct ath_softc *)data;
1061 u_int32_t status = sc->sc_intrstatus;
1062
1063 if (status & HAL_INT_FATAL) {
1064 /* need a chip reset */
1065 ath_internal_reset(sc);
1066 return;
1067 } else {
1068
1069 if (status & (HAL_INT_RX | HAL_INT_RXEOL | HAL_INT_RXORN)) {
1070 /* XXX: fill me in */
1071 /*
1072 if (status & HAL_INT_RXORN) {
1073 }
1074 if (status & HAL_INT_RXEOL) {
1075 }
1076 */
1077 spin_lock_bh(&sc->sc_rxflushlock);
1078 ath_rx_tasklet(sc, 0);
1079 spin_unlock_bh(&sc->sc_rxflushlock);
1080 }
1081 /* XXX: optimize this */
1082 if (status & HAL_INT_TX)
1083 ath_tx_tasklet(sc);
1084 /* XXX: fill me in */
1085 /*
1086 if (status & HAL_INT_BMISS) {
1087 }
1088 if (status & (HAL_INT_TIM | HAL_INT_DTIMSYNC)) {
1089 if (status & HAL_INT_TIM) {
1090 }
1091 if (status & HAL_INT_DTIMSYNC) {
1092 }
1093 }
1094 */
1095 }
1096
1097 /* re-enable hardware interrupt */
1098 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1099 }
1100
1101 int ath_init(u_int16_t devid, struct ath_softc *sc)
1102 {
1103 struct ath_hal *ah = NULL;
1104 enum hal_status status;
1105 int error = 0, i;
1106 int csz = 0;
1107 u_int32_t rd;
1108
1109 /* XXX: hardware will not be ready until ath_open() is called */
1110 sc->sc_invalid = 1;
1111
1112 sc->sc_debug = DBG_DEFAULT;
1113 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1114
1115 /* Initialize tasklet */
1116 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1117 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1118 (unsigned long)sc);
1119
1120 /*
1121 * Cache line size is used to size and align various
1122 * structures used to communicate with the hardware.
1123 */
1124 bus_read_cachesize(sc, &csz);
1125 /* XXX assert csz is non-zero */
1126 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1127
1128 spin_lock_init(&sc->sc_resetlock);
1129
1130 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1131 if (ah == NULL) {
1132 DPRINTF(sc, ATH_DBG_FATAL,
1133 "%s: unable to attach hardware; HAL status %u\n",
1134 __func__, status);
1135 error = -ENXIO;
1136 goto bad;
1137 }
1138 sc->sc_ah = ah;
1139
1140 /* Get the chipset-specific aggr limit. */
1141 sc->sc_rtsaggrlimit = ah->ah_caps.halRtsAggrLimit;
1142
1143 /* Get the hardware key cache size. */
1144 sc->sc_keymax = ah->ah_caps.halKeyCacheSize;
1145 if (sc->sc_keymax > ATH_KEYMAX) {
1146 DPRINTF(sc, ATH_DBG_KEYCACHE,
1147 "%s: Warning, using only %u entries in %u key cache\n",
1148 __func__, ATH_KEYMAX, sc->sc_keymax);
1149 sc->sc_keymax = ATH_KEYMAX;
1150 }
1151
1152 /*
1153 * Reset the key cache since some parts do not
1154 * reset the contents on initial power up.
1155 */
1156 for (i = 0; i < sc->sc_keymax; i++)
1157 ath9k_hw_keyreset(ah, (u_int16_t) i);
1158 /*
1159 * Mark key cache slots associated with global keys
1160 * as in use. If we knew TKIP was not to be used we
1161 * could leave the +32, +64, and +32+64 slots free.
1162 * XXX only for splitmic.
1163 */
1164 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1165 set_bit(i, sc->sc_keymap);
1166 set_bit(i + 32, sc->sc_keymap);
1167 set_bit(i + 64, sc->sc_keymap);
1168 set_bit(i + 32 + 64, sc->sc_keymap);
1169 }
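/*
 * With the kernel's IEEE80211_WEP_NKID of 4, the loop above marks
 * slots 0-3 in use along with their TKIP companions 32-35, 64-67 and
 * 96-99.
 */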
1170 /*
1171 * Collect the channel list using the default country
1172 * code and including outdoor channels. The 802.11 layer
1173 * is responsible for filtering this list based on settings
1174 * like the phy mode.
1175 */
1176 rd = ah->ah_currentRD;
1177
1178 error = ath_getchannels(sc,
1179 CTRY_DEFAULT,
1180 ath_outdoor,
1181 1);
1182 if (error)
1183 goto bad;
1184
1185 /* default to monitor mode; ath_vap_attach() sets the real opmode */
1186 sc->sc_opmode = HAL_M_MONITOR;
1187
1188 /* Setup rate tables for all potential media types. */
1189 /* 11g encompasses b,g */
1190
1191 ath_rate_setup(sc, WIRELESS_MODE_11a);
1192 ath_rate_setup(sc, WIRELESS_MODE_11g);
1193
1194 /* NB: setup here so ath_rate_update is happy */
1195 ath_setcurmode(sc, WIRELESS_MODE_11a);
1196
1197 /*
1198 * Allocate hardware transmit queues: one queue for
1199 * beacon frames and one data queue for each QoS
1200 * priority. Note that the hal handles resetting
1201 * these queues at the needed time.
1202 */
1203 sc->sc_bhalq = ath_beaconq_setup(ah);
1204 if (sc->sc_bhalq == -1) {
1205 DPRINTF(sc, ATH_DBG_FATAL,
1206 "%s: unable to setup a beacon xmit queue\n", __func__);
1207 error = -EIO;
1208 goto bad2;
1209 }
1210 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
1211 if (sc->sc_cabq == NULL) {
1212 DPRINTF(sc, ATH_DBG_FATAL,
1213 "%s: unable to setup CAB xmit queue\n", __func__);
1214 error = -EIO;
1215 goto bad2;
1216 }
1217
1218 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1219 ath_cabq_update(sc);
1220
1221 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1222 sc->sc_haltype2q[i] = -1;
1223
1224 /* Setup data queues */
1225 /* NB: ensure BK queue is the lowest priority h/w queue */
1226 if (!ath_tx_setup(sc, HAL_WME_AC_BK)) {
1227 DPRINTF(sc, ATH_DBG_FATAL,
1228 "%s: unable to setup xmit queue for BK traffic\n",
1229 __func__);
1230 error = -EIO;
1231 goto bad2;
1232 }
1233
1234 if (!ath_tx_setup(sc, HAL_WME_AC_BE)) {
1235 DPRINTF(sc, ATH_DBG_FATAL,
1236 "%s: unable to setup xmit queue for BE traffic\n",
1237 __func__);
1238 error = -EIO;
1239 goto bad2;
1240 }
1241 if (!ath_tx_setup(sc, HAL_WME_AC_VI)) {
1242 DPRINTF(sc, ATH_DBG_FATAL,
1243 "%s: unable to setup xmit queue for VI traffic\n",
1244 __func__);
1245 error = -EIO;
1246 goto bad2;
1247 }
1248 if (!ath_tx_setup(sc, HAL_WME_AC_VO)) {
1249 DPRINTF(sc, ATH_DBG_FATAL,
1250 "%s: unable to setup xmit queue for VO traffic\n",
1251 __func__);
1252 error = -EIO;
1253 goto bad2;
1254 }
1255
1256 sc->sc_rc = ath_rate_attach(ah);
1257 if (sc->sc_rc == NULL) {
1258 error = -EIO;
1259 goto bad2;
1260 }
1261
1262 if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_TKIP, NULL)) {
1263 /*
1264 * Whether we should enable h/w TKIP MIC.
1265 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1266 * report WMM capable, so it's always safe to turn on
1267 * TKIP MIC in this case.
1268 */
1269 ath9k_hw_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 1, NULL);
1270 }
1271
1272 /*
1273 * Check whether the separate key cache entries
1274 * are required to handle both tx+rx MIC keys.
1275 * With split mic keys the number of stations is limited
1276 * to 27; otherwise it is 59.
1277 */
1278 if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_TKIP, NULL)
1279 && ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_MIC, NULL)
1280 && ath9k_hw_getcapability(ah, HAL_CAP_TKIP_SPLIT, 0, NULL))
1281 sc->sc_splitmic = 1;
1282
1283 /* turn on mcast key search if possible */
1284 if (ath9k_hw_getcapability(ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL)
1285 == HAL_OK)
1286 (void)ath9k_hw_setcapability(ah, HAL_CAP_MCAST_KEYSRCH, 1,
1287 1, NULL);
1288
1289 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1290 sc->sc_config.txpowlimit_override = 0;
1291
1292 /* 11n Capabilities */
1293 if (ah->ah_caps.halHTSupport) {
1294 sc->sc_txaggr = 1;
1295 sc->sc_rxaggr = 1;
1296 }
1297
1298 sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
1299 sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
1300
1301 /* Configuration for rx chain detection */
1302 sc->sc_rxchaindetect_ref = 0;
1303 sc->sc_rxchaindetect_thresh5GHz = 35;
1304 sc->sc_rxchaindetect_thresh2GHz = 35;
1305 sc->sc_rxchaindetect_delta5GHz = 30;
1306 sc->sc_rxchaindetect_delta2GHz = 30;
1307
1308 ath9k_hw_setcapability(ah, HAL_CAP_DIVERSITY, 1, true, NULL);
1309 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1310
1311 ath9k_hw_getmac(ah, sc->sc_myaddr);
1312 if (ah->ah_caps.halBssIdMaskSupport) {
1313 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1314 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1315 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1316 }
1317 sc->sc_slottime = HAL_SLOT_TIME_9; /* default to short slot time */
1318
1319 /* initialize beacon slots */
1320 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1321 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1322
1323 /* save MISC configurations */
1324 sc->sc_config.swBeaconProcess = 1;
1325
1326 #ifdef CONFIG_SLOW_ANT_DIV
1327 /* range is 40 - 255, we use something in the middle */
1328 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1329 #endif
1330
1331 return 0;
1332 bad2:
1333 /* cleanup tx queues */
1334 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
1335 if (ATH_TXQ_SETUP(sc, i))
1336 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1337 bad:
1338 if (ah)
1339 ath9k_hw_detach(ah);
1340 return error;
1341 }
1342
1343 void ath_deinit(struct ath_softc *sc)
1344 {
1345 struct ath_hal *ah = sc->sc_ah;
1346 int i;
1347
1348 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1349
1350 ath_stop(sc);
1351 if (!sc->sc_invalid)
1352 ath9k_hw_setpower(sc->sc_ah, HAL_PM_AWAKE);
1353 ath_rate_detach(sc->sc_rc);
1354 /* cleanup tx queues */
1355 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
1356 if (ATH_TXQ_SETUP(sc, i))
1357 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1358 ath9k_hw_detach(ah);
1359 }
1360
1361 /*******************/
1362 /* Node Management */
1363 /*******************/
1364
1365 struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1366 {
1367 struct ath_vap *avp;
1368 struct ath_node *an;
1369 DECLARE_MAC_BUF(mac);
1370
1371 avp = sc->sc_vaps[if_id];
1372 ASSERT(avp != NULL);
1373
1374 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1375 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1376 if (an == NULL)
1377 return NULL;
1378 memzero(an, sizeof(*an));
1379
1380 an->an_sc = sc;
1381 memcpy(an->an_addr, addr, ETH_ALEN);
1382 atomic_set(&an->an_refcnt, 1);
1383
1384 /* set up per-node tx/rx state */
1385 ath_tx_node_init(sc, an);
1386 ath_rx_node_init(sc, an);
1387
1388 ath_chainmask_sel_init(sc, an);
1389 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1390 list_add(&an->list, &sc->node_list);
1391
1392 return an;
1393 }
1394
1395 void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1396 {
1397 unsigned long flags;
1398
1399 DECLARE_MAC_BUF(mac);
1400
1401 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1402 an->an_flags |= ATH_NODE_CLEAN;
1403 ath_tx_node_cleanup(sc, an, bh_flag);
1404 ath_rx_node_cleanup(sc, an);
1405
1406 ath_tx_node_free(sc, an);
1407 ath_rx_node_free(sc, an);
1408
1409 spin_lock_irqsave(&sc->node_lock, flags);
1410
1411 list_del(&an->list);
1412
1413 spin_unlock_irqrestore(&sc->node_lock, flags);
1414
1415 kfree(an);
1416 }
1417
1418 /* Finds a node and increases the refcnt if found */
1419
1420 struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1421 {
1422 struct ath_node *an = NULL, *an_found = NULL;
1423
1424 if (list_empty(&sc->node_list)) /* FIXME */
1425 goto out;
1426 list_for_each_entry(an, &sc->node_list, list) {
1427 if (!compare_ether_addr(an->an_addr, addr)) {
1428 atomic_inc(&an->an_refcnt);
1429 an_found = an;
1430 break;
1431 }
1432 }
1433 out:
1434 return an_found;
1435 }
1436
1437 /* Decrements the refcnt and if it drops to zero, detach the node */
1438
1439 void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1440 {
1441 if (atomic_dec_and_test(&an->an_refcnt))
1442 ath_node_detach(sc, an, bh_flag);
1443 }
1444
1445 /* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1446 struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1447 {
1448 struct ath_node *an = NULL, *an_found = NULL;
1449
1450 if (list_empty(&sc->node_list))
1451 return NULL;
1452
1453 list_for_each_entry(an, &sc->node_list, list)
1454 if (!compare_ether_addr(an->an_addr, addr)) {
1455 an_found = an;
1456 break;
1457 }
1458
1459 return an_found;
1460 }
1461
1462 /*
1463 * Set up New Node
1464 *
1465 * Setup driver-specific state for a newly associated node. This routine
1466 * really only applies if compression or XR are enabled; there is no code
1467 * covering any other cases.
1468 */
1469
1470 void ath_newassoc(struct ath_softc *sc,
1471 struct ath_node *an, int isnew, int isuapsd)
1472 {
1473 int tidno;
1474
1475 /* if station reassociates, tear down the aggregation state. */
1476 if (!isnew) {
1477 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1478 if (sc->sc_txaggr)
1479 ath_tx_aggr_teardown(sc, an, tidno);
1480 if (sc->sc_rxaggr)
1481 ath_rx_aggr_teardown(sc, an, tidno);
1482 }
1483 }
1484 an->an_flags = 0;
1485 }
1486
1487 /**************/
1488 /* Encryption */
1489 /**************/
1490
1491 void ath_key_reset(struct ath_softc *sc, u_int16_t keyix, int freeslot)
1492 {
1493 ath9k_hw_keyreset(sc->sc_ah, keyix);
1494 if (freeslot)
1495 clear_bit(keyix, sc->sc_keymap);
1496 }
1497
1498 int ath_keyset(struct ath_softc *sc,
1499 u_int16_t keyix,
1500 struct hal_keyval *hk,
1501 const u_int8_t mac[ETH_ALEN])
1502 {
1503 bool status;
1504
1505 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1506 keyix, hk, mac, false);
1507
1508 return status != false;
1509 }
1510
1511 /***********************/
1512 /* TX Power/Regulatory */
1513 /***********************/
1514
1515 /*
1516 * Set Transmit power in HAL
1517 *
1518 * This routine makes the actual HAL calls to set the new transmit power
1519 * limit.
1520 */
1521
1522 void ath_update_txpow(struct ath_softc *sc)
1523 {
1524 struct ath_hal *ah = sc->sc_ah;
1525 u_int32_t txpow;
1526
1527 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1528 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1529 /* read back in case value is clamped */
1530 ath9k_hw_getcapability(ah, HAL_CAP_TXPOW, 1, &txpow);
1531 sc->sc_curtxpow = txpow;
1532 }
1533 }
1534
1535 /* Return the current country and domain information */
1536 void ath_get_currentCountry(struct ath_softc *sc,
1537 struct hal_country_entry *ctry)
1538 {
1539 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1540
1541 /* If the HAL is not specific yet, since it is band dependent,
1542 * use the one we passed in. */
1543 if (ctry->countryCode == CTRY_DEFAULT) {
1544 ctry->iso[0] = 0;
1545 ctry->iso[1] = 0;
1546 } else if (ctry->iso[0] && ctry->iso[1]) {
1547 if (!ctry->iso[2]) {
1548 if (ath_outdoor)
1549 ctry->iso[2] = 'O';
1550 else
1551 ctry->iso[2] = 'I';
1552 }
1553 }
1554 }
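/*
 * Example: with ath_outdoor set, a country reported as "DE" becomes
 * "DEO"; indoors it becomes "DEI".
 */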
1555
1556 /**************************/
1557 /* Slow Antenna Diversity */
1558 /**************************/
1559
1560 void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1561 struct ath_softc *sc,
1562 int32_t rssitrig)
1563 {
1564 int trig;
1565
1566 /* antdivf_rssitrig can range from 40 - 0xff */
1567 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1568 trig = (trig < 40) ? 40 : trig;
1569
1570 antdiv->antdiv_sc = sc;
1571 antdiv->antdivf_rssitrig = trig;
1572 }
1573
1574 void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1575 u_int8_t num_antcfg,
1576 const u_int8_t *bssid)
1577 {
1578 antdiv->antdiv_num_antcfg =
1579 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1580 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1581 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1582 antdiv->antdiv_curcfg = 0;
1583 antdiv->antdiv_bestcfg = 0;
1584 antdiv->antdiv_laststatetsf = 0;
1585
1586 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1587
1588 antdiv->antdiv_start = 1;
1589 }
1590
1591 void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1592 {
1593 antdiv->antdiv_start = 0;
1594 }
1595
1596 static int32_t ath_find_max_val(int32_t *val,
1597 u_int8_t num_val, u_int8_t *max_index)
1598 {
1599 int32_t MaxVal = *val++; /* values are signed RSSI */
1600 u_int32_t cur_index = 0;
1601
1602 *max_index = 0;
1603 while (++cur_index < num_val) {
1604 if (*val > MaxVal) {
1605 MaxVal = *val;
1606 *max_index = cur_index;
1607 }
1608
1609 val++;
1610 }
1611
1612 return MaxVal;
1613 }
1614
1615 void ath_slow_ant_div(struct ath_antdiv *antdiv,
1616 struct ieee80211_hdr *hdr,
1617 struct ath_rx_status *rx_stats)
1618 {
1619 struct ath_softc *sc = antdiv->antdiv_sc;
1620 struct ath_hal *ah = sc->sc_ah;
1621 u_int64_t curtsf = 0;
1622 u_int8_t bestcfg, curcfg = antdiv->antdiv_curcfg;
1623 __le16 fc = hdr->frame_control;
1624
1625 if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
1626 && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
1627 antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
1628 antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
1629 curtsf = antdiv->antdiv_lastbtsf[curcfg];
1630 } else {
1631 return;
1632 }
1633
1634 switch (antdiv->antdiv_state) {
1635 case ATH_ANT_DIV_IDLE:
1636 if ((antdiv->antdiv_lastbrssi[curcfg] <
1637 antdiv->antdivf_rssitrig)
1638 && ((curtsf - antdiv->antdiv_laststatetsf) >
1639 ATH_ANT_DIV_MIN_IDLE_US)) {
1640
1641 curcfg++;
1642 if (curcfg == antdiv->antdiv_num_antcfg)
1643 curcfg = 0;
1644
1645 if (HAL_OK == ath9k_hw_select_antconfig(ah, curcfg)) {
1646 antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
1647 antdiv->antdiv_curcfg = curcfg;
1648 antdiv->antdiv_laststatetsf = curtsf;
1649 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1650 }
1651 }
1652 break;
1653
1654 case ATH_ANT_DIV_SCAN:
1655 if ((curtsf - antdiv->antdiv_laststatetsf) <
1656 ATH_ANT_DIV_MIN_SCAN_US)
1657 break;
1658
1659 curcfg++;
1660 if (curcfg == antdiv->antdiv_num_antcfg)
1661 curcfg = 0;
1662
1663 if (curcfg == antdiv->antdiv_bestcfg) {
1664 ath_find_max_val(antdiv->antdiv_lastbrssi,
1665 antdiv->antdiv_num_antcfg, &bestcfg);
1666 if (HAL_OK == ath9k_hw_select_antconfig(ah, bestcfg)) {
1667 antdiv->antdiv_bestcfg = bestcfg;
1668 antdiv->antdiv_curcfg = bestcfg;
1669 antdiv->antdiv_laststatetsf = curtsf;
1670 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1671 }
1672 } else {
1673 if (HAL_OK == ath9k_hw_select_antconfig(ah, curcfg)) {
1674 antdiv->antdiv_curcfg = curcfg;
1675 antdiv->antdiv_laststatetsf = curtsf;
1676 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1677 }
1678 }
1679
1680 break;
1681 }
1682 }
1683
1684 /***********************/
1685 /* Descriptor Handling */
1686 /***********************/
1687
1688 /*
1689 * Set up DMA descriptors
1690 *
1691 * This function allocates both the DMA descriptor block and the
1692 * ath_buf entries that point into it; together they hold the
1693 * descriptors used by the system.
1694 */
1695
1696 int ath_descdma_setup(struct ath_softc *sc,
1697 struct ath_descdma *dd,
1698 struct list_head *head,
1699 const char *name,
1700 int nbuf,
1701 int ndesc)
1702 {
1703 #define DS2PHYS(_dd, _ds) \
1704 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1705 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1706 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
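/*
 * Worked example for ATH_DESC_4KB_BOUND_CHECK: a descriptor at a bus
 * address ending in 0xF80 has (_daddr & 0xFFF) = 0xF80 > 0xF7F, so a
 * 32-dword (128-byte) fetch starting there would cross into the next
 * 4KB page and the slot must be skipped; an address ending in 0xF00
 * passes the check.
 */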
1707
1708 struct ath_desc *ds;
1709 struct ath_buf *bf;
1710 int i, bsize, error;
1711
1712 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1713 __func__, name, nbuf, ndesc);
1714
1715 /* sizeof(struct ath_desc) must be a multiple of a DWORD (4 bytes) */
1716 if ((sizeof(struct ath_desc) % 4) != 0) {
1717 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1718 __func__);
1719 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1720 error = -ENOMEM;
1721 goto fail;
1722 }
1723
1724 dd->dd_name = name;
1725 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1726
1727 /*
1728 * Need additional DMA memory because we can't use
1729 * descriptors that cross the 4K page boundary. Assume
1730 * one skipped descriptor per 4K page.
1731 */
1732 if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
1733 u_int32_t ndesc_skipped =
1734 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1735 u_int32_t dma_len;
1736
1737 while (ndesc_skipped) {
1738 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1739 dd->dd_desc_len += dma_len;
1740
1741 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1742 }
1743 }
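/*
 * Worked example of the loop above (descriptor size hypothetical):
 * 256 descriptors of 64 bytes give dd_desc_len = 16384, so the first
 * pass adds 4 spare descriptors (one per 4KB page); the extra 256
 * bytes cross no further page boundary, so a second pass adds nothing.
 */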
1744
1745 /* allocate descriptors */
1746 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1747 dd->dd_desc_len,
1748 &dd->dd_desc_paddr);
1749 if (dd->dd_desc == NULL) {
1750 error = -ENOMEM;
1751 goto fail;
1752 }
1753 ds = dd->dd_desc;
1754 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1755 __func__, dd->dd_name, ds, (u_int32_t) dd->dd_desc_len,
1756 ito64(dd->dd_desc_paddr), /*XXX*/(u_int32_t) dd->dd_desc_len);
1757
1758 /* allocate buffers */
1759 bsize = sizeof(struct ath_buf) * nbuf;
1760 bf = kmalloc(bsize, GFP_KERNEL);
1761 if (bf == NULL) {
1762 error = -ENOMEM;
1763 goto fail2;
1764 }
1765 memzero(bf, bsize);
1766 dd->dd_bufptr = bf;
1767
1768 INIT_LIST_HEAD(head);
1769 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1770 bf->bf_desc = ds;
1771 bf->bf_daddr = DS2PHYS(dd, ds);
1772
1773 if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
1774 /*
1775 * Skip descriptor addresses which can cause 4KB
1776 * boundary crossing (addr + length) with a 32 dword
1777 * descriptor fetch.
1778 */
1779 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1780 ASSERT((caddr_t) bf->bf_desc <
1781 ((caddr_t) dd->dd_desc +
1782 dd->dd_desc_len));
1783
1784 ds += ndesc;
1785 bf->bf_desc = ds;
1786 bf->bf_daddr = DS2PHYS(dd, ds);
1787 }
1788 }
1789 list_add_tail(&bf->list, head);
1790 }
1791 return 0;
1792 fail2:
1793 pci_free_consistent(sc->pdev,
1794 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1795 fail:
1796 memzero(dd, sizeof(*dd));
1797 return error;
1798 #undef ATH_DESC_4KB_BOUND_CHECK
1799 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1800 #undef DS2PHYS
1801 }
1802
1803 /*
1804 * Cleanup DMA descriptors
1805 *
1806 * This function will free the DMA block that was allocated for the descriptor
1807 * pool. Since this was allocated as one "chunk", it is freed in the same
1808 * manner.
1809 */
1810
1811 void ath_descdma_cleanup(struct ath_softc *sc,
1812 struct ath_descdma *dd,
1813 struct list_head *head)
1814 {
1815 /* Free memory associated with descriptors */
1816 pci_free_consistent(sc->pdev,
1817 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1818
1819 INIT_LIST_HEAD(head);
1820 kfree(dd->dd_bufptr);
1821 memzero(dd, sizeof(*dd));
1822 }
1823
1824 /*************/
1825 /* Utilities */
1826 /*************/
1827
1828 void ath_internal_reset(struct ath_softc *sc)
1829 {
1830 ath_reset_start(sc, 0);
1831 ath_reset(sc);
1832 ath_reset_end(sc, 0);
1833 }
1834
1835 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1836 {
1837 int qnum;
1838
1839 switch (queue) {
1840 case 0:
1841 qnum = sc->sc_haltype2q[HAL_WME_AC_VO];
1842 break;
1843 case 1:
1844 qnum = sc->sc_haltype2q[HAL_WME_AC_VI];
1845 break;
1846 case 2:
1847 qnum = sc->sc_haltype2q[HAL_WME_AC_BE];
1848 break;
1849 case 3:
1850 qnum = sc->sc_haltype2q[HAL_WME_AC_BK];
1851 break;
1852 default:
1853 qnum = sc->sc_haltype2q[HAL_WME_AC_BE];
1854 break;
1855 }
1856
1857 return qnum;
1858 }
1859
1860 int ath_get_mac80211_qnum(u_int queue, struct ath_softc *sc)
1861 {
1862 int qnum;
1863
1864 switch (queue) {
1865 case HAL_WME_AC_VO:
1866 qnum = 0;
1867 break;
1868 case HAL_WME_AC_VI:
1869 qnum = 1;
1870 break;
1871 case HAL_WME_AC_BE:
1872 qnum = 2;
1873 break;
1874 case HAL_WME_AC_BK:
1875 qnum = 3;
1876 break;
1877 default:
1878 qnum = -1;
1879 break;
1880 }
1881
1882 return qnum;
1883 }
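/*
 * The two helpers above are inverses of each other over the four WME
 * access categories: mac80211 queue 0/1/2/3 <-> VO/VI/BE/BK.
 */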
1884
1885
1886 /*
1887 * Expand time stamp to TSF
1888 *
1889 * Extend 15-bit time stamp from rx descriptor to
1890 * a full 64-bit TSF using the current h/w TSF.
1891 */
1892
1893 u_int64_t ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp)
1894 {
1895 u_int64_t tsf;
1896
1897 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1898 if ((tsf & 0x7fff) < rstamp)
1899 tsf -= 0x8000;
1900 return (tsf & ~0x7fff) | rstamp;
1901 }
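/*
 * Worked example: if the h/w TSF reads 0x18001 and rstamp is 0x7fff,
 * then (tsf & 0x7fff) = 1 < 0x7fff, so the stamp predates the last
 * 15-bit rollover; tsf -= 0x8000 gives 0x10001 and the result is
 * (0x10000 | 0x7fff) = 0x17fff.
 */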
1902
1903 /*
1904 * Set Default Antenna
1905 *
1906 * Call into the HAL to set the default antenna to use. Not really valid for
1907 * MIMO technology.
1908 */
1909
1910 void ath_setdefantenna(void *context, u_int antenna)
1911 {
1912 struct ath_softc *sc = (struct ath_softc *)context;
1913 struct ath_hal *ah = sc->sc_ah;
1914
1915 /* XXX block beacon interrupts */
1916 ath9k_hw_setantenna(ah, antenna);
1917 sc->sc_defant = antenna;
1918 sc->sc_rxotherant = 0;
1919 }
1920
1921 /*
1922 * Set Slot Time
1923 *
1924 * This will wake up the chip if required, and set the slot time for the
1925 * frame (maximum transmit time). Slot time is assumed to be already set
1926 * in the ATH object member sc_slottime
1927 */
1928
1929 void ath_setslottime(struct ath_softc *sc)
1930 {
1931 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
1932 sc->sc_updateslot = OK;
1933 }