update ath9k to latest git version
package/ath9k/src/drivers/net/wireless/ath9k/core.c
1 /*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19 #include "core.h"
20 #include "regd.h"
21
22 static int ath_outdoor; /* enable outdoor use */
23
24 static const u8 ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27 static u32 ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29 static u32 ath_chainmask_sel_down_rssi_thres =
30 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
31 static u32 ath_chainmask_sel_period =
32 ATH_CHAINMASK_SEL_TIMEOUT;
33
34 /* Return the bus cache line size in 4-byte word units */
35
36 static void bus_read_cachesize(struct ath_softc *sc, int *csz)
37 {
38 u8 u8tmp;
39
40 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
41 *csz = (int)u8tmp;
42
43 /*
44 * This check was put in to avoid "unpleasant" consequences if
45 * the bootrom has not fully initialized all PCI devices.
46 * Sometimes the cache line size register is not set.
47 */
48
49 if (*csz == 0)
50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
51 }
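/*
 * Worked example (illustrative): if PCI_CACHE_LINE_SIZE reads back
 * 0x10, the cache line is 16 DWORDs, i.e. 64 bytes; ath_init() later
 * converts back to bytes with (csz << 2). If the bootrom left the
 * register at zero, DEFAULT_CACHELINE >> 2 is used instead.
 */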
52
53 /*
54 * Set current operating mode
55 *
56 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although
58 * they have been superseded by the ath_led module.
59 */
60
61 static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62 {
63 const struct ath9k_rate_table *rt;
64 int i;
65
66 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
67 rt = sc->sc_rates[mode];
68 BUG_ON(!rt);
69
70 for (i = 0; i < rt->rateCount; i++)
71 sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
72
73 memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
74 for (i = 0; i < 256; i++) {
75 u8 ix = rt->rateCodeToIndex[i];
76
77 if (ix == 0xff)
78 continue;
79
80 sc->sc_hwmap[i].ieeerate =
81 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
82 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
83
84 if (rt->info[ix].shortPreamble ||
85 rt->info[ix].phy == PHY_OFDM) {
86 }
87 /* NB: this uses the last entry if the rate isn't found */
88 /* XXX beware of overflow */
89 }
90 sc->sc_currates = rt;
91 sc->sc_curmode = mode;
92 /*
93 * All protection frames are transmitted at 2Mb/s for
94 * 11g, otherwise at 1Mb/s.
95 * XXX select protection rate index from rate table.
96 */
97 sc->sc_protrix = (mode == WIRELESS_MODE_11g ? 1 : 0);
98 /* rate index used to send mgt frames */
99 sc->sc_minrateix = 0;
100 }
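/*
 * Illustrative sketch of how the two maps built above are meant to be
 * used (index and code values are hypothetical): if rt->info[3].rateCode
 * is 0x0b, then sc->sc_rixmap[0x0b] == 3, so the rx/tx paths can turn a
 * hardware rate code from a descriptor straight into a rate-table index;
 * likewise, assuming rt->rateCodeToIndex[0x0b] == 3,
 * sc->sc_hwmap[0x0b].rateKbps reports that rate in Kbps without another
 * table walk.
 */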
101
102 /*
103 * Select Rate Table
104 *
105 * Based on the wireless mode passed in, the rate table in the ATH object
106 * is set to the mode specific rate table. This also calls the callback
107 * function to set the rate in the protocol layer object.
108 */
109
110 static int ath_rate_setup(struct ath_softc *sc, enum wireless_mode mode)
111 {
112 struct ath_hal *ah = sc->sc_ah;
113 const struct ath9k_rate_table *rt;
114
115 switch (mode) {
116 case WIRELESS_MODE_11a:
117 sc->sc_rates[mode] =
118 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11A);
119 break;
120 case WIRELESS_MODE_11b:
121 sc->sc_rates[mode] =
122 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11B);
123 break;
124 case WIRELESS_MODE_11g:
125 sc->sc_rates[mode] =
126 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11G);
127 break;
128 case WIRELESS_MODE_11NA_HT20:
129 sc->sc_rates[mode] =
130 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT20);
131 break;
132 case WIRELESS_MODE_11NG_HT20:
133 sc->sc_rates[mode] =
134 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT20);
135 break;
136 case WIRELESS_MODE_11NA_HT40PLUS:
137 sc->sc_rates[mode] =
138 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT40PLUS);
139 break;
140 case WIRELESS_MODE_11NA_HT40MINUS:
141 sc->sc_rates[mode] =
142 ath9k_hw_getratetable(ah,
143 ATH9K_MODE_SEL_11NA_HT40MINUS);
144 break;
145 case WIRELESS_MODE_11NG_HT40PLUS:
146 sc->sc_rates[mode] =
147 ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT40PLUS);
148 break;
149 case WIRELESS_MODE_11NG_HT40MINUS:
150 sc->sc_rates[mode] =
151 ath9k_hw_getratetable(ah,
152 ATH9K_MODE_SEL_11NG_HT40MINUS);
153 break;
154 default:
155 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid mode %u\n",
156 __func__, mode);
157 return 0;
158 }
159 rt = sc->sc_rates[mode];
160 if (rt == NULL)
161 return 0;
162
163 /* setup rate set in 802.11 protocol layer */
164 ath_setup_rate(sc, mode, NORMAL_RATE, rt);
165
166 return 1;
167 }
168
169 /*
170 * Set up channel list
171 */
172 static int ath_setup_channels(struct ath_softc *sc)
173 {
174 struct ath_hal *ah = sc->sc_ah;
175 int nchan, i, a = 0, b = 0;
176 u8 regclassids[ATH_REGCLASSIDS_MAX];
177 u32 nregclass = 0;
178 struct ieee80211_supported_band *band_2ghz;
179 struct ieee80211_supported_band *band_5ghz;
180 struct ieee80211_channel *chan_2ghz;
181 struct ieee80211_channel *chan_5ghz;
182 struct ath9k_channel *c;
183
184 /* Fill in ah->ah_channels */
185 if (!ath9k_regd_init_channels(ah,
186 ATH_CHAN_MAX,
187 (u32 *)&nchan,
188 regclassids,
189 ATH_REGCLASSIDS_MAX,
190 &nregclass,
191 CTRY_DEFAULT,
192 ATH9K_MODE_SEL_ALL,
193 false,
194 1)) {
195 u32 rd = ah->ah_currentRD;
196
197 DPRINTF(sc, ATH_DBG_FATAL,
198 "%s: unable to collect channel list; "
199 "regdomain likely %u country code %u\n",
200 __func__, rd, CTRY_DEFAULT);
201 return -EINVAL;
202 }
203
204 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
205 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
206 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
207 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
208
209 for (i = 0; i < nchan; i++) {
210 c = &ah->ah_channels[i];
211 if (IS_CHAN_2GHZ(c)) {
212 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
213 chan_2ghz[a].center_freq = c->channel;
214 chan_2ghz[a].max_power = c->maxTxPower;
215
216 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
217 chan_2ghz[a].flags |=
218 IEEE80211_CHAN_NO_IBSS;
219 if (c->channelFlags & CHANNEL_PASSIVE)
220 chan_2ghz[a].flags |=
221 IEEE80211_CHAN_PASSIVE_SCAN;
222
223 band_2ghz->n_channels = ++a;
224
225 DPRINTF(sc, ATH_DBG_CONFIG,
226 "%s: 2MHz channel: %d, "
227 "channelFlags: 0x%x\n",
228 __func__,
229 c->channel,
230 c->channelFlags);
231 } else if (IS_CHAN_5GHZ(c)) {
232 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
233 chan_5ghz[b].center_freq = c->channel;
234 chan_5ghz[b].max_power = c->maxTxPower;
235
236 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
237 chan_5ghz[b].flags |=
238 IEEE80211_CHAN_NO_IBSS;
239 if (c->channelFlags & CHANNEL_PASSIVE)
240 chan_5ghz[b].flags |=
241 IEEE80211_CHAN_PASSIVE_SCAN;
242
243 band_5ghz->n_channels = ++b;
244
245 DPRINTF(sc, ATH_DBG_CONFIG,
246 "%s: 5MHz channel: %d, "
247 "channelFlags: 0x%x\n",
248 __func__,
249 c->channel,
250 c->channelFlags);
251 }
252 }
253
254 return 0;
255 }
256
257 /*
258 * Determine mode from channel flags
259 *
260 * This routine will provide the enumerated WIRELESS_MODE value based
261 * on the settings of the channel flags. If no valid set of flags
262 * exists, the lowest mode (11b) is selected.
263 */
264
265 static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
266 {
267 if (chan->chanmode == CHANNEL_A)
268 return WIRELESS_MODE_11a;
269 else if (chan->chanmode == CHANNEL_G)
270 return WIRELESS_MODE_11g;
271 else if (chan->chanmode == CHANNEL_B)
272 return WIRELESS_MODE_11b;
273 else if (chan->chanmode == CHANNEL_A_HT20)
274 return WIRELESS_MODE_11NA_HT20;
275 else if (chan->chanmode == CHANNEL_G_HT20)
276 return WIRELESS_MODE_11NG_HT20;
277 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
278 return WIRELESS_MODE_11NA_HT40PLUS;
279 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
280 return WIRELESS_MODE_11NA_HT40MINUS;
281 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
282 return WIRELESS_MODE_11NG_HT40PLUS;
283 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
284 return WIRELESS_MODE_11NG_HT40MINUS;
285
286 /* NB: should not get here */
287 return WIRELESS_MODE_11b;
288 }
289
290 /*
291 * Change Channels
292 *
293 * Performs the actions to change the channel in the hardware, and set up
294 * the current operating mode for the new channel.
295 */
296
297 static void ath_chan_change(struct ath_softc *sc, struct ath9k_channel *chan)
298 {
299 enum wireless_mode mode;
300
301 mode = ath_chan2mode(chan);
302
303 ath_rate_setup(sc, mode);
304 ath_setcurmode(sc, mode);
305 }
306
307 /*
308 * Stop the device, grabbing the top-level lock to protect
309 * against concurrent entry through ath_init (which can happen
310 * if another thread does a system call and the thread doing the
311 * stop is preempted).
312 */
313
314 static int ath_stop(struct ath_softc *sc)
315 {
316 struct ath_hal *ah = sc->sc_ah;
317
318 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
319 __func__, sc->sc_invalid);
320
321 /*
322 * Shutdown the hardware and driver:
323 * stop output from above
324 * reset 802.11 state machine
325 * (sends station deassoc/deauth frames)
326 * turn off timers
327 * disable interrupts
328 * clear transmit machinery
329 * clear receive machinery
330 * turn off the radio
331 * reclaim beacon resources
332 *
333 * Note that some of this work is not possible if the
334 * hardware is gone (invalid).
335 */
336
337 if (!sc->sc_invalid)
338 ath9k_hw_set_interrupts(ah, 0);
339 ath_draintxq(sc, false);
340 if (!sc->sc_invalid) {
341 ath_stoprecv(sc);
342 ath9k_hw_phy_disable(ah);
343 } else
344 sc->sc_rxlink = NULL;
345
346 return 0;
347 }
348
349 /*
350 * Start Scan
351 *
352 * This function is called when starting a channel scan. It will perform
353 * power save wakeup processing, set the filter for the scan, and get the
354 * chip ready to send broadcast packets out during the scan.
355 */
356
357 void ath_scan_start(struct ath_softc *sc)
358 {
359 struct ath_hal *ah = sc->sc_ah;
360 u32 rfilt;
361 u32 now = (u32) jiffies_to_msecs(get_timestamp());
362
363 sc->sc_scanning = 1;
364 rfilt = ath_calcrxfilter(sc);
365 ath9k_hw_setrxfilter(ah, rfilt);
366 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
367
368 /* Restore previous power management state. */
369
370 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
371 now / 1000, now % 1000, __func__, rfilt);
372 }
373
374 /*
375 * Scan End
376 *
377 * This routine is called by the upper layer when the scan is completed. This
378 * will set the filters back to normal operating mode, set the BSSID to the
379 * correct value, and restore the power save state.
380 */
381
382 void ath_scan_end(struct ath_softc *sc)
383 {
384 struct ath_hal *ah = sc->sc_ah;
385 u32 rfilt;
386 u32 now = (u32) jiffies_to_msecs(get_timestamp());
387
388 sc->sc_scanning = 0;
389 /* Request for a full reset due to rx packet filter changes */
390 sc->sc_full_reset = 1;
391 rfilt = ath_calcrxfilter(sc);
392 ath9k_hw_setrxfilter(ah, rfilt);
393 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
394
395 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
396 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
397 }
398
399 /*
400 * Set the current channel
401 *
402 * Set/change channels. If the channel is really being changed, it's done
403 * by resetting the chip. To accomplish this we must first clean up any
404 * pending DMA, then restart things, a la ath_init.
405 */
406 int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
407 {
408 struct ath_hal *ah = sc->sc_ah;
409 bool fastcc = true, stopped;
410 enum ath9k_ht_macmode ht_macmode;
411
412 if (sc->sc_invalid) /* if the device is invalid or removed */
413 return -EIO;
414
415 DPRINTF(sc, ATH_DBG_CONFIG,
416 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
417 __func__,
418 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
419 sc->sc_curchan.channelFlags),
420 sc->sc_curchan.channel,
421 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
422 hchan->channel, hchan->channelFlags);
423
424 ht_macmode = ath_cwm_macmode(sc);
425
426 if (hchan->channel != sc->sc_curchan.channel ||
427 hchan->channelFlags != sc->sc_curchan.channelFlags ||
428 sc->sc_update_chainmask || sc->sc_full_reset) {
429 int status;
430 /*
431 * This is only performed if the channel settings have
432 * actually changed.
433 *
434 * To switch channels clear any pending DMA operations;
435 * wait long enough for the RX fifo to drain, reset the
436 * hardware at the new frequency, and then re-enable
437 * the relevant bits of the h/w.
438 */
439 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
440 ath_draintxq(sc, false); /* clear pending tx frames */
441 stopped = ath_stoprecv(sc); /* turn off frame recv */
442
443 /* XXX: do not flush receive queue here. We don't want
444 * to flush data frames already in queue because of
445 * changing channel. */
446
447 if (!stopped || sc->sc_full_reset)
448 fastcc = false;
449
450 spin_lock_bh(&sc->sc_resetlock);
451 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
452 ht_macmode, sc->sc_tx_chainmask,
453 sc->sc_rx_chainmask,
454 sc->sc_ht_extprotspacing,
455 fastcc, &status)) {
456 DPRINTF(sc, ATH_DBG_FATAL,
457 "%s: unable to reset channel %u (%uMhz) "
458 "flags 0x%x hal status %u\n", __func__,
459 ath9k_hw_mhz2ieee(ah, hchan->channel,
460 hchan->channelFlags),
461 hchan->channel, hchan->channelFlags, status);
462 spin_unlock_bh(&sc->sc_resetlock);
463 return -EIO;
464 }
465 spin_unlock_bh(&sc->sc_resetlock);
466
467 sc->sc_curchan = *hchan;
468 sc->sc_update_chainmask = 0;
469 sc->sc_full_reset = 0;
470
471 /* Re-enable rx framework */
472 if (ath_startrecv(sc) != 0) {
473 DPRINTF(sc, ATH_DBG_FATAL,
474 "%s: unable to restart recv logic\n", __func__);
475 return -EIO;
476 }
477 /*
478 * Change channels and update the h/w rate map
479 * if we're switching; e.g. 11a to 11b/g.
480 */
481 ath_chan_change(sc, hchan);
482 ath_update_txpow(sc); /* update tx power state */
483 /*
484 * Re-enable interrupts.
485 */
486 ath9k_hw_set_interrupts(ah, sc->sc_imask);
487 }
488 return 0;
489 }
490
491 /**********************/
492 /* Chainmask Handling */
493 /**********************/
494
495 static void ath_chainmask_sel_timertimeout(unsigned long data)
496 {
497 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
498 cm->switch_allowed = 1;
499 }
500
501 /* Start chainmask select timer */
502 static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
503 {
504 cm->switch_allowed = 0;
505 mod_timer(&cm->timer, jiffies + msecs_to_jiffies(ath_chainmask_sel_period)); /* mod_timer() takes an absolute expiry; the period is assumed to be in ms */
506 }
507
508 /* Stop chainmask select timer */
509 static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
510 {
511 cm->switch_allowed = 0;
512 del_timer_sync(&cm->timer);
513 }
514
515 static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
516 {
517 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
518
519 memzero(cm, sizeof(struct ath_chainmask_sel));
520
521 cm->cur_tx_mask = sc->sc_tx_chainmask;
522 cm->cur_rx_mask = sc->sc_rx_chainmask;
523 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
524 setup_timer(&cm->timer,
525 ath_chainmask_sel_timertimeout, (unsigned long) cm);
526 }
527
528 int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
529 {
530 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
531
532 /*
533 * Disable auto-switching in one of the following if conditions.
534 * sc_chainmask_auto_sel is used for the internal global auto-switching
535 * enabled/disabled setting.
536 */
537 if (sc->sc_ah->ah_caps.halTxChainMask != ATH_CHAINMASK_SEL_3X3) {
538 cm->cur_tx_mask = sc->sc_tx_chainmask;
539 return cm->cur_tx_mask;
540 }
541
542 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
543 return cm->cur_tx_mask;
544
545 if (cm->switch_allowed) {
546 /* Switch down from tx 3 to tx 2. */
547 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
548 ATH_RSSI_OUT(cm->tx_avgrssi) >=
549 ath_chainmask_sel_down_rssi_thres) {
550 cm->cur_tx_mask = sc->sc_tx_chainmask;
551
552 /* Don't let another switch happen until
553 * this timer expires */
554 ath_chainmask_sel_timerstart(cm);
555 }
556 /* Switch up from tx 2 to 3. */
557 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
558 ATH_RSSI_OUT(cm->tx_avgrssi) <=
559 ath_chainmask_sel_up_rssi_thres) {
560 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
561
562 /* Don't let another switch happen
563 * until this timer expires */
564 ath_chainmask_sel_timerstart(cm);
565 }
566 }
567
568 return cm->cur_tx_mask;
569 }
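/*
 * Worked example of the hysteresis above (threshold values are
 * illustrative; the real ones are ATH_CHAINMASK_SEL_UP_RSSI_THRES and
 * ATH_CHAINMASK_SEL_DOWN_RSSI_THRES): with a down-threshold of 35 and
 * an up-threshold of 20, a node transmitting with the 3x3 mask drops
 * to the configured chainmask once its average tx RSSI reaches 35 or
 * more (signal strong enough to spare a chain), and returns to 3x3
 * once it falls to 20 or less. The timer armed at every switch keeps
 * switch_allowed clear until it fires, so a noisy RSSI estimate
 * cannot make the mask oscillate.
 */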
570
571 /*
572 * Update tx/rx chainmask. For legacy association,
573 * hard code chainmask to 1x1, for 11n association, use
574 * the chainmask configuration.
575 */
576
577 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
578 {
579 sc->sc_update_chainmask = 1;
580 if (is_ht) {
581 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.halTxChainMask;
582 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.halRxChainMask;
583 } else {
584 sc->sc_tx_chainmask = 1;
585 sc->sc_rx_chainmask = 1;
586 }
587
588 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
589 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
590 }
591
592 /******************/
593 /* VAP management */
594 /******************/
595
596 /*
597 * VAP in Listen mode
598 *
599 * This routine brings the VAP out of the down state into a "listen" state
600 * where it waits for association requests. This is used in AP and AdHoc
601 * modes.
602 */
603
604 int ath_vap_listen(struct ath_softc *sc, int if_id)
605 {
606 struct ath_hal *ah = sc->sc_ah;
607 struct ath_vap *avp;
608 u32 rfilt = 0;
609 DECLARE_MAC_BUF(mac);
610
611 avp = sc->sc_vaps[if_id];
612 if (avp == NULL) {
613 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
614 __func__, if_id);
615 return -EINVAL;
616 }
617
618 #ifdef CONFIG_SLOW_ANT_DIV
619 ath_slow_ant_div_stop(&sc->sc_antdiv);
620 #endif
621
622 /* update ratectrl about the new state */
623 ath_rate_newstate(sc, avp);
624
625 rfilt = ath_calcrxfilter(sc);
626 ath9k_hw_setrxfilter(ah, rfilt);
627
628 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
629 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
630 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
631 } else
632 sc->sc_curaid = 0;
633
634 DPRINTF(sc, ATH_DBG_CONFIG,
635 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
636 __func__, rfilt, print_mac(mac,
637 sc->sc_curbssid), sc->sc_curaid);
638
639 /*
640 * XXXX
641 * Disable BMISS interrupt when we're not associated
642 */
643 ath9k_hw_set_interrupts(ah,
644 sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
645 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
646 /* need to reconfigure the beacons when it moves to RUN */
647 sc->sc_beacons = 0;
648
649 return 0;
650 }
651
652 int ath_vap_attach(struct ath_softc *sc,
653 int if_id,
654 struct ieee80211_vif *if_data,
655 enum ath9k_opmode opmode)
656 {
657 struct ath_vap *avp;
658
659 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
660 DPRINTF(sc, ATH_DBG_FATAL,
661 "%s: Invalid interface id = %u\n", __func__, if_id);
662 return -EINVAL;
663 }
664
665 switch (opmode) {
666 case ATH9K_M_STA:
667 case ATH9K_M_IBSS:
668 case ATH9K_M_MONITOR:
669 break;
670 case ATH9K_M_HOSTAP:
671 /* XXX not right, beacon buffer is allocated on RUN trans */
672 if (list_empty(&sc->sc_bbuf))
673 return -ENOMEM;
674 break;
675 default:
676 return -EINVAL;
677 }
678
679 /* create ath_vap */
680 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
681 if (avp == NULL)
682 return -ENOMEM;
683
684 memzero(avp, sizeof(struct ath_vap));
685 avp->av_if_data = if_data;
686 /* Set the VAP opmode */
687 avp->av_opmode = opmode;
688 avp->av_bslot = -1;
689 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
690 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
691 spin_lock_init(&avp->av_mcastq.axq_lock);
692
693 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
694
695 sc->sc_vaps[if_id] = avp;
696 sc->sc_nvaps++;
697 /* Set the device opmode */
698 sc->sc_opmode = opmode;
699
700 /* default VAP configuration */
701 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
702 avp->av_config.av_fixed_retryset = 0x03030303;
703
704 return 0;
705 }
706
707 int ath_vap_detach(struct ath_softc *sc, int if_id)
708 {
709 struct ath_hal *ah = sc->sc_ah;
710 struct ath_vap *avp;
711
712 avp = sc->sc_vaps[if_id];
713 if (avp == NULL) {
714 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
715 __func__, if_id);
716 return -EINVAL;
717 }
718
719 /*
720 * Quiesce the hardware while we remove the vap. In
721 * particular we need to reclaim all references to the
722 * vap state by any frames pending on the tx queues.
723 *
724 * XXX can we do this w/o affecting other vaps?
725 */
726 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
727 ath_draintxq(sc, false); /* stop xmit side */
728 ath_stoprecv(sc); /* stop recv side */
729 ath_flushrecv(sc); /* flush recv queue */
730
731 /* Reclaim any pending mcast bufs on the vap. */
732 ath_tx_draintxq(sc, &avp->av_mcastq, false);
733
734 kfree(avp);
735 sc->sc_vaps[if_id] = NULL;
736 sc->sc_nvaps--;
737
738 return 0;
739 }
740
741 int ath_vap_config(struct ath_softc *sc,
742 int if_id, struct ath_vap_config *if_config)
743 {
744 struct ath_vap *avp;
745
746 if (if_id >= ATH_BCBUF) {
747 DPRINTF(sc, ATH_DBG_FATAL,
748 "%s: Invalid interface id = %u\n", __func__, if_id);
749 return -EINVAL;
750 }
751
752 avp = sc->sc_vaps[if_id];
753 ASSERT(avp != NULL);
754
755 if (avp)
756 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
757
758 return 0;
759 }
760
761 /********/
762 /* Core */
763 /********/
764
765 int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
766 {
767 struct ath_hal *ah = sc->sc_ah;
768 int status;
769 int error = 0;
770 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
771
772 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
773
774 /*
775 * Stop anything previously set up. This is safe
776 * whether this is the first time through or not.
777 */
778 ath_stop(sc);
779
780 /* Initialize chanmask selection */
781 sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
782 sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
783
784 /* Reset SERDES registers */
785 ath9k_hw_configpcipowersave(ah, 0);
786
787 /*
788 * The basic interface to setting the hardware in a good
789 * state is ``reset''. On return the hardware is known to
790 * be powered up and with interrupts disabled. This must
791 * be followed by initialization of the appropriate bits
792 * and then setup of the interrupt mask.
793 */
794 sc->sc_curchan = *initial_chan;
795
796 spin_lock_bh(&sc->sc_resetlock);
797 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
798 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
799 sc->sc_ht_extprotspacing, false, &status)) {
800 DPRINTF(sc, ATH_DBG_FATAL,
801 "%s: unable to reset hardware; hal status %u "
802 "(freq %u flags 0x%x)\n", __func__, status,
803 sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
804 error = -EIO;
805 spin_unlock_bh(&sc->sc_resetlock);
806 goto done;
807 }
808 spin_unlock_bh(&sc->sc_resetlock);
809 /*
810 * This is needed only to setup initial state
811 * but it's best done after a reset.
812 */
813 ath_update_txpow(sc);
814
815 /*
816 * Setup the hardware after reset:
817 * The receive engine is set going.
818 * Frame transmit is handled entirely
819 * in the frame output path; there's nothing to do
820 * here except setup the interrupt mask.
821 */
822 if (ath_startrecv(sc) != 0) {
823 DPRINTF(sc, ATH_DBG_FATAL,
824 "%s: unable to start recv logic\n", __func__);
825 error = -EIO;
826 goto done;
827 }
828 /* Setup our intr mask. */
829 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
830 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
831 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
832
833 if (ah->ah_caps.halGTTSupport)
834 sc->sc_imask |= ATH9K_INT_GTT;
835
836 if (ah->ah_caps.halHTSupport)
837 sc->sc_imask |= ATH9K_INT_CST;
838
839 /*
840 * Enable MIB interrupts when there are hardware phy counters.
841 * Note we only do this (at the moment) for station mode.
842 */
843 if (ath9k_hw_phycounters(ah) &&
844 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
845 sc->sc_imask |= ATH9K_INT_MIB;
846 /*
847 * Some hardware processes the TIM IE and fires an
848 * interrupt when the TIM bit is set. For hardware
849 * that does, if not overridden by configuration,
850 * enable the TIM interrupt when operating as station.
851 */
852 if (ah->ah_caps.halEnhancedPmSupport && sc->sc_opmode == ATH9K_M_STA &&
853 !sc->sc_config.swBeaconProcess)
854 sc->sc_imask |= ATH9K_INT_TIM;
855 /*
856 * Don't enable interrupts here as we've not yet built our
857 * vap and node data structures, which will be needed as soon
858 * as we start receiving.
859 */
860 ath_chan_change(sc, initial_chan);
861
862 /* XXX: we must make sure h/w is ready and clear invalid flag
863 * before turning on interrupt. */
864 sc->sc_invalid = 0;
865 done:
866 return error;
867 }
868
869 /*
870 * Reset the hardware w/o losing operational state. This is
871 * basically a more efficient way of doing ath_stop, ath_init,
872 * followed by state transitions to the current 802.11
873 * operational state. Used to recover from errors such as rx overrun
874 * and to reset the hardware when rf gain settings must be reset.
875 */
876
877 static int ath_reset_start(struct ath_softc *sc, u32 flag)
878 {
879 struct ath_hal *ah = sc->sc_ah;
880
881 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
882 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
883 ath_stoprecv(sc); /* stop recv side */
884 ath_flushrecv(sc); /* flush recv queue */
885
886 return 0;
887 }
888
889 static int ath_reset_end(struct ath_softc *sc, u32 flag)
890 {
891 struct ath_hal *ah = sc->sc_ah;
892
893 if (ath_startrecv(sc) != 0) /* restart recv */
894 DPRINTF(sc, ATH_DBG_FATAL,
895 "%s: unable to start recv logic\n", __func__);
896
897 /*
898 * We may be doing a reset in response to a request
899 * that changes the channel so update any state that
900 * might change as a result.
901 */
902 ath_chan_change(sc, &sc->sc_curchan);
903
904 ath_update_txpow(sc); /* update tx power state */
905
906 if (sc->sc_beacons)
907 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
908 ath9k_hw_set_interrupts(ah, sc->sc_imask);
909
910 /* Restart the txq */
911 if (flag & RESET_RETRY_TXQ) {
912 int i;
913 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
914 if (ATH_TXQ_SETUP(sc, i)) {
915 spin_lock_bh(&sc->sc_txq[i].axq_lock);
916 ath_txq_schedule(sc, &sc->sc_txq[i]);
917 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
918 }
919 }
920 }
921 return 0;
922 }
923
924 int ath_reset(struct ath_softc *sc)
925 {
926 struct ath_hal *ah = sc->sc_ah;
927 int status;
928 int error = 0;
929 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
930
931 /* NB: indicate channel change so we do a full reset */
932 spin_lock_bh(&sc->sc_resetlock);
933 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
934 ht_macmode,
935 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
936 sc->sc_ht_extprotspacing, false, &status)) {
937 DPRINTF(sc, ATH_DBG_FATAL,
938 "%s: unable to reset hardware; hal status %u\n",
939 __func__, status);
940 error = -EIO;
941 }
942 spin_unlock_bh(&sc->sc_resetlock);
943
944 return error;
945 }
946
947 int ath_suspend(struct ath_softc *sc)
948 {
949 struct ath_hal *ah = sc->sc_ah;
950
951 /* No I/O if device has been surprise removed */
952 if (sc->sc_invalid)
953 return -EIO;
954
955 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
956 ath9k_hw_set_interrupts(ah, 0);
957
958 /* XXX: we must make sure h/w will not generate any interrupt
959 * before setting the invalid flag. */
960 sc->sc_invalid = 1;
961
962 /* disable HAL and put h/w to sleep */
963 ath9k_hw_disable(sc->sc_ah);
964
965 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
966
967 return 0;
968 }
969
970 /* Interrupt handler. Most of the actual processing is deferred.
971 * It's the caller's responsibility to ensure the chip is awake. */
972
973 irqreturn_t ath_isr(int irq, void *dev)
974 {
975 struct ath_softc *sc = dev;
976 struct ath_hal *ah = sc->sc_ah;
977 enum ath9k_int status;
978 bool sched = false;
979
980 do {
981 if (sc->sc_invalid) {
982 /*
983 * The hardware is not ready/present, don't
984 * touch anything. Note this can happen early
985 * on if the IRQ is shared.
986 */
987 return IRQ_NONE;
988 }
989 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
990 return IRQ_NONE;
991 }
992
993 /*
994 * Figure out the reason(s) for the interrupt. Note
995 * that the hal returns a pseudo-ISR that may include
996 * bits we haven't explicitly enabled so we mask the
997 * value to ensure we only process bits we requested.
998 */
999 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
1000
1001 status &= sc->sc_imask; /* discard unasked-for bits */
1002
1003 /*
1004 * If there are no status bits set, then this interrupt was not
1005 * for us (should have been caught above).
1006 */
1007
1008 if (!status)
1009 return IRQ_NONE;
1010
1011 sc->sc_intrstatus = status;
1012
1013 if (status & ATH9K_INT_FATAL) {
1014 /* need a chip reset */
1015 sched = true;
1016 } else if (status & ATH9K_INT_RXORN) {
1017 /* need a chip reset */
1018 sched = true;
1019 } else {
1020 if (status & ATH9K_INT_SWBA) {
1021 /* schedule a tasklet for beacon handling */
1022 tasklet_schedule(&sc->bcon_tasklet);
1023 }
1024 if (status & ATH9K_INT_RXEOL) {
1025 /*
1026 * NB: the hardware should re-read the link when
1027 * RXE bit is written, but it doesn't work
1028 * at least on older hardware revs.
1029 */
1030 sched = true;
1031 }
1032
1033 if (status & ATH9K_INT_TXURN)
1034 /* bump tx trigger level */
1035 ath9k_hw_updatetxtriglevel(ah, true);
1036 /* XXX: optimize this */
1037 if (status & ATH9K_INT_RX)
1038 sched = true;
1039 if (status & ATH9K_INT_TX)
1040 sched = true;
1041 if (status & ATH9K_INT_BMISS)
1042 sched = true;
1043 /* carrier sense timeout */
1044 if (status & ATH9K_INT_CST)
1045 sched = true;
1046 if (status & ATH9K_INT_MIB) {
1047 /*
1048 * Disable interrupts until we service the MIB
1049 * interrupt; otherwise it will continue to
1050 * fire.
1051 */
1052 ath9k_hw_set_interrupts(ah, 0);
1053 /*
1054 * Let the hal handle the event. We assume
1055 * it will clear whatever condition caused
1056 * the interrupt.
1057 */
1058 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1059 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1060 }
1061 if (status & ATH9K_INT_TIM_TIMER) {
1062 if (!ah->ah_caps.halAutoSleepSupport) {
1063 /* Clear RxAbort bit so that we can
1064 * receive frames */
1065 ath9k_hw_setrxabort(ah, 0);
1066 sched = true;
1067 }
1068 }
1069 }
1070 } while (0);
1071
1072 if (sched) {
1073 /* turn off every interrupt except SWBA */
1074 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
1075 tasklet_schedule(&sc->intr_tq);
1076 }
1077
1078 return IRQ_HANDLED;
1079 }
1080
1081 /* Deferred interrupt processing */
1082
1083 static void ath9k_tasklet(unsigned long data)
1084 {
1085 struct ath_softc *sc = (struct ath_softc *)data;
1086 u32 status = sc->sc_intrstatus;
1087
1088 if (status & ATH9K_INT_FATAL) {
1089 /* need a chip reset */
1090 ath_internal_reset(sc);
1091 return;
1092 } else {
1093
1094 if (status &
1095 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1096 /* XXX: fill me in */
1097 /*
1098 if (status & ATH9K_INT_RXORN) {
1099 }
1100 if (status & ATH9K_INT_RXEOL) {
1101 }
1102 */
1103 spin_lock_bh(&sc->sc_rxflushlock);
1104 ath_rx_tasklet(sc, 0);
1105 spin_unlock_bh(&sc->sc_rxflushlock);
1106 }
1107 /* XXX: optimize this */
1108 if (status & ATH9K_INT_TX)
1109 ath_tx_tasklet(sc);
1110 /* XXX: fill me in */
1111 /*
1112 if (status & ATH9K_INT_BMISS) {
1113 }
1114 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1115 if (status & ATH9K_INT_TIM) {
1116 }
1117 if (status & ATH9K_INT_DTIMSYNC) {
1118 }
1119 }
1120 */
1121 }
1122
1123 /* re-enable hardware interrupt */
1124 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1125 }
1126
1127 int ath_init(u16 devid, struct ath_softc *sc)
1128 {
1129 struct ath_hal *ah = NULL;
1130 int status;
1131 int error = 0, i;
1132 int csz = 0;
1133 u32 rd;
1134
1135 /* XXX: hardware will not be ready until ath_open() is called */
1136 sc->sc_invalid = 1;
1137
1138 sc->sc_debug = DBG_DEFAULT;
1139 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1140
1141 /* Initialize tasklet */
1142 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1143 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1144 (unsigned long)sc);
1145
1146 /*
1147 * Cache line size is used to size and align various
1148 * structures used to communicate with the hardware.
1149 */
1150 bus_read_cachesize(sc, &csz);
1151 /* XXX assert csz is non-zero */
1152 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1153
1154 spin_lock_init(&sc->sc_resetlock);
1155
1156 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1157 if (ah == NULL) {
1158 DPRINTF(sc, ATH_DBG_FATAL,
1159 "%s: unable to attach hardware; HAL status %u\n",
1160 __func__, status);
1161 error = -ENXIO;
1162 goto bad;
1163 }
1164 sc->sc_ah = ah;
1165
1166 /* Get the chipset-specific aggr limit. */
1167 sc->sc_rtsaggrlimit = ah->ah_caps.halRtsAggrLimit;
1168
1169 /* Get the hardware key cache size. */
1170 sc->sc_keymax = ah->ah_caps.halKeyCacheSize;
1171 if (sc->sc_keymax > ATH_KEYMAX) {
1172 DPRINTF(sc, ATH_DBG_KEYCACHE,
1173 "%s: Warning, using only %u entries in %u key cache\n",
1174 __func__, ATH_KEYMAX, sc->sc_keymax);
1175 sc->sc_keymax = ATH_KEYMAX;
1176 }
1177
1178 /*
1179 * Reset the key cache since some parts do not
1180 * reset the contents on initial power up.
1181 */
1182 for (i = 0; i < sc->sc_keymax; i++)
1183 ath9k_hw_keyreset(ah, (u16) i);
1184 /*
1185 * Mark key cache slots associated with global keys
1186 * as in use. If we knew TKIP was not to be used we
1187 * could leave the +32, +64, and +32+64 slots free.
1188 * XXX only for splitmic.
1189 */
1190 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1191 set_bit(i, sc->sc_keymap);
1192 set_bit(i + 32, sc->sc_keymap);
1193 set_bit(i + 64, sc->sc_keymap);
1194 set_bit(i + 32 + 64, sc->sc_keymap);
1195 }
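/*
 * Worked example for the loop above: for global key index 0 the
 * driver reserves keymap bits 0, 32, 64 and 96 (32 + 64). The three
 * extra slots hold the TKIP tx/rx MIC state when the hardware keeps
 * MIC keys in separate cache entries (splitmic); as the comment
 * notes, they could stay free if TKIP were known to be unused.
 */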
1196 /*
1197 * Collect the channel list using the default country
1198 * code and including outdoor channels. The 802.11 layer
1199 * is responsible for filtering this list based on settings
1200 * like the phy mode.
1201 */
1202 rd = ah->ah_currentRD;
1203
1204 error = ath_setup_channels(sc);
1205 if (error)
1206 goto bad;
1207
1208 /* default to monitor mode */
1209 sc->sc_opmode = ATH9K_M_MONITOR;
1210
1211 /* Setup rate tables for all potential media types. */
1212 /* 11g encompasses b,g */
1213
1214 ath_rate_setup(sc, WIRELESS_MODE_11a);
1215 ath_rate_setup(sc, WIRELESS_MODE_11g);
1216
1217 /* NB: setup here so ath_rate_update is happy */
1218 ath_setcurmode(sc, WIRELESS_MODE_11a);
1219
1220 /*
1221 * Allocate hardware transmit queues: one queue for
1222 * beacon frames and one data queue for each QoS
1223 * priority. Note that the hal handles resetting
1224 * these queues at the needed time.
1225 */
1226 sc->sc_bhalq = ath_beaconq_setup(ah);
1227 if (sc->sc_bhalq == -1) {
1228 DPRINTF(sc, ATH_DBG_FATAL,
1229 "%s: unable to setup a beacon xmit queue\n", __func__);
1230 error = -EIO;
1231 goto bad2;
1232 }
1233 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1234 if (sc->sc_cabq == NULL) {
1235 DPRINTF(sc, ATH_DBG_FATAL,
1236 "%s: unable to setup CAB xmit queue\n", __func__);
1237 error = -EIO;
1238 goto bad2;
1239 }
1240
1241 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1242 ath_cabq_update(sc);
1243
1244 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1245 sc->sc_haltype2q[i] = -1;
1246
1247 /* Setup data queues */
1248 /* NB: ensure BK queue is the lowest priority h/w queue */
1249 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1250 DPRINTF(sc, ATH_DBG_FATAL,
1251 "%s: unable to setup xmit queue for BK traffic\n",
1252 __func__);
1253 error = -EIO;
1254 goto bad2;
1255 }
1256
1257 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1258 DPRINTF(sc, ATH_DBG_FATAL,
1259 "%s: unable to setup xmit queue for BE traffic\n",
1260 __func__);
1261 error = -EIO;
1262 goto bad2;
1263 }
1264 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1265 DPRINTF(sc, ATH_DBG_FATAL,
1266 "%s: unable to setup xmit queue for VI traffic\n",
1267 __func__);
1268 error = -EIO;
1269 goto bad2;
1270 }
1271 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1272 DPRINTF(sc, ATH_DBG_FATAL,
1273 "%s: unable to setup xmit queue for VO traffic\n",
1274 __func__);
1275 error = -EIO;
1276 goto bad2;
1277 }
1278
1279 sc->sc_rc = ath_rate_attach(ah);
1280 if (sc->sc_rc == NULL) {
1281 error = -EIO;
1282 goto bad2;
1283 }
1284
1285 if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER,
1286 ATH9K_CIPHER_TKIP, NULL)) {
1287 /*
1288 * Whether we should enable h/w TKIP MIC.
1289 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1290 * report WMM capable, so it's always safe to turn on
1291 * TKIP MIC in this case.
1292 */
1293 ath9k_hw_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 1, NULL);
1294 }
1295
1296 /*
1297 * Check whether the separate key cache entries
1298 * are required to handle both tx+rx MIC keys.
1299 * With split mic keys the number of stations is limited
1300 * to 27 otherwise 59.
1301 */
1302 if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER,
1303 ATH9K_CIPHER_TKIP, NULL)
1304 && ath9k_hw_getcapability(ah, HAL_CAP_CIPHER,
1305 ATH9K_CIPHER_MIC, NULL)
1306 && ath9k_hw_getcapability(ah, HAL_CAP_TKIP_SPLIT,
1307 0, NULL))
1308 sc->sc_splitmic = 1;
1309
1310 /* turn on mcast key search if possible */
1311 if (!ath9k_hw_getcapability(ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL))
1312 (void)ath9k_hw_setcapability(ah, HAL_CAP_MCAST_KEYSRCH, 1,
1313 1, NULL);
1314
1315 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1316 sc->sc_config.txpowlimit_override = 0;
1317
1318 /* 11n Capabilities */
1319 if (ah->ah_caps.halHTSupport) {
1320 sc->sc_txaggr = 1;
1321 sc->sc_rxaggr = 1;
1322 }
1323
1324 sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
1325 sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
1326
1327 /* Configuration for rx chain detection */
1328 sc->sc_rxchaindetect_ref = 0;
1329 sc->sc_rxchaindetect_thresh5GHz = 35;
1330 sc->sc_rxchaindetect_thresh2GHz = 35;
1331 sc->sc_rxchaindetect_delta5GHz = 30;
1332 sc->sc_rxchaindetect_delta2GHz = 30;
1333
1334 ath9k_hw_setcapability(ah, HAL_CAP_DIVERSITY, 1, true, NULL);
1335 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1336
1337 ath9k_hw_getmac(ah, sc->sc_myaddr);
1338 if (ah->ah_caps.halBssIdMaskSupport) {
1339 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1340 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1341 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1342 }
1343 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1344
1345 /* initialize beacon slots */
1346 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1347 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1348
1349 /* save MISC configurations */
1350 sc->sc_config.swBeaconProcess = 1;
1351
1352 #ifdef CONFIG_SLOW_ANT_DIV
1353 /* range is 40 - 255, we use something in the middle */
1354 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1355 #endif
1356
1357 return 0;
1358 bad2:
1359 /* cleanup tx queues */
1360 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1361 if (ATH_TXQ_SETUP(sc, i))
1362 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1363 bad:
1364 if (ah)
1365 ath9k_hw_detach(ah);
1366 return error;
1367 }
1368
1369 void ath_deinit(struct ath_softc *sc)
1370 {
1371 struct ath_hal *ah = sc->sc_ah;
1372 int i;
1373
1374 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1375
1376 ath_stop(sc);
1377 if (!sc->sc_invalid)
1378 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1379 ath_rate_detach(sc->sc_rc);
1380 /* cleanup tx queues */
1381 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1382 if (ATH_TXQ_SETUP(sc, i))
1383 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1384 ath9k_hw_detach(ah);
1385 }
1386
1387 /*******************/
1388 /* Node Management */
1389 /*******************/
1390
1391 struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1392 {
1393 struct ath_vap *avp;
1394 struct ath_node *an;
1395 DECLARE_MAC_BUF(mac);
1396
1397 avp = sc->sc_vaps[if_id];
1398 ASSERT(avp != NULL);
1399
1400 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1401 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1402 if (an == NULL)
1403 return NULL;
1404 memzero(an, sizeof(*an));
1405
1406 an->an_sc = sc;
1407 memcpy(an->an_addr, addr, ETH_ALEN);
1408 atomic_set(&an->an_refcnt, 1);
1409
1410 /* set up per-node tx/rx state */
1411 ath_tx_node_init(sc, an);
1412 ath_rx_node_init(sc, an);
1413
1414 ath_chainmask_sel_init(sc, an);
1415 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1416 list_add(&an->list, &sc->node_list);
1417
1418 return an;
1419 }
1420
1421 void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1422 {
1423 unsigned long flags;
1424
1425 DECLARE_MAC_BUF(mac);
1426
1427 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1428 an->an_flags |= ATH_NODE_CLEAN;
1429 ath_tx_node_cleanup(sc, an, bh_flag);
1430 ath_rx_node_cleanup(sc, an);
1431
1432 ath_tx_node_free(sc, an);
1433 ath_rx_node_free(sc, an);
1434
1435 spin_lock_irqsave(&sc->node_lock, flags);
1436
1437 list_del(&an->list);
1438
1439 spin_unlock_irqrestore(&sc->node_lock, flags);
1440
1441 kfree(an);
1442 }
1443
1444 /* Finds a node and increases the refcnt if found */
1445
1446 struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1447 {
1448 struct ath_node *an = NULL, *an_found = NULL;
1449
1450 if (list_empty(&sc->node_list)) /* FIXME */
1451 goto out;
1452 list_for_each_entry(an, &sc->node_list, list) {
1453 if (!compare_ether_addr(an->an_addr, addr)) {
1454 atomic_inc(&an->an_refcnt);
1455 an_found = an;
1456 break;
1457 }
1458 }
1459 out:
1460 return an_found;
1461 }
1462
1463 /* Decrements the refcnt and if it drops to zero, detach the node */
1464
1465 void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1466 {
1467 if (atomic_dec_and_test(&an->an_refcnt))
1468 ath_node_detach(sc, an, bh_flag);
1469 }
1470
1471 /* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1472 struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1473 {
1474 struct ath_node *an = NULL, *an_found = NULL;
1475
1476 if (list_empty(&sc->node_list))
1477 return NULL;
1478
1479 list_for_each_entry(an, &sc->node_list, list)
1480 if (!compare_ether_addr(an->an_addr, addr)) {
1481 an_found = an;
1482 break;
1483 }
1484
1485 return an_found;
1486 }
1487
1488 /*
1489 * Set up New Node
1490 *
1491 * Set up driver-specific state for a newly associated node. This routine
1492 * really only applies if compression or XR are enabled; there is no code
1493 * covering any other cases.
1494 */
1495
1496 void ath_newassoc(struct ath_softc *sc,
1497 struct ath_node *an, int isnew, int isuapsd)
1498 {
1499 int tidno;
1500
1501 /* if station reassociates, tear down the aggregation state. */
1502 if (!isnew) {
1503 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1504 if (sc->sc_txaggr)
1505 ath_tx_aggr_teardown(sc, an, tidno);
1506 if (sc->sc_rxaggr)
1507 ath_rx_aggr_teardown(sc, an, tidno);
1508 }
1509 }
1510 an->an_flags = 0;
1511 }
1512
1513 /**************/
1514 /* Encryption */
1515 /**************/
1516
1517 void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1518 {
1519 ath9k_hw_keyreset(sc->sc_ah, keyix);
1520 if (freeslot)
1521 clear_bit(keyix, sc->sc_keymap);
1522 }
1523
1524 int ath_keyset(struct ath_softc *sc,
1525 u16 keyix,
1526 struct ath9k_keyval *hk,
1527 const u8 mac[ETH_ALEN])
1528 {
1529 bool status;
1530
1531 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1532 keyix, hk, mac, false);
1533
1534 return status != false;
1535 }
1536
1537 /***********************/
1538 /* TX Power/Regulatory */
1539 /***********************/
1540
1541 /*
1542 * Set Transmit power in HAL
1543 *
1544 * This routine makes the actual HAL calls to set the new transmit power
1545 * limit.
1546 */
1547
1548 void ath_update_txpow(struct ath_softc *sc)
1549 {
1550 struct ath_hal *ah = sc->sc_ah;
1551 u32 txpow;
1552
1553 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1554 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1555 /* read back in case value is clamped */
1556 ath9k_hw_getcapability(ah, HAL_CAP_TXPOW, 1, &txpow);
1557 sc->sc_curtxpow = txpow;
1558 }
1559 }
1560
1561 /* Return the current country and domain information */
1562 void ath_get_currentCountry(struct ath_softc *sc,
1563 struct ath9k_country_entry *ctry)
1564 {
1565 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1566
1567 /* If the HAL has not settled on a specific country yet (it is
1568 * band dependent), use the one we passed in. */
1569 if (ctry->countryCode == CTRY_DEFAULT) {
1570 ctry->iso[0] = 0;
1571 ctry->iso[1] = 0;
1572 } else if (ctry->iso[0] && ctry->iso[1]) {
1573 if (!ctry->iso[2]) {
1574 if (ath_outdoor)
1575 ctry->iso[2] = 'O';
1576 else
1577 ctry->iso[2] = 'I';
1578 }
1579 }
1580 }
1581
1582 /**************************/
1583 /* Slow Antenna Diversity */
1584 /**************************/
1585
1586 void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1587 struct ath_softc *sc,
1588 int32_t rssitrig)
1589 {
1590 int trig;
1591
1592 /* antdivf_rssitrig can range from 40 to 0xff */
1593 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1594 trig = (trig < 40) ? 40 : trig;
1595
1596 antdiv->antdiv_sc = sc;
1597 antdiv->antdivf_rssitrig = trig;
1598 }
1599
1600 void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1601 u8 num_antcfg,
1602 const u8 *bssid)
1603 {
1604 antdiv->antdiv_num_antcfg =
1605 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1606 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1607 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1608 antdiv->antdiv_curcfg = 0;
1609 antdiv->antdiv_bestcfg = 0;
1610 antdiv->antdiv_laststatetsf = 0;
1611
1612 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1613
1614 antdiv->antdiv_start = 1;
1615 }
1616
1617 void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1618 {
1619 antdiv->antdiv_start = 0;
1620 }
1621
1622 static int32_t ath_find_max_val(int32_t *val,
1623 u8 num_val, u8 *max_index)
1624 {
1625 int32_t MaxVal = *val++;
1626 u32 cur_index = 0;
1627
1628 *max_index = 0;
1629 while (++cur_index < num_val) {
1630 if (*val > MaxVal) {
1631 MaxVal = *val;
1632 *max_index = cur_index;
1633 }
1634
1635 val++;
1636 }
1637
1638 return MaxVal;
1639 }
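/*
 * Example: for val = { -20, -5, -11 } and num_val = 3, the function
 * returns -5 and sets *max_index to 1. (RSSI averages are signed,
 * which is why MaxVal is kept as int32_t above.)
 */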
1640
1641 void ath_slow_ant_div(struct ath_antdiv *antdiv,
1642 struct ieee80211_hdr *hdr,
1643 struct ath_rx_status *rx_stats)
1644 {
1645 struct ath_softc *sc = antdiv->antdiv_sc;
1646 struct ath_hal *ah = sc->sc_ah;
1647 u64 curtsf = 0;
1648 u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
1649 __le16 fc = hdr->frame_control;
1650
1651 if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
1652 && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
1653 antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
1654 antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
1655 curtsf = antdiv->antdiv_lastbtsf[curcfg];
1656 } else {
1657 return;
1658 }
1659
1660 switch (antdiv->antdiv_state) {
1661 case ATH_ANT_DIV_IDLE:
1662 if ((antdiv->antdiv_lastbrssi[curcfg] <
1663 antdiv->antdivf_rssitrig)
1664 && ((curtsf - antdiv->antdiv_laststatetsf) >
1665 ATH_ANT_DIV_MIN_IDLE_US)) {
1666
1667 curcfg++;
1668 if (curcfg == antdiv->antdiv_num_antcfg)
1669 curcfg = 0;
1670
1671 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1672 antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
1673 antdiv->antdiv_curcfg = curcfg;
1674 antdiv->antdiv_laststatetsf = curtsf;
1675 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1676 }
1677 }
1678 break;
1679
1680 case ATH_ANT_DIV_SCAN:
1681 if ((curtsf - antdiv->antdiv_laststatetsf) <
1682 ATH_ANT_DIV_MIN_SCAN_US)
1683 break;
1684
1685 curcfg++;
1686 if (curcfg == antdiv->antdiv_num_antcfg)
1687 curcfg = 0;
1688
1689 if (curcfg == antdiv->antdiv_bestcfg) {
1690 ath_find_max_val(antdiv->antdiv_lastbrssi,
1691 antdiv->antdiv_num_antcfg, &bestcfg);
1692 if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
1693 antdiv->antdiv_bestcfg = bestcfg;
1694 antdiv->antdiv_curcfg = bestcfg;
1695 antdiv->antdiv_laststatetsf = curtsf;
1696 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1697 }
1698 } else {
1699 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1700 antdiv->antdiv_curcfg = curcfg;
1701 antdiv->antdiv_laststatetsf = curtsf;
1702 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1703 }
1704 }
1705
1706 break;
1707 }
1708 }
1709
1710 /***********************/
1711 /* Descriptor Handling */
1712 /***********************/
1713
1714 /*
1715 * Set up DMA descriptors
1716 *
1717 * This function will allocate both the DMA descriptor structure, and the
1718 * buffers it contains. These are used to contain the descriptors used
1719 * by the system.
1720 */
1721
1722 int ath_descdma_setup(struct ath_softc *sc,
1723 struct ath_descdma *dd,
1724 struct list_head *head,
1725 const char *name,
1726 int nbuf,
1727 int ndesc)
1728 {
1729 #define DS2PHYS(_dd, _ds) \
1730 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1731 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1732 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1733
1734 struct ath_desc *ds;
1735 struct ath_buf *bf;
1736 int i, bsize, error;
1737
1738 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1739 __func__, name, nbuf, ndesc);
1740
1741 /* sizeof(struct ath_desc) must be a multiple of 4 bytes (DWORD aligned) */
1742 if ((sizeof(struct ath_desc) % 4) != 0) {
1743 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1744 __func__);
1745 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1746 error = -ENOMEM;
1747 goto fail;
1748 }
1749
1750 dd->dd_name = name;
1751 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1752
1753 /*
1754 * Need additional DMA memory because we can't use
1755 * descriptors that cross the 4K page boundary. Assume
1756 * one skipped descriptor per 4K page.
1757 */
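/*
 * Worked example (sizes are illustrative): a 32-DWORD (128-byte)
 * descriptor fetch that starts at an offset whose low 12 bits exceed
 * 0xF7F can run past the end of its 4 KB page, which is exactly what
 * ATH_DESC_4KB_BOUND_CHECK() above tests for. Reserving one extra
 * descriptor's worth of DMA memory per 4 KB page pays for the slots
 * the setup loop below skips to avoid such crossings.
 */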
1758 if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
1759 u32 ndesc_skipped =
1760 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1761 u32 dma_len;
1762
1763 while (ndesc_skipped) {
1764 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1765 dd->dd_desc_len += dma_len;
1766
1767 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1768 }
1769 }
1770
1771 /* allocate descriptors */
1772 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1773 dd->dd_desc_len,
1774 &dd->dd_desc_paddr);
1775 if (dd->dd_desc == NULL) {
1776 error = -ENOMEM;
1777 goto fail;
1778 }
1779 ds = dd->dd_desc;
1780 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1781 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1782 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1783
1784 /* allocate buffers */
1785 bsize = sizeof(struct ath_buf) * nbuf;
1786 bf = kmalloc(bsize, GFP_KERNEL);
1787 if (bf == NULL) {
1788 error = -ENOMEM;
1789 goto fail2;
1790 }
1791 memzero(bf, bsize);
1792 dd->dd_bufptr = bf;
1793
1794 INIT_LIST_HEAD(head);
1795 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1796 bf->bf_desc = ds;
1797 bf->bf_daddr = DS2PHYS(dd, ds);
1798
1799 if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
1800 /*
1801 * Skip descriptor addresses which can cause 4KB
1802 * boundary crossing (addr + length) with a 32 dword
1803 * descriptor fetch.
1804 */
1805 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1806 ASSERT((caddr_t) bf->bf_desc <
1807 ((caddr_t) dd->dd_desc +
1808 dd->dd_desc_len));
1809
1810 ds += ndesc;
1811 bf->bf_desc = ds;
1812 bf->bf_daddr = DS2PHYS(dd, ds);
1813 }
1814 }
1815 list_add_tail(&bf->list, head);
1816 }
1817 return 0;
1818 fail2:
1819 pci_free_consistent(sc->pdev,
1820 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1821 fail:
1822 memzero(dd, sizeof(*dd));
1823 return error;
1824 #undef ATH_DESC_4KB_BOUND_CHECK
1825 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1826 #undef DS2PHYS
1827 }
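/*
 * Illustrative usage sketch (not part of this file; buffer counts are
 * made-up values): callers are expected to pair ath_descdma_setup()
 * with ath_descdma_cleanup() roughly like this:
 *
 *	struct ath_descdma dd;
 *	struct list_head buf_list;
 *	int error;
 *
 *	error = ath_descdma_setup(sc, &dd, &buf_list, "tx", 64, 4);
 *	if (error)
 *		return error;
 *	...use the ath_buf entries queued on buf_list...
 *	ath_descdma_cleanup(sc, &dd, &buf_list);
 */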
1828
1829 /*
1830 * Cleanup DMA descriptors
1831 *
1832 * This function will free the DMA block that was allocated for the descriptor
1833 * pool. Since this was allocated as one "chunk", it is freed in the same
1834 * manner.
1835 */
1836
1837 void ath_descdma_cleanup(struct ath_softc *sc,
1838 struct ath_descdma *dd,
1839 struct list_head *head)
1840 {
1841 /* Free memory associated with descriptors */
1842 pci_free_consistent(sc->pdev,
1843 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1844
1845 INIT_LIST_HEAD(head);
1846 kfree(dd->dd_bufptr);
1847 memzero(dd, sizeof(*dd));
1848 }
1849
1850 /*************/
1851 /* Utilities */
1852 /*************/
1853
1854 void ath_internal_reset(struct ath_softc *sc)
1855 {
1856 ath_reset_start(sc, 0);
1857 ath_reset(sc);
1858 ath_reset_end(sc, 0);
1859 }
1860
1861 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1862 {
1863 int qnum;
1864
1865 switch (queue) {
1866 case 0:
1867 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1868 break;
1869 case 1:
1870 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1871 break;
1872 case 2:
1873 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1874 break;
1875 case 3:
1876 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1877 break;
1878 default:
1879 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1880 break;
1881 }
1882
1883 return qnum;
1884 }
1885
1886 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1887 {
1888 int qnum;
1889
1890 switch (queue) {
1891 case ATH9K_WME_AC_VO:
1892 qnum = 0;
1893 break;
1894 case ATH9K_WME_AC_VI:
1895 qnum = 1;
1896 break;
1897 case ATH9K_WME_AC_BE:
1898 qnum = 2;
1899 break;
1900 case ATH9K_WME_AC_BK:
1901 qnum = 3;
1902 break;
1903 default:
1904 qnum = -1;
1905 break;
1906 }
1907
1908 return qnum;
1909 }
1910
1911
1912 /*
1913 * Expand time stamp to TSF
1914 *
1915 * Extend 15-bit time stamp from rx descriptor to
1916 * a full 64-bit TSF using the current h/w TSF.
1917 */
1918
1919 u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1920 {
1921 u64 tsf;
1922
1923 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1924 if ((tsf & 0x7fff) < rstamp)
1925 tsf -= 0x8000;
1926 return (tsf & ~0x7fff) | rstamp;
1927 }
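/*
 * Worked example: suppose the current TSF is 0x100007000 and the rx
 * descriptor stamp is 0x7fff. The TSF's low 15 bits (0x7000) are
 * smaller than the stamp, so the stamp predates the most recent
 * 15-bit rollover; subtracting 0x8000 and splicing the stamp back in
 * yields 0xffffffff, the TSF at which the frame actually arrived.
 */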
1928
1929 /*
1930 * Set Default Antenna
1931 *
1932 * Call into the HAL to set the default antenna to use. Not really valid for
1933 * MIMO technology.
1934 */
1935
1936 void ath_setdefantenna(void *context, u32 antenna)
1937 {
1938 struct ath_softc *sc = (struct ath_softc *)context;
1939 struct ath_hal *ah = sc->sc_ah;
1940
1941 /* XXX block beacon interrupts */
1942 ath9k_hw_setantenna(ah, antenna);
1943 sc->sc_defant = antenna;
1944 sc->sc_rxotherant = 0;
1945 }
1946
1947 /*
1948 * Set Slot Time
1949 *
1950 * This will wake up the chip if required, and set the slot time for the
1951 * frame (maximum transmit time). Slot time is assumed to be already set
1952 * in the ATH object member sc_slottime
1953 */
1954
1955 void ath_setslottime(struct ath_softc *sc)
1956 {
1957 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
1958 sc->sc_updateslot = OK;
1959 }