--- /dev/null
+#
+# Copyright (C) 2008 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+PKG_NAME:=ath9k
+PKG_VERSION:=20080720
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/ath9k
+ SUBMENU:=Wireless Drivers
+ TITLE:=Atheros 802.11n wireless support
+ DEPENDS:=+kmod-mac80211 @!TARGET_brcm47xx
+ FILES:=$(PKG_BUILD_DIR)/drivers/net/wireless/ath9k/ath9k.$(LINUX_KMOD_SUFFIX)
+ AUTOLOAD:=$(call AutoLoad,30,ath9k)
+endef
+
+define KernelPackage/ath9k/description
+Kernel module for Atheros AR9xxx and AR5416/AR5418 based cards
+endef
+
+EXTRA_KCONFIG:= \
+ CONFIG_ATH9K=m
+
+EXTRA_CFLAGS:= \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=m,%,$(filter %=m,$(EXTRA_KCONFIG)))) \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=y,%,$(filter %=y,$(EXTRA_KCONFIG))))
+
+MAKE_OPTS:= \
+ ARCH="$(LINUX_KARCH)" \
+ CROSS_COMPILE="$(TARGET_CROSS)" \
+ SUBDIRS="$(PKG_BUILD_DIR)/drivers/net/wireless/ath9k" \
+ EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+ LINUXINCLUDE="-I$(STAGING_DIR)/usr/include/mac80211 -I$(LINUX_DIR)/include -include linux/autoconf.h" \
+ $(EXTRA_KCONFIG)
+
+define Build/Prepare
+ mkdir -p $(PKG_BUILD_DIR)
+ $(CP) ./src/* $(PKG_BUILD_DIR)/
+ $(Build/Patch)
+ $(if $(QUILT),touch $(PKG_BUILD_DIR)/.quilt_used)
+endef
+
+define Build/Configure
+endef
+
+define Build/Compile
+ $(MAKE) -C "$(LINUX_DIR)" \
+ $(MAKE_OPTS) \
+ modules
+endef
+
+$(eval $(call KernelPackage,ath9k))
--- /dev/null
+Replace udelay(3000) with mdelay(3), because udelay(3000) fails on ARM
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+
+--- a/drivers/net/wireless/ath9k/recv.c
++++ b/drivers/net/wireless/ath9k/recv.c
+@@ -737,7 +737,7 @@
+ ath9k_hw_stoppcurecv(ah); /* disable PCU */
+ ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */
+ stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */
+- udelay(3000); /* 3ms is long enough for 1 frame */
++ mdelay(3); /* 3ms is long enough for 1 frame */
+ tsf = ath9k_hw_gettsf64(ah);
+ sc->sc_rxlink = NULL; /* just in case */
+ return stopped;
--- /dev/null
+Add missing include statements
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+
+--- a/drivers/net/wireless/ath9k/regd.c
++++ b/drivers/net/wireless/ath9k/regd.c
+@@ -14,6 +14,8 @@
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
++#include <linux/kernel.h>
++#include <linux/slab.h>
+ #include "ath9k.h"
+ #include "regd.h"
+ #include "regd_common.h"
--- /dev/null
+Add missing device ID for AR9160
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+
+--- a/drivers/net/wireless/ath9k/hw.c
++++ b/drivers/net/wireless/ath9k/hw.c
+@@ -8329,6 +8329,8 @@
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ return "Atheros 5416";
++ case AR9160_DEVID_PCI:
++ return "Atheros 9160";
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ return "Atheros 9280";
+@@ -8350,6 +8352,7 @@
+ switch (devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
++ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ ah = ath9k_hw_do_attach(devid, sc, mem, error);
--- /dev/null
+Fix a return code check for ath9k_hw_nvram_read, this function returns
+AH_TRUE when the call succeeded
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+
+--- a/drivers/net/wireless/ath9k/hw.c
++++ b/drivers/net/wireless/ath9k/hw.c
+@@ -803,7 +803,7 @@
+ u_int16_t magic, magic2;
+ int addr;
+
+- if (ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
++ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: Reading Magic # failed\n", __func__);
--- /dev/null
+config ATH9K
+ tristate "Atheros 802.11n wireless cards support"
+ depends on PCI && MAC80211 && WLAN_80211
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
+
+ If you choose to build a module, it'll be called ath9k.
--- /dev/null
+ath9k-y += hw.o \
+ phy.o \
+ regd.o \
+ beacon.o \
+ main.o \
+ recv.o \
+ xmit.o \
+ rc.o \
+ core.o
+
+obj-$(CONFIG_ATH9K) += ath9k.o
--- /dev/null
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_H
+#define ATH9K_H
+
+#include <linux/io.h>
+
+#define ATHEROS_VENDOR_ID 0x168c
+
+#define AR5416_DEVID_PCI 0x0023
+#define AR5416_DEVID_PCIE 0x0024
+#define AR9160_DEVID_PCI 0x0027
+#define AR9280_DEVID_PCI 0x0029
+#define AR9280_DEVID_PCIE 0x002a
+
+#define AR5416_AR9100_DEVID 0x000b
+
+#define AR_SUBVENDOR_ID_NOG 0x0e11
+#define AR_SUBVENDOR_ID_NEW_A 0x7065
+
+#define HAL_TXERR_XRETRY 0x01
+#define HAL_TXERR_FILT 0x02
+#define HAL_TXERR_FIFO 0x04
+#define HAL_TXERR_XTXOP 0x08
+#define HAL_TXERR_TIMER_EXPIRED 0x10
+
+#define HAL_TX_BA 0x01
+#define HAL_TX_PWRMGMT 0x02
+#define HAL_TX_DESC_CFG_ERR 0x04
+#define HAL_TX_DATA_UNDERRUN 0x08
+#define HAL_TX_DELIM_UNDERRUN 0x10
+#define HAL_TX_SW_ABORTED 0x40
+#define HAL_TX_SW_FILTERED 0x80
+
+#define NBBY 8
+#ifndef howmany
+#define howmany(x, y) (((x)+((y)-1))/(y))
+#endif
+
+struct ath_tx_status {
+ u_int32_t ts_tstamp;
+ u_int16_t ts_seqnum;
+ u_int8_t ts_status;
+ u_int8_t ts_ratecode;
+ u_int8_t ts_rateindex;
+ int8_t ts_rssi;
+ u_int8_t ts_shortretry;
+ u_int8_t ts_longretry;
+ u_int8_t ts_virtcol;
+ u_int8_t ts_antenna;
+ u_int8_t ts_flags;
+ int8_t ts_rssi_ctl0;
+ int8_t ts_rssi_ctl1;
+ int8_t ts_rssi_ctl2;
+ int8_t ts_rssi_ext0;
+ int8_t ts_rssi_ext1;
+ int8_t ts_rssi_ext2;
+ u_int8_t pad[3];
+ u_int32_t ba_low;
+ u_int32_t ba_high;
+ u_int32_t evm0;
+ u_int32_t evm1;
+ u_int32_t evm2;
+};
+
+struct ath_rx_status {
+ u_int32_t rs_tstamp;
+ u_int16_t rs_datalen;
+ u_int8_t rs_status;
+ u_int8_t rs_phyerr;
+ int8_t rs_rssi;
+ u_int8_t rs_keyix;
+ u_int8_t rs_rate;
+ u_int8_t rs_antenna;
+ u_int8_t rs_more;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u_int8_t rs_isaggr;
+ u_int8_t rs_moreaggr;
+ u_int8_t rs_num_delims;
+ u_int8_t rs_flags;
+ u_int32_t evm0;
+ u_int32_t evm1;
+ u_int32_t evm2;
+};
+
+#define HAL_RXERR_CRC 0x01
+#define HAL_RXERR_PHY 0x02
+#define HAL_RXERR_FIFO 0x04
+#define HAL_RXERR_DECRYPT 0x08
+#define HAL_RXERR_MIC 0x10
+
+#define HAL_RX_MORE 0x01
+#define HAL_RX_MORE_AGGR 0x02
+#define HAL_RX_GI 0x04
+#define HAL_RX_2040 0x08
+#define HAL_RX_DELIM_CRC_PRE 0x10
+#define HAL_RX_DELIM_CRC_POST 0x20
+#define HAL_RX_DECRYPT_BUSY 0x40
+
+enum hal_bool {
+ AH_FALSE = 0,
+ AH_TRUE = 1,
+};
+
+#define HAL_RXKEYIX_INVALID ((u_int8_t)-1)
+#define HAL_TXKEYIX_INVALID ((u_int)-1)
+
+struct ath_desc {
+ u_int32_t ds_link;
+ u_int32_t ds_data;
+ u_int32_t ds_ctl0;
+ u_int32_t ds_ctl1;
+ u_int32_t ds_hw[20];
+ union {
+ struct ath_tx_status tx;
+ struct ath_rx_status rx;
+ void *stats;
+ } ds_us;
+ void *ds_vdata;
+} __packed;
+
+#define ds_txstat ds_us.tx
+#define ds_rxstat ds_us.rx
+#define ds_stat ds_us.stats
+
+#define HAL_TXDESC_CLRDMASK 0x0001
+#define HAL_TXDESC_NOACK 0x0002
+#define HAL_TXDESC_RTSENA 0x0004
+#define HAL_TXDESC_CTSENA 0x0008
+#define HAL_TXDESC_INTREQ 0x0010
+#define HAL_TXDESC_VEOL 0x0020
+#define HAL_TXDESC_EXT_ONLY 0x0040
+#define HAL_TXDESC_EXT_AND_CTL 0x0080
+#define HAL_TXDESC_VMF 0x0100
+#define HAL_TXDESC_FRAG_IS_ON 0x0200
+
+#define HAL_RXDESC_INTREQ 0x0020
+
+enum hal_capability_type {
+ HAL_CAP_CIPHER = 0,
+ HAL_CAP_TKIP_MIC,
+ HAL_CAP_TKIP_SPLIT,
+ HAL_CAP_PHYCOUNTERS,
+ HAL_CAP_DIVERSITY,
+ HAL_CAP_PSPOLL,
+ HAL_CAP_TXPOW,
+ HAL_CAP_PHYDIAG,
+ HAL_CAP_MCAST_KEYSRCH,
+ HAL_CAP_TSF_ADJUST,
+ HAL_CAP_WME_TKIPMIC,
+ HAL_CAP_RFSILENT,
+ HAL_CAP_ANT_CFG_2GHZ,
+ HAL_CAP_ANT_CFG_5GHZ
+};
+
+struct hal_capabilities {
+ u_int halChanSpreadSupport:1,
+ halChapTuningSupport:1,
+ halMicAesCcmSupport:1,
+ halMicCkipSupport:1,
+ halMicTkipSupport:1,
+ halCipherAesCcmSupport:1,
+ halCipherCkipSupport:1,
+ halCipherTkipSupport:1,
+ halVEOLSupport:1,
+ halBssIdMaskSupport:1,
+ halMcastKeySrchSupport:1,
+ halTsfAddSupport:1,
+ halChanHalfRate:1,
+ halChanQuarterRate:1,
+ halHTSupport:1,
+ halGTTSupport:1,
+ halFastCCSupport:1,
+ halRfSilentSupport:1,
+ halWowSupport:1,
+ halCSTSupport:1,
+ halEnhancedPmSupport:1,
+ halAutoSleepSupport:1,
+ hal4kbSplitTransSupport:1,
+ halWowMatchPatternExact:1;
+ u_int32_t halWirelessModes;
+ u_int16_t halTotalQueues;
+ u_int16_t halKeyCacheSize;
+ u_int16_t halLow5GhzChan, halHigh5GhzChan;
+ u_int16_t halLow2GhzChan, halHigh2GhzChan;
+ u_int16_t halNumMRRetries;
+ u_int16_t halRtsAggrLimit;
+ u_int8_t halTxChainMask;
+ u_int8_t halRxChainMask;
+ u_int16_t halTxTrigLevelMax;
+ u_int16_t halRegCap;
+ u_int8_t halNumGpioPins;
+ u_int8_t halNumAntCfg2GHz;
+ u_int8_t halNumAntCfg5GHz;
+};
+
+struct hal_ops_config {
+ int ath_hal_dma_beacon_response_time;
+ int ath_hal_sw_beacon_response_time;
+ int ath_hal_additional_swba_backoff;
+ int ath_hal_6mb_ack;
+ int ath_hal_cwmIgnoreExtCCA;
+ u_int8_t ath_hal_pciePowerSaveEnable;
+ u_int8_t ath_hal_pcieL1SKPEnable;
+ u_int8_t ath_hal_pcieClockReq;
+ u_int32_t ath_hal_pcieWaen;
+ int ath_hal_pciePowerReset;
+ u_int8_t ath_hal_pcieRestore;
+ u_int8_t ath_hal_analogShiftReg;
+ u_int8_t ath_hal_htEnable;
+ u_int32_t ath_hal_ofdmTrigLow;
+ u_int32_t ath_hal_ofdmTrigHigh;
+ u_int32_t ath_hal_cckTrigHigh;
+ u_int32_t ath_hal_cckTrigLow;
+ u_int32_t ath_hal_enableANI;
+ u_int8_t ath_hal_noiseImmunityLvl;
+ u_int32_t ath_hal_ofdmWeakSigDet;
+ u_int32_t ath_hal_cckWeakSigThr;
+ u_int8_t ath_hal_spurImmunityLvl;
+ u_int8_t ath_hal_firStepLvl;
+ int8_t ath_hal_rssiThrHigh;
+ int8_t ath_hal_rssiThrLow;
+ u_int16_t ath_hal_diversityControl;
+ u_int16_t ath_hal_antennaSwitchSwap;
+ int ath_hal_serializeRegMode;
+ int ath_hal_intrMitigation;
+ int ath_hal_debug;
+#define SPUR_DISABLE 0
+#define SPUR_ENABLE_IOCTL 1
+#define SPUR_ENABLE_EEPROM 2
+#define AR_EEPROM_MODAL_SPURS 5
+#define AR_SPUR_5413_1 1640
+#define AR_SPUR_5413_2 1200
+#define AR_NO_SPUR 0x8000
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+ int ath_hal_spurMode;
+ u_int16_t ath_hal_spurChans[AR_EEPROM_MODAL_SPURS][2];
+};
+
+enum hal_tx_queue {
+ HAL_TX_QUEUE_INACTIVE = 0,
+ HAL_TX_QUEUE_DATA,
+ HAL_TX_QUEUE_BEACON,
+ HAL_TX_QUEUE_CAB,
+ HAL_TX_QUEUE_UAPSD,
+ HAL_TX_QUEUE_PSPOLL
+};
+
+#define HAL_NUM_TX_QUEUES 10
+
+enum hal_tx_queue_subtype {
+ HAL_WME_AC_BK = 0,
+ HAL_WME_AC_BE,
+ HAL_WME_AC_VI,
+ HAL_WME_AC_VO,
+ HAL_WME_UPSD
+};
+
+enum hal_tx_queue_flags {
+ TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
+ TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
+ TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
+ TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
+ TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
+ TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
+ TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
+};
+
+struct hal_txq_info {
+ u_int32_t tqi_ver;
+ enum hal_tx_queue_subtype tqi_subtype;
+ enum hal_tx_queue_flags tqi_qflags;
+ u_int32_t tqi_priority;
+ u_int32_t tqi_aifs;
+ u_int32_t tqi_cwmin;
+ u_int32_t tqi_cwmax;
+ u_int16_t tqi_shretry;
+ u_int16_t tqi_lgretry;
+ u_int32_t tqi_cbrPeriod;
+ u_int32_t tqi_cbrOverflowLimit;
+ u_int32_t tqi_burstTime;
+ u_int32_t tqi_readyTime;
+ u_int32_t tqi_compBuf;
+};
+
+#define HAL_TQI_NONVAL 0xffff
+
+#define HAL_TXQ_USEDEFAULT ((u_int32_t) -1)
+
+#define HAL_COMP_BUF_MAX_SIZE 9216
+#define HAL_COMP_BUF_ALIGN_SIZE 512
+#define HAL_DECOMP_MASK_SIZE 128
+
+#define HAL_READY_TIME_LO_BOUND 50
+#define HAL_READY_TIME_HI_BOUND 96
+
+enum hal_pkt_type {
+ HAL_PKT_TYPE_NORMAL = 0,
+ HAL_PKT_TYPE_ATIM,
+ HAL_PKT_TYPE_PSPOLL,
+ HAL_PKT_TYPE_BEACON,
+ HAL_PKT_TYPE_PROBE_RESP,
+ HAL_PKT_TYPE_CHIRP,
+ HAL_PKT_TYPE_GRP_POLL,
+};
+
+struct hal_tx_queue_info {
+ u_int32_t tqi_ver;
+ enum hal_tx_queue tqi_type;
+ enum hal_tx_queue_subtype tqi_subtype;
+ enum hal_tx_queue_flags tqi_qflags;
+ u_int32_t tqi_priority;
+ u_int32_t tqi_aifs;
+ u_int32_t tqi_cwmin;
+ u_int32_t tqi_cwmax;
+ u_int16_t tqi_shretry;
+ u_int16_t tqi_lgretry;
+ u_int32_t tqi_cbrPeriod;
+ u_int32_t tqi_cbrOverflowLimit;
+ u_int32_t tqi_burstTime;
+ u_int32_t tqi_readyTime;
+ u_int32_t tqi_physCompBuf;
+ u_int32_t tqi_intFlags;
+};
+
+enum hal_rx_filter {
+ HAL_RX_FILTER_UCAST = 0x00000001,
+ HAL_RX_FILTER_MCAST = 0x00000002,
+ HAL_RX_FILTER_BCAST = 0x00000004,
+ HAL_RX_FILTER_CONTROL = 0x00000008,
+ HAL_RX_FILTER_BEACON = 0x00000010,
+ HAL_RX_FILTER_PROM = 0x00000020,
+ HAL_RX_FILTER_PROBEREQ = 0x00000080,
+ HAL_RX_FILTER_PSPOLL = 0x00004000,
+ HAL_RX_FILTER_PHYERR = 0x00000100,
+ HAL_RX_FILTER_PHYRADAR = 0x00002000,
+};
+
+enum hal_int {
+ HAL_INT_RX = 0x00000001,
+ HAL_INT_RXDESC = 0x00000002,
+ HAL_INT_RXNOFRM = 0x00000008,
+ HAL_INT_RXEOL = 0x00000010,
+ HAL_INT_RXORN = 0x00000020,
+ HAL_INT_TX = 0x00000040,
+ HAL_INT_TXDESC = 0x00000080,
+ HAL_INT_TIM_TIMER = 0x00000100,
+ HAL_INT_TXURN = 0x00000800,
+ HAL_INT_MIB = 0x00001000,
+ HAL_INT_RXPHY = 0x00004000,
+ HAL_INT_RXKCM = 0x00008000,
+ HAL_INT_SWBA = 0x00010000,
+ HAL_INT_BMISS = 0x00040000,
+ HAL_INT_BNR = 0x00100000,
+ HAL_INT_TIM = 0x00200000,
+ HAL_INT_DTIM = 0x00400000,
+ HAL_INT_DTIMSYNC = 0x00800000,
+ HAL_INT_GPIO = 0x01000000,
+ HAL_INT_CABEND = 0x02000000,
+ HAL_INT_CST = 0x10000000,
+ HAL_INT_GTT = 0x20000000,
+ HAL_INT_FATAL = 0x40000000,
+ HAL_INT_GLOBAL = 0x80000000,
+ HAL_INT_BMISC = HAL_INT_TIM
+ | HAL_INT_DTIM | HAL_INT_DTIMSYNC | HAL_INT_CABEND,
+ HAL_INT_COMMON = HAL_INT_RXNOFRM
+ | HAL_INT_RXDESC
+ | HAL_INT_RXEOL
+ | HAL_INT_RXORN
+ | HAL_INT_TXURN
+ | HAL_INT_TXDESC
+ | HAL_INT_MIB
+ | HAL_INT_RXPHY
+ | HAL_INT_RXKCM | HAL_INT_SWBA | HAL_INT_BMISS | HAL_INT_GPIO,
+ HAL_INT_NOCARD = 0xffffffff
+};
+
+struct hal_rate_table {
+ int rateCount;
+ u_int8_t rateCodeToIndex[256];
+ struct {
+ u_int8_t valid;
+ u_int8_t phy;
+ u_int32_t rateKbps;
+ u_int8_t rateCode;
+ u_int8_t shortPreamble;
+ u_int8_t dot11Rate;
+ u_int8_t controlRate;
+ u_int16_t lpAckDuration;
+ u_int16_t spAckDuration;
+ } info[32];
+};
+
+#define HAL_RATESERIES_RTS_CTS 0x0001
+#define HAL_RATESERIES_2040 0x0002
+#define HAL_RATESERIES_HALFGI 0x0004
+
+struct hal_11n_rate_series {
+ u_int Tries;
+ u_int Rate;
+ u_int PktDuration;
+ u_int ChSel;
+ u_int RateFlags;
+};
+
+struct hal_channel {
+ u_int16_t channel;
+ u_int32_t channelFlags;
+ u_int8_t privFlags;
+ int8_t maxRegTxPower;
+ int8_t maxTxPower;
+ int8_t minTxPower;
+};
+
+#define CHANNEL_CW_INT 0x00002
+#define CHANNEL_CCK 0x00020
+#define CHANNEL_OFDM 0x00040
+#define CHANNEL_2GHZ 0x00080
+#define CHANNEL_5GHZ 0x00100
+#define CHANNEL_PASSIVE 0x00200
+#define CHANNEL_DYN 0x00400
+#define CHANNEL_HALF 0x04000
+#define CHANNEL_QUARTER 0x08000
+#define CHANNEL_HT20 0x10000
+#define CHANNEL_HT40PLUS 0x20000
+#define CHANNEL_HT40MINUS 0x40000
+
+#define CHANNEL_INTERFERENCE 0x01
+#define CHANNEL_DFS 0x02
+#define CHANNEL_4MS_LIMIT 0x04
+#define CHANNEL_DFS_CLEAR 0x08
+#define CHANNEL_DISALLOW_ADHOC 0x10
+#define CHANNEL_PER_11D_ADHOC 0x20
+
+#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
+#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
+#define CHANNEL_PUREG (CHANNEL_2GHZ|CHANNEL_OFDM)
+#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
+#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
+#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
+#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
+#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
+#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
+#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
+#define CHANNEL_ALL \
+ (CHANNEL_OFDM| \
+ CHANNEL_CCK| \
+ CHANNEL_2GHZ | \
+ CHANNEL_5GHZ | \
+ CHANNEL_HT20 | \
+ CHANNEL_HT40PLUS | \
+ CHANNEL_HT40MINUS)
+
+struct hal_channel_internal {
+ u_int16_t channel;
+ u_int32_t channelFlags;
+ u_int8_t privFlags;
+ int8_t maxRegTxPower;
+ int8_t maxTxPower;
+ int8_t minTxPower;
+ enum hal_bool bssSendHere;
+ u_int8_t gainI;
+ enum hal_bool iqCalValid;
+ int32_t CalValid;
+ enum hal_bool oneTimeCalsDone;
+ int8_t iCoff;
+ int8_t qCoff;
+ int16_t rawNoiseFloor;
+ int16_t noiseFloorAdjust;
+ int8_t antennaMax;
+ u_int32_t regDmnFlags;
+ u_int32_t conformanceTestLimit;
+ u_int64_t ah_tsf_last;
+ u_int64_t ah_channel_time;
+ u_int16_t mainSpur;
+ u_int64_t dfsTsf;
+#ifdef ATH_NF_PER_CHAN
+ struct hal_nfcal_hist nfCalHist[NUM_NF_READINGS];
+#endif
+};
+
+#define HAL_SPUR_VAL_MASK 0x3FFF
+#define HAL_SPUR_CHAN_WIDTH 87
+#define HAL_BIN_WIDTH_BASE_100HZ 3125
+#define HAL_MAX_BINS_ALLOWED 28
+
+#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
+ (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
+ (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
+ (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
+#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
+#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
+ (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
+ (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
+ (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
+#define IS_CHAN_PUREG(_c) \
+ (((_c)->channelFlags & CHANNEL_PUREG) == CHANNEL_PUREG)
+#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
+#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
+#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
+#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
+#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
+#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
+#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define IS_CHAN_HT20(_c) (((_c)->channelFlags & CHANNEL_HT20) != 0)
+#define IS_CHAN_HT40(_c) ((((_c)->channelFlags & CHANNEL_HT40PLUS) != 0) \
+ || (((_c)->channelFlags & CHANNEL_HT40MINUS) != 0))
+#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
+#define IS_CHAN_A_5MHZ_SPACED(_c) \
+ ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
+ (((_c)->channel % 20) != 0) && \
+ (((_c)->channel % 10) != 0))
+
+struct hal_keyval {
+ u_int8_t kv_type;
+ u_int8_t kv_pad;
+ u_int16_t kv_len;
+ u_int8_t kv_val[16];
+ u_int8_t kv_mic[8];
+ u_int8_t kv_txmic[8];
+};
+
+enum hal_key_type {
+ HAL_KEY_TYPE_CLEAR,
+ HAL_KEY_TYPE_WEP,
+ HAL_KEY_TYPE_AES,
+ HAL_KEY_TYPE_TKIP,
+};
+
+enum hal_cipher {
+ HAL_CIPHER_WEP = 0,
+ HAL_CIPHER_AES_OCB = 1,
+ HAL_CIPHER_AES_CCM = 2,
+ HAL_CIPHER_CKIP = 3,
+ HAL_CIPHER_TKIP = 4,
+ HAL_CIPHER_CLR = 5,
+ HAL_CIPHER_MIC = 127
+};
+
+#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
+#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
+#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
+#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
+#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
+#define AR_EEPROM_EEPCAP_MAXQCU_S 4
+#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
+#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
+#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
+
+#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
+
+#define SD_NO_CTL 0xE0
+#define NO_CTL 0xff
+#define CTL_MODE_M 7
+#define CTL_11A 0
+#define CTL_11B 1
+#define CTL_11G 2
+#define CTL_2GHT20 5
+#define CTL_5GHT20 6
+#define CTL_2GHT40 7
+#define CTL_5GHT40 8
+
+#define AR_EEPROM_MAC(i) (0x1d+(i))
+#define EEP_SCALE 100
+#define EEP_DELTA 10
+
+#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
+#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
+#define AR_EEPROM_RFSILENT_POLARITY 0x0002
+#define AR_EEPROM_RFSILENT_POLARITY_S 1
+
+enum {
+ CTRY_DEBUG = 0x1ff,
+ CTRY_DEFAULT = 0
+};
+
+enum reg_ext_bitmap {
+ REG_EXT_JAPAN_MIDBAND = 1,
+ REG_EXT_FCC_DFS_HT40 = 2,
+ REG_EXT_JAPAN_NONDFS_HT40 = 3,
+ REG_EXT_JAPAN_DFS_HT40 = 4
+};
+
+struct hal_country_entry {
+ u_int16_t countryCode;
+ u_int16_t regDmnEnum;
+ u_int16_t regDmn5G;
+ u_int16_t regDmn2G;
+ u_int8_t isMultidomain;
+ u_int8_t iso[3];
+};
+
+#define HAL_DBG_RESET 0x00000001
+#define HAL_DBG_PHY_IO 0x00000002
+#define HAL_DBG_REG_IO 0x00000004
+#define HAL_DBG_RF_PARAM 0x00000008
+#define HAL_DBG_QUEUE 0x00000010
+#define HAL_DBG_EEPROM_DUMP 0x00000020
+#define HAL_DBG_EEPROM 0x00000040
+#define HAL_DBG_NF_CAL 0x00000080
+#define HAL_DBG_CALIBRATE 0x00000100
+#define HAL_DBG_CHANNEL 0x00000200
+#define HAL_DBG_INTERRUPT 0x00000400
+#define HAL_DBG_DFS 0x00000800
+#define HAL_DBG_DMA 0x00001000
+#define HAL_DBG_REGULATORY 0x00002000
+#define HAL_DBG_TX 0x00004000
+#define HAL_DBG_TXDESC 0x00008000
+#define HAL_DBG_RX 0x00010000
+#define HAL_DBG_RXDESC 0x00020000
+#define HAL_DBG_ANI 0x00040000
+#define HAL_DBG_BEACON 0x00080000
+#define HAL_DBG_KEYCACHE 0x00100000
+#define HAL_DBG_POWER_MGMT 0x00200000
+#define HAL_DBG_MALLOC 0x00400000
+#define HAL_DBG_POWER_OVERRIDE 0x01000000
+#define HAL_DBG_SPUR_MITIGATE 0x02000000
+#define HAL_DBG_UNMASKABLE 0xFFFFFFFF
+
+#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
+#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
+
+#define SM(_v, _f) (((_v) << _f##_S) & _f)
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+#define OS_REG_RMW(_a, _r, _set, _clr) \
+ REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
+#define OS_REG_RMW_FIELD(_a, _r, _f, _v) \
+ REG_WRITE(_a, _r, \
+ (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define OS_REG_SET_BIT(_a, _r, _f) \
+ REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+#define OS_REG_CLR_BIT(_a, _r, _f) \
+ REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+#define OS_REG_ath9k_regd_is_bit_set(_a, _r, _f) \
+ ((REG_READ(_a, _r) & _f) != 0)
+
+#define HAL_COMP_BUF_MAX_SIZE 9216
+#define HAL_COMP_BUF_ALIGN_SIZE 512
+
+#define HAL_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
+
+#define INIT_AIFS 2
+#define INIT_CWMIN 15
+#define INIT_CWMIN_11B 31
+#define INIT_CWMAX 1023
+#define INIT_SH_RETRY 10
+#define INIT_LG_RETRY 10
+#define INIT_SSH_RETRY 32
+#define INIT_SLG_RETRY 32
+
+#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
+
+#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
+#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
+
+#define IEEE80211_WEP_IVLEN 3
+#define IEEE80211_WEP_KIDLEN 1
+#define IEEE80211_WEP_CRCLEN 4
+#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
+ (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN))
+#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
+ (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN))
+
+#define MAX_REG_ADD_COUNT 129
+#define MAX_RATE_POWER 63
+
+#define LE_READ_2(p) \
+ ((u_int16_t) \
+ ((((const u_int8_t *)(p))[0]) | \
+ (((const u_int8_t *)(p))[1] << 8)))
+
+#define LE_READ_4(p) \
+ ((u_int32_t) \
+ ((((const u_int8_t *)(p))[0]) | \
+ (((const u_int8_t *)(p))[1] << 8) | \
+ (((const u_int8_t *)(p))[2] << 16) | \
+ (((const u_int8_t *)(p))[3] << 24)))
+
+enum hal_status {
+ HAL_OK = 0,
+ HAL_ENXIO,
+ HAL_ENOMEM,
+ HAL_EIO,
+ HAL_EEMAGIC,
+ HAL_EEVERSION,
+ HAL_EELOCKED,
+ HAL_EEBADSUM,
+ HAL_EEREAD,
+ HAL_EEBADMAC,
+ HAL_EESIZE,
+ HAL_EEWRITE,
+ HAL_EINVAL,
+ HAL_ENOTSUPP,
+ HAL_ESELFTEST,
+ HAL_EINPROGRESS
+};
+
+enum hal_power_mode {
+ HAL_PM_AWAKE = 0,
+ HAL_PM_FULL_SLEEP,
+ HAL_PM_NETWORK_SLEEP,
+ HAL_PM_UNDEFINED
+};
+
+enum hal_rfgain {
+ HAL_RFGAIN_INACTIVE = 0,
+ HAL_RFGAIN_READ_REQUESTED,
+ HAL_RFGAIN_NEED_CHANGE
+};
+
+#define HAL_ANTENNA_MIN_MODE 0
+#define HAL_ANTENNA_FIXED_A 1
+#define HAL_ANTENNA_FIXED_B 2
+#define HAL_ANTENNA_MAX_MODE 3
+
+struct hal_mib_stats {
+ u_int32_t ackrcv_bad;
+ u_int32_t rts_bad;
+ u_int32_t rts_good;
+ u_int32_t fcs_bad;
+ u_int32_t beacons;
+};
+
+enum hal_ant_setting {
+ HAL_ANT_VARIABLE = 0,
+ HAL_ANT_FIXED_A,
+ HAL_ANT_FIXED_B
+};
+
+enum hal_opmode {
+ HAL_M_STA = 1,
+ HAL_M_IBSS = 0,
+ HAL_M_HOSTAP = 6,
+ HAL_M_MONITOR = 8
+};
+
+enum {
+ HAL_SLOT_TIME_6 = 6,
+ HAL_SLOT_TIME_9 = 9,
+ HAL_SLOT_TIME_20 = 20,
+};
+
+
+enum hal_ht_macmode {
+ HAL_HT_MACMODE_20 = 0,
+ HAL_HT_MACMODE_2040 = 1,
+};
+
+enum hal_ht_extprotspacing {
+ HAL_HT_EXTPROTSPACING_20 = 0,
+ HAL_HT_EXTPROTSPACING_25 = 1,
+};
+
+struct hal_ht_cwm {
+ enum hal_ht_macmode ht_macmode;
+ enum hal_ht_extprotspacing ht_extprotspacing;
+};
+
+enum hal_freq_band {
+ HAL_FREQ_BAND_5GHZ = 0,
+ HAL_FREQ_BAND_2GHZ = 1,
+};
+
+enum {
+ HAL_TRUE_CHIP = 1
+};
+
+enum hal_bus_type {
+ HAL_BUS_TYPE_PCI,
+ HAL_BUS_TYPE_AHB
+};
+
+enum hal_ani_cmd {
+ HAL_ANI_PRESENT = 0x1,
+ HAL_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
+ HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
+ HAL_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
+ HAL_ANI_FIRSTEP_LEVEL = 0x10,
+ HAL_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
+ HAL_ANI_MODE = 0x40,
+ HAL_ANI_PHYERR_RESET = 0x80,
+ HAL_ANI_ALL = 0xff
+};
+
+enum phytype {
+ PHY_DS,
+ PHY_FH,
+ PHY_OFDM,
+ PHY_HT,
+ PHY_MAX
+};
+#define PHY_CCK PHY_DS
+
+enum start_adhoc_option {
+ START_ADHOC_NO_11A,
+ START_ADHOC_PER_11D,
+ START_ADHOC_IN_11A,
+ START_ADHOC_IN_11B,
+};
+
+enum hal_tp_scale {
+ HAL_TP_SCALE_MAX = 0,
+ HAL_TP_SCALE_50,
+ HAL_TP_SCALE_25,
+ HAL_TP_SCALE_12,
+ HAL_TP_SCALE_MIN
+};
+
+enum ser_reg_mode {
+ SER_REG_MODE_OFF = 0,
+ SER_REG_MODE_ON = 1,
+ SER_REG_MODE_AUTO = 2,
+};
+
+#define AR_PHY_CCA_MAX_GOOD_VALUE -85
+#define AR_PHY_CCA_MAX_HIGH_VALUE -62
+#define AR_PHY_CCA_MIN_BAD_VALUE -121
+#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
+#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
+#define HAL_NF_CAL_HIST_MAX 5
+#define NUM_NF_READINGS 6
+
+struct hal_nfcal_hist {
+ int16_t nfCalBuffer[HAL_NF_CAL_HIST_MAX];
+ u_int8_t currIndex;
+ int16_t privNF;
+ u_int8_t invalidNFcount;
+};
+
+struct hal_beacon_state {
+ u_int32_t bs_nexttbtt;
+ u_int32_t bs_nextdtim;
+ u_int32_t bs_intval;
+#define HAL_BEACON_PERIOD 0x0000ffff
+#define HAL_BEACON_ENA 0x00800000
+#define HAL_BEACON_RESET_TSF 0x01000000
+ u_int32_t bs_dtimperiod;
+ u_int16_t bs_cfpperiod;
+ u_int16_t bs_cfpmaxduration;
+ u_int32_t bs_cfpnext;
+ u_int16_t bs_timoffset;
+ u_int16_t bs_bmissthreshold;
+ u_int32_t bs_sleepduration;
+};
+
+struct hal_node_stats {
+ u_int32_t ns_avgbrssi;
+ u_int32_t ns_avgrssi;
+ u_int32_t ns_avgtxrssi;
+ u_int32_t ns_avgtxrate;
+};
+
+#define HAL_RSSI_EP_MULTIPLIER (1<<7)
+#define HAL_RATE_EP_MULTIPLIER (1<<7)
+
+enum hal_gpio_output_mux_type {
+ HAL_GPIO_OUTPUT_MUX_AS_OUTPUT,
+ HAL_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
+ HAL_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
+ HAL_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
+ HAL_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
+ HAL_GPIO_OUTPUT_MUX_NUM_ENTRIES
+};
+
+enum {
+ HAL_RESET_POWER_ON,
+ HAL_RESET_WARM,
+ HAL_RESET_COLD,
+};
+
+#define AH_USE_EEPROM 0x1
+#define AH_IS_HB63 0x2
+
+struct ath_hal {
+ u_int32_t ah_magic;
+ u_int16_t ah_devid;
+ u_int16_t ah_subvendorid;
+ void *ah_sc;
+ void __iomem *ah_sh;
+ u_int16_t ah_countryCode;
+ u_int32_t ah_macVersion;
+ u_int16_t ah_macRev;
+ u_int16_t ah_phyRev;
+ u_int16_t ah_analog5GhzRev;
+ u_int16_t ah_analog2GhzRev;
+ u_int8_t ah_decompMask[HAL_DECOMP_MASK_SIZE];
+ u_int32_t ah_flags;
+ enum hal_opmode ah_opmode;
+ struct hal_ops_config ah_config;
+ struct hal_capabilities ah_caps;
+ int16_t ah_powerLimit;
+ u_int16_t ah_maxPowerLevel;
+ u_int ah_tpScale;
+ u_int16_t ah_currentRD;
+ u_int16_t ah_currentRDExt;
+ u_int16_t ah_currentRDInUse;
+ u_int16_t ah_currentRD5G;
+ u_int16_t ah_currentRD2G;
+ char ah_iso[4];
+ enum start_adhoc_option ah_adHocMode;
+ enum hal_bool ah_commonMode;
+ struct hal_channel_internal ah_channels[150];
+ u_int ah_nchan;
+ struct hal_channel_internal *ah_curchan;
+ u_int16_t ah_rfsilent;
+ enum hal_bool ah_rfkillEnabled;
+ enum hal_bool ah_isPciExpress;
+ u_int16_t ah_txTrigLevel;
+#ifndef ATH_NF_PER_CHAN
+ struct hal_nfcal_hist nfCalHist[NUM_NF_READINGS];
+#endif
+};
+
+#define HDPRINTF(_ah, _m, _fmt, ...) do { \
+ if (((_ah) == NULL && _m == HAL_DBG_UNMASKABLE) || \
+ (((struct ath_hal *)(_ah))->ah_config.ath_hal_debug & _m)) \
+ printk(KERN_DEBUG _fmt , ##__VA_ARGS__); \
+ } while (0)
+
+enum wireless_mode {
+ WIRELESS_MODE_11a = 0,
+ WIRELESS_MODE_11b = 2,
+ WIRELESS_MODE_11g = 3,
+ WIRELESS_MODE_11NA_HT20 = 6,
+ WIRELESS_MODE_11NG_HT20 = 7,
+ WIRELESS_MODE_11NA_HT40PLUS = 8,
+ WIRELESS_MODE_11NA_HT40MINUS = 9,
+ WIRELESS_MODE_11NG_HT40PLUS = 10,
+ WIRELESS_MODE_11NG_HT40MINUS = 11,
+ WIRELESS_MODE_MAX
+};
+
+enum {
+ ATH9K_MODE_SEL_11A = 0x00001,
+ ATH9K_MODE_SEL_11B = 0x00002,
+ ATH9K_MODE_SEL_11G = 0x00004,
+ ATH9K_MODE_SEL_11NG_HT20 = 0x00008,
+ ATH9K_MODE_SEL_11NA_HT20 = 0x00010,
+ ATH9K_MODE_SEL_11NG_HT40PLUS = 0x00020,
+ ATH9K_MODE_SEL_11NG_HT40MINUS = 0x00040,
+ ATH9K_MODE_SEL_11NA_HT40PLUS = 0x00080,
+ ATH9K_MODE_SEL_11NA_HT40MINUS = 0x00100,
+ ATH9K_MODE_SEL_2GHZ = (ATH9K_MODE_SEL_11B |
+ ATH9K_MODE_SEL_11G |
+ ATH9K_MODE_SEL_11NG_HT20),
+ ATH9K_MODE_SEL_5GHZ = (ATH9K_MODE_SEL_11A |
+ ATH9K_MODE_SEL_11NA_HT20),
+ ATH9K_MODE_SEL_ALL = 0xffffffff
+};
+
+struct chan_centers {
+ u_int16_t synth_center;
+ u_int16_t ctl_center;
+ u_int16_t ext_center;
+};
+
+enum hal_status ath_hal_getcapability(struct ath_hal *ah,
+ enum hal_capability_type type,
+ u_int32_t capability,
+ u_int32_t *result);
+const struct hal_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
+ u_int mode);
+void ath9k_hw_detach(struct ath_hal *ah);
+struct ath_hal *ath9k_hw_attach(u_int16_t devid, void *sc, void __iomem *mem,
+ enum hal_status *error);
+enum hal_bool ath9k_regd_init_channels(struct ath_hal *ah,
+ struct hal_channel *chans,
+ u_int maxchans, u_int *nchans,
+ u_int8_t *regclassids,
+ u_int maxregids, u_int *nregids,
+ u_int16_t cc, u_int32_t modeSelect,
+ enum hal_bool enableOutdoor,
+ enum hal_bool
+ enableExtendedChannels);
+u_int ath9k_hw_mhz2ieee(struct ath_hal *ah, u_int freq, u_int flags);
+enum hal_int ath9k_hw_set_interrupts(struct ath_hal *ah,
+ enum hal_int ints);
+enum hal_bool ath9k_hw_reset(struct ath_hal *ah, enum hal_opmode opmode,
+ struct hal_channel *chan,
+ enum hal_ht_macmode macmode,
+ u_int8_t txchainmask, u_int8_t rxchainmask,
+ enum hal_ht_extprotspacing extprotspacing,
+ enum hal_bool bChannelChange,
+ enum hal_status *status);
+enum hal_bool ath9k_hw_phy_disable(struct ath_hal *ah);
+void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct hal_channel *chan,
+ enum hal_bool *isCalDone);
+void ath9k_hw_ani_monitor(struct ath_hal *ah,
+ const struct hal_node_stats *stats,
+ struct hal_channel *chan);
+enum hal_bool ath9k_hw_calibrate(struct ath_hal *ah,
+ struct hal_channel *chan,
+ u_int8_t rxchainmask,
+ enum hal_bool longcal,
+ enum hal_bool *isCalDone);
+int16_t ath9k_hw_getchan_noise(struct ath_hal *ah,
+ struct hal_channel *chan);
+void ath9k_hw_write_associd(struct ath_hal *ah, const u_int8_t *bssid,
+ u_int16_t assocId);
+void ath9k_hw_setrxfilter(struct ath_hal *ah, u_int32_t bits);
+void ath9k_hw_write_associd(struct ath_hal *ah, const u_int8_t *bssid,
+ u_int16_t assocId);
+enum hal_bool ath9k_hw_stoptxdma(struct ath_hal *ah, u_int q);
+void ath9k_hw_reset_tsf(struct ath_hal *ah);
+enum hal_bool ath9k_hw_keyisvalid(struct ath_hal *ah, u_int16_t entry);
+enum hal_bool ath9k_hw_keysetmac(struct ath_hal *ah, u_int16_t entry,
+ const u_int8_t *mac);
+enum hal_bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
+ u_int16_t entry,
+ const struct hal_keyval *k,
+ const u_int8_t *mac,
+ int xorKey);
+enum hal_bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
+ u_int32_t setting);
+void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
+enum hal_bool ath9k_hw_intrpend(struct ath_hal *ah);
+enum hal_bool ath9k_hw_getisr(struct ath_hal *ah, enum hal_int *masked);
+enum hal_bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
+ enum hal_bool bIncTrigLevel);
+void ath9k_hw_procmibevent(struct ath_hal *ah,
+ const struct hal_node_stats *stats);
+enum hal_bool ath9k_hw_setrxabort(struct ath_hal *ah, enum hal_bool set);
+void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum hal_ht_macmode mode);
+enum hal_bool ath9k_hw_setupxtxdesc(struct ath_hal *ah,
+ struct ath_desc *ds,
+ u_int txRate1, u_int txTries1,
+ u_int txRate2, u_int txTries2,
+ u_int txRate3, u_int txTries3);
+enum hal_bool ath9k_hw_phycounters(struct ath_hal *ah);
+enum hal_bool ath9k_hw_keyreset(struct ath_hal *ah, u_int16_t entry);
+enum hal_bool ath9k_hw_getcapability(struct ath_hal *ah,
+ enum hal_capability_type type,
+ u_int32_t capability,
+ u_int32_t *result);
+enum hal_bool ath9k_hw_setcapability(struct ath_hal *ah,
+ enum hal_capability_type type,
+ u_int32_t capability,
+ u_int32_t setting,
+ enum hal_status *status);
+u_int ath9k_hw_getdefantenna(struct ath_hal *ah);
+void ath9k_hw_getmac(struct ath_hal *ah, u_int8_t *mac);
+void ath9k_hw_getbssidmask(struct ath_hal *ah, u_int8_t *mask);
+enum hal_bool ath9k_hw_setbssidmask(struct ath_hal *ah,
+ const u_int8_t *mask);
+enum hal_bool ath9k_hw_setpower(struct ath_hal *ah,
+ enum hal_power_mode mode);
+enum hal_int ath9k_hw_intrget(struct ath_hal *ah);
+u_int64_t ath9k_hw_gettsf64(struct ath_hal *ah);
+u_int ath9k_hw_getdefantenna(struct ath_hal *ah);
+enum hal_bool ath9k_hw_setslottime(struct ath_hal *ah, u_int us);
+enum hal_bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
+ enum hal_ant_setting settings,
+ struct hal_channel *chan,
+ u_int8_t *tx_chainmask,
+ u_int8_t *rx_chainmask,
+ u_int8_t *antenna_cfgd);
+void ath9k_hw_setantenna(struct ath_hal *ah, u_int antenna);
+enum hal_status ath9k_hw_select_antconfig(struct ath_hal *ah,
+ u_int32_t cfg);
+enum hal_bool ath9k_hw_puttxbuf(struct ath_hal *ah, u_int q,
+ u_int32_t txdp);
+enum hal_bool ath9k_hw_txstart(struct ath_hal *ah, u_int q);
+u_int16_t ath9k_hw_computetxtime(struct ath_hal *ah,
+ const struct hal_rate_table *rates,
+ u_int32_t frameLen, u_int16_t rateix,
+ enum hal_bool shortPreamble);
+void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
+ struct ath_desc *lastds,
+ u_int durUpdateEn, u_int rtsctsRate,
+ u_int rtsctsDuration,
+ struct hal_11n_rate_series series[],
+ u_int nseries, u_int flags);
+void ath9k_hw_set11n_burstduration(struct ath_hal *ah,
+ struct ath_desc *ds,
+ u_int burstDuration);
+void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
+u_int32_t ath9k_hw_reverse_bits(u_int32_t val, u_int32_t n);
+enum hal_bool ath9k_hw_resettxqueue(struct ath_hal *ah, u_int q);
+u_int ath9k_regd_get_ctl(struct ath_hal *ah, struct hal_channel *chan);
+u_int ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
+ struct hal_channel *chan);
+u_int ath9k_hw_mhz2ieee(struct ath_hal *ah, u_int freq, u_int flags);
+enum hal_bool ath9k_hw_gettxqueueprops(struct ath_hal *ah, int q,
+ struct hal_txq_info *qInfo);
+enum hal_bool ath9k_hw_settxqueueprops(struct ath_hal *ah, int q,
+ const struct hal_txq_info *qInfo);
+struct hal_channel_internal *ath9k_regd_check_channel(struct ath_hal *ah,
+ const struct hal_channel *c);
+void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u_int pktLen, enum hal_pkt_type type,
+ u_int txPower, u_int keyIx,
+ enum hal_key_type keyType, u_int flags);
+enum hal_bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u_int segLen, enum hal_bool firstSeg,
+ enum hal_bool lastSeg,
+ const struct ath_desc *ds0);
+u_int32_t ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
+ u_int32_t *rxc_pcnt,
+ u_int32_t *rxf_pcnt,
+ u_int32_t *txf_pcnt);
+void ath9k_hw_dmaRegDump(struct ath_hal *ah);
+void ath9k_hw_beaconinit(struct ath_hal *ah,
+ u_int32_t next_beacon, u_int32_t beacon_period);
+void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
+ const struct hal_beacon_state *bs);
+enum hal_bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u_int32_t size, u_int flags);
+void ath9k_hw_putrxbuf(struct ath_hal *ah, u_int32_t rxdp);
+void ath9k_hw_rxena(struct ath_hal *ah);
+void ath9k_hw_setopmode(struct ath_hal *ah);
+enum hal_bool ath9k_hw_setmac(struct ath_hal *ah, const u_int8_t *mac);
+void ath9k_hw_setmcastfilter(struct ath_hal *ah, u_int32_t filter0,
+ u_int32_t filter1);
+u_int32_t ath9k_hw_getrxfilter(struct ath_hal *ah);
+void ath9k_hw_startpcureceive(struct ath_hal *ah);
+void ath9k_hw_stoppcurecv(struct ath_hal *ah);
+enum hal_bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
+enum hal_status ath9k_hw_rxprocdesc(struct ath_hal *ah,
+ struct ath_desc *ds, u_int32_t pa,
+ struct ath_desc *nds, u_int64_t tsf);
+u_int32_t ath9k_hw_gettxbuf(struct ath_hal *ah, u_int q);
+enum hal_status ath9k_hw_txprocdesc(struct ath_hal *ah,
+ struct ath_desc *ds);
+void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
+ u_int numDelims);
+void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
+ u_int aggrLen);
+void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
+enum hal_bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u_int q);
+void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u_int32_t *txqs);
+void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
+void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
+ struct ath_desc *ds, u_int vmf);
+enum hal_bool ath9k_hw_SetTxPowerLimit(struct ath_hal *ah, u_int32_t limit,
+ u_int16_t tpcInDb);
+enum hal_bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
+int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum hal_tx_queue type,
+ const struct hal_txq_info *qInfo);
+u_int32_t ath9k_hw_numtxpending(struct ath_hal *ah, u_int q);
+const char *ath9k_hw_probe(u_int16_t vendorid, u_int16_t devid);
+enum hal_bool ath9k_hw_disable(struct ath_hal *ah);
+void ath9k_hw_rfdetach(struct ath_hal *ah);
+void ath9k_hw_get_channel_centers(struct ath_hal *ah,
+ struct hal_channel_internal *chan,
+ struct chan_centers *centers);
+enum hal_bool ath9k_get_channel_edges(struct ath_hal *ah,
+ u_int16_t flags, u_int16_t *low,
+ u_int16_t *high);
+enum hal_bool ath9k_hw_get_chip_power_limits(struct ath_hal *ah,
+ struct hal_channel *chans,
+ u_int32_t nchans);
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+ /* Implementation of beacon processing. */
+
+#include "core.h"
+
+/*
+ * Configure parameters for the beacon queue
+ *
+ * This function will modify certain transmit queue properties depending on
+ * the operating mode of the station (AP or AdHoc). Parameters are AIFS
+ * settings and channel width min/max
+*/
+
+/* Returns 1 on success, 0 if the h/w queue parameters could not be set
+ * (the failure is also logged at ATH_DEBUG_FATAL). */
+static int ath_beaconq_config(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct hal_txq_info qi;
+
+ /* Read the beacon queue's current h/w parameters, then adjust them. */
+ ath9k_hw_gettxqueueprops(ah, sc->sc_bhalq, &qi);
+ if (sc->sc_opmode == HAL_M_HOSTAP) {
+ /* Always burst out beacon and CAB traffic. */
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ } else {
+ /* Adhoc mode; important thing is to use 2x cwmin. */
+ qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
+ qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
+ qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
+ }
+
+ if (!ath9k_hw_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to update h/w beacon queue parameters\n",
+ __func__);
+ return 0;
+ } else {
+ ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
+ return 1;
+ }
+}
+
+/*
+ * Setup the beacon frame for transmit.
+ *
+ * Associates the beacon frame buffer with a transmit descriptor. Will set
+ * up all required antenna switch parameters, rate codes, and channel flags.
+ * Beacons are always sent out at the lowest rate, and are not retried.
+*/
+
+static void ath_beacon_setup(struct ath_softc *sc,
+ struct ath_vap *avp, struct ath_buf *bf)
+{
+ struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_desc *ds;
+ int flags, antenna;
+ const struct hal_rate_table *rt;
+ u_int8_t rix, rate;
+ int ctsrate = 0;
+ int ctsduration = 0;
+ struct hal_11n_rate_series series[4];
+
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n",
+ __func__, skb, skb->len);
+
+ /* setup descriptors */
+ ds = bf->bf_desc;
+
+ /* Beacons are never ACKed. */
+ flags = HAL_TXDESC_NOACK;
+
+ if (sc->sc_opmode == HAL_M_IBSS && sc->sc_hasveol) {
+ /* IBSS with VEOL: hardware re-sends the beacon on its own
+ * via a self-linked descriptor. */
+ ds->ds_link = bf->bf_daddr; /* self-linked */
+ flags |= HAL_TXDESC_VEOL;
+ /* Let hardware handle antenna switching. */
+ antenna = 0;
+ } else {
+ ds->ds_link = 0;
+ /*
+ * Switch antenna every beacon.
+ * Should only switch every beacon period, not for every
+ * SWBA's
+ * XXX assumes two antennas
+ */
+ if (sc->sc_stagbeacons)
+ antenna = ((sc->ast_be_xmit /
+ sc->sc_nbcnvaps) & 1 ? 2 : 1);
+ else
+ antenna = (sc->ast_be_xmit & 1 ? 2 : 1);
+ }
+
+ ds->ds_data = bf->bf_buf_addr;
+
+ /*
+ * Calculate rate code.
+ * XXX everything at min xmit rate
+ */
+ rix = sc->sc_minrateix;
+ rt = sc->sc_currates;
+ rate = rt->info[rix].rateCode;
+ if (sc->sc_flags & ATH_PREAMBLE_SHORT)
+ rate |= rt->info[rix].shortPreamble;
+
+ ath9k_hw_set11n_txdesc(ah, ds
+ , skb->len + FCS_LEN /* frame length */
+ , HAL_PKT_TYPE_BEACON /* Atheros packet type */
+ , avp->av_btxctl.txpower /* txpower XXX */
+ , HAL_TXKEYIX_INVALID /* no encryption */
+ , HAL_KEY_TYPE_CLEAR /* no encryption */
+ , flags /* no ack, veol for beacons */
+ );
+
+ /* NB: beacon's BufLen must be a multiple of 4 bytes */
+ ath9k_hw_filltxdesc(ah, ds
+ , roundup(skb->len, 4) /* buffer length */
+ , AH_TRUE /* first segment */
+ , AH_TRUE /* last segment */
+ , ds /* first descriptor */
+ );
+
+ /* Single rate series with one try: beacons are not retried. */
+ memzero(series, sizeof(struct hal_11n_rate_series) * 4);
+ series[0].Tries = 1;
+ series[0].Rate = rate;
+ series[0].ChSel = sc->sc_tx_chainmask;
+ series[0].RateFlags = (ctsrate) ? HAL_RATESERIES_RTS_CTS : 0;
+ ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
+ ctsrate, ctsduration, series, 4, 0);
+
+ /* NB: The desc swap function becomes void,
+ * if descriptor swapping is not enabled
+ */
+ ath_desc_swap(ds);
+}
+
+/* Move everything from the vap's mcast queue to the hardware cab queue.
+ * Caller must hold the mcastq lock and the cabq lock
+ * XXX MORE_DATA bit?
+ */
+static void empty_mcastq_into_cabq(struct ath_hal *ah,
+ struct ath_txq *mcastq, struct ath_txq *cabq)
+{
+ struct ath_buf *bfmcast;
+
+ /* Caller guarantees the mcast queue is non-empty. */
+ BUG_ON(list_empty(&mcastq->axq_q));
+
+ bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
+
+ /* link the descriptors */
+ if (!cabq->axq_link)
+ ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
+ else
+ *cabq->axq_link = cpu_to_le32(bfmcast->bf_daddr);
+
+ /* append the private vap mcast list to the cabq */
+
+ cabq->axq_depth += mcastq->axq_depth;
+ cabq->axq_totalqueued += mcastq->axq_totalqueued;
+ cabq->axq_linkbuf = mcastq->axq_linkbuf;
+ cabq->axq_link = mcastq->axq_link;
+ /* Move the buffers over and leave the mcast queue empty/reset. */
+ list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
+ mcastq->axq_depth = 0;
+ mcastq->axq_totalqueued = 0;
+ mcastq->axq_linkbuf = NULL;
+ mcastq->axq_link = NULL;
+}
+
+/* This is only run at DTIM. We move everything from the vap's mcast queue
+ * to the hardware cab queue. Caller must hold the mcastq lock. */
+static void trigger_mcastq(struct ath_hal *ah,
+ struct ath_txq *mcastq, struct ath_txq *cabq)
+{
+ spin_lock_bh(&cabq->axq_lock);
+
+ /* Hand any pending mcast frames over to the hardware CAB queue. */
+ if (!list_empty(&mcastq->axq_q))
+ empty_mcastq_into_cabq(ah, mcastq, cabq);
+
+ /* cabq is gated by beacon so it is safe to start here */
+ if (!list_empty(&cabq->axq_q))
+ ath9k_hw_txstart(ah, cabq->axq_qnum);
+
+ spin_unlock_bh(&cabq->axq_lock);
+}
+
+/*
+ * Generate beacon frame and queue cab data for a vap.
+ *
+ * Updates the contents of the beacon frame. It is assumed that the buffer for
+ * the beacon frame has been allocated in the ATH object, and simply needs to
+ * be filled for this cycle. Also, any CAB (Content After Beacon) traffic will
+ * be added to the beacon frame at this point.
+*/
+/* Returns the beacon buffer ready for DMA, or NULL if the vap has no
+ * beacon buffer allocated. */
+static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+ struct ath_vap *avp;
+ struct sk_buff *skb;
+ int cabq_depth;
+ int mcastq_depth;
+ int is_beacon_dtim = 0;
+ struct ath_txq *cabq;
+ struct ath_txq *mcastq;
+
+ avp = sc->sc_vaps[if_id];
+ /* BUGFIX: validate the vap BEFORE dereferencing it. The original
+ * code read &avp->av_mcastq ahead of this ASSERT, which made the
+ * assertion useless on a bad if_id. */
+ ASSERT(avp);
+
+ mcastq = &avp->av_mcastq;
+ cabq = sc->sc_cabq;
+
+ if (avp->av_bcbuf == NULL) {
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
+ __func__, avp, avp->av_bcbuf);
+ return NULL;
+ }
+ bf = avp->av_bcbuf;
+ skb = (struct sk_buff *) bf->bf_mpdu;
+
+ /*
+ * Update dynamic beacon contents. If this returns
+ * non-zero then we need to remap the memory because
+ * the beacon frame changed size (probably because
+ * of the TIM bitmap).
+ */
+ /* XXX: spin_lock_bh should not be used here, but sparse bitches
+ * otherwise. We should fix sparse :) */
+ spin_lock_bh(&mcastq->axq_lock);
+ mcastq_depth = avp->av_mcastq.axq_depth;
+
+ if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
+ 1) {
+ /* Frame size changed: remap the skb for DMA. */
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ } else {
+ /* Same size: just sync the existing mapping for the device. */
+ pci_dma_sync_single_for_cpu(sc->pdev,
+ bf->bf_buf_addr,
+ skb_tailroom(skb),
+ PCI_DMA_TODEVICE);
+ }
+
+ /*
+ * if the CABQ traffic from previous DTIM is pending and the current
+ * beacon is also a DTIM.
+ * 1) if there is only one vap let the cab traffic continue.
+ * 2) if there are more than one vap and we are using staggered
+ * beacons, then drain the cabq by dropping all the frames in
+ * the cabq so that the current vaps cab traffic can be scheduled.
+ */
+ spin_lock_bh(&cabq->axq_lock);
+ cabq_depth = cabq->axq_depth;
+ spin_unlock_bh(&cabq->axq_lock);
+
+ /* Bit 0 of the TIM element's bitmap-control byte = DTIM now. */
+ is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
+
+ if (mcastq_depth && is_beacon_dtim && cabq_depth) {
+ /*
+ * Unlock the cabq lock as ath_tx_draintxq acquires
+ * the lock again which is a common function and that
+ * acquires txq lock inside.
+ */
+ if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
+ ath_tx_draintxq(sc, cabq, AH_FALSE);
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: flush previous cabq traffic\n", __func__);
+ }
+ }
+
+ /* Construct tx descriptor. */
+ ath_beacon_setup(sc, avp, bf);
+
+ /*
+ * Enable the CAB queue before the beacon queue to
+ * insure cab frames are triggered by this beacon.
+ */
+ if (is_beacon_dtim)
+ trigger_mcastq(ah, mcastq, cabq);
+
+ spin_unlock_bh(&mcastq->axq_lock);
+ return bf;
+}
+
+/*
+ * Startup beacon transmission for adhoc mode when they are sent entirely
+ * by the hardware using the self-linked descriptor + veol trick.
+*/
+
+static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+ struct ath_vap *avp;
+ struct sk_buff *skb;
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp);
+
+ if (avp->av_bcbuf == NULL) {
+ /* NB: avp cannot be NULL here (asserted above), so the
+ * ternary below is redundant but harmless. */
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
+ __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
+ return;
+ }
+ bf = avp->av_bcbuf;
+ skb = (struct sk_buff *) bf->bf_mpdu;
+
+ /* Construct tx descriptor. */
+ ath_beacon_setup(sc, avp, bf);
+
+ /* NB: caller is known to have already stopped tx dma */
+ ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
+ ath9k_hw_txstart(ah, sc->sc_bhalq);
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__,
+ sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
+}
+
+/*
+ * Setup a h/w transmit queue for beacons.
+ *
+ * This function allocates an information structure (struct hal_txq_info)
+ * on the stack, sets some specific parameters (zero out channel width
+ * min/max, and enable aifs). The info structure does not need to be
+ * persistent.
+*/
+
+/* Returns the value of ath9k_hw_setuptxqueue() (the allocated h/w queue
+ * number; presumably negative on failure -- verify against the HAL). */
+int ath_beaconq_setup(struct ath_hal *ah)
+{
+ struct hal_txq_info qi;
+
+ /* Beacon queue: minimal AIFS, no contention window. */
+ memzero(&qi, sizeof(qi));
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ /* NB: don't enable any interrupts */
+ return ath9k_hw_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
+}
+
+
+/*
+ * Allocate and setup an initial beacon frame.
+ *
+ * Allocate a beacon state variable for a specific VAP instance created on
+ * the ATH interface. This routine also calculates the beacon "slot" for
+ * staggered beacons in the mBSSID case.
+*/
+
+/* Returns 0 on success, -ENOMEM if no beacon skb could be obtained. */
+int ath_beacon_alloc(struct ath_softc *sc, int if_id)
+{
+ struct ath_vap *avp;
+ struct ieee80211_hdr *wh;
+ struct ath_buf *bf;
+ struct sk_buff *skb;
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp);
+
+ /* Allocate a beacon descriptor if we haven't done so. */
+ if (!avp->av_bcbuf) {
+ /*
+ * Allocate beacon state for hostap/ibss. We know
+ * a buffer is available.
+ */
+
+ avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
+ struct ath_buf, list);
+ list_del(&avp->av_bcbuf->list);
+
+ if (sc->sc_opmode == HAL_M_HOSTAP || !sc->sc_hasveol) {
+ int slot;
+ /*
+ * Assign the vap to a beacon xmit slot. As
+ * above, this cannot fail to find one.
+ */
+ avp->av_bslot = 0;
+ for (slot = 0; slot < ATH_BCBUF; slot++)
+ if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
+ /*
+ * XXX hack, space out slots to better
+ * deal with misses
+ */
+ if (slot+1 < ATH_BCBUF &&
+ sc->sc_bslot[slot+1] ==
+ ATH_IF_ID_ANY) {
+ avp->av_bslot = slot+1;
+ break;
+ }
+ avp->av_bslot = slot;
+ /* NB: keep looking for a double slot */
+ }
+ KASSERT(sc->sc_bslot[avp->av_bslot] == ATH_IF_ID_ANY,
+ ("beacon slot %u not empty?", avp->av_bslot));
+ sc->sc_bslot[avp->av_bslot] = if_id;
+ sc->sc_nbcnvaps++;
+ }
+ }
+
+ /* release the previous beacon frame , if it already exists. */
+ bf = avp->av_bcbuf;
+ if (bf->bf_mpdu != NULL) {
+ /* Unmap the old DMA buffer and complete the old frame before
+ * installing a fresh beacon skb. */
+ struct ath_xmit_status tx_status;
+
+ skb = (struct sk_buff *) bf->bf_mpdu;
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ tx_status.flags = 0;
+ tx_status.retries = 0;
+ ath_tx_complete(sc, skb, &tx_status, NULL);
+ bf->bf_mpdu = NULL;
+ }
+
+ /*
+ * NB: the beacon data buffer must be 32-bit aligned;
+ * we assume the wbuf routines will return us something
+ * with this alignment (perhaps should assert).
+ */
+ skb = ath_get_beacon(sc, if_id, &avp->av_boff, &avp->av_btxctl);
+ if (skb == NULL) {
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /*
+ * Calculate a TSF adjustment factor required for
+ * staggered beacons. Note that we assume the format
+ * of the beacon frame leaves the tstamp field immediately
+ * following the header.
+ */
+ if (sc->sc_stagbeacons && avp->av_bslot > 0) {
+ u_int64_t tsfadjust;
+ int intval;
+
+ /* FIXME: Use default value for now: Sujith */
+
+ intval = ATH_DEFAULT_BINTVAL;
+
+ /*
+ * The beacon interval is in TU's; the TSF in usecs.
+ * We figure out how many TU's to add to align the
+ * timestamp then convert to TSF units and handle
+ * byte swapping before writing it in the frame.
+ * The hardware will then add this each time a beacon
+ * frame is sent. Note that we align vap's 1..N
+ * and leave vap 0 untouched. This means vap 0
+ * has a timestamp in one beacon interval while the
+ * others get a timestamp aligned to the next interval.
+ */
+ tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
+ tsfadjust = cpu_to_le64(tsfadjust<<10); /* TU->TSF */
+
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n",
+ __func__, sc->sc_stagbeacons ? "stagger" : "burst",
+ avp->av_bslot, intval, (unsigned long long)tsfadjust);
+
+ /* Write the adjustment into the tstamp field, which sits
+ * immediately after the 802.11 header (&wh[1]). */
+ wh = (struct ieee80211_hdr *)skb->data;
+ memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
+ }
+
+ bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ bf->bf_mpdu = skb;
+
+ return 0;
+}
+
+/*
+ * Reclaim beacon resources and return buffer to the pool.
+ *
+ * Checks the VAP to put the beacon frame buffer back to the ATH object
+ * queue, and de-allocates any wbuf frames that were sent as CAB traffic.
+*/
+
+void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
+{
+ if (avp->av_bcbuf != NULL) {
+ struct ath_buf *bf;
+
+ /* Free the vap's beacon slot, if it occupies one. */
+ if (avp->av_bslot != -1) {
+ sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
+ sc->sc_nbcnvaps--;
+ }
+
+ bf = avp->av_bcbuf;
+ if (bf->bf_mpdu != NULL) {
+ /* Unmap DMA and complete the outstanding frame. */
+ struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
+ struct ath_xmit_status tx_status;
+
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ tx_status.flags = 0;
+ tx_status.retries = 0;
+ ath_tx_complete(sc, skb, &tx_status, NULL);
+ bf->bf_mpdu = NULL;
+ }
+ /* Return the buffer to the free pool. */
+ list_add_tail(&bf->list, &sc->sc_bbuf);
+
+ avp->av_bcbuf = NULL;
+ }
+}
+
+/*
+ * Reclaim beacon resources and return buffer to the pool.
+ *
+ * This function will free any wbuf frames that are still attached to the
+ * beacon buffers in the ATH object. Note that this does not de-allocate
+ * any wbuf objects that are in the transmit queue and have not yet returned
+ * to the ATH object.
+*/
+
+void ath_beacon_free(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ /* Walk every beacon buffer and release any attached skb. */
+ list_for_each_entry(bf, &sc->sc_bbuf, list) {
+ if (bf->bf_mpdu != NULL) {
+ struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
+ struct ath_xmit_status tx_status;
+
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ tx_status.flags = 0;
+ tx_status.retries = 0;
+ ath_tx_complete(sc, skb, &tx_status, NULL);
+ bf->bf_mpdu = NULL;
+ }
+ }
+}
+
+/*
+ * Tasklet for Sending Beacons
+ *
+ * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
+ * contents are done as needed and the slot time is also adjusted based on
+ * current state.
+ *
+ * This tasklet is not scheduled, it's called in ISR context.
+*/
+
+void ath9k_beacon_tasklet(unsigned long data)
+{
+/* Convert a 64-bit TSF (high/low halves, usecs) to Time Units (1024 us). */
+#define TSF_TO_TU(_h,_l) \
+ ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
+
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf = NULL;
+ int slot, if_id;
+ u_int32_t bfaddr;
+ u_int32_t rx_clear = 0, rx_frame = 0, tx_frame = 0;
+ u_int32_t show_cycles = 0;
+ u_int32_t bc = 0; /* beacon count */
+
+ /* In no-reset mode, grab MIB cycle counters up front so stuck
+ * beacons can be diagnosed below instead of resetting the chip. */
+ if (sc->sc_noreset) {
+ show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
+ &rx_clear,
+ &rx_frame,
+ &tx_frame);
+ }
+
+ /*
+ * Check if the previous beacon has gone out. If
+ * not don't try to post another, skip this period
+ * and wait for the next. Missed beacons indicate
+ * a problem and should not occur. If we miss too
+ * many consecutive beacons reset the device.
+ */
+ if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
+ sc->sc_bmisscount++;
+ /* XXX: doth needs the chanchange IE countdown decremented.
+ * We should consider adding a mac80211 call to indicate
+ * a beacon miss so appropriate action could be taken
+ * (in that layer).
+ */
+ if (sc->sc_bmisscount < BSTUCK_THRESH) {
+ if (sc->sc_noreset) {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: missed %u consecutive beacons\n",
+ __func__, sc->sc_bmisscount);
+ if (show_cycles) {
+ /*
+ * Display cycle counter stats
+ * from HW to aide in debug of
+ * stickiness.
+ */
+ DPRINTF(sc,
+ ATH_DEBUG_BEACON,
+ "%s: busy times: rx_clear=%d, "
+ "rx_frame=%d, tx_frame=%d\n",
+ __func__, rx_clear, rx_frame,
+ tx_frame);
+ } else {
+ DPRINTF(sc,
+ ATH_DEBUG_BEACON,
+ "%s: unable to obtain "
+ "busy times\n", __func__);
+ }
+ } else {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: missed %u consecutive beacons\n",
+ __func__, sc->sc_bmisscount);
+ }
+ } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
+ /* Too many misses: either just report it (no-reset
+ * mode) or reset the chip via ath_bstuck_process. */
+ if (sc->sc_noreset) {
+ if (sc->sc_bmisscount == BSTUCK_THRESH) {
+ DPRINTF(sc,
+ ATH_DEBUG_BEACON,
+ "%s: beacon is officially "
+ "stuck\n", __func__);
+ ath9k_hw_dmaRegDump(ah);
+ }
+ } else {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: beacon is officially stuck\n",
+ __func__);
+ ath_bstuck_process(sc);
+ }
+ }
+
+ return;
+ }
+ /* Previous beacon went out; clear any accumulated miss count. */
+ if (sc->sc_bmisscount != 0) {
+ if (sc->sc_noreset) {
+ DPRINTF(sc,
+ ATH_DEBUG_BEACON,
+ "%s: resume beacon xmit after %u misses\n",
+ __func__, sc->sc_bmisscount);
+ } else {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: resume beacon xmit after %u misses\n",
+ __func__, sc->sc_bmisscount);
+ }
+ sc->sc_bmisscount = 0;
+ }
+
+ /*
+ * Generate beacon frames. If we are sending frames
+ * staggered then calculate the slot for this frame based
+ * on the tsf to safeguard against missing an swba.
+ * Otherwise we are bursting all frames together and need
+ * to generate a frame for each vap that is up and running.
+ */
+ if (sc->sc_stagbeacons) {
+ /* staggered beacons */
+ u_int64_t tsf;
+ u_int32_t tsftu;
+ u_int16_t intval;
+
+ /* FIXME: Use default value for now - Sujith */
+ intval = ATH_DEFAULT_BINTVAL;
+
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf);
+ slot = ((tsftu % intval) * ATH_BCBUF) / intval;
+ if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
+ __func__, slot, (unsigned long long) tsf, tsftu,
+ intval, if_id);
+ bfaddr = 0;
+ if (if_id != ATH_IF_ID_ANY) {
+ bf = ath_beacon_generate(sc, if_id);
+ if (bf != NULL) {
+ bfaddr = bf->bf_daddr;
+ bc = 1;
+ }
+ }
+ } else {
+ /* XXX: Clean this up, move work to a helper */
+ /* burst'd beacons */
+ u_int32_t *bflink;
+ bflink = &bfaddr;
+ /* XXX rotate/randomize order? */
+ /* Chain all slots' beacons via their descriptor link words;
+ * the first daddr lands in bfaddr itself. */
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if_id = sc->sc_bslot[slot];
+ if (if_id != ATH_IF_ID_ANY) {
+ bf = ath_beacon_generate(sc, if_id);
+ if (bf != NULL) {
+ if (bflink != &bfaddr)
+ *bflink = cpu_to_le32(
+ bf->bf_daddr);
+ else
+ *bflink = bf->bf_daddr;
+ bflink = &bf->bf_desc->ds_link;
+ bc++;
+ }
+ }
+ }
+ *bflink = 0; /* link of last frame */
+ }
+ /*
+ * Handle slot time change when a non-ERP station joins/leaves
+ * an 11g network. The 802.11 layer notifies us via callback,
+ * we mark updateslot, then wait one beacon before effecting
+ * the change. This gives associated stations at least one
+ * beacon interval to note the state change.
+ *
+ * NB: The slot time change state machine is clocked according
+ * to whether we are bursting or staggering beacons. We
+ * recognize the request to update and record the current
+ * slot then don't transition until that slot is reached
+ * again. If we miss a beacon for that slot then we'll be
+ * slow to transition but we'll be sure at least one beacon
+ * interval has passed. When bursting slot is always left
+ * set to ATH_BCBUF so this check is a noop.
+ */
+ /* XXX locking */
+ if (sc->sc_updateslot == UPDATE) {
+ sc->sc_updateslot = COMMIT; /* commit next beacon */
+ sc->sc_slotupdate = slot;
+ } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
+ ath_setslottime(sc); /* commit change to hardware */
+
+ if ((!sc->sc_stagbeacons || slot == 0) && (!sc->sc_diversity)) {
+ int otherant;
+ /*
+ * Check recent per-antenna transmit statistics and flip
+ * the default rx antenna if noticeably more frames went out
+ * on the non-default antenna. Only do this if rx diversity
+ * is off.
+ * XXX assumes 2 antennas
+ */
+ otherant = sc->sc_defant & 1 ? 2 : 1;
+ if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] +
+ ATH_ANTENNA_DIFF) {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: flip defant to %u, %u > %u\n",
+ __func__, otherant, sc->sc_ant_tx[otherant],
+ sc->sc_ant_tx[sc->sc_defant]);
+ ath_setdefantenna(sc, otherant);
+ }
+ sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
+ }
+
+ if (bfaddr != 0) {
+ /*
+ * Stop any current dma and put the new frame(s) on the queue.
+ * This should never fail since we check above that no frames
+ * are still pending on the queue.
+ */
+ if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: beacon queue %u did not stop?\n",
+ __func__, sc->sc_bhalq);
+ /* NB: the HAL still stops DMA, so proceed */
+ }
+
+ /* NB: cabq traffic should already be queued and primed */
+ ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
+ ath9k_hw_txstart(ah, sc->sc_bhalq);
+
+ sc->ast_be_xmit += bc; /* XXX per-vap? */
+ }
+#undef TSF_TO_TU
+}
+
+/*
+ * Tasklet for Beacon Stuck processing
+ *
+ * Processing for Beacon Stuck.
+ * Basically calls the ath_internal_reset function to reset the chip.
+*/
+
+void ath_bstuck_process(struct ath_softc *sc)
+{
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: stuck beacon; resetting (bmiss count %u)\n",
+ __func__, sc->sc_bmisscount);
+ /* Full chip reset is the only recovery for a wedged beacon queue. */
+ ath_internal_reset(sc);
+}
+
+/*
+ * Configure the beacon and sleep timers.
+ *
+ * When operating as an AP this resets the TSF and sets
+ * up the hardware to notify us when we need to issue beacons.
+ *
+ * When operating in station mode this sets up the beacon
+ * timers according to the timestamp of the last received
+ * beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware
+ * will wakeup in time to receive beacons, and configures
+ * the beacon miss handling so we'll receive a BMISS
+ * interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+
+/*
+ * Configure beacon timing for the given interface, or for the current
+ * device opmode when if_id == ATH_IF_ID_ANY.  For STA operation this
+ * programs the hardware sleep/DTIM/bmiss timers; for AP/IBSS it
+ * programs the beacon transmit timers and enables SWBA interrupts
+ * where software beacon preparation is required.
+ */
+void ath_beacon_config(struct ath_softc *sc, int if_id)
+{
+#define TSF_TO_TU(_h,_l) \
+ ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
+ struct ath_hal *ah = sc->sc_ah;
+ u_int32_t nexttbtt, intval;
+ struct ath_beacon_config conf;
+ enum hal_opmode av_opmode;
+
+ /* Use the per-VAP opmode when a specific interface was named. */
+ if (if_id != ATH_IF_ID_ANY)
+ av_opmode = sc->sc_vaps[if_id]->av_opmode;
+ else
+ av_opmode = sc->sc_opmode;
+
+ memzero(&conf, sizeof(struct ath_beacon_config));
+
+ /* FIXME: Use default values for now - Sujith */
+ /* Query beacon configuration first */
+ /*
+ * Protocol stack doesn't support dynamic beacon configuration,
+ * use default configurations.
+ */
+ conf.beacon_interval = ATH_DEFAULT_BINTVAL;
+ conf.listen_interval = 1;
+ conf.dtim_period = conf.beacon_interval;
+ conf.dtim_count = 1;
+ conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
+
+ /* extract tstamp from last beacon and convert to TU */
+ /* NB: conf was zeroed above and last_tstamp never filled in, so
+ * this currently always yields nexttbtt == 0 (see FIXME above). */
+ nexttbtt = TSF_TO_TU(LE_READ_4(conf.u.last_tstamp + 4),
+ LE_READ_4(conf.u.last_tstamp));
+ /* XXX conditionalize multi-bss support? */
+ if (sc->sc_opmode == HAL_M_HOSTAP) {
+ /*
+ * For multi-bss ap support beacons are either staggered
+ * evenly over N slots or burst together. For the former
+ * arrange for the SWBA to be delivered for each slot.
+ * Slots that are not occupied will generate nothing.
+ */
+ /* NB: the beacon interval is kept internally in TU's */
+ intval = conf.beacon_interval & HAL_BEACON_PERIOD;
+ if (sc->sc_stagbeacons)
+ intval /= ATH_BCBUF; /* for staggered beacons */
+ if ((sc->sc_nostabeacons) &&
+ (av_opmode == HAL_M_HOSTAP))
+ nexttbtt = 0;
+ } else {
+ intval = conf.beacon_interval & HAL_BEACON_PERIOD;
+ }
+
+ if (nexttbtt == 0) /* e.g. for ap mode */
+ nexttbtt = intval;
+ else if (intval) /* NB: can be 0 for monitor mode */
+ nexttbtt = roundup(nexttbtt, intval);
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
+ __func__, nexttbtt, intval, conf.beacon_interval);
+ /* Check for HAL_M_HOSTAP and sc_nostabeacons for WDS client */
+ if ((sc->sc_opmode == HAL_M_STA) ||
+ ((sc->sc_opmode == HAL_M_HOSTAP) &&
+ (av_opmode == HAL_M_STA) &&
+ (sc->sc_nostabeacons))) {
+ struct hal_beacon_state bs;
+ u_int64_t tsf;
+ u_int32_t tsftu;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount;
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = conf.dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = conf.dtim_count;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0; /* XXX? */
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = conf.listen_interval * intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+#define FUDGE 2
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ if (--dtimcount < 0) {
+ dtimcount = dtimperiod - 1;
+ if (--cfpcount < 0)
+ cfpcount = cfpperiod - 1;
+ }
+ } while (nexttbtt < tsftu);
+#undef FUDGE
+ memzero(&bs, sizeof(bs));
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+ /*
+ * Calculate the number of consecutive beacons to miss
+ * before taking a BMISS interrupt. The configuration
+ * is specified in TU so we only need calculate based
+ * on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold =
+ conf.listen_interval *
+ ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold =
+ howmany(conf.bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
+
+ /*
+ * Calculate sleep duration. The configuration is
+ * given in ms. We ensure a multiple of the beacon
+ * period is used. Also, if the sleep duration is
+ * greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration =
+ roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: tsf %llu "
+ "tsf:tu %u "
+ "intval %u "
+ "nexttbtt %u "
+ "dtim %u "
+ "nextdtim %u "
+ "bmiss %u "
+ "sleep %u "
+ "cfp:period %u "
+ "maxdur %u "
+ "next %u "
+ "timoffset %u\n"
+ , __func__
+ , (unsigned long long)tsf, tsftu
+ , bs.bs_intval
+ , bs.bs_nexttbtt
+ , bs.bs_dtimperiod
+ , bs.bs_nextdtim
+ , bs.bs_bmissthreshold
+ , bs.bs_sleepduration
+ , bs.bs_cfpperiod
+ , bs.bs_cfpmaxduration
+ , bs.bs_cfpnext
+ , bs.bs_timoffset
+ );
+
+ /* Program the STA timers with interrupts masked off, then
+ * re-enable with BMISS added to the mask. */
+ if (!(sc->sc_nostabeacons)) {
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ sc->sc_imask |= HAL_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ } else {
+ u_int64_t tsf;
+ u_int32_t tsftu;
+ ath9k_hw_set_interrupts(ah, 0);
+ if (nexttbtt == intval)
+ intval |= HAL_BEACON_RESET_TSF;
+ if (sc->sc_opmode == HAL_M_IBSS) {
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF .
+ */
+#define FUDGE 2
+ if (!(intval & HAL_BEACON_RESET_TSF)) {
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU((u_int32_t)(tsf>>32),
+ (u_int32_t)tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+ }
+#undef FUDGE
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: IBSS nexttbtt %u intval %u (%u)\n",
+ __func__, nexttbtt,
+ intval & ~HAL_BEACON_RESET_TSF,
+ conf.beacon_interval);
+
+ /*
+ * In IBSS mode enable the beacon timers but only
+ * enable SWBA interrupts if we need to manually
+ * prepare beacon frames. Otherwise we use a
+ * self-linked tx descriptor and let the hardware
+ * deal with things.
+ */
+ intval |= HAL_BEACON_ENA;
+ if (!sc->sc_hasveol)
+ sc->sc_imask |= HAL_INT_SWBA;
+ ath_beaconq_config(sc);
+ } else if (sc->sc_opmode == HAL_M_HOSTAP) {
+ /*
+ * In AP mode we enable the beacon timers and
+ * SWBA interrupts to prepare beacon frames.
+ */
+ intval |= HAL_BEACON_ENA;
+ sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
+ ath_beaconq_config(sc);
+ }
+ ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ sc->sc_bmisscount = 0;
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ /*
+ * When using a self-linked beacon descriptor in
+ * ibss mode load it once here.
+ */
+ if (sc->sc_opmode == HAL_M_IBSS && sc->sc_hasveol)
+ ath_beacon_start_adhoc(sc, 0);
+ }
+#undef TSF_TO_TU
+}
+
+/* Function to collect beacon rssi data and resync beacon if necessary */
+
+void ath_beacon_sync(struct ath_softc *sc, int if_id)
+{
+ /*
+ * Resync beacon timers using the tsf of the
+ * beacon frame we just received.
+ */
+ ath_beacon_config(sc, if_id);
+ /* Mark beacons as configured; ath_vap_up() checks this flag to
+ * avoid reprogramming the timers again. */
+ sc->sc_beacons = 1;
+}
--- /dev/null
+/*
+ * Copyright (c) 2008, Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+ /* Implementation of the main "ATH" layer. */
+
+#include "core.h"
+#include "regd.h"
+
+static int ath_outdoor = AH_FALSE; /* enable outdoor use */
+
+/* Broadcast address used when clearing the BSSID during scans. */
+static const u_int8_t ath_bcast_mac[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+/* RSSI thresholds and hold-off period for the tx chainmask
+ * auto-selection logic (see ath_chainmask_sel_logic()). */
+static u_int32_t ath_chainmask_sel_up_rssi_thres =
+ ATH_CHAINMASK_SEL_UP_RSSI_THRES;
+static u_int32_t ath_chainmask_sel_down_rssi_thres =
+ ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
+static u_int32_t ath_chainmask_sel_period =
+ ATH_CHAINMASK_SEL_TIMEOUT;
+
+/*
+ * Return the bus cache line size in 4-byte word units.
+ *
+ * Reads PCI_CACHE_LINE_SIZE from config space; if the bootrom left the
+ * register uninitialized (reads back as zero), substitute
+ * DEFAULT_CACHELINE converted from bytes to 4B words.
+ */
+static void bus_read_cachesize(struct ath_softc *sc, int *csz)
+{
+	u_int8_t clsz;
+
+	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u_int8_t *)&clsz);
+
+	/*
+	 * Guard against bootroms that have not fully initialized all
+	 * PCI devices: a zero register would otherwise propagate an
+	 * unusable cache line size to the caller.
+	 */
+	*csz = clsz ? (int)clsz : (DEFAULT_CACHELINE >> 2);
+}
+
+/*
+ * Set current operating mode
+ *
+ * Initializes and fills the rate maps in the ATH object based on the
+ * operating mode.  (The LED blink rates once configured here were
+ * superseded by the ath_led module.)
+ */
+static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
+{
+	const struct hal_rate_table *rt;
+	int i;
+
+	/* Invalidate all rate-code -> rate-index entries first. */
+	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
+	rt = sc->sc_rates[mode];
+	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
+
+	/* Build the reverse map from h/w rate code to rate table index. */
+	for (i = 0; i < rt->rateCount; i++)
+		sc->sc_rixmap[rt->info[i].rateCode] = (u_int8_t) i;
+
+	memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
+	for (i = 0; i < 256; i++) {
+		u_int8_t ix = rt->rateCodeToIndex[i];
+
+		if (ix == 0xff)
+			continue;
+
+		sc->sc_hwmap[i].ieeerate =
+			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
+		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
+		/*
+		 * NB: an empty conditional on shortPreamble/PHY_OFDM used
+		 * to sit here (leftover from the removed LED code); it was
+		 * a no-op and has been dropped.
+		 */
+		/* NB: this uses the last entry if the rate isn't found */
+		/* XXX beware of overflow */
+	}
+	sc->sc_currates = rt;
+	sc->sc_curmode = mode;
+	/*
+	 * All protection frames are transmitted at 2Mb/s for
+	 * 11g, otherwise at 1Mb/s.
+	 * XXX select protection rate index from rate table.
+	 */
+	sc->sc_protrix = (mode == WIRELESS_MODE_11g ? 1 : 0);
+	/* rate index used to send mgt frames */
+	sc->sc_minrateix = 0;
+}
+
+/*
+ * Select Rate Table
+ *
+ * Based on the wireless mode passed in, the rate table in the ATH object
+ * is set to the mode specific rate table. This also calls the callback
+ * function to set the rate in the protocol layer object.
+ * Returns 1 on success, 0 for an unknown mode or a missing rate table.
+*/
+
+static int ath_rate_setup(struct ath_softc *sc, enum wireless_mode mode)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ const struct hal_rate_table *rt;
+
+ /* Map the driver's wireless_mode onto the HAL's mode-select
+ * constant and cache the resulting rate table. */
+ switch (mode) {
+ case WIRELESS_MODE_11a:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11A);
+ break;
+ case WIRELESS_MODE_11b:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11B);
+ break;
+ case WIRELESS_MODE_11g:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11G);
+ break;
+ case WIRELESS_MODE_11NA_HT20:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT20);
+ break;
+ case WIRELESS_MODE_11NG_HT20:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT20);
+ break;
+ case WIRELESS_MODE_11NA_HT40PLUS:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NA_HT40PLUS);
+ break;
+ case WIRELESS_MODE_11NA_HT40MINUS:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah,
+ ATH9K_MODE_SEL_11NA_HT40MINUS);
+ break;
+ case WIRELESS_MODE_11NG_HT40PLUS:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah, ATH9K_MODE_SEL_11NG_HT40PLUS);
+ break;
+ case WIRELESS_MODE_11NG_HT40MINUS:
+ sc->sc_rates[mode] =
+ ath9k_hw_getratetable(ah,
+ ATH9K_MODE_SEL_11NG_HT40MINUS);
+ break;
+ default:
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid mode %u\n",
+ __func__, mode);
+ return 0;
+ }
+ rt = sc->sc_rates[mode];
+ if (rt == NULL)
+ return 0;
+
+ /* setup rate set in 802.11 protocol layer */
+ ath_setup_rate(sc, mode, NORMAL_RATE, rt);
+
+ return 1;
+}
+
+/*
+ * Set up channel list
+ *
+ * Determines the proper set of channelflags based on the selected mode,
+ * allocates a channel array, and passes it to the HAL for initialization.
+ * If successful, the list is passed to the upper layer, then de-allocated.
+ * Returns 0 on success or a negative errno.
+*/
+
+static int ath_getchannels(struct ath_softc *sc,
+ u_int cc,
+ enum hal_bool outDoor,
+ enum hal_bool xchanMode)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct hal_channel *chans;
+ int nchan;
+ u_int8_t regclassids[ATH_REGCLASSIDS_MAX];
+ u_int nregclass = 0;
+
+ /* Scratch buffer only; freed on every exit path below. */
+ chans = kmalloc(ATH_CHAN_MAX * sizeof(struct hal_channel), GFP_KERNEL);
+ if (chans == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to allocate channel table\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Let the regulatory code fill in the channels valid for the
+ * given country code (cc). */
+ if (!ath9k_regd_init_channels(ah,
+ chans,
+ ATH_CHAN_MAX,
+ (u_int *)&nchan,
+ regclassids,
+ ATH_REGCLASSIDS_MAX,
+ &nregclass,
+ cc,
+ ATH9K_MODE_SEL_ALL,
+ outDoor,
+ xchanMode)) {
+ u_int32_t rd = ah->ah_currentRD;
+
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to collect channel list from hal; "
+ "regdomain likely %u country code %u\n",
+ __func__, rd, cc);
+ kfree(chans);
+ return -EINVAL;
+ }
+
+ /* Hand the completed list to the upper layer. */
+ ath_setup_channel_list(sc,
+ CLIST_UPDATE,
+ chans,
+ nchan,
+ regclassids,
+ nregclass,
+ CTRY_DEFAULT);
+
+ kfree(chans);
+ return 0;
+}
+
+/*
+ * Determine mode from channel flags
+ *
+ * Maps a channel's flag bits onto the corresponding wireless mode.
+ * If no known flag combination matches, the lowest mode (11b) is
+ * returned as a fallback.
+ */
+static enum wireless_mode ath_chan2mode(struct hal_channel *chan)
+{
+	/* Ordered table: the first fully-matching flag set wins, so the
+	 * precedence of the original if/else chain is preserved. */
+	static const struct {
+		u_int32_t flags;
+		enum wireless_mode mode;
+	} flagmap[] = {
+		{ CHANNEL_A,           WIRELESS_MODE_11a },
+		{ CHANNEL_G,           WIRELESS_MODE_11g },
+		{ CHANNEL_B,           WIRELESS_MODE_11b },
+		{ CHANNEL_A_HT20,      WIRELESS_MODE_11NA_HT20 },
+		{ CHANNEL_G_HT20,      WIRELESS_MODE_11NG_HT20 },
+		{ CHANNEL_A_HT40PLUS,  WIRELESS_MODE_11NA_HT40PLUS },
+		{ CHANNEL_A_HT40MINUS, WIRELESS_MODE_11NA_HT40MINUS },
+		{ CHANNEL_G_HT40PLUS,  WIRELESS_MODE_11NG_HT40PLUS },
+		{ CHANNEL_G_HT40MINUS, WIRELESS_MODE_11NG_HT40MINUS },
+	};
+	int i;
+
+	for (i = 0; i < (int)(sizeof(flagmap) / sizeof(flagmap[0])); i++)
+		if ((chan->channelFlags & flagmap[i].flags) ==
+		    flagmap[i].flags)
+			return flagmap[i].mode;
+
+	/* NB: should not get here */
+	return WIRELESS_MODE_11b;
+}
+
+/*
+ * Change Channels
+ *
+ * Updates the driver's rate table and current-mode state to match the
+ * mode implied by the new channel's flags.  The hardware channel switch
+ * itself is performed by the caller (see ath_set_channel()).
+ */
+static void ath_chan_change(struct ath_softc *sc, struct hal_channel *chan)
+{
+	enum wireless_mode newmode = ath_chan2mode(chan);
+
+	/* Refresh the mode-specific rate table before recording the
+	 * new operating mode. */
+	ath_rate_setup(sc, newmode);
+	ath_setcurmode(sc, newmode);
+}
+
+/*
+ * Stop the device, grabbing the top-level lock to protect
+ * against concurrent entry through ath_init (which can happen
+ * if another thread does a system call and the thread doing the
+ * stop is preempted).
+ */
+
+static int ath_stop(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: invalid %u\n",
+ __func__, sc->sc_invalid);
+
+ /*
+ * Shutdown the hardware and driver:
+ * stop output from above
+ * reset 802.11 state machine
+ * (sends station deassoc/deauth frames)
+ * turn off timers
+ * disable interrupts
+ * clear transmit machinery
+ * clear receive machinery
+ * turn off the radio
+ * reclaim beacon resources
+ *
+ * Note that some of this work is not possible if the
+ * hardware is gone (invalid).
+ */
+
+ /* Only touch the hardware if it is still present; when
+ * sc_invalid is set just drop the software rx link. */
+ if (!sc->sc_invalid)
+ ath9k_hw_set_interrupts(ah, 0);
+ ath_draintxq(sc, AH_FALSE);
+ if (!sc->sc_invalid) {
+ ath_stoprecv(sc);
+ ath9k_hw_phy_disable(ah);
+ } else
+ sc->sc_rxlink = NULL;
+
+ return 0;
+}
+
+/*
+ * Start Scan
+ *
+ * This function is called when starting a channel scan. It will perform
+ * power save wakeup processing, set the filter for the scan, and get the
+ * chip ready to send broadcast packets out during the scan.
+*/
+
+void ath_scan_start(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u_int32_t rfilt;
+ u_int32_t now = (u_int32_t) jiffies_to_msecs(get_timestamp());
+
+ /* Recompute the rx filter with scanning active, and clear the
+ * BSSID/AID (broadcast mac, aid 0) for the duration of the scan. */
+ sc->sc_scanning = 1;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+ ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
+
+ /* Restore previous power management state. */
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
+ now / 1000, now % 1000, __func__, rfilt);
+}
+
+/*
+ * Scan End
+ *
+ * This routine is called by the upper layer when the scan is completed. This
+ * will set the filters back to normal operating mode, set the BSSID to the
+ * correct value, and restore the power save state.
+*/
+
+void ath_scan_end(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u_int32_t rfilt;
+ u_int32_t now = (u_int32_t) jiffies_to_msecs(get_timestamp());
+
+ /* Recompute the rx filter without scanning, then restore the
+ * cached BSSID/AID that ath_scan_start() cleared. */
+ sc->sc_scanning = 0;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
+ now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
+}
+
+/*
+ * Set the current channel
+ *
+ * Set/change channels. If the channel is really being changed, it's done
+ * by resetting the chip. To accomplish this we must first cleanup any pending
+ * DMA, then restart stuff after a la ath_init.
+ * Returns 0 on success or -EIO when the device is gone or a reset fails.
+*/
+int ath_set_channel(struct ath_softc *sc, struct hal_channel *hchan)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ enum hal_bool fastcc = AH_TRUE, stopped;
+ enum hal_ht_macmode ht_macmode;
+
+ if (sc->sc_invalid) /* if the device is invalid or removed */
+ return -EIO;
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG,
+ "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
+ __func__,
+ ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
+ sc->sc_curchan.channelFlags),
+ sc->sc_curchan.channel,
+ ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
+ hchan->channel, hchan->channelFlags);
+
+ ht_macmode = ath_cwm_macmode(sc);
+
+ /* Skip the reset entirely unless the channel, chainmask or a
+ * forced full reset actually requires it. */
+ if (hchan->channel != sc->sc_curchan.channel ||
+ hchan->channelFlags != sc->sc_curchan.channelFlags ||
+ sc->sc_update_chainmask || sc->sc_full_reset) {
+ enum hal_status status;
+ /*
+ * This is only performed if the channel settings have
+ * actually changed.
+ *
+ * To switch channels clear any pending DMA operations;
+ * wait long enough for the RX fifo to drain, reset the
+ * hardware at the new frequency, and then re-enable
+ * the relevant bits of the h/w.
+ */
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, AH_FALSE); /* clear pending tx frames */
+ stopped = ath_stoprecv(sc); /* turn off frame recv */
+
+ /* XXX: do not flush receive queue here. We don't want
+ * to flush data frames already in queue because of
+ * changing channel. */
+
+ /* A fast channel change is only safe when rx was cleanly
+ * stopped and no full reset was requested. */
+ if (!stopped || sc->sc_full_reset)
+ fastcc = AH_FALSE;
+
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
+ ht_macmode, sc->sc_tx_chainmask,
+ sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing,
+ fastcc, &status)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to reset channel %u (%uMhz) "
+ "flags 0x%x hal status %u\n", __func__,
+ ath9k_hw_mhz2ieee(ah, hchan->channel,
+ hchan->channelFlags),
+ hchan->channel, hchan->channelFlags, status);
+ spin_unlock_bh(&sc->sc_resetlock);
+ return -EIO;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+
+ sc->sc_curchan = *hchan;
+ sc->sc_update_chainmask = 0;
+ sc->sc_full_reset = 0;
+
+ /* Re-enable rx framework */
+ if (ath_startrecv(sc) != 0) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to restart recv logic\n", __func__);
+ return -EIO;
+ }
+ /*
+ * Change channels and update the h/w rate map
+ * if we're switching; e.g. 11a to 11b/g.
+ */
+ ath_chan_change(sc, hchan);
+ ath_update_txpow(sc, 0); /* update tx power state */
+ /*
+ * Re-enable interrupts.
+ */
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ return 0;
+}
+
+/**********************/
+/* Chainmask Handling */
+/**********************/
+
+/* Timer callback: the hold-off period has elapsed, so allow the next
+ * chainmask switch.  'data' is the ath_chainmask_sel pointer passed to
+ * setup_timer() in ath_chainmask_sel_init(). */
+static void ath_chainmask_sel_timertimeout(unsigned long data)
+{
+ struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
+ cm->switch_allowed = 1;
+}
+
+/*
+ * Start chainmask select timer
+ *
+ * Disallow further chainmask switches until the timer fires.
+ * BUGFIX: mod_timer() takes an *absolute* expiry in jiffies; the old
+ * code passed the bare period constant, making the timer expire at a
+ * fixed point after boot rather than one period from now.
+ * NOTE(review): assumes ath_chainmask_sel_period is expressed in
+ * jiffies — confirm against ATH_CHAINMASK_SEL_TIMEOUT's definition.
+ */
+static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
+{
+	cm->switch_allowed = 0;
+	mod_timer(&cm->timer, jiffies + ath_chainmask_sel_period);
+}
+
+/* Stop chainmask select timer and forbid switches until it is
+ * restarted via ath_chainmask_sel_timerstart(). */
+static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
+{
+ cm->switch_allowed = 0;
+ del_timer_sync(&cm->timer);
+}
+
+/* Initialize per-node chainmask selection state: start from the
+ * device-wide chainmasks, mark the RSSI average as "no sample yet",
+ * and arm the hold-off timer callback. */
+static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
+
+ memzero(cm, sizeof(struct ath_chainmask_sel));
+
+ cm->cur_tx_mask = sc->sc_tx_chainmask;
+ cm->cur_rx_mask = sc->sc_rx_chainmask;
+ cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
+ setup_timer(&cm->timer,
+ ath_chainmask_sel_timertimeout, (unsigned long) cm);
+}
+
+/*
+ * Choose the tx chainmask for a node.
+ *
+ * Auto-switches between the configured mask and 3x3 based on the
+ * averaged tx RSSI, rate-limited by the hold-off timer.  Returns the
+ * chainmask to use for this node.
+ */
+int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
+{
+	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
+
+	/*
+	 * Disable auto-switching in one of the following if conditions.
+	 * sc_chainmask_auto_sel is used for internal global auto-switching
+	 * enabled/disabled setting
+	 *
+	 * BUGFIX: braces were missing here, so the return statement was
+	 * unconditional and all of the switching logic below was dead code.
+	 */
+	if ((sc->sc_no_tx_3_chains == AH_FALSE) ||
+	    (sc->sc_config.chainmask_sel == AH_FALSE)) {
+		cm->cur_tx_mask = sc->sc_tx_chainmask;
+		return cm->cur_tx_mask;
+	}
+
+	/* No RSSI sample collected yet: keep the current mask. */
+	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
+		return cm->cur_tx_mask;
+
+	if (cm->switch_allowed) {
+		/* Switch down from tx 3 to tx 2. */
+		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
+		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
+		    ath_chainmask_sel_down_rssi_thres) {
+			cm->cur_tx_mask = sc->sc_tx_chainmask;
+
+			/* Don't let another switch happen until
+			 * this timer expires */
+			ath_chainmask_sel_timerstart(cm);
+		}
+		/* Switch up from tx 2 to 3. */
+		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
+			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
+			 ath_chainmask_sel_up_rssi_thres) {
+			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
+
+			/* Don't let another switch happen
+			 * until this timer expires */
+			ath_chainmask_sel_timerstart(cm);
+		}
+	}
+
+	return cm->cur_tx_mask;
+}
+
+/******************/
+/* VAP management */
+/******************/
+
+/*
+ * Down VAP instance
+ *
+ * This routine will stop the indicated VAP and put it in a "down" state.
+ * The down state is basically an initialization state that can be brought
+ * back up by calling the opposite up routine.
+ * This routine will bring the interface out of power save mode, set the
+ * LED states, update the rate control processing, stop DMA transfers, and
+ * set the VAP into the down state.
+ * Returns 0 on success or -EINVAL for an unknown interface id.
+*/
+
+int ath_vap_down(struct ath_softc *sc, int if_id, u_int flags)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ if (sc->sc_slowAntDiv)
+ ath_slow_ant_div_stop(&sc->sc_antdiv);
+#endif
+
+ /* update ratectrl about the new state */
+ ath_rate_newstate(sc, avp, 0);
+
+ /* Reclaim beacon resources */
+ if (sc->sc_opmode == HAL_M_HOSTAP || sc->sc_opmode == HAL_M_IBSS) {
+ ath9k_hw_stoptxdma(ah, sc->sc_bhalq);
+ ath_beacon_return(sc, avp);
+ }
+
+ /* When the hardware is being switched off, mask beacon-related
+ * interrupts and mark beacons unconfigured so a later up
+ * reprograms the timers. */
+ if (flags & ATH_IF_HW_OFF) {
+ sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
+ ath9k_hw_set_interrupts(ah, sc->sc_imask & ~HAL_INT_GLOBAL);
+ sc->sc_beacons = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * VAP in Listen mode
+ *
+ * This routine brings the VAP out of the down state into a "listen" state
+ * where it waits for association requests. This is used in AP and AdHoc
+ * modes.
+ * Returns 0 on success or -EINVAL for an unknown interface id.
+*/
+
+int ath_vap_listen(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+ u_int32_t rfilt = 0;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ if (sc->sc_slowAntDiv)
+ ath_slow_ant_div_stop(&sc->sc_antdiv);
+#endif
+
+ /* update ratectrl about the new state */
+ ath_rate_newstate(sc, avp, 0);
+
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* While listening, STA/IBSS use the broadcast BSSID and no AID. */
+ if (sc->sc_opmode == HAL_M_STA || sc->sc_opmode == HAL_M_IBSS) {
+ memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+ } else
+ sc->sc_curaid = 0;
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG,
+ "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+ __func__, rfilt, print_mac(mac,
+ sc->sc_curbssid), sc->sc_curaid);
+
+ /*
+ * XXXX
+ * Disable BMISS interrupt when we're not associated
+ */
+ ath9k_hw_set_interrupts(ah,
+ sc->sc_imask & ~(HAL_INT_SWBA | HAL_INT_BMISS));
+ sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
+ /* need to reconfigure the beacons when it moves to RUN */
+ sc->sc_beacons = 0;
+
+ return 0;
+}
+
+/*
+ * Join a BSS: record the target bssid, reset the chainmask and TSF
+ * state for the association attempt, and mask beacon interrupts until
+ * the interface reaches RUN state.
+ * Returns 0 on success or -EINVAL for an unknown interface id.
+ */
+int ath_vap_join(struct ath_softc *sc, int if_id,
+ const u_int8_t bssid[ETH_ALEN], u_int flags)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+ u_int32_t rfilt = 0;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+ /* update ratectrl about the new state */
+ ath_rate_newstate(sc, avp, 0);
+
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* AID is not known yet at join time. */
+ memcpy(sc->sc_curbssid, bssid, ETH_ALEN);
+ sc->sc_curaid = 0;
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG,
+ "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+ __func__, rfilt,
+ print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
+
+ /*
+ * Update tx/rx chainmask. For legacy association,
+ * hard code chainmask to 1x1, for 11n association, use
+ * the chainmask configuration.
+ */
+ sc->sc_update_chainmask = 1;
+ if (flags & ATH_IF_HT) {
+ sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
+ sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
+ } else {
+ sc->sc_tx_chainmask = 1;
+ sc->sc_rx_chainmask = 1;
+ }
+
+ /* Enable rx chain mask detection if configured to do so */
+
+ sc->sc_rx_chainmask_detect = 0;
+
+ /* Set aggregation protection mode parameters */
+
+ sc->sc_config.ath_aggr_prot = 0;
+
+ /*
+ * Reset our TSF so that its value is lower than the beacon that we are
+ * trying to catch. Only then hw will update its TSF register with the
+ * new beacon. Reset the TSF before setting the BSSID to avoid allowing
+ * in any frames that would update our TSF only to have us clear it
+ * immediately thereafter.
+ */
+ ath9k_hw_reset_tsf(ah);
+
+ /*
+ * XXXX
+ * Disable BMISS interrupt when we're not associated
+ */
+ ath9k_hw_set_interrupts(ah,
+ sc->sc_imask & ~(HAL_INT_SWBA | HAL_INT_BMISS));
+ sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
+ /* need to reconfigure the beacons when it moves to RUN */
+ sc->sc_beacons = 0;
+
+ return 0;
+}
+
+/*
+ * Bring a VAP to the RUN state: program BSSID/AID, distribute WEP key
+ * MACs where needed, set up beaconing per opmode, and (re)configure
+ * the beacon timers unless they are already configured or a beacon
+ * sync is pending.
+ * Returns 0 on success or a negative errno.
+ */
+int ath_vap_up(struct ath_softc *sc,
+ int if_id,
+ const u_int8_t bssid[ETH_ALEN],
+ u_int8_t aid, u_int flags)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+ u_int32_t rfilt = 0;
+ int i, error = 0;
+ DECLARE_MAC_BUF(mac);
+
+ ASSERT(if_id != ATH_IF_ID_ANY);
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+ /* update ratectrl about the new state */
+ ath_rate_newstate(sc, avp, 1);
+
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ if (avp->av_opmode == HAL_M_STA || avp->av_opmode == HAL_M_IBSS) {
+ memcpy(sc->sc_curbssid, bssid, ETH_ALEN);
+ sc->sc_curaid = aid;
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+ }
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG,
+ "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+ __func__, rfilt,
+ print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
+
+ /* NOTE(review): av_opmode is compared with HAL_M_* constants
+ * everywhere else in this file; comparing it against the mac80211
+ * IEEE80211_IF_TYPE_STA value here looks like an enum mix-up —
+ * confirm the intended semantics before changing it. */
+ if ((avp->av_opmode != IEEE80211_IF_TYPE_STA) &&
+ (flags & ATH_IF_PRIVACY)) {
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ if (ath9k_hw_keyisvalid(ah, (u_int16_t) i))
+ ath9k_hw_keysetmac(ah, (u_int16_t) i, bssid);
+ }
+
+ switch (avp->av_opmode) {
+ case HAL_M_HOSTAP:
+ case HAL_M_IBSS:
+ /*
+ * Allocate and setup the beacon frame.
+ *
+ * Stop any previous beacon DMA. This may be
+ * necessary, for example, when an ibss merge
+ * causes reconfiguration; there will be a state
+ * transition from RUN->RUN that means we may
+ * be called with beacon transmission active.
+ */
+ ath9k_hw_stoptxdma(ah, sc->sc_bhalq);
+
+ error = ath_beacon_alloc(sc, if_id);
+ if (error != 0)
+ goto bad;
+
+ if (flags & ATH_IF_BEACON_ENABLE)
+ sc->sc_beacons = 0;
+
+ break;
+ case HAL_M_STA:
+ /*
+ * start rx chain mask detection if it is enabled.
+ * Use the default chainmask as starting point.
+ */
+ if (sc->sc_rx_chainmask_detect) {
+ if (flags & ATH_IF_HT)
+ sc->sc_rx_chainmask =
+ ah->ah_caps.halRxChainMask;
+ else
+ sc->sc_rx_chainmask = 1;
+
+ sc->sc_rx_chainmask_start = 1;
+ }
+ break;
+ default:
+ break;
+ }
+ /* Moved beacon_config after dfs_wait check
+ * so that ath_beacon_config won't be called during dfswait
+ * period - this will fix the beacon stuck after DFS
+ * CAC period issue
+ * Configure the beacon and sleep timers. */
+
+ if (!sc->sc_beacons && !(flags & ATH_IF_BEACON_SYNC)) {
+ ath_beacon_config(sc, if_id);
+ sc->sc_beacons = 1;
+ }
+
+ /* Reset rssi stats; maybe not the best place... */
+ if (flags & ATH_IF_HW_ON) {
+ sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
+ }
+/* NB: the success path falls through here with error == 0. */
+bad:
+ return error;
+}
+
+/*
+ * Create and register a new VAP for slot if_id, record its opmode and
+ * initialize its multicast queue and default configuration.  Enables
+ * staggered beacons when the hardware supports TSF adjust and the
+ * opmode transmits beacons.
+ * Returns 0 on success, -EINVAL for a bad id/opmode, -ENOMEM on
+ * allocation failure.
+ */
+int ath_vap_attach(struct ath_softc *sc,
+ int if_id,
+ struct ieee80211_vif *if_data,
+ enum hal_opmode opmode,
+ enum hal_opmode iv_opmode,
+ int nostabeacons)
+{
+ struct ath_vap *avp;
+
+ if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: Invalid interface id = %u\n", __func__, if_id);
+ return -EINVAL;
+ }
+
+ switch (opmode) {
+ case HAL_M_STA:
+ sc->sc_nostabeacons = nostabeacons;
+ break;
+ case HAL_M_IBSS:
+ case HAL_M_MONITOR:
+ break;
+ case HAL_M_HOSTAP:
+ /* copy nostabeacons - for WDS client */
+ sc->sc_nostabeacons = nostabeacons;
+ /* XXX not right, beacon buffer is allocated on RUN trans */
+ if (list_empty(&sc->sc_bbuf))
+ return -ENOMEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* create ath_vap */
+ avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
+ if (avp == NULL)
+ return -ENOMEM;
+
+ memzero(avp, sizeof(struct ath_vap));
+ avp->av_if_data = if_data;
+ /* Set the VAP opmode */
+ avp->av_opmode = iv_opmode;
+ avp->av_bslot = -1;
+ INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
+ INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
+ spin_lock_init(&avp->av_mcastq.axq_lock);
+ if (opmode == HAL_M_HOSTAP || opmode == HAL_M_IBSS) {
+ if (sc->sc_hastsfadd) {
+ /*
+ * Multiple vaps are to transmit beacons and we
+ * have h/w support for TSF adjusting; enable use
+ * of staggered beacons.
+ */
+ /* XXX check for beacon interval too small */
+ sc->sc_stagbeacons = 1;
+ }
+ }
+ if (sc->sc_hastsfadd)
+ ath9k_hw_set_tsfadjust(sc->sc_ah, sc->sc_stagbeacons);
+
+ sc->sc_vaps[if_id] = avp;
+ sc->sc_nvaps++;
+ /* Set the device opmode */
+ sc->sc_opmode = opmode;
+
+ /* default VAP configuration */
+ avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
+ avp->av_config.av_fixed_retryset = 0x03030303;
+
+ return 0;
+}
+
+/*
+ * Tear down the VAP in slot if_id: quiesce tx/rx, drain its multicast
+ * queue, free the VAP and, if other VAPs remain, restart the rx/beacon
+ * machinery.
+ * Returns 0 on success or -EINVAL for an unknown interface id.
+ */
+int ath_vap_detach(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Quiesce the hardware while we remove the vap. In
+ * particular we need to reclaim all references to the
+ * vap state by any frames pending on the tx queues.
+ *
+ * XXX can we do this w/o affecting other vap's?
+ */
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, AH_FALSE); /* stop xmit side */
+ ath_stoprecv(sc); /* stop recv side */
+ ath_flushrecv(sc); /* flush recv queue */
+
+ /* Reclaim any pending mcast bufs on the vap. */
+ ath_tx_draintxq(sc, &avp->av_mcastq, AH_FALSE);
+
+ /* Clear the WDS-client flag if this was the AP carrying it. */
+ if (sc->sc_opmode == HAL_M_HOSTAP && sc->sc_nostabeacons)
+ sc->sc_nostabeacons = 0;
+
+ kfree(avp);
+ sc->sc_vaps[if_id] = NULL;
+ sc->sc_nvaps--;
+
+ /* restart H/W in case there are other VAPs */
+ if (sc->sc_nvaps) {
+ /* Restart rx+tx machines if device is still running. */
+ if (ath_startrecv(sc) != 0) /* restart recv */
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to start recv logic\n", __func__);
+ if (sc->sc_beacons)
+ /* restart beacons */
+ ath_beacon_config(sc, ATH_IF_ID_ANY);
+
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ return 0;
+}
+
+/*
+ * Copy a new configuration into the VAP in slot if_id.
+ *
+ * Returns 0 on success or -EINVAL for a bad interface id or a missing
+ * VAP (consistent with the other ath_vap_* entry points; previously a
+ * NULL VAP silently returned success once the ASSERT was compiled out).
+ */
+int ath_vap_config(struct ath_softc *sc,
+		   int if_id, struct ath_vap_config *if_config)
+{
+	struct ath_vap *avp;
+
+	if (if_id >= ATH_BCBUF) {
+		DPRINTF(sc, ATH_DEBUG_FATAL,
+			"%s: Invalid interface id = %u\n", __func__, if_id);
+		return -EINVAL;
+	}
+
+	avp = sc->sc_vaps[if_id];
+	ASSERT(avp != NULL);
+	if (avp == NULL) {
+		/* Fail loudly in production builds instead of reporting
+		 * success without having copied anything. */
+		DPRINTF(sc, ATH_DEBUG_FATAL, "%s: invalid interface id %u\n",
+			__func__, if_id);
+		return -EINVAL;
+	}
+
+	memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
+
+	return 0;
+}
+
+/********/
+/* Core */
+/********/
+
+/*
+ * Bring the hardware up for use: stop any previous state, do a full
+ * chip reset on initial_chan, start the receive engine and compute
+ * (but do not yet enable) the interrupt mask.
+ * Returns 0 on success or -EIO on reset/rx-start failure.
+ */
+int ath_open(struct ath_softc *sc, struct hal_channel *initial_chan)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ enum hal_status status;
+ int error = 0;
+ enum hal_ht_macmode ht_macmode = ath_cwm_macmode(sc);
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
+
+ /*
+ * Stop anything previously setup. This is safe
+ * whether this is the first time through or not.
+ */
+ ath_stop(sc);
+
+ /* Initialize chanmask selection */
+ sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
+ sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0);
+
+ /*
+ * The basic interface to setting the hardware in a good
+ * state is ``reset''. On return the hardware is known to
+ * be powered up and with interrupts disabled. This must
+ * be followed by initialization of the appropriate bits
+ * and then setup of the interrupt mask.
+ */
+ sc->sc_curchan = *initial_chan;
+
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
+ sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing, AH_FALSE, &status)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to reset hardware; hal status %u "
+ "(freq %u flags 0x%x)\n", __func__, status,
+ sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
+ error = -EIO;
+ spin_unlock_bh(&sc->sc_resetlock);
+ goto done;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+ /*
+ * This is needed only to setup initial state
+ * but it's best done after a reset.
+ */
+ ath_update_txpow(sc, 0);
+
+ /*
+ * Setup the hardware after reset:
+ * The receive engine is set going.
+ * Frame transmit is handled entirely
+ * in the frame output path; there's nothing to do
+ * here except setup the interrupt mask.
+ */
+ if (ath_startrecv(sc) != 0) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to start recv logic\n", __func__);
+ error = -EIO;
+ goto done;
+ }
+ /* Setup our intr mask. */
+ sc->sc_imask = HAL_INT_RX | HAL_INT_TX
+ | HAL_INT_RXEOL | HAL_INT_RXORN
+ | HAL_INT_FATAL | HAL_INT_GLOBAL;
+
+ /* Global transmit timeout interrupt, if the MAC supports it */
+ if (ah->ah_caps.halGTTSupport)
+ sc->sc_imask |= HAL_INT_GTT;
+
+ /* Carrier-sense timeout for 11n hardware */
+ if (sc->sc_hashtsupport)
+ sc->sc_imask |= HAL_INT_CST;
+
+ /*
+ * Enable MIB interrupts when there are hardware phy counters.
+ * Note we only do this (at the moment) for station mode.
+ */
+ if (sc->sc_needmib &&
+ ((sc->sc_opmode == HAL_M_STA) || (sc->sc_opmode == HAL_M_IBSS)))
+ sc->sc_imask |= HAL_INT_MIB;
+ /*
+ * Some hardware processes the TIM IE and fires an
+ * interrupt when the TIM bit is set. For hardware
+ * that does, if not overridden by configuration,
+ * enable the TIM interrupt when operating as station.
+ */
+ if (ah->ah_caps.halEnhancedPmSupport && sc->sc_opmode == HAL_M_STA &&
+ !sc->sc_config.swBeaconProcess)
+ sc->sc_imask |= HAL_INT_TIM;
+ /*
+ * Don't enable interrupts here as we've not yet built our
+ * vap and node data structures, which will be needed as soon
+ * as we start receiving.
+ */
+ ath_chan_change(sc, initial_chan);
+
+ /* XXX: we must make sure h/w is ready and clear invalid flag
+ * before turning on interrupt. */
+ sc->sc_invalid = 0;
+done:
+ return error;
+}
+
+/*
+ * Reset the hardware w/o losing operational state. This is
+ * basically a more efficient way of doing ath_stop, ath_init,
+ * followed by state transitions to the current 802.11
+ * operational state. Used to recover from errors rx overrun
+ * and to reset the hardware when rf gain settings must be reset.
+ */
+
+/*
+ * First half of a chip reset: quiesce the hardware (interrupts off,
+ * tx drained, rx stopped and flushed). RESET_RETRY_TXQ in 'flag'
+ * selects whether pending tx frames are retried after the reset.
+ */
+static int ath_reset_start(struct ath_softc *sc, u_int32_t flag)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
+ ath_stoprecv(sc); /* stop recv side */
+ ath_flushrecv(sc); /* flush recv queue */
+
+ return 0;
+}
+
+/*
+ * Second half of a chip reset: restart rx, refresh channel-dependent
+ * state and tx power, restart beacons, re-enable interrupts, and
+ * (when RESET_RETRY_TXQ is set) kick every configured tx queue.
+ */
+static int ath_reset_end(struct ath_softc *sc, u_int32_t flag)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ if (ath_startrecv(sc) != 0) /* restart recv */
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to start recv logic\n", __func__);
+
+ /*
+ * We may be doing a reset in response to a request
+ * that changes the channel so update any state that
+ * might change as a result.
+ */
+ ath_chan_change(sc, &sc->sc_curchan);
+
+ ath_update_txpow(sc, 0); /* update tx power state */
+
+ if (sc->sc_beacons)
+ ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+
+ /* Restart the txq */
+ if (flag & RESET_RETRY_TXQ) {
+ int i;
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ spin_lock_bh(&sc->sc_txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->sc_txq[i]);
+ spin_unlock_bh(&sc->sc_txq[i].axq_lock);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Perform the actual hardware reset on the current channel, under
+ * sc_resetlock. Callers bracket this with ath_reset_start()/
+ * ath_reset_end(). Returns 0 or -EIO if the HAL reset fails.
+ */
+int ath_reset(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ enum hal_status status;
+ int error = 0;
+ enum hal_ht_macmode ht_macmode = ath_cwm_macmode(sc);
+
+ /* NB: indicate channel change so we do a full reset */
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
+ ht_macmode,
+ sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing, AH_FALSE, &status)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to reset hardware; hal status %u\n",
+ __func__, status);
+ error = -EIO;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+
+ return error;
+}
+
+/*
+ * Put the device to sleep: mask interrupts, mark the softc invalid so
+ * the ISR ignores late interrupts, then disable the HAL and enable
+ * PCI power save. Returns -EIO if the device was already gone.
+ */
+int ath_suspend(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ /* No I/O if device has been surprise removed */
+ if (sc->sc_invalid)
+ return -EIO;
+
+ /* Shut off the interrupt before setting sc->sc_invalid to '1' */
+ ath9k_hw_set_interrupts(ah, 0);
+
+ /* XXX: we must make sure h/w will not generate any interrupt
+ * before setting the invalid flag. */
+ sc->sc_invalid = 1;
+
+ /* disable HAL and put h/w to sleep */
+ ath9k_hw_disable(sc->sc_ah);
+
+ ath9k_hw_configpcipowersave(sc->sc_ah, 1);
+
+ return 0;
+}
+
+/* Interrupt handler. Most of the actual processing is deferred.
+ * It's the caller's responsibility to ensure the chip is awake. */
+
+/*
+ * Hard interrupt handler. Reads and masks the pseudo-ISR, handles the
+ * few things that must be done in hard-IRQ context, and returns
+ * ATH_ISR_SCHED when deferred processing (ath9k_tasklet) is needed,
+ * ATH_ISR_NOTMINE for shared-IRQ noise, ATH_ISR_NOSCHED otherwise.
+ * The do { } while (0) exists only to allow early "return" exits.
+ */
+int ath_intr(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ enum hal_int status;
+ int sched = ATH_ISR_NOSCHED;
+
+ do {
+ if (sc->sc_invalid) {
+ /*
+ * The hardware is not ready/present, don't
+ * touch anything. Note this can happen early
+ * on if the IRQ is shared.
+ */
+ return ATH_ISR_NOTMINE;
+ }
+ if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
+ return ATH_ISR_NOTMINE;
+ }
+
+ /*
+ * Figure out the reason(s) for the interrupt. Note
+ * that the hal returns a pseudo-ISR that may include
+ * bits we haven't explicitly enabled so we mask the
+ * value to insure we only process bits we requested.
+ */
+ ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+
+ status &= sc->sc_imask; /* discard unasked-for bits */
+
+ /*
+ * If there are no status bits set, then this interrupt was not
+ * for me (should have been caught above).
+ */
+
+ if (!status)
+ return ATH_ISR_NOTMINE;
+
+ /* saved for the deferred handler (ath9k_tasklet) */
+ sc->sc_intrstatus = status;
+
+ if (status & HAL_INT_FATAL) {
+ /* need a chip reset */
+ sched = ATH_ISR_SCHED;
+ } else if (status & HAL_INT_RXORN) {
+ /* need a chip reset */
+ sched = ATH_ISR_SCHED;
+ } else {
+ if (status & HAL_INT_SWBA) {
+ /* schedule a tasklet for beacon handling */
+ tasklet_schedule(&sc->bcon_tasklet);
+ }
+ if (status & HAL_INT_RXEOL) {
+ /*
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work
+ * at least on older hardware revs.
+ */
+ sched = ATH_ISR_SCHED;
+ }
+
+ if (status & HAL_INT_TXURN)
+ /* bump tx trigger level */
+ ath9k_hw_updatetxtriglevel(ah, AH_TRUE);
+ /* XXX: optimize this */
+ if (status & HAL_INT_RX)
+ sched = ATH_ISR_SCHED;
+ if (status & HAL_INT_TX)
+ sched = ATH_ISR_SCHED;
+ if (status & HAL_INT_BMISS)
+ sched = ATH_ISR_SCHED;
+ /* carrier sense timeout */
+ if (status & HAL_INT_CST)
+ sched = ATH_ISR_SCHED;
+ if (status & HAL_INT_MIB) {
+ /*
+ * Disable interrupts until we service the MIB
+ * interrupt; otherwise it will continue to
+ * fire.
+ */
+ ath9k_hw_set_interrupts(ah, 0);
+ /*
+ * Let the hal handle the event. We assume
+ * it will clear whatever condition caused
+ * the interrupt.
+ */
+ ath9k_hw_procmibevent(ah, &sc->sc_halstats);
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ if (status & HAL_INT_TIM_TIMER) {
+ if (!sc->sc_hasautosleep) {
+ /* Clear RxAbort bit so that we can
+ * receive frames */
+ ath9k_hw_setrxabort(ah, 0);
+ /* Set flag indicating we're waiting
+ * for a beacon */
+ sc->sc_waitbeacon = 1;
+
+ sched = ATH_ISR_SCHED;
+ }
+ }
+ }
+ } while (0);
+
+ if (sched == ATH_ISR_SCHED)
+ /* turn off every interrupt except SWBA */
+ ath9k_hw_set_interrupts(ah, (sc->sc_imask & HAL_INT_SWBA));
+
+ return sched;
+
+}
+
+/* Deferred interrupt processing */
+
+/*
+ * Bottom-half handler: acts on the ISR bits saved in sc_intrstatus by
+ * ath_intr(). Fatal errors trigger a full internal reset; otherwise
+ * rx and tx completions are processed and interrupts re-enabled.
+ */
+static void ath9k_tasklet(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ u_int32_t status = sc->sc_intrstatus;
+
+ if (status & HAL_INT_FATAL) {
+ /* need a chip reset; interrupts re-enabled by the reset path */
+ ath_internal_reset(sc);
+ return;
+ } else {
+
+ if (status & (HAL_INT_RX | HAL_INT_RXEOL | HAL_INT_RXORN)) {
+ /* XXX: fill me in */
+ /*
+ if (status & HAL_INT_RXORN) {
+ }
+ if (status & HAL_INT_RXEOL) {
+ }
+ */
+ spin_lock_bh(&sc->sc_rxflushlock);
+ ath_rx_tasklet(sc, 0);
+ spin_unlock_bh(&sc->sc_rxflushlock);
+ }
+ /* XXX: optimize this */
+ if (status & HAL_INT_TX)
+ ath_tx_tasklet(sc);
+ /* XXX: fill me in */
+ /*
+ if (status & HAL_INT_BMISS) {
+ }
+ if (status & (HAL_INT_TIM | HAL_INT_DTIMSYNC)) {
+ if (status & HAL_INT_TIM) {
+ }
+ if (status & HAL_INT_DTIMSYNC) {
+ }
+ }
+ */
+ }
+
+ /* re-enable hardware interrupt */
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
+}
+
+/* Switch the MAC between HT20/HT40 operation via the HAL. */
+void ath_set_macmode(struct ath_softc *sc, enum hal_ht_macmode macmode)
+{
+ ath9k_hw_set11nmac2040(sc->sc_ah, macmode);
+}
+
+/*
+ * One-time attach-side initialization: attach the HAL, reset the key
+ * cache, build the channel list and rate tables, allocate the beacon/
+ * CAB/WMM tx queues, attach rate control, and probe capabilities
+ * (HT, diversity, bssid-mask, TKIP split-mic, ...).
+ * Returns 0 on success or a negative errno; on failure all tx queues
+ * and the HAL attachment are torn down again.
+ */
+int ath_init(u_int16_t devid, struct ath_softc *sc)
+{
+ struct ath_hal *ah = NULL;
+ enum hal_status status;
+ int error = 0, i;
+ int csz = 0;
+ u_int32_t rd;
+
+ /* XXX: hardware will not be ready until ath_open() being called */
+ sc->sc_invalid = 1;
+
+ sc->sc_debug = DBG_DEFAULT;
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
+
+ /* Initialize tasklet */
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ bus_read_cachesize(sc, &csz);
+ /* XXX assert csz is non-zero */
+ sc->sc_cachelsz = csz << 2; /* convert to bytes */
+
+ spin_lock_init(&sc->sc_resetlock);
+
+ ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
+ if (ah == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to attach hardware; HAL status %u\n",
+ __func__, status);
+ error = -ENXIO;
+ goto bad;
+ }
+ sc->sc_ah = ah;
+
+ /* Get the chipset-specific aggr limit. */
+ sc->sc_rtsaggrlimit = ah->ah_caps.halRtsAggrLimit;
+
+ /*
+ * Check if the MAC has multi-rate retry support.
+ * We do this by trying to setup a fake extended
+ * descriptor. MAC's that don't have support will
+ * return false w/o doing anything. MAC's that do
+ * support it will return true w/o doing anything.
+ *
+ * XXX This is lame. Just query a hal property, Luke!
+ */
+ sc->sc_mrretry = ath9k_hw_setupxtxdesc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
+ /*
+ * Check if the device has hardware counters for PHY
+ * errors. If so we need to enable the MIB interrupt
+ * so we can act on stat triggers.
+ */
+ if (ath9k_hw_phycounters(ah))
+ sc->sc_needmib = 1;
+
+ /* Get the hardware key cache size. */
+ sc->sc_keymax = ah->ah_caps.halKeyCacheSize;
+ if (sc->sc_keymax > ATH_KEYMAX) {
+ DPRINTF(sc, ATH_DEBUG_KEYCACHE,
+ "%s: Warning, using only %u entries in %u key cache\n",
+ __func__, ATH_KEYMAX, sc->sc_keymax);
+ sc->sc_keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < sc->sc_keymax; i++)
+ ath9k_hw_keyreset(ah, (u_int16_t) i);
+ /*
+ * Mark key cache slots associated with global keys
+ * as in use. If we knew TKIP was not to be used we
+ * could leave the +32, +64, and +32+64 slots free.
+ * XXX only for splitmic.
+ */
+ for (i = 0; i < IEEE80211_WEP_NKID; i++) {
+ set_bit(i, sc->sc_keymap);
+ set_bit(i + 32, sc->sc_keymap);
+ set_bit(i + 64, sc->sc_keymap);
+ set_bit(i + 32 + 64, sc->sc_keymap);
+ }
+ /*
+ * Collect the channel list using the default country
+ * code and including outdoor channels. The 802.11 layer
+ * is responsible for filtering this list based on settings
+ * like the phy mode.
+ */
+ rd = ah->ah_currentRD;
+
+ error = ath_getchannels(sc,
+ CTRY_DEFAULT,
+ ath_outdoor,
+ 1);
+ if (error)
+ goto bad;
+
+ /* default to MONITOR mode until the stack configures an interface */
+ sc->sc_opmode = HAL_M_MONITOR;
+
+ /* Setup rate tables for all potential media types. */
+ /* 11g encompasses b,g */
+
+ ath_rate_setup(sc, WIRELESS_MODE_11a);
+ ath_rate_setup(sc, WIRELESS_MODE_11g);
+
+ /* NB: setup here so ath_rate_update is happy */
+ ath_setcurmode(sc, WIRELESS_MODE_11a);
+
+ /*
+ * Allocate hardware transmit queues: one queue for
+ * beacon frames and one data queue for each QoS
+ * priority. Note that the hal handles resetting
+ * these queues at the needed time.
+ */
+ sc->sc_bhalq = ath_beaconq_setup(ah);
+ if (sc->sc_bhalq == -1) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup a beacon xmit queue\n", __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
+ if (sc->sc_cabq == NULL) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup CAB xmit queue\n", __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
+ sc->sc_haltype2q[i] = -1;
+
+ /* Setup data queues */
+ /* NB: ensure BK queue is the lowest priority h/w queue */
+ if (!ath_tx_setup(sc, HAL_WME_AC_BK)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup xmit queue for BK traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ if (!ath_tx_setup(sc, HAL_WME_AC_BE)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup xmit queue for BE traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ if (!ath_tx_setup(sc, HAL_WME_AC_VI)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup xmit queue for VI traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ if (!ath_tx_setup(sc, HAL_WME_AC_VO)) {
+ DPRINTF(sc, ATH_DEBUG_FATAL,
+ "%s: unable to setup xmit queue for VO traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ if (ah->ah_caps.halHTSupport)
+ sc->sc_hashtsupport = 1;
+
+ sc->sc_rc = ath_rate_attach(ah);
+ if (sc->sc_rc == NULL) {
+ /* was "error = EIO"; all other failure paths here return
+ * a negative errno, so keep the convention consistent */
+ error = -EIO;
+ goto bad2;
+ }
+
+ if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+ sc->sc_hasclrkey = ath9k_hw_getcapability(ah, HAL_CAP_CIPHER,
+ HAL_CIPHER_CLR, NULL);
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(ah, HAL_CAP_CIPHER, HAL_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(ah, HAL_CAP_TKIP_SPLIT, 0, NULL))
+ sc->sc_splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (ath9k_hw_getcapability(ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL)
+ == HAL_OK)
+ (void)ath9k_hw_setcapability(ah, HAL_CAP_MCAST_KEYSRCH, 1,
+ 1, NULL);
+
+ sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
+ sc->sc_config.txpowlimit_override = 0;
+
+ /* 11n Capabilities */
+ if (sc->sc_hashtsupport) {
+ sc->sc_txaggr = 1;
+ sc->sc_rxaggr = 1;
+ }
+
+ /* Check for misc other capabilities. */
+ sc->sc_hasbmask = ah->ah_caps.halBssIdMaskSupport ? 1 : 0;
+ sc->sc_hastsfadd =
+ ath9k_hw_getcapability(ah, HAL_CAP_TSF_ADJUST, 0, NULL);
+
+ /*
+ * If we cannot transmit on three chains, prevent chain mask
+ * selection logic from switching between 2x2 and 3x3 chain
+ * masks based on RSSI.
+ */
+ sc->sc_no_tx_3_chains =
+ (ah->ah_caps.halTxChainMask == ATH_CHAINMASK_SEL_3X3) ?
+ AH_TRUE : AH_FALSE;
+ sc->sc_config.chainmask_sel = sc->sc_no_tx_3_chains;
+
+ sc->sc_tx_chainmask = ah->ah_caps.halTxChainMask;
+ sc->sc_rx_chainmask = ah->ah_caps.halRxChainMask;
+
+ /* Configuration for rx chain detection */
+ sc->sc_rxchaindetect_ref = 0;
+ sc->sc_rxchaindetect_thresh5GHz = 35;
+ sc->sc_rxchaindetect_thresh2GHz = 35;
+ sc->sc_rxchaindetect_delta5GHz = 30;
+ sc->sc_rxchaindetect_delta2GHz = 30;
+
+ /*
+ * Query the hal about antenna support
+ * Enable rx fast diversity if hal has support
+ */
+ if (ath9k_hw_getcapability(ah, HAL_CAP_DIVERSITY, 0, NULL)) {
+ sc->sc_hasdiversity = 1;
+ ath9k_hw_setcapability(ah, HAL_CAP_DIVERSITY,
+ 1, AH_TRUE, NULL);
+ sc->sc_diversity = 1;
+ } else {
+ sc->sc_hasdiversity = 0;
+ sc->sc_diversity = 0;
+ ath9k_hw_setcapability(ah, HAL_CAP_DIVERSITY,
+ 1, AH_FALSE, NULL);
+ }
+ sc->sc_defant = ath9k_hw_getdefantenna(ah);
+
+ /*
+ * Not all chips have the VEOL support we want to
+ * use with IBSS beacons; check here for it.
+ */
+ sc->sc_hasveol = ah->ah_caps.halVEOLSupport;
+
+ ath9k_hw_getmac(ah, sc->sc_myaddr);
+ if (sc->sc_hasbmask) {
+ ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
+ ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
+ ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
+ }
+ sc->sc_hasautosleep = ah->ah_caps.halAutoSleepSupport;
+ sc->sc_waitbeacon = 0;
+ sc->sc_slottime = HAL_SLOT_TIME_9; /* default to short slot time */
+
+ /* initialize beacon slots */
+ for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
+ sc->sc_bslot[i] = ATH_IF_ID_ANY;
+
+ /* save MISC configurations */
+ sc->sc_config.swBeaconProcess = 1;
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ sc->sc_slowAntDiv = 1;
+ /* range is 40 - 255, we use something in the middle */
+ ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
+#else
+ sc->sc_slowAntDiv = 0;
+#endif
+
+ return 0;
+bad2:
+ /* cleanup tx queues */
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+bad:
+ if (ah)
+ ath9k_hw_detach(ah);
+ return error;
+}
+
+/*
+ * Detach-side teardown: stop the hardware, wake it (if still present)
+ * so register access works, then release rate control, tx queues and
+ * the HAL attachment. Mirrors ath_init().
+ */
+void ath_deinit(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int i;
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s\n", __func__);
+
+ ath_stop(sc);
+ if (!sc->sc_invalid)
+ ath9k_hw_setpower(sc->sc_ah, HAL_PM_AWAKE);
+ ath_rate_detach(sc->sc_rc);
+ /* cleanup tx queues */
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+ ath9k_hw_detach(ah);
+}
+
+/*******************/
+/* Node Management */
+/*******************/
+
+/*
+ * Allocate and initialize per-station state for 'addr' on VAP if_id
+ * and link it onto sc->node_list with an initial refcount of 1.
+ * Returns NULL on allocation failure.
+ */
+struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
+{
+ struct ath_vap *avp;
+ struct ath_node *an;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp != NULL);
+
+ /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
+ an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
+ if (an == NULL)
+ return NULL;
+ memzero(an, sizeof(*an));
+
+ an->an_sc = sc;
+ memcpy(an->an_addr, addr, ETH_ALEN);
+ atomic_set(&an->an_refcnt, 1);
+
+ /* set up per-node tx/rx state */
+ ath_tx_node_init(sc, an);
+ ath_rx_node_init(sc, an);
+
+ ath_chainmask_sel_init(sc, an);
+ ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
+ /* NOTE(review): list_add is done without sc->node_lock, while
+ * ath_node_detach takes that lock for list_del — verify callers
+ * serialize attach, or this insert may race with removal. */
+ list_add(&an->list, &sc->node_list);
+
+ DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p for: %s\n",
+ __func__, an, print_mac(mac, addr));
+
+ return an;
+}
+
+/*
+ * Tear down per-station state: stop chainmask timer, clean and free
+ * tx/rx node state, unlink from node_list (under node_lock) and free.
+ * bh_flag is forwarded to the tx cleanup path.
+ */
+void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
+{
+ unsigned long flags;
+
+ DECLARE_MAC_BUF(mac);
+
+ ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
+ /* mark node dead so in-flight paths stop using it */
+ an->an_flags |= ATH_NODE_CLEAN;
+ ath_tx_node_cleanup(sc, an, bh_flag);
+ ath_rx_node_cleanup(sc, an);
+
+ ath_tx_node_free(sc, an);
+ ath_rx_node_free(sc, an);
+
+ spin_lock_irqsave(&sc->node_lock, flags);
+
+ list_del(&an->list);
+
+ spin_unlock_irqrestore(&sc->node_lock, flags);
+
+ DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p for: %s\n",
+ __func__, an, print_mac(mac, an->an_addr));
+
+ kfree(an);
+}
+
+/* Finds a node and increases the refcnt if found */
+
+/*
+ * Look up a node by MAC address and take a reference on it.
+ * Returns NULL if not found.
+ * NOTE(review): the list walk is not under sc->node_lock here —
+ * presumably callers provide the needed serialization; verify.
+ */
+struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
+{
+ struct ath_node *an = NULL, *an_found = NULL;
+
+ if (list_empty(&sc->node_list)) /* FIXME */
+ goto out;
+ list_for_each_entry(an, &sc->node_list, list) {
+ if (!compare_ether_addr(an->an_addr, addr)) {
+ atomic_inc(&an->an_refcnt);
+ an_found = an;
+ break;
+ }
+ }
+out:
+ return an_found;
+}
+
+/* Decrements the refcnt and if it drops to zero, detach the node */
+
+/* Drop one reference; frees the node via ath_node_detach at zero. */
+void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
+{
+ if (atomic_dec_and_test(&an->an_refcnt))
+ ath_node_detach(sc, an, bh_flag);
+}
+
+/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
+/*
+ * Look up a node by MAC address WITHOUT taking a reference.
+ * Caller must hold sc->node_lock. Returns NULL if not found.
+ */
+struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
+{
+ struct ath_node *an = NULL, *an_found = NULL;
+
+ if (list_empty(&sc->node_list))
+ return NULL;
+
+ /* compare_ether_addr returns 0 on match */
+ list_for_each_entry(an, &sc->node_list, list)
+ if (!compare_ether_addr(an->an_addr, addr)) {
+ an_found = an;
+ break;
+ }
+
+ return an_found;
+}
+
+/*
+ * Set up New Node
+ *
+ * Setup driver-specific state for a newly associated node. This routine
+ * really only applies if compression or XR are enabled, there is no code
+ * covering any other cases.
+*/
+
+/*
+ * Handle (re)association of a station. On reassociation (isnew == 0)
+ * tear down any existing tx/rx aggregation state for every TID, then
+ * clear the node flags. isuapsd is currently unused here.
+ */
+void ath_newassoc(struct ath_softc *sc,
+ struct ath_node *an, int isnew, int isuapsd)
+{
+ int tidno;
+
+ /* if station reassociates, tear down the aggregation state. */
+ if (!isnew) {
+ for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
+ if (sc->sc_txaggr)
+ ath_tx_aggr_teardown(sc, an, tidno);
+ if (sc->sc_rxaggr)
+ ath_rx_aggr_teardown(sc, an, tidno);
+ }
+ }
+ an->an_flags = 0;
+}
+
+/**************/
+/* Encryption */
+/**************/
+/*
+ * Clear a hardware key cache entry; if freeslot is set, also return
+ * the slot to the allocator bitmap (sc_keymap).
+ */
+void ath_key_reset(struct ath_softc *sc, u_int16_t keyix, int freeslot)
+{
+ ath9k_hw_keyreset(sc->sc_ah, keyix);
+ if (freeslot)
+ clear_bit(keyix, sc->sc_keymap);
+}
+
+/*
+ * Program one hardware key cache entry.
+ * Returns non-zero on success, 0 on failure (C-boolean of HAL result).
+ */
+int ath_keyset(struct ath_softc *sc,
+ u_int16_t keyix,
+ struct hal_keyval *hk,
+ const u_int8_t mac[ETH_ALEN])
+{
+ enum hal_bool status;
+
+ status = ath9k_hw_set_keycache_entry(sc->sc_ah,
+ keyix, hk, mac, AH_FALSE);
+
+ return status != AH_FALSE;
+}
+
+/***********************/
+/* TX Power/Regulatory */
+/***********************/
+
+/*
+ * Set Transmit power in HAL
+ *
+ * This routine makes the actual HAL calls to set the new transmit power
+ * limit. This also calls back into the protocol layer setting the max
+ * transmit power limit.
+*/
+
+/*
+ * Push the configured tx power limit (or its override) into the HAL
+ * if it changed, read back the possibly-clamped value, and report the
+ * current/max levels up to the protocol layer.
+ */
+void ath_update_txpow(struct ath_softc *sc, u_int16_t tpcInDb)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u_int32_t txpow, txpowlimit;
+
+ txpowlimit = (sc->sc_config.txpowlimit_override) ?
+ sc->sc_config.txpowlimit_override : sc->sc_config.txpowlimit;
+
+ if (sc->sc_curtxpow != txpowlimit) {
+ ath9k_hw_SetTxPowerLimit(ah, txpowlimit, tpcInDb);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, HAL_CAP_TXPOW, 1, &txpow);
+ sc->sc_curtxpow = txpow;
+ }
+
+ /* Fetch max tx power level and update protocol stack */
+ ath9k_hw_getcapability(ah, HAL_CAP_TXPOW, 2, &txpow);
+
+ ath__update_txpow(sc, sc->sc_curtxpow, txpow);
+}
+
+/* Return the current country and domain information */
+void ath_get_currentCountry(struct ath_softc *sc,
+ struct hal_country_entry *ctry)
+{
+ ath9k_regd_get_current_country(sc->sc_ah, ctry);
+
+ /* If HAL not specific yet, since it is band dependent,
+ * use the one we passed in. */
+ if (ctry->countryCode == CTRY_DEFAULT) {
+ ctry->iso[0] = 0;
+ ctry->iso[1] = 0;
+ } else if (ctry->iso[0] && ctry->iso[1]) {
+ if (!ctry->iso[2]) {
+ if (ath_outdoor)
+ ctry->iso[2] = 'O';
+ else
+ ctry->iso[2] = 'I';
+ }
+ }
+}
+
+/**************************/
+/* Slow Antenna Diversity */
+/**************************/
+
+/*
+ * Initialize slow antenna diversity state, clamping the RSSI trigger
+ * into its valid [40, 0xff] range.
+ */
+void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
+ struct ath_softc *sc,
+ int32_t rssitrig)
+{
+ int trig;
+
+ /* antdivf_rssitrig can range from 40 - 0xff.
+ * BUG FIX: the second clamp previously read from 'rssitrig'
+ * again, discarding the upper clamp so values > 0xff leaked
+ * through; clamp the intermediate 'trig' instead. */
+ trig = (rssitrig > 0xff) ? 0xff : rssitrig;
+ trig = (trig < 40) ? 40 : trig;
+
+ antdiv->antdiv_sc = sc;
+ antdiv->antdivf_rssitrig = trig;
+}
+
+/*
+ * Arm slow antenna diversity for the given BSSID: reset the state
+ * machine to IDLE on config 0 and cap the number of antenna configs
+ * at ATH_ANT_DIV_MAX_CFG.
+ */
+void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
+ u_int8_t num_antcfg,
+ const u_int8_t *bssid)
+{
+ antdiv->antdiv_num_antcfg =
+ num_antcfg < ATH_ANT_DIV_MAX_CFG ?
+ num_antcfg : ATH_ANT_DIV_MAX_CFG;
+ antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
+ antdiv->antdiv_curcfg = 0;
+ antdiv->antdiv_bestcfg = 0;
+ antdiv->antdiv_laststatetsf = 0;
+
+ memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
+
+ antdiv->antdiv_start = 1;
+}
+
+/* Disarm slow antenna diversity; ath_slow_ant_div becomes a no-op. */
+void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
+{
+ antdiv->antdiv_start = 0;
+}
+
+/*
+ * Return the maximum of num_val int32_t values and store its index in
+ * *max_index. num_val must be >= 1.
+ * BUG FIX: the accumulator was previously u_int32_t, so negative
+ * inputs compared as huge unsigned values and could wrongly win;
+ * keep it signed to match the element and return types.
+ */
+static int32_t ath_find_max_val(int32_t *val,
+ u_int8_t num_val, u_int8_t *max_index)
+{
+ int32_t MaxVal = *val++;
+ u_int32_t cur_index = 0;
+
+ *max_index = 0;
+ while (++cur_index < num_val) {
+ if (*val > MaxVal) {
+ MaxVal = *val;
+ *max_index = cur_index;
+ }
+
+ val++;
+ }
+
+ return MaxVal;
+}
+
+/*
+ * Slow antenna diversity state machine, driven by beacons from our
+ * BSSID. In IDLE, a weak beacon (below the RSSI trigger) after
+ * ATH_ANT_DIV_MIN_IDLE_US starts a SCAN over antenna configs; in
+ * SCAN, each config is sampled for ATH_ANT_DIV_MIN_SCAN_US and the
+ * best-RSSI config is selected when the scan wraps around.
+ */
+void ath_slow_ant_div(struct ath_antdiv *antdiv,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rx_stats)
+{
+ struct ath_softc *sc = antdiv->antdiv_sc;
+ struct ath_hal *ah = sc->sc_ah;
+ u_int64_t curtsf = 0;
+ u_int8_t bestcfg, curcfg = antdiv->antdiv_curcfg;
+ __le16 fc = hdr->frame_control;
+
+ /* only beacons from our BSSID advance the state machine */
+ if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
+ && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
+ antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
+ antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
+ curtsf = antdiv->antdiv_lastbtsf[curcfg];
+ } else {
+ return;
+ }
+
+ switch (antdiv->antdiv_state) {
+ case ATH_ANT_DIV_IDLE:
+ if ((antdiv->antdiv_lastbrssi[curcfg] <
+ antdiv->antdivf_rssitrig)
+ && ((curtsf - antdiv->antdiv_laststatetsf) >
+ ATH_ANT_DIV_MIN_IDLE_US)) {
+
+ /* advance to the next config (wrap around) */
+ curcfg++;
+ if (curcfg == antdiv->antdiv_num_antcfg)
+ curcfg = 0;
+
+ if (HAL_OK == ath9k_hw_select_antconfig(ah, curcfg)) {
+ antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
+ antdiv->antdiv_curcfg = curcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
+ }
+ }
+ break;
+
+ case ATH_ANT_DIV_SCAN:
+ /* keep sampling the current config until its dwell expires */
+ if ((curtsf - antdiv->antdiv_laststatetsf) <
+ ATH_ANT_DIV_MIN_SCAN_US)
+ break;
+
+ curcfg++;
+ if (curcfg == antdiv->antdiv_num_antcfg)
+ curcfg = 0;
+
+ if (curcfg == antdiv->antdiv_bestcfg) {
+ /* scan wrapped: commit the best-RSSI config */
+ ath_find_max_val(antdiv->antdiv_lastbrssi,
+ antdiv->antdiv_num_antcfg, &bestcfg);
+ if (HAL_OK == ath9k_hw_select_antconfig(ah, bestcfg)) {
+ antdiv->antdiv_bestcfg = bestcfg;
+ antdiv->antdiv_curcfg = bestcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
+ }
+ } else {
+ if (HAL_OK == ath9k_hw_select_antconfig(ah, curcfg)) {
+ antdiv->antdiv_curcfg = curcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
+ }
+ }
+
+ break;
+ }
+}
+
+/***********************/
+/* Descriptor Handling */
+/***********************/
+
+/*
+ * Set up DMA descriptors
+ *
+ * This function will allocate both the DMA descriptor structure, and the
+ * buffers it contains. These are used to contain the descriptors used
+ * by the system.
+*/
+
+/*
+ * Allocate a DMA-coherent descriptor pool (nbuf buffers of ndesc
+ * descriptors each) plus the ath_buf array referencing it, and link
+ * the buffers onto 'head'. On chips without 4KB split-transaction
+ * support, extra memory is reserved and descriptors that would cross
+ * a 4KB boundary are skipped. Returns 0 or -ENOMEM (dd is zeroed on
+ * failure).
+ */
+int ath_descdma_setup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head,
+ const char *name,
+ int nbuf,
+ int ndesc)
+{
+/* byte offset of a descriptor within the pool -> its bus address */
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
+ __func__, name, nbuf, ndesc);
+
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ DPRINTF(sc, ATH_DEBUG_FATAL, "%s: ath_desc not DWORD aligned\n",
+ __func__);
+ ASSERT((sizeof(struct ath_desc) % 4) == 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_name = name;
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
+ u_int32_t ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u_int32_t dma_len;
+
+ /* iterate: the padding itself may span further 4K pages */
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = pci_alloc_consistent(sc->pdev,
+ dd->dd_desc_len,
+ &dd->dd_desc_paddr);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ DPRINTF(sc, ATH_DEBUG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
+ __func__, dd->dd_name, ds, (u_int32_t) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u_int32_t) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kmalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ memzero(bf, bsize);
+ dd->dd_bufptr = bf;
+
+ INIT_LIST_HEAD(head);
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->ah_caps.hal4kbSplitTransSupport)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ ASSERT((caddr_t) bf->bf_desc <
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ pci_free_consistent(sc->pdev,
+ dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
+fail:
+ memzero(dd, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
+
+/*
+ * Cleanup DMA descriptors
+ *
+ * This function will free the DMA block that was allocated for the descriptor
+ * pool. Since this was allocated as one "chunk", it is freed in the same
+ * manner.
+*/
+
+/*
+ * Free a descriptor pool created by ath_descdma_setup: release the
+ * DMA block and the ath_buf array, reset the buffer list and zero dd.
+ */
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ /* Free memory associated with descriptors */
+ pci_free_consistent(sc->pdev,
+ dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memzero(dd, sizeof(*dd));
+}
+
+/*
+ * Endian Swap for transmit descriptor
+ *
+ * XXX: Move cpu_to_le32() into hw.c and anywhere we set them, then
+ * remove this.
+*/
+/*
+ * Byte-swap the host-written words of a tx descriptor to little
+ * endian in place (no-op on LE hosts).
+ * XXX: Move cpu_to_le32() into hw.c and anywhere we set them, then
+ * remove this.
+ */
+void ath_desc_swap(struct ath_desc *ds)
+{
+ ds->ds_link = cpu_to_le32(ds->ds_link);
+ ds->ds_data = cpu_to_le32(ds->ds_data);
+ ds->ds_ctl0 = cpu_to_le32(ds->ds_ctl0);
+ ds->ds_ctl1 = cpu_to_le32(ds->ds_ctl1);
+ ds->ds_hw[0] = cpu_to_le32(ds->ds_hw[0]);
+ ds->ds_hw[1] = cpu_to_le32(ds->ds_hw[1]);
+}
+
+/*************/
+/* Utilities */
+/*************/
+
+/* Full driver-initiated chip reset: quiesce, reset, restart. */
+void ath_internal_reset(struct ath_softc *sc)
+{
+ ath_reset_start(sc, 0);
+ ath_reset(sc);
+ ath_reset_end(sc, 0);
+}
+
+/* Recompute the rx filter from current state and program the HAL. */
+void ath_setrxfilter(struct ath_softc *sc)
+{
+ u_int32_t rxfilt;
+
+ rxfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(sc->sc_ah, rxfilt);
+}
+
+/*
+ * Map a mac80211 queue number (0=VO .. 3=BK) to the HAL tx queue
+ * number recorded in sc_haltype2q; unknown queues fall back to BE.
+ */
+int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
+{
+ int qnum;
+
+ switch (queue) {
+ case 0:
+ qnum = sc->sc_haltype2q[HAL_WME_AC_VO];
+ break;
+ case 1:
+ qnum = sc->sc_haltype2q[HAL_WME_AC_VI];
+ break;
+ case 2:
+ qnum = sc->sc_haltype2q[HAL_WME_AC_BE];
+ break;
+ case 3:
+ qnum = sc->sc_haltype2q[HAL_WME_AC_BK];
+ break;
+ default:
+ qnum = sc->sc_haltype2q[HAL_WME_AC_BE];
+ break;
+ }
+
+ return qnum;
+}
+
+/*
+ * Inverse of ath_get_hal_qnum: map a HAL WME access category back to
+ * the mac80211 queue number (VO=0 .. BK=3), or -1 if unrecognized.
+ */
+int ath_get_mac80211_qnum(u_int queue, struct ath_softc *sc)
+{
+ int qnum;
+
+ switch (queue) {
+ case HAL_WME_AC_VO:
+ qnum = 0;
+ break;
+ case HAL_WME_AC_VI:
+ qnum = 1;
+ break;
+ case HAL_WME_AC_BE:
+ qnum = 2;
+ break;
+ case HAL_WME_AC_BK:
+ qnum = 3;
+ break;
+ default:
+ qnum = -1;
+ break;
+ }
+
+ return qnum;
+}
+
+
+/*
+ * Expand time stamp to TSF
+ *
+ * Extend 15-bit time stamp from rx descriptor to
+ * a full 64-bit TSF using the current h/w TSF.
+*/
+
+/*
+ * Extend a 15-bit rx timestamp to a full 64-bit TSF using the current
+ * hardware TSF; steps back one 0x8000 window if the low bits have
+ * already wrapped past the rx stamp.
+ */
+u_int64_t ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp)
+{
+ u_int64_t tsf;
+
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ if ((tsf & 0x7fff) < rstamp)
+ tsf -= 0x8000;
+ return (tsf & ~0x7fff) | rstamp;
+}
+
+/*
+ * Set Default Antenna
+ *
+ * Call into the HAL to set the default antenna to use. Not really valid for
+ * MIMO technology.
+*/
+
+void ath_setdefantenna(void *context, u_int antenna)
+{
+ struct ath_softc *sc = (struct ath_softc *)context;
+ struct ath_hal *ah = sc->sc_ah;
+
+ /* XXX block beacon interrupts */
+ ath9k_hw_setantenna(ah, antenna);
+ sc->sc_defant = antenna;
+ /* presumably a count of frames seen on the non-default antenna,
+  * restarted here -- verify against the rx path */
+ sc->sc_rxotherant = 0;
+}
+
+/*
+ * Set Slot Time
+ *
+ * This will wake up the chip if required, and set the slot time for the
+ * frame (maximum transmit time). Slot time is assumed to be already set
+ * in the ATH object member sc_slottime
+*/
+
+void ath_setslottime(struct ath_softc *sc)
+{
+ ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
+ /* mark the pending slot-time update as applied */
+ sc->sc_updateslot = OK;
+}
--- /dev/null
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CORE_H
+#define CORE_H
+
+#include <linux/version.h>
+#include <linux/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <asm/byteorder.h>
+#include <linux/scatterlist.h>
+#include <asm/page.h>
+#include <net/mac80211.h>
+
+#include "ath9k.h"
+#include "rc.h"
+
+struct ath_node;
+
+/******************/
+/* Utility macros */
+/******************/
+
+/* An attempt will be made to merge these link list helpers upstream
+ * instead */
+
+/* Link the whole chain of @list in front of @head's sentinel, i.e.
+ * append it at the tail of @head.  Caller guarantees @list is
+ * non-empty; @list's own sentinel is left untouched. */
+static inline void __list_splice_tail(const struct list_head *list,
+ struct list_head *head)
+{
+ struct list_head *seg_first = list->next;
+ struct list_head *seg_last = list->prev;
+ struct list_head *old_tail = head->prev;
+
+ seg_first->prev = old_tail;
+ old_tail->next = seg_first;
+ seg_last->next = head;
+ head->prev = seg_last;
+}
+
+/* Move the chain head->next .. entry onto @list (which becomes a list
+ * headed by its own sentinel), leaving @head with the remainder.
+ * Caller guarantees @entry is a member of @head, @head is non-empty,
+ * and @entry != @head.  Store order below is significant. */
+static inline void __list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ struct list_head *new_first =
+ (entry->next != head) ? entry->next : head;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+
+/**
+ * list_splice_tail - join two lists, each list being a queue
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Empty @list is a no-op.
+ */
+static inline void list_splice_tail(const struct list_head *list,
+ struct list_head *head)
+{
+ if (list_empty(list))
+ return;
+
+ __list_splice_tail(list, head);
+}
+
+/**
+ * list_splice_tail_init - join two lists, each list being a queue, and
+ * reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised.  Empty @list is a no-op.
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (list_empty(list))
+ return;
+
+ __list_splice_tail(list, head);
+ INIT_LIST_HEAD(list);
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ * and if so we won't cut the list
+ */
+static inline void list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ BUG_ON(list_empty(head));
+ /* A single-entry list can only be cut at that entry or at @head. */
+ if (list_is_singular(head))
+ BUG_ON(head->next != entry && head != entry);
+ /* Cutting at @head moves nothing: @list just becomes empty. */
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
+
+/* Macro to expand scalars to 64-bit objects */
+/* NOTE(review): sizeof() yields a byte count, so the == 16 / == 32
+ * comparisons below read like bit-widths and can never match scalar
+ * types (2- and 4-byte scalars fall through to the final unmasked
+ * cast), and the 8-byte arm masks with only 0xff.  Confirm the
+ * intended masking before relying on anything but the plain cast. */
+
+#define ito64(x) (sizeof(x) == 8) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 16) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 32) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
+
+/* increment with wrap-around; _sz must be a power of two */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
+/* decrement with wrap-around; _sz must be a power of two */
+#define DECR(_l, _sz) do { \
+ (_l)--; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
+/* evaluates each argument twice -- avoid side effects */
+#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define ASSERT(exp) do { \
+ if (unlikely(!(exp))) { \
+ BUG(); \
+ } \
+ } while (0)
+
+/* msg must be a fully parenthesized printk() argument list,
+ * e.g. KASSERT(x, ("bad x: %d\n", x)) */
+#define KASSERT(exp, msg) do { \
+ if (unlikely(!(exp))) { \
+ printk msg; \
+ BUG(); \
+ } \
+ } while (0)
+
+/* XXX: remove */
+#define memzero(_buf, _len) memset(_buf, 0, _len)
+
+#define get_dma_mem_context(var, field) (&((var)->field))
+#define copy_dma_mem_context(dst, src) (*dst = *src)
+
+#define ATH9K_BH_STATUS_INTACT 0
+#define ATH9K_BH_STATUS_CHANGE 1
+
+/* true if h/w tx queue i has been set up (bit i of sc_txqsetup) */
+#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
+
+/*
+ * Current uptime in milliseconds, derived from jiffies.
+ *
+ * The previous open-coded form
+ *   ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ)
+ * truncates (1000 / HZ) to 0 whenever HZ > 1000 and loses precision
+ * whenever HZ does not divide 1000; jiffies_to_msecs() handles every
+ * HZ configuration correctly.
+ */
+static inline unsigned long get_timestamp(void)
+{
+ return jiffies_to_msecs(jiffies);
+}
+
+/*************/
+/* Debugging */
+/*************/
+
+enum ATH_DEBUG {
+ ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
+ ATH_DEBUG_RECV = 0x00000002, /* basic recv operation */
+ ATH_DEBUG_BEACON = 0x00000004, /* beacon handling */
+ ATH_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */
+ ATH_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */
+ ATH_DEBUG_BEACON_PROC = 0x00000020, /* beacon ISR proc */
+ ATH_DEBUG_RATE = 0x00000040, /* rate control */
+ ATH_DEBUG_CONFIG = 0x00000080, /* configuration */
+ ATH_DEBUG_KEYCACHE = 0x00000100, /* key cache management */
+ ATH_DEBUG_NODE = 0x00000200, /* node management */
+ ATH_DEBUG_AGGR = 0x00000400, /* Aggregation */
+ ATH_DEBUG_CWM = 0x00000800, /* Channel Width Management */
+ ATH_DEBUG_FATAL = 0x00001000, /* fatal errors */
+ ATH_DEBUG_ANY = 0xffffffff
+};
+
+#define DBG_DEFAULT (ATH_DEBUG_FATAL)
+
+/* Print only when the matching bit is set in sc_debug.  Note the
+ * printk() is issued without an explicit KERN_* level. */
+#define DPRINTF(sc, _m, _fmt, ...) do { \
+ if (sc->sc_debug & (_m)) \
+ printk(_fmt , ##__VA_ARGS__); \
+ } while (0)
+
+/***************************/
+/* Load-time Configuration */
+/***************************/
+
+/* Per-instance load-time (note: NOT run-time) configurations
+ * for Atheros Device */
+struct ath_config {
+ u_int8_t chainmask_sel; /* enable automatic tx chainmask selection */
+ u_int32_t ath_aggr_prot;
+ u_int16_t txpowlimit;
+ u_int16_t txpowlimit_override;
+ u_int8_t cabqReadytime; /* Cabq Readytime % */
+ u_int8_t swBeaconProcess; /* Process received beacons
+ in SW (vs HW) */
+};
+
+/***********************/
+/* Chainmask Selection */
+/***********************/
+
+#define ATH_CHAINMASK_SEL_TIMEOUT 6000
+/* Default - Number of last RSSI values that is used for
+ * chainmask selection */
+#define ATH_CHAINMASK_SEL_RSSI_CNT 10
+/* Means use 3x3 chainmask instead of configured chainmask */
+#define ATH_CHAINMASK_SEL_3X3 7
+/* Default - Rssi threshold below which we have to switch to 3x3 */
+#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
+/* Default - Rssi threshold above which we have to switch to
+ * user configured values */
+#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
+/* Struct to store the chainmask select related info */
+struct ath_chainmask_sel {
+ struct timer_list timer;
+ int cur_tx_mask; /* user configured or 3x3 */
+ int cur_rx_mask; /* user configured or 3x3 */
+ int tx_avgrssi;
+ u8 switch_allowed:1, /* timer will set this */
+ cm_sel_enabled:1;
+};
+
+int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
+
+/*************************/
+/* Descriptor Management */
+/*************************/
+
+/* Number of descriptors per buffer. The only case where we see skbuff
+chains is due to FF aggregation in the driver. */
+#define ATH_TXDESC 1
+/* if there's more fragment for this MSDU */
+#define ATH_BF_MORE_MPDU 1
+/* Reset a tx buffer for reuse.  _bf is evaluated multiple times --
+ * pass a plain pointer, not an expression with side effects. */
+#define ATH_TXBUF_RESET(_bf) do { \
+ (_bf)->bf_status = 0; \
+ (_bf)->bf_lastbf = NULL; \
+ (_bf)->bf_lastfrm = NULL; \
+ (_bf)->bf_next = NULL; \
+ memzero(&((_bf)->bf_state), \
+ sizeof(struct ath_buf_state)); \
+ } while (0)
+
+struct ath_buf_state {
+ int bfs_nframes; /* # frames in aggregate */
+ u_int16_t bfs_al; /* length of aggregate */
+ u_int16_t bfs_frmlen; /* length of frame */
+ int bfs_seqno; /* sequence number */
+ int bfs_tidno; /* tid of this frame */
+ int bfs_retries; /* current retries */
+ struct ath_rc_series bfs_rcs[4]; /* rate series */
+ u8 bfs_isdata:1; /* is a data frame/aggregate */
+ u8 bfs_isaggr:1; /* is an aggregate */
+ u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
+ u8 bfs_ht:1; /* is an HT frame */
+ u8 bfs_isretried:1; /* is retried */
+ u8 bfs_isxretried:1; /* is excessive retried */
+ u8 bfs_shpreamble:1; /* is short preamble */
+ u8 bfs_isbar:1; /* is a BAR */
+ u8 bfs_ispspoll:1; /* is a PS-Poll */
+ u8 bfs_aggrburst:1; /* is a aggr burst */
+ u8 bfs_calcairtime:1; /* requests airtime be calculated
+ when set for tx frame */
+ int bfs_rifsburst_elem; /* RIFS burst/bar */
+ int bfs_nrifsubframes; /* # of elements in burst */
+ enum hal_key_type bfs_keytype; /* key type use to encrypt this frame */
+};
+
+/* Shorthand accessors: bf_x resolves to bf_state.bfs_x on an ath_buf. */
+#define bf_nframes bf_state.bfs_nframes
+#define bf_al bf_state.bfs_al
+#define bf_frmlen bf_state.bfs_frmlen
+#define bf_retries bf_state.bfs_retries
+#define bf_seqno bf_state.bfs_seqno
+#define bf_tidno bf_state.bfs_tidno
+#define bf_rcs bf_state.bfs_rcs
+#define bf_isdata bf_state.bfs_isdata
+#define bf_isaggr bf_state.bfs_isaggr
+#define bf_isampdu bf_state.bfs_isampdu
+#define bf_ht bf_state.bfs_ht
+#define bf_isretried bf_state.bfs_isretried
+#define bf_isxretried bf_state.bfs_isxretried
+#define bf_shpreamble bf_state.bfs_shpreamble
+#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
+#define bf_nrifsubframes bf_state.bfs_nrifsubframes
+#define bf_keytype bf_state.bfs_keytype
+#define bf_isbar bf_state.bfs_isbar
+#define bf_ispspoll bf_state.bfs_ispspoll
+#define bf_aggrburst bf_state.bfs_aggrburst
+#define bf_calcairtime bf_state.bfs_calcairtime
+
+/*
+ * Abstraction of a contiguous buffer to transmit/receive. There is only
+ * a single hw descriptor encapsulated here.
+ */
+
+struct ath_buf {
+ struct list_head list;
+ struct list_head *last;
+ struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
+ an aggregate) */
+ struct ath_buf *bf_lastfrm; /* last buf of this frame */
+ struct ath_buf *bf_next; /* next subframe in the aggregate */
+ struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
+ void *bf_mpdu; /* enclosing frame structure */
+ void *bf_node; /* pointer to the node */
+ struct ath_desc *bf_desc; /* virtual addr of desc */
+ dma_addr_t bf_daddr; /* physical addr of desc */
+ dma_addr_t bf_buf_addr; /* physical addr of data buffer */
+ u_int32_t bf_status;
+ u_int16_t bf_flags; /* tx descriptor flags */
+ struct ath_buf_state bf_state; /* buffer state */
+ dma_addr_t bf_dmacontext;
+};
+
+/*
+ * reset the rx buffer.
+ * any new fields added to the athbuf and require
+ * reset need to be added to this macro.
+ * currently bf_status is the only one that
+ * requires reset.
+ */
+#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
+
+/* hw processing complete, desc processed by hal */
+#define ATH_BUFSTATUS_DONE 0x00000001
+/* hw processing complete, desc hold for hw */
+#define ATH_BUFSTATUS_STALE 0x00000002
+/* Rx-only: OS is done with this packet and it's ok to queued it to hw */
+#define ATH_BUFSTATUS_FREE 0x00000004
+
+/* DMA state for tx/rx descriptors */
+
+struct ath_descdma {
+ const char *dd_name;
+ struct ath_desc *dd_desc; /* descriptors */
+ dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
+ u_int32_t dd_desc_len; /* size of dd_desc */
+ struct ath_buf *dd_bufptr; /* associated buffers */
+ dma_addr_t dd_dmacontext;
+};
+
+/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */
+
+/* Stored in the skb control buffer (skb->cb). */
+struct ath_rx_context {
+ struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */
+};
+#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
+
+int ath_descdma_setup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head,
+ const char *name,
+ int nbuf,
+ int ndesc);
+int ath_desc_alloc(struct ath_softc *sc);
+void ath_desc_free(struct ath_softc *sc);
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head);
+void ath_desc_swap(struct ath_desc *ds);
+
+/******/
+/* RX */
+/******/
+
+#define ATH_MAX_ANTENNA 3
+#define ATH_RXBUF 512
+#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
+#define WME_NUM_TID 16
+/* NOTE(review): with mask 0xF000 the BAR TID_INFO field occupies bits
+ * 12-15, so a shift of 2 looks inconsistent -- confirm how the users
+ * combine the mask and shift before changing either value. */
+#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
+#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */
+
+enum ATH_RX_TYPE {
+ ATH_RX_NON_CONSUMED = 0,
+ ATH_RX_CONSUMED
+};
+
+/* per frame rx status block */
+struct ath_recv_status {
+ u_int64_t tsf; /* mac tsf */
+ int8_t rssi; /* RSSI (noise floor adjusted) */
+ int8_t rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int8_t rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int8_t abs_rssi; /* absolute RSSI */
+ u_int8_t rateieee; /* data rate received (IEEE rate code) */
+ u_int8_t ratecode; /* phy rate code */
+ int rateKbps; /* data rate received (Kbps) */
+ int antenna; /* rx antenna */
+ int flags; /* status of associated skb */
+#define ATH_RX_FCS_ERROR 0x01
+#define ATH_RX_MIC_ERROR 0x02
+#define ATH_RX_DECRYPT_ERROR 0x04
+#define ATH_RX_RSSI_VALID 0x08
+/* if any of ctl,extn chainrssis are valid */
+#define ATH_RX_CHAIN_RSSI_VALID 0x10
+/* if extn chain rssis are valid */
+#define ATH_RX_RSSI_EXTN_VALID 0x20
+/* set if 40Mhz, clear if 20Mhz */
+#define ATH_RX_40MHZ 0x40
+/* set if short GI, clear if full GI */
+#define ATH_RX_SHORT_GI 0x80
+};
+
+struct ath_rxbuf {
+ struct sk_buff *rx_wbuf; /* buffer */
+ unsigned long rx_time; /* system time when received */
+ struct ath_recv_status rx_status; /* cached rx status */
+};
+
+/* Per-TID aggregate receiver state for a node */
+struct ath_arx_tid {
+ struct ath_node *an; /* parent ath node */
+ struct ath_rxbuf *rxbuf; /* re-ordering buffer */
+ struct timer_list timer;
+ spinlock_t tidlock; /* lock to protect this TID structure */
+ int baw_head; /* seq_next at head */
+ int baw_tail; /* tail of block-ack window */
+ int seq_reset; /* need to reset start sequence */
+ int addba_exchangecomplete;
+ u_int16_t seq_next; /* next expected sequence */
+ u_int16_t baw_size; /* block-ack window size */
+};
+
+/* Per-node receiver aggregate state */
+struct ath_arx {
+ struct ath_arx_tid tid[WME_NUM_TID];
+};
+
+void ath_setrxfilter(struct ath_softc *sc);
+int ath_startrecv(struct ath_softc *sc);
+enum hal_bool ath_stoprecv(struct ath_softc *sc);
+void ath_flushrecv(struct ath_softc *sc);
+u_int32_t ath_calcrxfilter(struct ath_softc *sc);
+void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
+void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
+void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
+void ath_handle_rx_intr(struct ath_softc *sc);
+int ath_rx_init(struct ath_softc *sc, int nbufs);
+void ath_rx_cleanup(struct ath_softc *sc);
+int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_input(struct ath_softc *sc,
+ struct ath_node *node,
+ int is_ampdu,
+ struct sk_buff *skb,
+ struct ath_recv_status *rx_status,
+ enum ATH_RX_TYPE *status);
+int ath__rx_indicate(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_recv_status *status,
+ u_int16_t keyix);
+int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
+ struct ath_recv_status *status);
+
+/******/
+/* TX */
+/******/
+
+#define ATH_FRAG_PER_MSDU 1
+#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
+/* max number of transmit attempts (tries) */
+#define ATH_TXMAXTRY 13
+/* max number of 11n transmit attempts (tries) */
+#define ATH_11N_TXMAXTRY 10
+/* max number of tries for management and control frames */
+#define ATH_MGT_TXMAXTRY 4
+#define WME_BA_BMP_SIZE 64
+#define WME_MAX_BA WME_BA_BMP_SIZE
+#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
+/* Map a TID (0..15) to its WME access category. */
+#define TID_TO_WME_AC(_tid) \
+ ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
+ WME_AC_VO)
+
+
+/* Wireless Multimedia Extension Defines */
+#define WME_AC_BE 0 /* best effort */
+#define WME_AC_BK 1 /* background */
+#define WME_AC_VI 2 /* video */
+#define WME_AC_VO 3 /* voice */
+#define WME_NUM_AC 4
+
+enum ATH_SM_PWRSAV{
+ ATH_SM_ENABLE,
+ ATH_SM_PWRSAV_STATIC,
+ ATH_SM_PWRSAV_DYNAMIC,
+};
+
+/*
+ * Data transmit queue state. One of these exists for each
+ * hardware transmit queue. Packets sent to us from above
+ * are assigned to queues based on their priority. Not all
+ * devices support a complete set of hardware transmit queues.
+ * For those devices the array sc_ac2q will map multiple
+ * priorities to fewer hardware queues (typically all to one
+ * hardware queue).
+ */
+struct ath_txq {
+ u_int axq_qnum; /* hardware q number */
+ u_int32_t *axq_link; /* link ptr in last TX desc */
+ struct list_head axq_q; /* transmit queue */
+ spinlock_t axq_lock; /* lock on q and link */
+ unsigned long axq_lockflags; /* intr state when must cli */
+ u_int axq_depth; /* queue depth */
+ u_int8_t axq_aggr_depth; /* aggregates queued */
+ u_int32_t axq_totalqueued;/* total ever queued */
+ u_int axq_intrcnt; /* count to determine
+ if descriptor should generate
+ int on this txq. */
+ bool stopped; /* Is mac80211 queue
+ stopped ? */
+ /* State for patching up CTS when bursting */
+ struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
+ struct ath_desc *axq_lastdsWithCTS; /* first desc of the
+ last descriptor that contains CTS */
+ struct ath_desc *axq_gatingds; /* final desc of the gating desc
+ * that determines whether lastdsWithCTS has
+ * been DMA'ed or not */
+ struct list_head axq_acq;
+};
+
+/* per TID aggregate tx state for a destination */
+struct ath_atx_tid {
+ struct list_head list; /* round-robin tid entry */
+ struct list_head buf_q; /* pending buffers */
+ struct ath_node *an; /* parent node structure */
+ struct ath_atx_ac *ac; /* parent access category */
+ struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];/* active tx frames */
+ u_int16_t seq_start; /* starting seq of BA window */
+ u_int16_t seq_next; /* next seq to be used */
+ u_int16_t baw_size; /* BA window size */
+ int tidno; /* TID number */
+ int baw_head; /* first un-acked tx buffer */
+ int baw_tail; /* next unused tx buffer slot */
+ int sched; /* TID is scheduled */
+ int paused; /* TID is paused */
+ int cleanup_inprogress; /* aggr of this TID is
+ being torn down */
+ u_int32_t addba_exchangecomplete:1; /* ADDBA state */
+ int32_t addba_exchangeinprogress;
+ int addba_exchangeattempts;
+};
+
+/* per access-category aggregate tx state for a destination */
+struct ath_atx_ac {
+ int sched; /* dest-ac is scheduled */
+ int qnum; /* H/W queue number associated
+ with this AC */
+ struct list_head list; /* round-robin txq entry */
+ struct list_head tid_q; /* queue of TIDs with buffers */
+};
+
+/* per dest tx state */
+struct ath_atx {
+ struct ath_atx_tid tid[WME_NUM_TID];
+ struct ath_atx_ac ac[WME_NUM_AC];
+};
+
+/* per-frame tx control block */
+struct ath_tx_control {
+ struct ath_node *an; /* destination to sent to */
+ int if_id; /* only valid for cab traffic */
+ int qnum; /* h/w queue number */
+ u_int ht:1; /* if it can be transmitted using HT */
+ u_int ps:1; /* if one or more stations are in PS mode */
+ u_int use_minrate:1; /* if this frame should transmitted using
+ minimum rate */
+ enum hal_pkt_type atype; /* Atheros packet type */
+ enum hal_key_type keytype; /* key type */
+ u_int flags; /* HAL flags */
+ u_int16_t seqno; /* sequence number */
+ u_int16_t tidno; /* tid number */
+ u_int16_t txpower; /* transmit power */
+ u_int16_t frmlen; /* frame length */
+ u_int32_t keyix; /* key index */
+ int min_rate; /* minimum rate */
+ int mcast_rate; /* multicast rate */
+ u_int16_t nextfraglen; /* next fragment length */
+ /* below is set only by ath_dev */
+ struct ath_softc *dev; /* device handle */
+ dma_addr_t dmacontext;
+};
+
+/* per frame tx status block */
+struct ath_xmit_status {
+ int retries; /* number of retries to successfully
+ transmit this frame */
+ int flags; /* status of transmit */
+#define ATH_TX_ERROR 0x01
+#define ATH_TX_XRETRY 0x02
+#define ATH_TX_BAR 0x04
+};
+
+struct ath_tx_stat {
+ int rssi; /* RSSI (noise floor adjusted) */
+ int rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int rateieee; /* data rate xmitted (IEEE rate code) */
+ int rateKbps; /* data rate xmitted (Kbps) */
+ int ratecode; /* phy rate code */
+ int flags; /* validity flags */
+/* if any of ctl,extn chain rssis are valid */
+#define ATH_TX_CHAIN_RSSI_VALID 0x01
+/* if extn chain rssis are valid */
+#define ATH_TX_RSSI_EXTN_VALID 0x02
+ u_int32_t airtime; /* time on air per final tx rate */
+};
+
+struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
+int ath_tx_setup(struct ath_softc *sc, int haltype);
+void ath_draintxq(struct ath_softc *sc, enum hal_bool retry_tx);
+void ath_tx_draintxq(struct ath_softc *sc,
+ struct ath_txq *txq, enum hal_bool retry_tx);
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
+void ath_tx_node_cleanup(struct ath_softc *sc,
+ struct ath_node *an, bool bh_flag);
+void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
+int ath_tx_init(struct ath_softc *sc, int nbufs);
+int ath_tx_cleanup(struct ath_softc *sc);
+int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+int ath_txq_update(struct ath_softc *sc, int qnum, struct hal_txq_info *q);
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
+void ath_tx_tasklet(struct ath_softc *sc);
+u_int32_t ath_txq_depth(struct ath_softc *sc, int qnum);
+u_int32_t ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
+void ath_notify_txq_status(struct ath_softc *sc, u_int16_t queue_depth);
+void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_xmit_status *tx_status, struct ath_node *an);
+
+/**********************/
+/* Node / Aggregation */
+/**********************/
+
+/* indicates the node is cleaned up */
+#define ATH_NODE_CLEAN 0x1
+/* indicates the node is 80211 power save */
+#define ATH_NODE_PWRSAVE 0x2
+
+#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
+#define ADDBA_EXCHANGE_ATTEMPTS 10
+#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
+#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
+/* number of delimiters for encryption padding */
+#define ATH_AGGR_ENCRYPTDELIM 10
+/* minimum h/w qdepth to be sustained to maximize aggregation */
+#define ATH_AGGR_MIN_QDEPTH 2
+#define ATH_AMPDU_SUBFRAME_DEFAULT 32
+#define IEEE80211_SEQ_SEQ_SHIFT 4
+#define IEEE80211_SEQ_MAX 4096
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+
+/* return whether a bit at index _n in bitmap _bm is set
+ * _sz is the size of the bitmap */
+#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
+ ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
+
+/* return block-ack bitmap index given sequence and starting sequence */
+#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+
+/* returns delimiter padding required given the packet length */
+#define ATH_AGGR_GET_NDELIM(_len) \
+ (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
+ (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
+
+/* true if _seqno lies inside the BA window starting at _start */
+#define BAW_WITHIN(_start, _bawsz, _seqno) \
+ ((((_seqno) - (_start)) & 4095) < (_bawsz))
+
+#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
+#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
+#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & HAL_TX_BA)
+#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
+
+enum ATH_AGGR_STATUS {
+ ATH_AGGR_DONE,
+ ATH_AGGR_BAW_CLOSED,
+ ATH_AGGR_LIMITED,
+ ATH_AGGR_SHORTPKT,
+ ATH_AGGR_8K_LIMITED,
+};
+
+enum ATH_AGGR_CHECK {
+ AGGR_NOT_REQUIRED,
+ AGGR_REQUIRED,
+ AGGR_CLEANUP_PROGRESS,
+ AGGR_EXCHANGE_PROGRESS,
+ AGGR_EXCHANGE_DONE
+};
+
+struct aggr_rifs_param {
+ int param_max_frames;
+ int param_max_len;
+ int param_rl;
+ int param_al;
+ struct ath_rc_series *param_rcs;
+};
+
+/* Per-node aggregation state */
+struct ath_node_aggr {
+ struct ath_atx tx; /* node transmit state */
+ struct ath_arx rx; /* node receive state */
+};
+
+/* driver-specific node state */
+struct ath_node {
+ struct list_head list;
+ struct ath_softc *an_sc; /* back pointer */
+ atomic_t an_refcnt;
+ struct ath_chainmask_sel an_chainmask_sel;
+ struct ath_node_aggr an_aggr; /* A-MPDU aggregation state */
+ u_int8_t an_smmode; /* SM Power save mode */
+ u_int8_t an_flags;
+ u8 an_addr[ETH_ALEN];
+};
+
+void ath_tx_resume_tid(struct ath_softc *sc,
+ struct ath_atx_tid *tid);
+enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
+ struct ath_node *an, u8 tidno);
+void ath_tx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u_int8_t tidno);
+void ath_rx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u_int8_t tidno);
+int ath_rx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn);
+int ath_rx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid);
+int ath_tx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn);
+int ath_tx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid);
+void ath_newassoc(struct ath_softc *sc,
+ struct ath_node *node, int isnew, int isuapsd);
+struct ath_node *ath_node_attach(struct ath_softc *sc,
+ u_int8_t addr[ETH_ALEN], int if_id);
+void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
+struct ath_node *ath_node_get(struct ath_softc *sc, u_int8_t addr[ETH_ALEN]);
+void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
+struct ath_node *ath_node_find(struct ath_softc *sc, u_int8_t *addr);
+
+/*******************/
+/* Beacon Handling */
+/*******************/
+
+/*
+ * Regardless of the number of beacons we stagger, (i.e. regardless of the
+ * number of BSSIDs) if a given beacon does not go out even after waiting this
+ * number of beacon intervals, the game's up.
+ * (ATH_BCBUF is defined just below; macros expand at use time.)
+ */
+#define BSTUCK_THRESH (9 * ATH_BCBUF)
+#define ATH_BCBUF 4 /* number of beacon buffers */
+#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
+#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
+#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+
+/* beacon configuration */
+struct ath_beacon_config {
+ u_int16_t beacon_interval;
+ u_int16_t listen_interval;
+ u_int16_t dtim_period;
+ u_int16_t bmiss_timeout;
+ u_int8_t dtim_count;
+ u_int8_t tim_offset;
+ union {
+ u_int64_t last_tsf;
+ u_int8_t last_tstamp[8];
+ } u; /* last received beacon/probe response timestamp of this BSS. */
+};
+
+/* offsets in a beacon frame for
+ * quick access of beacon content by low-level driver */
+struct ath_beacon_offset {
+ u_int8_t *bo_tim; /* start of atim/dtim */
+};
+
+void ath9k_beacon_tasklet(unsigned long data);
+void ath_beacon_config(struct ath_softc *sc, int if_id);
+int ath_beaconq_setup(struct ath_hal *ah);
+int ath_beacon_alloc(struct ath_softc *sc, int if_id);
+void ath_bstuck_process(struct ath_softc *sc);
+void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
+void ath_beacon_free(struct ath_softc *sc);
+void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
+void ath_beacon_sync(struct ath_softc *sc, int if_id);
+void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
+void ath_get_beaconconfig(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_config *conf);
+struct sk_buff *ath_get_beacon(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_offset *bo,
+ struct ath_tx_control *txctl);
+int ath_update_beacon(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_offset *bo,
+ struct sk_buff *skb,
+ int mcast);
+/********/
+/* VAPs */
+/********/
+
+#define ATH_IF_HW_OFF 0x0001 /* hardware state needs to turn off */
+#define ATH_IF_HW_ON 0x0002 /* hardware state needs to turn on */
+/* STA only: the associated AP is HT capable */
+#define ATH_IF_HT 0x0004
+/* AP/IBSS only: current BSS has privacy on */
+#define ATH_IF_PRIVACY 0x0008
+#define ATH_IF_BEACON_ENABLE 0x0010 /* AP/IBSS only: enable beacon */
+#define ATH_IF_BEACON_SYNC 0x0020 /* IBSS only: need to sync beacon */
+
+/*
+ * Define the scheme that we select MAC address for multiple
+ * BSS on the same radio. The very first VAP will just use the MAC
+ * address from the EEPROM. For the next 3 VAPs, we set the
+ * U/L bit (bit 1) in MAC address, and use the next two bits as the
+ * index of the VAP.
+ */
+
+#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
+ ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
+
+/* VAP configuration (from protocol layer) */
+struct ath_vap_config {
+ u_int32_t av_fixed_rateset;
+ u_int32_t av_fixed_retryset;
+};
+
+/* driver-specific vap state */
+struct ath_vap {
+ struct ieee80211_vif *av_if_data; /* interface(vap)
+ instance from 802.11 protocol layer */
+ enum hal_opmode av_opmode; /* VAP operational mode */
+ struct ath_buf *av_bcbuf; /* beacon buffer */
+ struct ath_beacon_offset av_boff; /* dynamic update state */
+ struct ath_tx_control av_btxctl; /* tx control information
+ for beacon */
+ int av_bslot; /* beacon slot index */
+ struct ath_txq av_mcastq; /* multicast
+ transmit queue */
+ struct ath_vap_config av_config; /* vap configuration
+ parameters from 802.11 protocol layer*/
+};
+
+int ath_vap_attach(struct ath_softc *sc,
+ int if_id,
+ struct ieee80211_vif *if_data,
+ enum hal_opmode opmode,
+ enum hal_opmode iv_opmode,
+ int nostabeacons);
+int ath_vap_detach(struct ath_softc *sc, int if_id);
+int ath_vap_config(struct ath_softc *sc,
+ int if_id, struct ath_vap_config *if_config);
+int ath_vap_down(struct ath_softc *sc, int if_id, u_int flags);
+int ath_vap_listen(struct ath_softc *sc, int if_id);
+int ath_vap_join(struct ath_softc *sc,
+ int if_id,
+ const u_int8_t bssid[ETH_ALEN],
+ u_int flags);
+int ath_vap_up(struct ath_softc *sc,
+ int if_id,
+ const u_int8_t bssid[ETH_ALEN],
+ u_int8_t aid,
+ u_int flags);
+
+/*********************/
+/* Antenna diversity */
+/*********************/
+
+#define ATH_ANT_DIV_MAX_CFG 2
+#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
+#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
+
+enum ATH_ANT_DIV_STATE{
+ ATH_ANT_DIV_IDLE,
+ ATH_ANT_DIV_SCAN, /* evaluating antenna */
+};
+
+struct ath_antdiv {
+ struct ath_softc *antdiv_sc;
+ u_int8_t antdiv_start;
+ enum ATH_ANT_DIV_STATE antdiv_state;
+ u_int8_t antdiv_num_antcfg;
+ u_int8_t antdiv_curcfg;
+ u_int8_t antdiv_bestcfg;
+ int32_t antdivf_rssitrig; /* NOTE(review): "antdivf_" looks like a
+ typo for "antdiv_" -- renaming would
+ touch all users, so it is only flagged */
+ int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
+ u_int64_t antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
+ u_int64_t antdiv_laststatetsf;
+ u_int8_t antdiv_bssid[ETH_ALEN];
+};
+
+void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
+ struct ath_softc *sc, int32_t rssitrig);
+void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
+ u_int8_t num_antcfg,
+ const u_int8_t *bssid);
+void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
+void ath_slow_ant_div(struct ath_antdiv *antdiv,
+ struct ieee80211_hdr *wh,
+ struct ath_rx_status *rx_stats);
+void ath_setdefantenna(void *sc, u_int antenna);
+
+/********************/
+/* Main driver core */
+/********************/
+
+/*
+ * Default cache line size, in bytes.
+ * Used when PCI device not fully initialized by bootrom/BIOS
+*/
+#define DEFAULT_CACHELINE 32
+#define ATH_DEFAULT_NOISE_FLOOR -95
+#define ATH_REGCLASSIDS_MAX 10
+#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
+#define ATH_PREAMBLE_SHORT (1<<0)
+#define ATH_PROTECT_ENABLE (1<<1)
+#define ATH_MAX_SW_RETRIES 10
+/* Number of tx frames difference needed to flip the default recv antenna */
+#define ATH_ANTENNA_DIFF 2
+#define ATH_CHAN_MAX 255
+#define IEEE80211_WEP_NKID 4 /* number of key ids */
+#define IEEE80211_RATE_VAL 0x7f
+/*
+ * The key cache is used for h/w cipher state and also for
+ * tracking station state such as the current tx antenna.
+ * We also setup a mapping table between key cache slot indices
+ * and station state to short-circuit node lookups on rx.
+ * Different parts have different size key caches. We handle
+ * up to ATH_KEYMAX entries (could dynamically allocate state).
+ */
+#define ATH_KEYMAX 128 /* max key cache size we handle */
+#define ATH_KEYBYTES (ATH_KEYMAX/NBBY) /* storage space in bytes */
+
+#define RESET_RETRY_TXQ 0x00000001
+#define ATH_IF_ID_ANY 0xff
+
+#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
+
+#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half */
+/* Schedule the bottom half for execution */
+#define ATH_ISR_SCHED 0x0001
+/* This was not my interrupt, for shared IRQ's */
+#define ATH_ISR_NOTMINE 0x0002
+
+#define RSSI_LPF_THRESHOLD -20
+#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
+#define ATH_RATE_DUMMY_MARKER 0
+#define ATH_RSSI_LPF_LEN 10
+#define ATH_RSSI_DUMMY_MARKER 0x127
+
+#define ATH_EP_MUL(x, mul) ((x) * (mul))
+#define ATH_EP_RND(x, mul) \
+ ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+#define ATH_RSSI_OUT(x) \
+ (((x) != ATH_RSSI_DUMMY_MARKER) ? \
+ (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
+#define ATH_RSSI_IN(x) \
+ (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
+#define ATH_LPF_RSSI(x, y, len) \
+ ((x != ATH_RSSI_DUMMY_MARKER) ? \
+ (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define ATH_RSSI_LPF(x, y) do { \
+ if ((y) >= RSSI_LPF_THRESHOLD) \
+ x = ATH_LPF_RSSI((x), \
+ ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
+ } while (0)
+
+
+/* Frame protection mode used when transmitting. */
+enum PROT_MODE {
+ PROT_M_NONE = 0, /* no protection */
+ PROT_M_RTSCTS, /* RTS/CTS exchange */
+ PROT_M_CTSONLY /* CTS-to-self */
+};
+
+/* Channel-list update commands (see ath_setup_channel_list()). */
+enum ieee80211_clist_cmd {
+ CLIST_UPDATE,
+ CLIST_DFS_UPDATE,
+ CLIST_NEW_COUNTRY
+};
+
+/* Rate table variant selector (see ath_setup_rate()). */
+enum RATE_TYPE {
+ NORMAL_RATE = 0,
+ HALF_RATE, /* half-rate channel operation */
+ QUARTER_RATE /* quarter-rate channel operation */
+};
+
+/* Snapshot of current HT (802.11n) operating parameters. */
+struct ath_ht_info {
+ enum hal_ht_macmode tx_chan_width; /* HT20/HT40 tx channel width */
+ u_int16_t maxampdu; /* peer's max A-MPDU length */
+ u_int8_t mpdudensity; /* min MPDU start spacing */
+ u_int8_t ext_chan_offset; /* extension channel offset */
+};
+
+struct ath_softc {
+ struct ieee80211_hw *hw; /* mac80211 instance */
+ struct pci_dev *pdev; /* Bus handle */
+ void __iomem *mem; /* address of the device */
+ struct tasklet_struct intr_tq; /* General tasklet */
+ struct tasklet_struct bcon_tasklet; /* Beacon tasklet */
+ struct ath_config sc_config; /* per-instance load-time
+ parameters */
+ int sc_debug; /* Debug masks */
+ struct ath_hal *sc_ah; /* HAL Instance */
+ struct ath_rate_softc *sc_rc; /* tx rate control support */
+ u_int32_t sc_intrstatus; /* HAL_STATUS */
+ enum hal_opmode sc_opmode; /* current operating mode */
+
+ /* Properties, Config */
+ unsigned int
+ sc_invalid : 1, /* being detached */
+ sc_mrretry : 1, /* multi-rate retry support */
+ sc_needmib : 1, /* enable MIB stats intr */
+ sc_hasdiversity : 1, /* rx diversity available */
+ sc_diversity : 1, /* enable rx diversity */
+ sc_hasveol : 1, /* tx VEOL support */
+ sc_beacons : 1, /* beacons running */
+ sc_hasbmask : 1, /* bssid mask support */
+ sc_hastsfadd : 1, /* tsf adjust support */
+ sc_scanning : 1, /* scanning active */
+ sc_nostabeacons : 1, /* no beacons for station */
+ sc_hasclrkey : 1, /* CLR key supported */
+ sc_stagbeacons : 1, /* use staggered beacons */
+ sc_txaggr : 1, /* enable 11n tx aggregation */
+ sc_rxaggr : 1, /* enable 11n rx aggregation */
+ sc_hasautosleep : 1, /* automatic sleep after TIM */
+ sc_waitbeacon : 1, /* waiting for first beacon
+ after waking up */
+ sc_no_tx_3_chains : 1, /* user, hardware, regulatory
+ or country may disallow transmit on
+ three chains. */
+ sc_update_chainmask : 1, /* change chain mask */
+ sc_rx_chainmask_detect : 1, /* enable rx chain mask detection */
+ sc_rx_chainmask_start : 1, /* start rx chain mask detection */
+ sc_hashtsupport : 1, /* supports 11n */
+ sc_full_reset : 1, /* force full reset */
+ sc_slowAntDiv : 1; /* enable slow antenna diversity */
+ enum wireless_mode sc_curmode; /* current phy mode */
+ u_int16_t sc_curtxpow; /* current tx power limit */
+ u_int16_t sc_curaid; /* current association id */
+ u_int8_t sc_curbssid[ETH_ALEN];
+ u_int8_t sc_myaddr[ETH_ALEN];
+ enum PROT_MODE sc_protmode; /* protection mode */
+ u_int8_t sc_mcastantenna;/* Multicast antenna number */
+ u_int8_t sc_txantenna; /* data tx antenna
+ (fixed or auto) */
+ u_int8_t sc_nbcnvaps; /* # of vaps sending beacons */
+ u_int16_t sc_nvaps; /* # of active virtual ap's */
+ struct ath_vap *sc_vaps[ATH_BCBUF]; /* interface id
+ to avp map */
+ enum hal_int sc_imask; /* interrupt mask copy */
+ u_int8_t sc_bssidmask[ETH_ALEN];
+ u_int8_t sc_defant; /* current default antenna */
+ u_int8_t sc_rxotherant; /* rx's on non-default antenna*/
+ u_int16_t sc_cachelsz; /* cache line size */
+ int sc_slotupdate; /* slot to next advance fsm */
+ int sc_slottime; /* slot time */
+ u_int8_t sc_noreset;
+ int sc_bslot[ATH_BCBUF];/* beacon xmit slots */
+ struct hal_node_stats sc_halstats; /* station-mode rssi stats */
+ struct list_head node_list;
+ struct ath_ht_info sc_ht_info;
+ int16_t sc_noise_floor; /* signal noise floor in dBm */
+ enum hal_ht_extprotspacing sc_ht_extprotspacing;
+ u_int8_t sc_tx_chainmask;
+ u_int8_t sc_rx_chainmask;
+ u_int8_t sc_rxchaindetect_ref;
+ u_int8_t sc_rxchaindetect_thresh5GHz;
+ u_int8_t sc_rxchaindetect_thresh2GHz;
+ u_int8_t sc_rxchaindetect_delta5GHz;
+ u_int8_t sc_rxchaindetect_delta2GHz;
+ u_int32_t sc_rtsaggrlimit; /* Chipset specific
+ aggr limit */
+ u32 sc_flags;
+#ifdef CONFIG_SLOW_ANT_DIV
+ /* Slow antenna diversity */
+ struct ath_antdiv sc_antdiv;
+#endif
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } sc_updateslot; /* slot time update fsm */
+
+ /* Crypto */
+ u_int sc_keymax; /* size of key cache */
+ u_int8_t sc_keymap[ATH_KEYBYTES];/* key use bit map */
+ u_int8_t sc_splitmic; /* split TKIP MIC keys */
+ int sc_keytype; /* type of the key being used */
+
+ /* RX */
+ struct list_head sc_rxbuf; /* receive buffer */
+ struct ath_descdma sc_rxdma; /* RX descriptors */
+ int sc_rxbufsize; /* rx size based on mtu */
+ u_int32_t *sc_rxlink; /* link ptr in last RX desc */
+ u_int32_t sc_rxflush; /* rx flush in progress */
+ u_int64_t sc_lastrx; /* tsf of last rx'd frame */
+
+ /* TX */
+ struct list_head sc_txbuf; /* transmit buffer */
+ struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
+ struct ath_descdma sc_txdma; /* TX descriptors */
+ u_int sc_txqsetup; /* h/w queues setup */
+ u_int sc_txintrperiod;/* tx interrupt batching */
+ int sc_haltype2q[HAL_WME_AC_VO+1]; /* HAL WME
+ AC -> h/w qnum */
+ u_int32_t sc_ant_tx[8]; /* recent tx frames/antenna */
+
+ /* Beacon */
+ struct hal_txq_info sc_beacon_qi; /* adhoc only: beacon
+ queue parameters */
+ struct ath_descdma sc_bdma; /* beacon descriptors */
+ struct ath_txq *sc_cabq; /* tx q for cab frames */
+ struct list_head sc_bbuf; /* beacon buffers */
+ u_int sc_bhalq; /* HAL q for outgoing beacons */
+ u_int sc_bmisscount; /* missed beacon transmits */
+ u_int32_t ast_be_xmit; /* beacons transmitted */
+
+ /* Rate */
+ struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
+ const struct hal_rate_table *sc_rates[WIRELESS_MODE_MAX];
+ const struct hal_rate_table *sc_currates; /* current rate table */
+ u_int8_t sc_rixmap[256]; /* IEEE to h/w
+ rate table ix */
+ u_int8_t sc_minrateix; /* min h/w rate index */
+ u_int8_t sc_protrix; /* protection rate index */
+ struct {
+ u_int32_t rateKbps; /* transfer rate in kbs */
+ u_int8_t ieeerate; /* IEEE rate */
+ } sc_hwmap[256]; /* h/w rate ix mappings */
+
+ /* Channel, Band */
+ struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct hal_channel sc_curchan; /* current h/w channel */
+
+ /* Locks */
+ spinlock_t sc_rxflushlock; /* lock of RX flush */
+ spinlock_t sc_rxbuflock; /* rxbuf lock */
+ spinlock_t sc_txbuflock; /* txbuf lock */
+ spinlock_t sc_resetlock;
+ spinlock_t node_lock;
+};
+
+int ath_init(u_int16_t devid, struct ath_softc *sc);
+void ath_deinit(struct ath_softc *sc);
+int ath_open(struct ath_softc *sc, struct hal_channel *initial_chan);
+int ath_suspend(struct ath_softc *sc);
+int ath_intr(struct ath_softc *sc);
+int ath_reset(struct ath_softc *sc);
+void ath_scan_start(struct ath_softc *sc);
+void ath_scan_end(struct ath_softc *sc);
+int ath_set_channel(struct ath_softc *sc, struct hal_channel *hchan);
+void ath_setup_channel_list(struct ath_softc *sc,
+ enum ieee80211_clist_cmd cmd,
+ const struct hal_channel *chans,
+ int nchan,
+ const u_int8_t *regclassids,
+ u_int nregclass,
+ int countrycode);
+void ath_setup_rate(struct ath_softc *sc,
+ enum wireless_mode wMode,
+ enum RATE_TYPE type,
+ const struct hal_rate_table *rt);
+
+/*********************/
+/* Utility Functions */
+/*********************/
+
+void ath_set_macmode(struct ath_softc *sc, enum hal_ht_macmode macmode);
+void ath_key_reset(struct ath_softc *sc, u_int16_t keyix, int freeslot);
+int ath_keyset(struct ath_softc *sc,
+ u_int16_t keyix,
+ struct hal_keyval *hk,
+ const u_int8_t mac[ETH_ALEN]);
+int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
+int ath_get_mac80211_qnum(u_int queue, struct ath_softc *sc);
+void ath_setslottime(struct ath_softc *sc);
+void ath_update_txpow(struct ath_softc *sc, u_int16_t tpcInDb);
+int ath_cabq_update(struct ath_softc *);
+void ath_get_currentCountry(struct ath_softc *sc,
+ struct hal_country_entry *ctry);
+u_int64_t ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp);
+void ath_internal_reset(struct ath_softc *sc);
+u_int32_t ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
+dma_addr_t ath_skb_map_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa);
+void ath_skb_unmap_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa);
+void ath_mcast_merge(struct ath_softc *sc, u_int32_t mfilt[2]);
+void ath__update_txpow(struct ath_softc *sc,
+ u_int16_t txpowlimit,
+ u_int16_t txpowlevel);
+enum hal_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
+
+#endif /* CORE_H */
--- /dev/null
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "ath9k.h"
+#include "hw.h"
+#include "reg.h"
+#include "phy.h"
+#include "initvals.h"
+
+static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
+static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u_int8_t numChains);
+static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
+static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
+ u_int8_t numChains);
+static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
+static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
+ u_int8_t numChains);
+
+/*
+ * Per-PHY-mode constant tables.
+ * NOTE(review): the index meaning is not visible in this file — it is
+ * presumably the wireless/PHY mode enumeration; confirm the ordering
+ * against the enum definition before relying on it.
+ */
+static const u_int8_t CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
+static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
+
+static const struct hal_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_iqcal_collect,
+ ath9k_hw_iqcalibrate
+};
+static const struct hal_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_iqcal_collect,
+ ath9k_hw_iqcalibrate
+};
+static const struct hal_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_adc_gaincal_collect,
+ ath9k_hw_adc_gaincal_calibrate
+};
+static const struct hal_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_adc_gaincal_collect,
+ ath9k_hw_adc_gaincal_calibrate
+};
+static const struct hal_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+static const struct hal_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+static const struct hal_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+
+static const struct ath_hal ar5416hal = {
+ AR5416_MAGIC,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ CTRY_DEFAULT,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+};
+
+static struct hal_rate_table ar5416_11a_table = {
+ 8,
+ {0},
+ {
+ {AH_TRUE, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
+ {AH_TRUE, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
+ {AH_TRUE, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
+ {AH_TRUE, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
+ {AH_TRUE, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
+ {AH_TRUE, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
+ {AH_TRUE, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
+ {AH_TRUE, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
+ },
+};
+
+static struct hal_rate_table ar5416_11b_table = {
+ 4,
+ {0},
+ {
+ {AH_TRUE, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {AH_TRUE, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {AH_TRUE, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
+ {AH_TRUE, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
+ },
+};
+
+static struct hal_rate_table ar5416_11g_table = {
+ 12,
+ {0},
+ {
+ {AH_TRUE, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {AH_TRUE, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {AH_TRUE, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
+ {AH_TRUE, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
+
+ {AH_FALSE, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
+ {AH_FALSE, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
+ {AH_TRUE, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
+ {AH_TRUE, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
+ {AH_TRUE, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
+ {AH_TRUE, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
+ {AH_TRUE, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
+ {AH_TRUE, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
+ },
+};
+
+static struct hal_rate_table ar5416_11ng_table = {
+ 28,
+ {0},
+ {
+ {AH_TRUE, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {AH_TRUE, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {AH_TRUE, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
+ {AH_TRUE, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
+
+ {AH_FALSE, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
+ {AH_FALSE, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
+ {AH_TRUE, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
+ {AH_TRUE, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
+ {AH_TRUE, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
+ {AH_TRUE, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
+ {AH_TRUE, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
+ {AH_TRUE, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
+ {AH_TRUE, PHY_HT, 6500, 0x80, 0x00, 0, 4},
+ {AH_TRUE, PHY_HT, 13000, 0x81, 0x00, 1, 6},
+ {AH_TRUE, PHY_HT, 19500, 0x82, 0x00, 2, 6},
+ {AH_TRUE, PHY_HT, 26000, 0x83, 0x00, 3, 8},
+ {AH_TRUE, PHY_HT, 39000, 0x84, 0x00, 4, 8},
+ {AH_TRUE, PHY_HT, 52000, 0x85, 0x00, 5, 8},
+ {AH_TRUE, PHY_HT, 58500, 0x86, 0x00, 6, 8},
+ {AH_TRUE, PHY_HT, 65000, 0x87, 0x00, 7, 8},
+ {AH_TRUE, PHY_HT, 13000, 0x88, 0x00, 8, 4},
+ {AH_TRUE, PHY_HT, 26000, 0x89, 0x00, 9, 6},
+ {AH_TRUE, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
+ {AH_TRUE, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
+ {AH_TRUE, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
+ {AH_TRUE, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
+ {AH_TRUE, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
+ {AH_TRUE, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
+ },
+};
+
+static struct hal_rate_table ar5416_11na_table = {
+ 24,
+ {0},
+ {
+ {AH_TRUE, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
+ {AH_TRUE, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
+ {AH_TRUE, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
+ {AH_TRUE, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
+ {AH_TRUE, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
+ {AH_TRUE, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
+ {AH_TRUE, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
+ {AH_TRUE, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
+ {AH_TRUE, PHY_HT, 6500, 0x80, 0x00, 0, 0},
+ {AH_TRUE, PHY_HT, 13000, 0x81, 0x00, 1, 2},
+ {AH_TRUE, PHY_HT, 19500, 0x82, 0x00, 2, 2},
+ {AH_TRUE, PHY_HT, 26000, 0x83, 0x00, 3, 4},
+ {AH_TRUE, PHY_HT, 39000, 0x84, 0x00, 4, 4},
+ {AH_TRUE, PHY_HT, 52000, 0x85, 0x00, 5, 4},
+ {AH_TRUE, PHY_HT, 58500, 0x86, 0x00, 6, 4},
+ {AH_TRUE, PHY_HT, 65000, 0x87, 0x00, 7, 4},
+ {AH_TRUE, PHY_HT, 13000, 0x88, 0x00, 8, 0},
+ {AH_TRUE, PHY_HT, 26000, 0x89, 0x00, 9, 2},
+ {AH_TRUE, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
+ {AH_TRUE, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
+ {AH_TRUE, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
+ {AH_TRUE, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
+ {AH_TRUE, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
+ {AH_TRUE, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
+ },
+};
+
+/*
+ * Map a hardware channel to the wireless PHY mode it operates in:
+ * CCK-flagged channels are 11b, G-flagged channels are 11g and
+ * everything else is treated as 11a.  @ah is unused here but kept
+ * for signature symmetry with the other hw helpers.
+ */
+static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
+ const struct hal_channel *chan)
+{
+ if (IS_CHAN_CCK(chan))
+ return WIRELESS_MODE_11b;
+ if (IS_CHAN_G(chan))
+ return WIRELESS_MODE_11g;
+ return WIRELESS_MODE_11a;
+}
+
+/*
+ * Busy-wait until (REG_READ(reg) & mask) == val, polling every
+ * AH_TIME_QUANTUM microseconds for at most AH_TIMEOUT microseconds
+ * in total.  Returns AH_TRUE on match, or AH_FALSE after logging a
+ * timeout diagnostic.
+ */
+static enum hal_bool ath9k_hw_wait(struct ath_hal *ah,
+ u_int reg,
+ u_int32_t mask,
+ u_int32_t val)
+{
+ int i;
+
+ for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
+ if ((REG_READ(ah, reg) & mask) == val)
+ return AH_TRUE;
+
+ udelay(AH_TIME_QUANTUM);
+ }
+ /* Timed out: report the final register value for debugging. */
+ HDPRINTF(ah, HAL_DBG_PHY_IO,
+ "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
+ __func__, reg, REG_READ(ah, reg), mask, val);
+ return AH_FALSE;
+}
+
+/*
+ * Read one 16-bit word from the on-board EEPROM at word offset @off
+ * into *@data.  Returns AH_TRUE on success, AH_FALSE on timeout.
+ */
+static enum hal_bool ath9k_hw_eeprom_read(struct ath_hal *ah, u_int off,
+ u_int16_t *data)
+{
+ /*
+ * The read value is discarded; this access presumably kicks off
+ * the hardware EEPROM state machine for the requested offset.
+ */
+ (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ /* Wait for the busy/protected-access status bits to clear. */
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
+ return AH_FALSE;
+ }
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return AH_TRUE;
+}
+
+/*
+ * Map the flash region holding calibration data into kernel virtual
+ * address space (used on flash-based boards instead of EEPROM).
+ * Stores the mapping in ahp->ah_cal_mem; returns HAL_OK or HAL_EIO
+ * if ioremap() fails.  NOTE(review): no matching iounmap() is visible
+ * in this chunk — confirm the teardown path releases the mapping.
+ */
+static enum hal_status ath9k_hw_flash_map(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
+
+ if (!ahp->ah_cal_mem) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: cannot remap eeprom region \n", __func__);
+ return HAL_EIO;
+ }
+
+ return HAL_OK;
+}
+
+/*
+ * Read one 16-bit word from the mapped flash calibration region.
+ * Always succeeds (AH_TRUE).
+ * NOTE(review): @off is applied to ah_cal_mem without the
+ * AR5416_EEPROM_S scaling used on the EEPROM path — the effective
+ * units depend on ah_cal_mem's pointer type; confirm callers pass
+ * offsets in the expected units.
+ */
+static enum hal_bool ath9k_hw_flash_read(struct ath_hal *ah, u_int off,
+ u_int16_t *data)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ *data = ioread16(ahp->ah_cal_mem + off);
+ return AH_TRUE;
+}
+
+/*
+ * Decode the chip's silicon revision register (AR_SREV) into
+ * ah_macVersion / ah_macRev and detect PCI-Express attachment.
+ * An ID field of 0xFF signals the extended (type-2) SREV layout;
+ * otherwise the legacy layout is parsed.
+ */
+static void ath9k_hw_read_revisions(struct ath_hal *ah)
+{
+ u_int32_t val;
+
+ val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+
+ if (val == 0xFF) {
+ /* Extended layout: re-read the full register. */
+ val = REG_READ(ah, AR_SREV);
+
+ ah->ah_macVersion =
+ (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
+
+ ah->ah_macRev = MS(val, AR_SREV_REVISION2);
+ /* HOST_MODE bit set means non-PCIE attachment. */
+ ah->ah_isPciExpress =
+ (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
+
+ } else {
+ /* Legacy layout; AR9100 keeps its preset mac version. */
+ if (!AR_SREV_9100(ah))
+ ah->ah_macVersion = MS(val, AR_SREV_VERSION);
+
+ ah->ah_macRev = val & AR_SREV_REVISION;
+
+ if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
+ ah->ah_isPciExpress = AH_TRUE;
+ }
+}
+
+/*
+ * Return the lowest @n bits of @val in reversed bit order (bit 0
+ * becomes bit n-1 and vice versa).  Bits above n are discarded.
+ */
+u_int32_t ath9k_hw_reverse_bits(u_int32_t val, u_int32_t n)
+{
+ u_int32_t retval;
+ int i;
+
+ for (i = 0, retval = 0; i < n; i++) {
+ retval = (retval << 1) | (val & 1);
+ val >>= 1;
+ }
+ return retval;
+}
+
+/*
+ * Populate ah->ah_config with compile-time default tunables for the
+ * HAL: beacon timing, PCIE power-save behaviour, ANI thresholds,
+ * diversity and debug settings.  Called before the EEPROM or caller
+ * overrides any of them.
+ */
+static void ath9k_hw_set_defaults(struct ath_hal *ah)
+{
+ int i;
+
+ /* Beacon response timing and ACK behaviour. */
+ ah->ah_config.ath_hal_dma_beacon_response_time = 2;
+ ah->ah_config.ath_hal_sw_beacon_response_time = 10;
+ ah->ah_config.ath_hal_additional_swba_backoff = 0;
+ ah->ah_config.ath_hal_6mb_ack = 0x0;
+ ah->ah_config.ath_hal_cwmIgnoreExtCCA = 0;
+ /* PCIE power management knobs (all conservative/off). */
+ ah->ah_config.ath_hal_pciePowerSaveEnable = 0;
+ ah->ah_config.ath_hal_pcieL1SKPEnable = 0;
+ ah->ah_config.ath_hal_pcieClockReq = 0;
+ ah->ah_config.ath_hal_pciePowerReset = 0x100;
+ ah->ah_config.ath_hal_pcieRestore = 0;
+ ah->ah_config.ath_hal_pcieWaen = 0;
+ ah->ah_config.ath_hal_analogShiftReg = 1;
+ ah->ah_config.ath_hal_htEnable = 1;
+ /* ANI (adaptive noise immunity) thresholds; ANI itself is off. */
+ ah->ah_config.ath_hal_ofdmTrigLow = 200;
+ ah->ah_config.ath_hal_ofdmTrigHigh = 500;
+ ah->ah_config.ath_hal_cckTrigHigh = 200;
+ ah->ah_config.ath_hal_cckTrigLow = 100;
+ ah->ah_config.ath_hal_enableANI = 0;
+ ah->ah_config.ath_hal_noiseImmunityLvl = 4;
+ ah->ah_config.ath_hal_ofdmWeakSigDet = 1;
+ ah->ah_config.ath_hal_cckWeakSigThr = 0;
+ ah->ah_config.ath_hal_spurImmunityLvl = 2;
+ ah->ah_config.ath_hal_firStepLvl = 0;
+ ah->ah_config.ath_hal_rssiThrHigh = 40;
+ ah->ah_config.ath_hal_rssiThrLow = 7;
+ ah->ah_config.ath_hal_diversityControl = 0;
+ ah->ah_config.ath_hal_antennaSwitchSwap = 0;
+
+ /* No spur-mitigation channels configured by default. */
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ ah->ah_config.ath_hal_spurChans[i][0] = AR_NO_SPUR;
+ ah->ah_config.ath_hal_spurChans[i][1] = AR_NO_SPUR;
+ }
+
+ ah->ah_config.ath_hal_intrMitigation = 0;
+ ah->ah_config.ath_hal_debug = 0;
+}
+
+/*
+ * Post-INI register override, applied only to AR5416 v2.0-and-later
+ * parts that are older than AR9280.  The register/value pair is an
+ * undocumented magic fixup inherited from the vendor HAL; @chan is
+ * currently unused.
+ */
+static inline void ath9k_hw_override_ini(struct ath_hal *ah,
+ struct hal_channel *chan)
+{
+ if (!AR_SREV_5416_V20_OR_LATER(ah)
+ || AR_SREV_9280_10_OR_LATER(ah))
+ return;
+
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+}
+
+/*
+ * Activate the baseband and wait for it to settle.  The settle time
+ * is derived from the PHY RX delay register, scaled for CCK versus
+ * OFDM symbol timing, plus a fixed activation delay.
+ */
+static inline void ath9k_hw_init_bb(struct ath_hal *ah,
+ struct hal_channel *chan)
+{
+ u_int32_t synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_CCK(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+/*
+ * Program the primary interrupt mask (AR_IMR) for the given operating
+ * mode and cache it in ahp->ah_maskReg.  RX interrupts are delivered
+ * either via the mitigation timers (RXINTM/RXMINTR) or per-packet
+ * (RXOK), depending on ah_intrMitigation; AP mode additionally
+ * enables MIB interrupts.  Non-AR9100 parts also get their sync
+ * interrupt cause cleared and defaults programmed.
+ */
+static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
+ enum hal_opmode opmode)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ ahp->ah_maskReg = AR_IMR_TXERR |
+ AR_IMR_TXURN |
+ AR_IMR_RXERR |
+ AR_IMR_RXORN |
+ AR_IMR_BCNMISC;
+
+ if (ahp->ah_intrMitigation)
+ ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ ahp->ah_maskReg |= AR_IMR_RXOK;
+
+ ahp->ah_maskReg |= AR_IMR_TXOK;
+
+ if (opmode == HAL_M_HOSTAP)
+ ahp->ah_maskReg |= AR_IMR_MIB;
+
+ REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
+ REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
+ }
+}
+
+/*
+ * Program the fixed QoS/WME MAC registers: the MIC QoS control and
+ * TID selection maps, the no-ACK policy field location within the
+ * QoS control field, and unlimited TXOP for all 16 TIDs.
+ */
+static inline void ath9k_hw_init_qos(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
+ REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
+
+ REG_WRITE(ah, AR_QOS_NO_ACK,
+ SM(2, AR_QOS_NO_ACK_TWO_BIT) |
+ SM(5, AR_QOS_NO_ACK_BIT_OFF) |
+ SM(0, AR_QOS_NO_ACK_BYTE_OFF));
+
+ REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
+ REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
+}
+
+/*
+ * Read-modify-write a bit field in an analog shift register: clear
+ * @mask, then OR in (@val << @shift) & @mask.  When the
+ * analogShiftReg tunable is set, delay 100us after the write to give
+ * the analog circuitry time to latch the new value.
+ */
+static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
+ u_int reg,
+ u_int32_t mask,
+ u_int32_t shift,
+ u_int32_t val)
+{
+ u_int32_t regVal;
+
+ regVal = REG_READ(ah, reg) & ~mask;
+ regVal |= (val << shift) & mask;
+
+ REG_WRITE(ah, reg, regVal);
+
+ if (ah->ah_config.ath_hal_analogShiftReg)
+ udelay(100);
+
+ return;
+}
+
+/*
+ * Return how many antenna configurations the EEPROM describes for
+ * @freq_band: always at least 1, plus one more when the EEPROM is
+ * version >= 0x0E0D and advertises a second antenna (useAnt1).
+ */
+static u_int8_t ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
+ enum hal_freq_band freq_band)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ /* modalHeader[1] is the 2GHz header, [0] the 5GHz one. */
+ struct modal_eep_header *pModal =
+ &(eep->modalHeader[HAL_FREQ_BAND_2GHZ == freq_band]);
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+ u_int8_t num_ant_config;
+
+ num_ant_config = 1;
+
+ if (pBase->version >= 0x0E0D)
+ if (pModal->useAnt1)
+ num_ant_config += 1;
+
+ return num_ant_config;
+}
+
+/*
+ * Fetch antenna switch configuration @index for @chan's band from the
+ * EEPROM modal header into *@config.  Index 0 is the low 16 bits of
+ * antCtrlCommon; index 1 is the high 16 bits, valid only when the
+ * EEPROM is version >= 0x0E0D and useAnt1 is set.  Returns HAL_OK on
+ * success, HAL_EINVAL otherwise (in which case *@config is untouched).
+ */
+static enum hal_status
+ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
+ struct hal_channel_internal *chan,
+ u_int8_t index,
+ u_int16_t *config)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ struct modal_eep_header *pModal =
+ &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ switch (index) {
+ case 0:
+ *config = pModal->antCtrlCommon & 0xFFFF;
+ return HAL_OK;
+ case 1:
+ if (pBase->version >= 0x0E0D) {
+ if (pModal->useAnt1) {
+ *config =
+ ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
+ return HAL_OK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return HAL_EINVAL;
+}
+
+/*
+ * Read one 16-bit calibration word, dispatching to the flash or
+ * EEPROM backend depending on how the board stores its data.
+ */
+static inline enum hal_bool ath9k_hw_nvram_read(struct ath_hal *ah,
+ u_int off,
+ u_int16_t *data)
+{
+ if (ath9k_hw_use_flash(ah))
+ return ath9k_hw_flash_read(ah, off, data);
+ else
+ return ath9k_hw_eeprom_read(ah, off, data);
+}
+
+/*
+ * Read the entire ar5416_eeprom structure from NVRAM, one 16-bit word
+ * at a time, into ahp->ah_eeprom.  True EEPROM boards and AR9100 use
+ * a start offset of 256 words; flash boards (other than AR9100) read
+ * from offset 0.  Returns AH_FALSE on the first failed word read.
+ */
+static inline enum hal_bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ u_int16_t *eep_data;
+ int addr, ar5416_eep_start_loc = 0;
+
+ if (!ath9k_hw_use_flash(ah)) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: Reading from EEPROM, not flash\n", __func__);
+ ar5416_eep_start_loc = 256;
+ }
+ if (AR_SREV_9100(ah))
+ ar5416_eep_start_loc = 256;
+
+ /* Treat the in-memory struct as a flat array of 16-bit words. */
+ eep_data = (u_int16_t *) eep;
+ for (addr = 0;
+ addr < sizeof(struct ar5416_eeprom) / sizeof(u_int16_t);
+ addr++) {
+ if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
+ eep_data)) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: Unable to read eeprom region \n",
+ __func__);
+ return AH_FALSE;
+ }
+ eep_data++;
+ }
+ return AH_TRUE;
+}
+
+/* XXX: Clean me up, make me more legible */
+/*
+ * Apply per-board calibration values from the EEPROM modal header to
+ * the PHY/analog registers for @chan's band: antenna switch tables,
+ * per-chain IQ correction and rx gain/attenuation, AR9280+ analog
+ * bias settings, switch settling, ADC sizing, XPA timing and CCA
+ * thresholds.  Always returns AH_TRUE.
+ */
+static enum hal_bool
+ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
+ struct hal_channel_internal *chan)
+{
+ struct modal_eep_header *pModal;
+ int i, regChainOffset;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ u_int8_t txRxAttenLocal;
+ u_int16_t ant_config;
+
+ pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+
+ /* Fallback attenuation, overridden below on minor-ver >= 3. */
+ txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
+
+ /*
+ * NOTE(review): the return value is ignored and index 1 is only
+ * valid when the EEPROM advertises a second antenna config
+ * (version >= 0x0E0D with useAnt1); on failure ant_config is
+ * written to AR_PHY_SWITCH_COM uninitialized — confirm whether
+ * index 0 was intended here.
+ */
+ ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config);
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
+
+ /* Per-chain configuration. */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ /* AR9280 only has two chains. */
+ if (AR_SREV_9280(ah)) {
+ if (i >= 2)
+ break;
+ }
+
+ /*
+ * Chainmask 5 (chains 0 and 2) on v2.0+ parts remaps the
+ * register banks so chain 2 uses the bank at 0x1000.
+ */
+ if (AR_SREV_5416_V20_OR_LATER(ah) &&
+ (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
+ && (i != 0))
+ regChainOffset = (i == 1) ? 0x2000 : 0x1000;
+ else
+ regChainOffset = i * 0x1000;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
+ pModal->antCtrlChain[i]);
+
+ /* Load per-chain IQ calibration correction coefficients. */
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_TIMING_CTRL4(0) +
+ regChainOffset) &
+ ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+
+ /* Gain/attenuation: chain 0 only on pre-v2.0 parts. */
+ if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
+ if ((eep->baseEepHeader.version &
+ AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) {
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->
+ bswMargin[i]);
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->
+ bswAtten[i]);
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->
+ xatten2Margin[i]);
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB,
+ pModal->
+ xatten2Db[i]);
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
+ | SM(pModal->
+ bswMargin[i],
+ AR_PHY_GAIN_2GHZ_BSW_MARGIN));
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
+ | SM(pModal->bswAtten[i],
+ AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+ }
+ }
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN,
+ txRxAttenLocal);
+ OS_REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN,
+ pModal->rxTxMarginCh[i]);
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset) &
+ ~AR_PHY_RXGAIN_TXRX_ATTEN) |
+ SM(txRxAttenLocal,
+ AR_PHY_RXGAIN_TXRX_ATTEN));
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
+ SM(pModal->rxTxMarginCh[i],
+ AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+ }
+ }
+ }
+
+ /* AR9280+: per-band analog bias settings from the EEPROM. */
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_OB,
+ AR_AN_RF2G1_CH0_OB_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_DB,
+ AR_AN_RF2G1_CH0_DB_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_OB,
+ AR_AN_RF2G1_CH1_OB_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_DB,
+ AR_AN_RF2G1_CH1_DB_S,
+ pModal->db_ch1);
+ } else {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_OB5,
+ AR_AN_RF5G1_CH0_OB5_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_DB5,
+ AR_AN_RF5G1_CH0_DB5_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_OB5,
+ AR_AN_RF5G1_CH1_OB5_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_DB5,
+ AR_AN_RF5G1_CH1_DB5_S,
+ pModal->db_ch1);
+ }
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_XPABIAS_LVL,
+ AR_AN_TOP2_XPABIAS_LVL_S,
+ pModal->xpaBiasLvl);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_LOCALBIAS,
+ AR_AN_TOP2_LOCALBIAS_S,
+ pModal->local_bias);
+ HDPRINTF(NULL, HAL_DBG_UNMASKABLE, "ForceXPAon: %d\n",
+ pModal->force_xpaon);
+ OS_REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
+ pModal->force_xpaon);
+ }
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
+ pModal->switchSettling);
+ OS_REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
+ pModal->adcDesiredSize);
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ OS_REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_PGA,
+ pModal->pgaDesiredSize);
+
+ /* XPA on/off timing relative to frame boundaries. */
+ REG_WRITE(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
+ | SM(pModal->txEndToXpaOff,
+ AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAA_ON)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+ pModal->txEndToRxOn);
+ /* CCA thresh62 lives in different fields on AR9280+ vs older. */
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ OS_REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ OS_REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
+ AR_PHY_EXT_CCA0_THRESH62,
+ pModal->thresh62);
+ } else {
+ OS_REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ OS_REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_THRESH62,
+ pModal->thresh62);
+ }
+
+ /* Fields added in EEPROM minor version 2. */
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_END_DATA_START,
+ pModal->txFrameToDataStart);
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
+ pModal->txFrameToPaOn);
+ }
+
+ /* Fields added in EEPROM minor version 3. */
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_3) {
+ if (IS_CHAN_HT40(chan))
+ OS_REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH,
+ pModal->swSettleHt40);
+ }
+
+ return AH_TRUE;
+}
+
+/*
+ * ath9k_hw_check_eeprom - validate the EEPROM image cached in ahp->ah_eeprom.
+ *
+ * For non-flash parts the magic word is read first; if it only matches
+ * after swab16() the entire cached image is byte-swapped in place and the
+ * multi-byte header/modal fields are fixed up below.  An XOR checksum over
+ * min(header length, struct size) 16-bit words must come out as 0xffff,
+ * and the EEPROM version/revision must be in the supported range.
+ *
+ * Returns HAL_OK on success, HAL_EIO if the magic word cannot be read,
+ * HAL_EEBADSUM on bad magic, checksum, or version.
+ */
+static inline enum hal_status ath9k_hw_check_eeprom(struct ath_hal *ah)
+{
+ u_int32_t sum = 0, el;
+ u_int16_t *eepdata;
+ int i;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ enum hal_bool need_swap = AH_FALSE;
+ struct ar5416_eeprom *eep =
+ (struct ar5416_eeprom *) &ahp->ah_eeprom;
+
+ if (!ath9k_hw_use_flash(ah)) {
+ u_int16_t magic, magic2;
+ int addr;
+
+ if (ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: Reading Magic # failed\n", __func__);
+ /*
+ * Was "return AH_FALSE": AH_FALSE is an enum hal_bool
+ * and its value collides with the success status, so a
+ * failed NVRAM read was reported as OK.  Return a real
+ * hal_status error instead (matches eeprom_attach).
+ */
+ return HAL_EIO;
+ }
+ HDPRINTF(ah, HAL_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
+ __func__, magic);
+
+ if (magic != AR5416_EEPROM_MAGIC) {
+ magic2 = swab16(magic);
+
+ if (magic2 == AR5416_EEPROM_MAGIC) {
+ need_swap = AH_TRUE;
+ eepdata = (u_int16_t *) (&ahp->ah_eeprom);
+
+ for (addr = 0;
+ addr <
+ sizeof(struct ar5416_eeprom) /
+ sizeof(u_int16_t); addr++) {
+ u_int16_t temp;
+
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
+
+ /*
+ * Dump the word just swapped; the old
+ * code printed *eepdata after the
+ * increment, i.e. the next, still
+ * unswapped word.
+ */
+ HDPRINTF(ah, HAL_DBG_EEPROM_DUMP,
+ "0x%04X ", temp);
+ if (((addr + 1) % 6) == 0)
+ HDPRINTF(ah,
+ HAL_DBG_EEPROM_DUMP,
+ "\n");
+ }
+ } else {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "Invalid EEPROM Magic. "
+ "endianness missmatch.\n");
+ return HAL_EEBADSUM;
+ }
+ }
+ }
+ HDPRINTF(ah, HAL_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
+
+ if (need_swap)
+ el = swab16(ahp->ah_eeprom.baseEepHeader.length);
+ else
+ el = ahp->ah_eeprom.baseEepHeader.length;
+
+ /* XOR checksum over the claimed length, capped at the struct size */
+ eepdata = (u_int16_t *) (&ahp->ah_eeprom);
+ for (i = 0; i <
+ min(el, sizeof(struct ar5416_eeprom)) / sizeof(u_int16_t); i++)
+ sum ^= *eepdata++;
+
+ if (need_swap) {
+ u_int32_t integer, j;
+ u_int16_t word;
+
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "EEPROM Endianness is not native.. Changing \n");
+
+ word = swab16(eep->baseEepHeader.length);
+ eep->baseEepHeader.length = word;
+
+ word = swab16(eep->baseEepHeader.checksum);
+ eep->baseEepHeader.checksum = word;
+
+ word = swab16(eep->baseEepHeader.version);
+ eep->baseEepHeader.version = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ word = swab16(eep->baseEepHeader.rfSilent);
+ eep->baseEepHeader.rfSilent = word;
+
+ word = swab16(eep->baseEepHeader.blueToothOptions);
+ eep->baseEepHeader.blueToothOptions = word;
+
+ word = swab16(eep->baseEepHeader.deviceCap);
+ eep->baseEepHeader.deviceCap = word;
+
+ for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
+ struct modal_eep_header *pModal =
+ &eep->modalHeader[j];
+ integer = swab32(pModal->antCtrlCommon);
+ pModal->antCtrlCommon = integer;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ integer = swab32(pModal->antCtrlChain[i]);
+ pModal->antCtrlChain[i] = integer;
+ }
+
+ for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
+ word = swab16(pModal->spurChans[i].spurChan);
+ pModal->spurChans[i].spurChan = word;
+ }
+ }
+ }
+
+ if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
+ ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ar5416_get_eep_ver(ahp));
+ return HAL_EEBADSUM;
+ }
+
+ return HAL_OK;
+}
+
+/*
+ * ath9k_hw_chip_test - sanity-check register I/O on two probe registers
+ * (AR_STA_ID0 and a PHY register at AR_PHY_BASE + 0x20).  Each register is
+ * written with 256 walking-count patterns plus four fixed bit patterns and
+ * read back; any mismatch is logged and AH_FALSE returned.  The original
+ * register contents are restored before returning AH_TRUE.
+ */
+static enum hal_bool ath9k_hw_chip_test(struct ath_hal *ah)
+{
+ u_int32_t regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u_int32_t regHold[2];
+ u_int32_t patternData[4] = { 0x55555555,
+ 0xaaaaaaaa,
+ 0x66666666,
+ 0x99999999 };
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ u_int32_t addr = regAddr[i];
+ u_int32_t wrData, rdData;
+
+ /* save so we can restore after the destructive test */
+ regHold[i] = REG_READ(ah, addr);
+ /* 256 patterns with the count mirrored in both halves */
+ for (j = 0; j < 0x100; j++) {
+ wrData = (j << 16) | j;
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (rdData != wrData) {
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "%s: address test failed "
+ "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ __func__, addr, wrData, rdData);
+ return AH_FALSE;
+ }
+ }
+ for (j = 0; j < 4; j++) {
+ wrData = patternData[j];
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (wrData != rdData) {
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "%s: address test failed "
+ "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ __func__, addr, wrData, rdData);
+ return AH_FALSE;
+ }
+ }
+ REG_WRITE(ah, regAddr[i], regHold[i]);
+ }
+ udelay(100);
+ return AH_TRUE;
+}
+
+/*
+ * ath9k_hw_getrxfilter - return the current RX filter as HAL_RX_FILTER_*
+ * bits: the raw AR_RX_FILTER contents plus PHYRADAR/PHYERR flags derived
+ * from which PHY error classes are enabled in AR_PHY_ERR.
+ */
+u_int32_t ath9k_hw_getrxfilter(struct ath_hal *ah)
+{
+ u_int32_t bits = REG_READ(ah, AR_RX_FILTER);
+ u_int32_t phybits = REG_READ(ah, AR_PHY_ERR);
+
+ if (phybits & AR_PHY_ERR_RADAR)
+ bits |= HAL_RX_FILTER_PHYRADAR;
+ if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
+ bits |= HAL_RX_FILTER_PHYERR;
+ return bits;
+}
+
+/*
+ * ath9k_hw_setrxfilter - program the RX filter from HAL_RX_FILTER_* bits.
+ * The low 16 bits go to AR_RX_FILTER (AR_RX_COMPR_BAR is always kept set);
+ * PHYRADAR/PHYERR requests are translated into AR_PHY_ERR classes.
+ * Zero-length-frame DMA (AR_RXCFG_ZLFDMA) is enabled only while any PHY
+ * error reporting is active, and disabled otherwise.
+ */
+void ath9k_hw_setrxfilter(struct ath_hal *ah, u_int32_t bits)
+{
+ u_int32_t phybits;
+
+ REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
+ phybits = 0;
+ if (bits & HAL_RX_FILTER_PHYRADAR)
+ phybits |= AR_PHY_ERR_RADAR;
+ if (bits & HAL_RX_FILTER_PHYERR)
+ phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
+ REG_WRITE(ah, AR_PHY_ERR, phybits);
+
+ if (phybits)
+ REG_WRITE(ah, AR_RXCFG,
+ REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
+ else
+ REG_WRITE(ah, AR_RXCFG,
+ REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+}
+
+/*
+ * ath9k_hw_setcapability - enable/disable a HAL capability.
+ *
+ * Only HAL_CAP_DIVERSITY touches hardware immediately (AR_PHY_CCK_DETECT);
+ * the other handled capabilities just update cached defaults in the
+ * ath_hal_5416 state (ah_staId1Defaults / ah_miscMode) — presumably applied
+ * to the chip on a later reset path; confirm against the reset code.
+ * The 'capability' and 'status' arguments are unused for the types handled
+ * here.  Returns AH_TRUE if the type is recognised, AH_FALSE otherwise.
+ */
+enum hal_bool ath9k_hw_setcapability(struct ath_hal *ah,
+ enum hal_capability_type type,
+ u_int32_t capability,
+ u_int32_t setting,
+ enum hal_status *status)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u_int32_t v;
+
+ switch (type) {
+ case HAL_CAP_TKIP_MIC:
+ if (setting)
+ ahp->ah_staId1Defaults |=
+ AR_STA_ID1_CRPT_MIC_ENABLE;
+ else
+ ahp->ah_staId1Defaults &=
+ ~AR_STA_ID1_CRPT_MIC_ENABLE;
+ return AH_TRUE;
+ case HAL_CAP_DIVERSITY:
+ /* immediate RMW of the fast-diversity enable bit */
+ v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (setting)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+ return AH_TRUE;
+ case HAL_CAP_MCAST_KEYSRCH:
+ if (setting)
+ ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
+ else
+ ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
+ return AH_TRUE;
+ case HAL_CAP_TSF_ADJUST:
+ if (setting)
+ ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
+ else
+ ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
+ return AH_TRUE;
+ default:
+ return AH_FALSE;
+ }
+}
+
+/*
+ * ath9k_hw_dmaRegDump - dump the DMA observation/debug registers.
+ *
+ * Selects the DMA observation bus via AR_MACMISC, snapshots the
+ * ATH9K_NUM_DMA_DEBUG_REGS debug words, then decodes per-queue QCU state
+ * (4 bits each, 8 per word) and per-DCU chain state (5 bits each, 6 per
+ * word) plus assorted stitch/fetch/arbiter fields.  Debug-only; purely
+ * reads hardware state apart from the AR_MACMISC mux write.
+ */
+void ath9k_hw_dmaRegDump(struct ath_hal *ah)
+{
+ u_int32_t val[ATH9K_NUM_DMA_DEBUG_REGS];
+ int qcuOffset = 0, dcuOffset = 0;
+ u_int32_t *qcuBase = &val[0], *dcuBase = &val[4];
+ int i;
+
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ HDPRINTF(ah, HAL_DBG_REG_IO, "Raw DMA Debug values:\n");
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
+ if (i % 4 == 0)
+ HDPRINTF(ah, HAL_DBG_REG_IO, "\n");
+
+ val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u_int32_t)));
+ HDPRINTF(ah, HAL_DBG_REG_IO, "%d: %08x ", i, val[i]);
+ }
+
+ HDPRINTF(ah, HAL_DBG_REG_IO, "\n\n");
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+
+ for (i = 0; i < ATH9K_NUM_QUEUES;
+ i++, qcuOffset += 4, dcuOffset += 5) {
+ /* 8 QCU nibbles per 32-bit word, 6 DCU 5-bit fields per word */
+ if (i == 8) {
+ qcuOffset = 0;
+ qcuBase++;
+ }
+
+ if (i == 6) {
+ dcuOffset = 0;
+ dcuBase++;
+ }
+
+ /*
+ * fsp_st term fixed: shift binds tighter than '&' in C, so
+ * the old "val[2] & (0x7 << (i*3)) >> (i*3)" collapsed to
+ * "val[2] & 0x7" and printed queue 0's field for every queue.
+ */
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
+ 3),
+ (val[2] & (0x7 << (i * 3))) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ }
+
+ HDPRINTF(ah, HAL_DBG_REG_IO, "\n");
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "qcu_stitch state: %2x qcu_fetch state: %2x\n",
+ (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "qcu_complete state: %2x dcu_complete state: %2x\n",
+ (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "dcu_arb state: %2x dcu_fp state: %2x\n",
+ (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
+ (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
+ (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
+ HDPRINTF(ah, HAL_DBG_REG_IO,
+ "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
+ (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
+
+ HDPRINTF(ah, HAL_DBG_REG_IO, "pcu observe 0x%x \n",
+ REG_READ(ah, AR_OBS_BUS_1));
+ HDPRINTF(ah, HAL_DBG_REG_IO, "AR_CR 0x%x \n", REG_READ(ah, AR_CR));
+}
+
+/*
+ * ath9k_hw_GetMibCycleCountsPct - compute rx-clear / rx-frame / tx-frame
+ * busy percentages from the MIB cycle counters since the previous call.
+ *
+ * Returns 1 and fills *rxc_pcnt/*rxf_pcnt/*txf_pcnt on success; returns 0
+ * (leaving the outputs untouched) on the first call, on cycle-counter
+ * wrap, or when no cycles elapsed.
+ *
+ * NOTE(review): the previous snapshot lives in function-level statics, so
+ * the state is shared across all ath_hal instances and the function is not
+ * reentrant — fine for a single device, suspect for multiple.
+ */
+u_int32_t ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
+ u_int32_t *rxc_pcnt,
+ u_int32_t *rxf_pcnt,
+ u_int32_t *txf_pcnt)
+{
+ static u_int32_t cycles, rx_clear, rx_frame, tx_frame;
+ u_int32_t good = 1;
+
+ u_int32_t rc = REG_READ(ah, AR_RCCNT);
+ u_int32_t rf = REG_READ(ah, AR_RFCNT);
+ u_int32_t tf = REG_READ(ah, AR_TFCNT);
+ u_int32_t cc = REG_READ(ah, AR_CCCNT);
+
+ if (cycles == 0 || cycles > cc) {
+ /* first call, or the hardware counter wrapped */
+ HDPRINTF(ah, HAL_DBG_CHANNEL,
+ "%s: cycle counter wrap. ExtBusy = 0\n",
+ __func__);
+ good = 0;
+ } else {
+ u_int32_t cc_d = cc - cycles;
+ u_int32_t rc_d = rc - rx_clear;
+ u_int32_t rf_d = rf - rx_frame;
+ u_int32_t tf_d = tf - tx_frame;
+
+ if (cc_d != 0) {
+ *rxc_pcnt = rc_d * 100 / cc_d;
+ *rxf_pcnt = rf_d * 100 / cc_d;
+ *txf_pcnt = tf_d * 100 / cc_d;
+ } else {
+ good = 0;
+ }
+ }
+
+ /* remember this snapshot for the next delta */
+ cycles = cc;
+ rx_frame = rf;
+ rx_clear = rc;
+ tx_frame = tf;
+
+ return good;
+}
+
+/*
+ * ath9k_hw_set11nmac2040 - program the MAC 20/40 MHz mode register.
+ * In HT40 mode (unless the config asks to ignore extension-channel CCA)
+ * joined rx-clear is enabled; otherwise the register is cleared to
+ * plain 20 MHz behaviour.
+ */
+void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum hal_ht_macmode mode)
+{
+ u_int32_t macmode;
+
+ if (mode == HAL_HT_MACMODE_2040 &&
+ !ah->ah_config.ath_hal_cwmIgnoreExtCCA)
+ macmode = AR_2040_JOINED_RX_CLEAR;
+ else
+ macmode = 0;
+
+ REG_WRITE(ah, AR_2040_MODE, macmode);
+}
+
+/* Disable the baseband by writing the PHY-inactive code to AR_PHY_ACTIVE. */
+static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+
+/*
+ * ath9k_hw_newstate - allocate and initialise an ath_hal_5416 state block.
+ *
+ * Copies the ar5416hal template into the embedded ath_hal, records the
+ * device id / register base, and seeds default state: broadcast BSSID
+ * mask, 100 TU beacon interval, TKIP MIC enabled, and "unset" (-1)
+ * slot/ack/cts/global-tx timeouts.  AR9100 parts are flagged by devid and
+ * everything else is marked AH_USE_EEPROM.
+ *
+ * Returns NULL and sets *status = HAL_ENOMEM on allocation failure.
+ */
+static struct ath_hal_5416 *ath9k_hw_newstate(u_int16_t devid, void *sc,
+ void __iomem *mem,
+ enum hal_status *status)
+{
+ static const u_int8_t defbssidmask[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct ath_hal_5416 *ahp;
+ struct ath_hal *ah;
+
+ ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
+ if (ahp == NULL) {
+ HDPRINTF(NULL, HAL_DBG_UNMASKABLE,
+ "%s: cannot allocate memory for state block\n",
+ __func__);
+ *status = HAL_ENOMEM;
+ return NULL;
+ }
+
+ ah = &ahp->ah;
+
+ /* start from the ar5416hal function/config template */
+ memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
+
+ ah->ah_sc = sc;
+ ah->ah_sh = mem;
+
+ ah->ah_devid = devid;
+ ah->ah_subvendorid = 0;
+
+ ah->ah_flags = 0;
+ if ((devid == AR5416_AR9100_DEVID))
+ ah->ah_macVersion = AR_SREV_VERSION_9100;
+ /* AR9100 (SoC) parts have no EEPROM; everything else uses one */
+ if (!AR_SREV_9100(ah))
+ ah->ah_flags = AH_USE_EEPROM;
+
+ ah->ah_powerLimit = MAX_RATE_POWER;
+ ah->ah_tpScale = HAL_TP_SCALE_MAX;
+
+ ahp->ah_atimWindow = 0;
+ ahp->ah_diversityControl = ah->ah_config.ath_hal_diversityControl;
+ ahp->ah_antennaSwitchSwap =
+ ah->ah_config.ath_hal_antennaSwitchSwap;
+
+ ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
+ ahp->ah_beaconInterval = 100;
+ ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
+ /* (u_int)-1 == "not configured yet" for the timeout fields */
+ ahp->ah_slottime = (u_int) -1;
+ ahp->ah_acktimeout = (u_int) -1;
+ ahp->ah_ctstimeout = (u_int) -1;
+ ahp->ah_globaltxtimeout = (u_int) -1;
+ memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN);
+
+ ahp->ah_gBeaconRate = 0;
+
+ return ahp;
+}
+
+/*
+ * ath9k_hw_eeprom_attach - map (flash parts), read, and validate the
+ * EEPROM.  Returns HAL_EIO if the image cannot be read, otherwise the
+ * result of ath9k_hw_check_eeprom().
+ */
+static enum hal_status ath9k_hw_eeprom_attach(struct ath_hal *ah)
+{
+ enum hal_status status;
+
+ if (ath9k_hw_use_flash(ah))
+ ath9k_hw_flash_map(ah);
+
+ if (!ath9k_hw_fill_eeprom(ah))
+ return HAL_EIO;
+
+ status = ath9k_hw_check_eeprom(ah);
+
+ return status;
+}
+
+/*
+ * ath9k_hw_get_eeprom - fetch a single parameter from the cached EEPROM.
+ *
+ * Per the case labels, modalHeader[0] holds the 5 GHz parameters and
+ * modalHeader[1] the 2 GHz ones.  Noise-floor thresholds are returned
+ * negated; MAC address words are packed big-endian two bytes at a time.
+ * Unknown parameters return 0.
+ */
+u_int32_t ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
+ enum eeprom_param param)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ struct modal_eep_header *pModal = eep->modalHeader;
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_NFTHRESH_5:
+ return -pModal[0].noiseFloorThreshCh[0];
+ case EEP_NFTHRESH_2:
+ return -pModal[1].noiseFloorThreshCh[0];
+ case AR_EEPROM_MAC(0):
+ return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+ case AR_EEPROM_MAC(1):
+ return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+ case AR_EEPROM_MAC(2):
+ return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_REG_1:
+ return pBase->regDmn[1];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_OB_5:
+ return pModal[0].ob;
+ case EEP_DB_5:
+ return pModal[0].db;
+ case EEP_OB_2:
+ return pModal[1].ob;
+ case EEP_DB_2:
+ return pModal[1].db;
+ case EEP_MINOR_REV:
+ return pBase->version & AR5416_EEP_VER_MINOR_MASK;
+ case EEP_TX_MASK:
+ return pBase->txMask;
+ case EEP_RX_MASK:
+ return pBase->rxMask;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * ath9k_hw_get_radiorev - read the analog (radio) chip revision.
+ * Writes a probe value to PHY register 0x36, clocks the serial interface
+ * with eight writes to PHY register 0x20, then reads the revision byte
+ * back, swaps its nibbles and bit-reverses it into natural order.
+ */
+static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
+{
+ u_int32_t val;
+ int i;
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ /* swap the two nibbles, then reverse the bit order */
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+/*
+ * ath9k_hw_init_macaddr - load the MAC address from the EEPROM into
+ * ahp->ah_macaddr, two big-endian bytes per 16-bit EEPROM word.
+ * Returns HAL_EEBADMAC for an all-zero or all-ones address (sum of the
+ * three words is 0 or 3*0xffff), HAL_OK otherwise.
+ */
+static inline enum hal_status ath9k_hw_init_macaddr(struct ath_hal *ah)
+{
+ u_int32_t sum;
+ int i;
+ u_int16_t eeval;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ DECLARE_MAC_BUF(mac);
+
+ sum = 0;
+ for (i = 0; i < 3; i++) {
+ eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
+ sum += eeval;
+ ahp->ah_macaddr[2 * i] = eeval >> 8;
+ ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
+ }
+ if (sum == 0 || sum == 0xffff * 3) {
+ HDPRINTF(ah, HAL_DBG_EEPROM,
+ "%s: mac address read failed: %s\n", __func__,
+ print_mac(mac, ahp->ah_macaddr));
+ return HAL_EEBADMAC;
+ }
+
+ return HAL_OK;
+}
+
+/*
+ * ath9k_hw_interpolate - integer linear interpolation of 'target' between
+ * the points (srcLeft, targetLeft) and (srcRight, targetRight).
+ * When the source endpoints coincide (division would be by zero),
+ * targetLeft is returned unchanged.
+ */
+static inline int16_t ath9k_hw_interpolate(u_int16_t target,
+ u_int16_t srcLeft,
+ u_int16_t srcRight,
+ int16_t targetLeft,
+ int16_t targetRight)
+{
+ int16_t rv;
+
+ if (srcRight == srcLeft) {
+ rv = targetLeft;
+ } else {
+ rv = (int16_t) (((target - srcLeft) * targetRight +
+ (srcRight - target) * targetLeft) /
+ (srcRight - srcLeft));
+ }
+ return rv;
+}
+
+/*
+ * ath9k_hw_fbin2freq - convert an EEPROM frequency bin to MHz:
+ * 2300 + fbin for 2 GHz channels, 4800 + 5*fbin for 5 GHz channels.
+ * The AR5416_BCHAN_UNUSED sentinel is passed through unchanged.
+ */
+static inline u_int16_t ath9k_hw_fbin2freq(u_int8_t fbin,
+ enum hal_bool is2GHz)
+{
+
+ if (fbin == AR5416_BCHAN_UNUSED)
+ return fbin;
+
+ return (u_int16_t) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
+
+/*
+ * ath9k_hw_eeprom_get_spur_chan - look up spur-mitigation channel i for
+ * the given band, according to the configured spur mode: disabled
+ * (AR_NO_SPUR), from the driver config (SPUR_ENABLE_IOCTL), or from the
+ * EEPROM modal header (SPUR_ENABLE_EEPROM).  is2GHz doubles as the
+ * modalHeader/config index (0 = 5 GHz, 1 = 2 GHz).
+ */
+static u_int16_t ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
+ u_int16_t i,
+ enum hal_bool is2GHz)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep =
+ (struct ar5416_eeprom *) &ahp->ah_eeprom;
+ u_int16_t spur_val = AR_NO_SPUR;
+
+ HDPRINTF(ah, HAL_DBG_ANI,
+ "Getting spur idx %d is2Ghz. %d val %x\n",
+ i, is2GHz, ah->ah_config.ath_hal_spurChans[i][is2GHz]);
+
+ switch (ah->ah_config.ath_hal_spurMode) {
+ case SPUR_DISABLE:
+ break;
+ case SPUR_ENABLE_IOCTL:
+ spur_val = ah->ah_config.ath_hal_spurChans[i][is2GHz];
+ HDPRINTF(ah, HAL_DBG_ANI,
+ "Getting spur val from new loc. %d\n", spur_val);
+ break;
+ case SPUR_ENABLE_EEPROM:
+ spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
+ break;
+
+ }
+ return spur_val;
+}
+
+/*
+ * ath9k_hw_rfattach - run RF initialisation and translate its boolean
+ * result into a hal_status: HAL_OK on success, otherwise the error code
+ * that ath9k_hw_init_rf() reported through 'ecode'.
+ */
+static inline enum hal_status ath9k_hw_rfattach(struct ath_hal *ah)
+{
+ enum hal_bool rfStatus = AH_FALSE;
+ enum hal_status ecode = HAL_OK;
+
+ rfStatus = ath9k_hw_init_rf(ah, &ecode);
+ if (!rfStatus) {
+ HDPRINTF(ah, HAL_DBG_RESET,
+ "%s: RF setup failed, status %u\n", __func__,
+ ecode);
+ return ecode;
+ }
+
+ return HAL_OK;
+}
+
+/*
+ * ath9k_hw_rf_claim - identify the analog radio and record its revision.
+ *
+ * Reads the radio revision, maps major revision 0 to AR_RAD5133, accepts
+ * the four known 5133/5122/2133/2122 majors, and stores the (possibly
+ * remapped) value in ah->ah_analog5GhzRev.  Unknown radios return
+ * HAL_ENOTSUPP.
+ */
+static enum hal_status ath9k_hw_rf_claim(struct ath_hal *ah)
+{
+ u_int32_t val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ath9k_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ /* old chips reported 0; treat as 5133 */
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ /*
+ * Report the revision we just read; the old code printed
+ * ah->ah_analog5GhzRev, which is only assigned on success
+ * below and so was stale here.
+ */
+ HDPRINTF(ah, HAL_DBG_CHANNEL,
+ "%s: 5G Radio Chip Rev 0x%02X is not "
+ "supported by this driver\n",
+ __func__, val);
+ return HAL_ENOTSUPP;
+ }
+
+ ah->ah_analog5GhzRev = val;
+
+ return HAL_OK;
+}
+
+/*
+ * ath9k_hw_init_pll - program the RTC PLL for the target channel.
+ *
+ * Selects the PLL word per chip family: fixed magic values for AR9100,
+ * REFDIV/CLKSEL/DIV fields for AR9280+/AR9160+, and the legacy
+ * REFDIV_5/DIV2 encoding otherwise.  Half/quarter-rate channels change
+ * CLKSEL; 5 GHz vs 2 GHz changes DIV.  chan == NULL is allowed (used from
+ * the reset path) and takes the 2 GHz/full-rate defaults.  After writing
+ * the PLL the code waits RTC_PLL_SETTLE_DELAY and forces the derived
+ * sleep clock.
+ */
+static inline void ath9k_hw_init_pll(struct ath_hal *ah,
+ struct hal_channel *chan)
+{
+ u_int32_t pll;
+
+ if (AR_SREV_9100(ah)) {
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll = 0x1450;
+ else
+ pll = 0x1458;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+
+
+ /* AR9280 2.0: whole PLL word overridden for
+ * 10/20 MHz-aligned 5 GHz channels */
+ if (AR_SREV_9280_20(ah)) {
+ if (((chan->channel % 20) == 0)
+ || ((chan->channel % 10) == 0))
+ pll = 0x2850;
+ else
+ pll = 0x142c;
+ }
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+ }
+ }
+ /* NOTE(review): the u_int16_t cast on the register offset looks
+ * redundant — confirm REG_WRITE's offset type */
+ REG_WRITE(ah, (u_int16_t) (AR_RTC_PLL_CONTROL), pll);
+
+ udelay(RTC_PLL_SETTLE_DELAY);
+
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+}
+
+/*
+ * ath9k_hw_set_regs - program the PHY frame-control (HT) register, the
+ * MAC 20/40 mode, and the global TX / carrier-sense timeout limits for
+ * the given channel.  HT40 channels additionally enable dynamic 20/40
+ * operation, primary-channel selection (HT40+), and — for 25 MHz
+ * extension protection spacing — the extension-channel flag.
+ */
+static void ath9k_hw_set_regs(struct ath_hal *ah, struct hal_channel *chan,
+ enum hal_ht_macmode macmode)
+{
+ u_int32_t phymode;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if (chan->channelFlags & CHANNEL_HT40PLUS)
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ if (ahp->ah_extprotspacing == HAL_HT_EXTPROTSPACING_25)
+ phymode |= AR_PHY_FC_DYN2040_EXT_CH;
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah, macmode);
+
+ /* global TX timeout = 25, carrier-sense timeout = 0xF */
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+/*
+ * ath9k_hw_set_operating_mode - set the MAC operating mode bits in
+ * AR_STA_ID1 (AP / ad-hoc / station-monitor) and the matching AP/ad-hoc
+ * indication bit in AR_CFG.  Key-cache search mode is enabled for every
+ * handled mode; unknown modes leave the register untouched.
+ */
+static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
+{
+ u_int32_t val;
+
+ val = REG_READ(ah, AR_STA_ID1);
+ /* clear both mode bits before setting the requested one */
+ val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
+ switch (opmode) {
+ case HAL_M_HOSTAP:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
+ | AR_STA_ID1_KSRCH_MODE);
+ OS_REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ case HAL_M_IBSS:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
+ | AR_STA_ID1_KSRCH_MODE);
+ OS_REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ case HAL_M_STA:
+ case HAL_M_MONITOR:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ break;
+ }
+}
+
+/*
+ * ath9k_hw_set_rfmode - program AR_PHY_MODE for the channel: dynamic
+ * CCK/OFDM for B/G channels, OFDM-only otherwise; pre-AR9280 parts also
+ * need the explicit 2 GHz / 5 GHz band select.  On AR9280 2.0, 5 MHz
+ * spaced A channels force dynamic mode with CCK disabled.  No-op when
+ * chan is NULL.
+ */
+static inline void
+ath9k_hw_set_rfmode(struct ath_hal *ah, struct hal_channel *chan)
+{
+ u_int32_t rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
+ AR_PHY_MODE_RF2GHZ;
+
+ if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+/*
+ * ath9k_hw_set_reset - perform a warm or cold MAC reset via AR_RTC_RC.
+ *
+ * Forces the chip awake, asserts the appropriate reset bits (AR9100 SoCs
+ * always get the full warm+cold+RTC set; PCI parts additionally reset the
+ * AHB/host interface when a sync-cause timeout is pending), then
+ * de-asserts and polls AR_RTC_RC until it clears.  Returns AH_FALSE if
+ * the RTC stays stuck in reset, AH_TRUE otherwise.  The PLL is
+ * re-initialised with no channel (defaults) afterwards.
+ */
+static enum hal_bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
+{
+ u_int32_t rst_flags;
+ u_int32_t tmpReg;
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ if (AR_SREV_9100(ah)) {
+ rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
+ AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
+ } else {
+ /* pending bus timeouts require the AHB (+host i/f) reset too */
+ tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if (tmpReg &
+ (AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+ } else {
+ REG_WRITE(ah, AR_RC, AR_RC_AHB);
+ }
+
+ rst_flags = AR_RTC_RC_MAC_WARM;
+ if (type == HAL_RESET_COLD)
+ rst_flags |= AR_RTC_RC_MAC_COLD;
+ }
+
+ REG_WRITE(ah, (u_int16_t) (AR_RTC_RC), rst_flags);
+ udelay(50);
+
+ /* release reset and wait for the RTC to report it is out */
+ REG_WRITE(ah, (u_int16_t) (AR_RTC_RC), 0);
+ if (!ath9k_hw_wait(ah, (u_int16_t) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
+ HDPRINTF(ah, HAL_DBG_RESET, "%s: RTC stuck in MAC reset\n",
+ __func__);
+ return AH_FALSE;
+ }
+
+ if (!AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_RC, 0);
+
+ ath9k_hw_init_pll(ah, NULL);
+
+ if (AR_SREV_9100(ah))
+ udelay(50);
+
+ return AH_TRUE;
+}
+
+/*
+ * ath9k_hw_set_reset_power_on - power-on reset: pulse AR_RTC_RESET
+ * (0 then 1), wait for the RTC to report the ON state, re-read the chip
+ * revisions, then finish with a warm reset.  Returns AH_FALSE if the RTC
+ * never wakes.
+ */
+static inline enum hal_bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ REG_WRITE(ah, (u_int16_t) (AR_RTC_RESET), 0);
+ REG_WRITE(ah, (u_int16_t) (AR_RTC_RESET), 1);
+
+ if (!ath9k_hw_wait(ah,
+ AR_RTC_STATUS,
+ AR_RTC_STATUS_M,
+ AR_RTC_STATUS_ON)) {
+ HDPRINTF(ah, HAL_DBG_RESET, "%s: RTC not waking up\n",
+ __func__);
+ return AH_FALSE;
+ }
+
+ ath9k_hw_read_revisions(ah);
+
+ return ath9k_hw_set_reset(ah, HAL_RESET_WARM);
+}
+
+/*
+ * ath9k_hw_set_reset_reg - dispatch a reset request: power-on resets go
+ * through ath9k_hw_set_reset_power_on(), warm/cold through
+ * ath9k_hw_set_reset().  The chip is forced awake first.  Unknown reset
+ * types return AH_FALSE.  (Dead "break" statements after the returns
+ * were removed.)
+ */
+static enum hal_bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
+ u_int32_t type)
+{
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
+
+ switch (type) {
+ case HAL_RESET_POWER_ON:
+ return ath9k_hw_set_reset_power_on(ah);
+ case HAL_RESET_WARM:
+ case HAL_RESET_COLD:
+ return ath9k_hw_set_reset(ah, type);
+ default:
+ return AH_FALSE;
+ }
+}
+
+/*
+ * ath9k_hw_check_chan - validate a channel's flag combination and hand it
+ * to the regulatory code.  Returns the internal channel on success, NULL
+ * if the band or mode flags are inconsistent.
+ *
+ * NOTE(review): the multi-way XOR tests check odd parity, not strictly
+ * "exactly one flag set" — with five mode flags, three set would also
+ * pass.  Adequate for the flag combinations callers produce, presumably.
+ */
+static inline struct hal_channel_internal *ath9k_hw_check_chan(
+ struct ath_hal *ah, struct hal_channel *chan)
+{
+ /* exactly one of 2GHz/5GHz must be set */
+ if ((IS(chan, CHANNEL_2GHZ) ^ IS(chan, CHANNEL_5GHZ)) == 0) {
+ HDPRINTF(ah, HAL_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; not marked as "
+ "2GHz or 5GHz\n", __func__, chan->channel,
+ chan->channelFlags);
+ return NULL;
+ }
+
+ if ((IS(chan, CHANNEL_OFDM)
+ ^ IS(chan, CHANNEL_CCK)
+ ^ IS(chan, CHANNEL_HT20)
+ ^ IS(chan, CHANNEL_HT40PLUS)
+ ^ IS(chan, CHANNEL_HT40MINUS)) == 0) {
+ HDPRINTF(ah, HAL_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; not marked as "
+ "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
+ __func__, chan->channel, chan->channelFlags);
+ return NULL;
+ }
+
+ return ath9k_regd_check_channel(ah, chan);
+}
+
+/*
+ * ath9k_hw_get_lower_upper_index - find the indices in the sorted list
+ * 'pList' that bracket 'target'.
+ *
+ * Returns AH_TRUE with *indexL == *indexR when the target lies at or
+ * outside the list bounds (clamped) or matches an element exactly;
+ * returns AH_FALSE with *indexL/*indexR set to the bracketing pair when
+ * the target falls strictly between two elements.  Callers therefore use
+ * the return value to decide whether interpolation is needed.
+ */
+static inline enum hal_bool
+ath9k_hw_get_lower_upper_index(u_int8_t target,
+ u_int8_t *pList,
+ u_int16_t listSize,
+ u_int16_t *indexL,
+ u_int16_t *indexR)
+{
+ u_int16_t i;
+
+ /* clamp below the first / above the last element */
+ if (target <= pList[0]) {
+ *indexL = *indexR = 0;
+ return AH_TRUE;
+ }
+ if (target >= pList[listSize - 1]) {
+ *indexL = *indexR = (u_int16_t) (listSize - 1);
+ return AH_TRUE;
+ }
+
+ for (i = 0; i < listSize - 1; i++) {
+ if (pList[i] == target) {
+ *indexL = *indexR = i;
+ return AH_TRUE;
+ }
+ if (target < pList[i + 1]) {
+ *indexL = i;
+ *indexR = (u_int16_t) (i + 1);
+ return AH_FALSE;
+ }
+ }
+ return AH_FALSE;
+}
+
+/*
+ * ath9k_hw_get_nf_hist_mid - return the median of the noise-floor
+ * calibration history buffer (HAL_NF_CAL_HIST_MAX entries).  The copy is
+ * bubble-sorted into descending order and the middle element returned;
+ * the input buffer is left untouched.
+ */
+static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
+{
+ int16_t nfval;
+ int16_t sort[HAL_NF_CAL_HIST_MAX];
+ int i, j;
+
+ for (i = 0; i < HAL_NF_CAL_HIST_MAX; i++)
+ sort[i] = nfCalBuffer[i];
+
+ /* bubble sort, descending */
+ for (i = 0; i < HAL_NF_CAL_HIST_MAX - 1; i++) {
+ for (j = 1; j < HAL_NF_CAL_HIST_MAX - i; j++) {
+ if (sort[j] > sort[j - 1]) {
+ nfval = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = nfval;
+ }
+ }
+ }
+ nfval = sort[(HAL_NF_CAL_HIST_MAX - 1) >> 1];
+
+ return nfval;
+}
+
+/*
+ * ath9k_hw_update_nfcal_hist_buffer - push one noise-floor reading per
+ * chain into the circular history and refresh the per-chain privNF.
+ *
+ * While invalidNFcount is non-zero the history is still warming up: an
+ * out-of-range reading re-arms the full invalid count, an in-range one
+ * decrements it and is used directly.  Once the history is trusted,
+ * privNF becomes the median of the buffer.
+ */
+static void ath9k_hw_update_nfcal_hist_buffer(struct hal_nfcal_hist *h,
+ int16_t *nfarray)
+{
+ int i;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
+
+ /* circular buffer advance */
+ if (++h[i].currIndex >= HAL_NF_CAL_HIST_MAX)
+ h[i].currIndex = 0;
+
+ if (h[i].invalidNFcount > 0) {
+ if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
+ || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
+ h[i].invalidNFcount = HAL_NF_CAL_HIST_MAX;
+ } else {
+ h[i].invalidNFcount--;
+ h[i].privNF = nfarray[i];
+ }
+ } else {
+ h[i].privNF =
+ ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
+ }
+ }
+ return;
+}
+
+static void ar5416GetNoiseFloor(struct ath_hal *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ int16_t nf;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HDPRINTF(ah, HAL_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HDPRINTF(ah, HAL_DBG_NF_CAL,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ if (!AR_SREV_9280(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
+ AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HDPRINTF(ah, HAL_DBG_NF_CAL,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
+ AR9280_PHY_EXT_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
+ AR_PHY_EXT_MINCCA_PWR);