clean up madwifi and sync with trunk
[openwrt/svn-archive/archive.git] package/madwifi/patches/300-napi_polling.patch
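Convert the madwifi receive path from the sc_rxtq tasklet to NAPI polling on the pre-2.6.24 net_device interface (dev->poll / dev->weight), keep RX/TX interrupts masked while work is pending by tracking the unmasked status in sc->sc_isr, re-run the TX tasklets while HAL_INT_TX is still set, and hand received frames to the stack with netif_receive_skb() instead of netif_rx().

The fragment below is an illustrative sketch only and is not part of the patch (patch/quilt ignore text above the first diff header). It shows the generic contract a pre-2.6.24 dev->poll handler such as ath_rx_poll() follows; my_rx_one_frame() and my_enable_rx_irq() are hypothetical stand-ins for the driver-specific pieces.

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	/* Hypothetical driver hooks, for illustration only. */
	extern int my_rx_one_frame(struct net_device *dev);	/* 1 if a frame was consumed, 0 if none left */
	extern void my_enable_rx_irq(struct net_device *dev);	/* unmask the device RX interrupt */

	static int my_poll(struct net_device *dev, int *budget)
	{
		int limit = min(dev->quota, *budget);
		int done = 0;

		/* Consume at most 'limit' frames in this polling pass. */
		while (done < limit && my_rx_one_frame(dev))
			done++;

		/* Charge the work to both the per-device quota and the global budget. */
		dev->quota -= done;
		*budget -= done;

		if (done < limit) {
			/* No work left: leave the poll list and re-enable RX interrupts. */
			netif_rx_complete(dev);
			my_enable_rx_irq(dev);
			return 0;
		}
		return 1;	/* budget exhausted, poll again later */
	}

A driver opts in by setting dev->poll and dev->weight (64 here) and by scheduling the poll from its interrupt handler via netif_rx_schedule_prep() / __netif_rx_schedule(), which is exactly what the if_ath.c hunks below do.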
diff -ur madwifi.old/ath/if_ath.c madwifi.dev/ath/if_ath.c
--- madwifi.old/ath/if_ath.c	2007-06-01 11:26:04.181689864 +0200
+++ madwifi.dev/ath/if_ath.c	2007-06-01 11:39:53.078678368 +0200
@@ -167,7 +167,7 @@
 	int, u_int32_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
@@ -443,7 +443,6 @@
 	ATH_TXBUF_LOCK_INIT(sc);
 	ATH_RXBUF_LOCK_INIT(sc);
 
-	ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
@@ -700,6 +699,8 @@
 	dev->set_mac_address = ath_set_mac_address;
 	dev->change_mtu = ath_change_mtu;
 	dev->tx_queue_len = ATH_TXBUF - 1;		/* 1 for mgmt frame */
+	dev->poll = ath_rx_poll;
+	dev->weight = 64;
 #ifdef USE_HEADERLEN_RESV
 	dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
 				sizeof(struct llc) +
@@ -1665,6 +1666,7 @@
 	 */
 	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
 	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+	sc->sc_isr = status;
 	status &= sc->sc_imask;			/* discard unasked for bits */
 	if (status & HAL_INT_FATAL) {
 		sc->sc_stats.ast_hardware++;
@@ -1700,7 +1702,12 @@
 	if (status & HAL_INT_RX) {
 		sc->sc_tsf = ath_hal_gettsf64(ah);
 		ath_uapsd_processtriggers(sc);
-		ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+		sc->sc_isr &= ~HAL_INT_RX;
+		if (netif_rx_schedule_prep(dev)) {
+			sc->sc_imask &= ~HAL_INT_RX;
+			ath_hal_intrset(ah, sc->sc_imask);
+			__netif_rx_schedule(dev);
+		}
 	}
 	if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
@@ -1726,6 +1733,11 @@
 			}
 		}
 #endif
+		/* disable transmit interrupt */
+		sc->sc_isr &= ~HAL_INT_TX;
+		ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+		sc->sc_imask &= ~HAL_INT_TX;
+
 		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
 	}
 	if (status & HAL_INT_BMISS) {
@@ -3296,10 +3308,10 @@
 	 *
 	 * XXX Using in_softirq is not right since we might
 	 * be called from other soft irq contexts than
-	 * ath_rx_tasklet.
+	 * ath_rx_poll
 	 */
 	if (!in_softirq())
-		tasklet_disable(&sc->sc_rxtq);
+		netif_poll_disable(dev);
 	netif_stop_queue(dev);
 }
 
@@ -3312,7 +3324,7 @@
 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
 	netif_start_queue(dev);
 	if (!in_softirq())		/* NB: see above */
-		tasklet_enable(&sc->sc_rxtq);
+		netif_poll_enable(dev);
 }
 
 /*
@@ -5573,13 +5585,12 @@
 		sc->sc_rxotherant = 0;
 }
 
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
 {
 #define PA2DESC(_sc, _pa) \
 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
-	struct net_device *dev = (struct net_device *)data;
 	struct ath_buf *bf;
 	struct ath_softc *sc = dev->priv;
 	struct ieee80211com *ic = &sc->sc_ic;
@@ -5591,12 +5602,15 @@
 	unsigned int len;
 	int type;
 	u_int phyerr;
+	u_int processed = 0, early_stop = 0;
+	int rx_limit = dev->quota;	/* signed: the early-stop check below must be able to go negative */
 
 	/* Let the 802.11 layer know about the new noise floor */
 	sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
 	ic->ic_channoise = sc->sc_channoise;
 
 	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+process_rx_again:
 	do {
 		bf = STAILQ_FIRST(&sc->sc_rxbuf);
 		if (bf == NULL) {		/* XXX ??? can this happen */
@@ -5620,6 +5634,13 @@
 			/* NB: never process the self-linked entry at the end */
 			break;
 		}
+
+		processed++;
+		if (rx_limit-- < 0) {
+			early_stop = 1;
+			break;
+		}
+
 		skb = bf->bf_skb;
 		if (skb == NULL) {		/* XXX ??? can this happen */
 			printk("%s: no skbuff (%s)\n", DEV_NAME(dev), __func__);
@@ -5658,6 +5679,7 @@
 				sc->sc_stats.ast_rx_phyerr++;
 				phyerr = rs->rs_phyerr & 0x1f;
 				sc->sc_stats.ast_rx_phy[phyerr]++;
+				goto rx_next;
 			}
 			if (rs->rs_status & HAL_RXERR_DECRYPT) {
 				/*
@@ -5868,9 +5890,29 @@
 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 		ATH_RXBUF_UNLOCK_IRQ(sc);
 	} while (ath_rxbuf_init(sc, bf) == 0);
+	if (!early_stop) {
+		/* Check if more data is received while we were
+		 * processing the descriptor chain.
+		 */
+		ATH_DISABLE_INTR();
+		if (sc->sc_isr & HAL_INT_RX) {
+			sc->sc_isr &= ~HAL_INT_RX;
+			ATH_ENABLE_INTR();
+			ath_uapsd_processtriggers(sc);
+			goto process_rx_again;
+		}
+		netif_rx_complete(dev);
+
+		sc->sc_imask |= HAL_INT_RX;
+		ath_hal_intrset(ah, sc->sc_imask);
+		ATH_ENABLE_INTR();
+	}
+
+	*budget -= processed;
 
 	/* rx signal state monitoring */
 	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+	return early_stop;
 #undef PA2DESC
 }
 
@@ -7487,11 +7529,22 @@
 	struct net_device *dev = (struct net_device *)data;
 	struct ath_softc *sc = dev->priv;
 
+process_tx_again:
 	if (txqactive(sc->sc_ah, 0))
 		ath_tx_processq(sc, &sc->sc_txq[0]);
 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
 		ath_tx_processq(sc, sc->sc_cabq);
 
+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);
 
 	if (sc->sc_softled)
@@ -7508,6 +7561,7 @@
 	struct net_device *dev = (struct net_device *)data;
 	struct ath_softc *sc = dev->priv;
 
+process_tx_again:
 	/*
 	 * Process each active queue.
 	 */
@@ -7528,6 +7582,16 @@
 	if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
 		ath_tx_processq(sc, sc->sc_uapsdq);
 
+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);
 
 	if (sc->sc_softled)
@@ -7545,6 +7609,7 @@
 	unsigned int i;
 
 	/* Process each active queue. */
+process_tx_again:
 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
 			ath_tx_processq(sc, &sc->sc_txq[i]);
@@ -7553,6 +7618,16 @@
 		ath_tx_processq(sc, sc->sc_xrtxq);
 #endif
 
+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);
 
 	if (sc->sc_softled)
@@ -7651,6 +7726,7 @@
 ath_draintxq(struct ath_softc *sc)
 {
 	struct ath_hal *ah = sc->sc_ah;
+	int npend = 0;
 	unsigned int i;
 
 	/* XXX return value */
@@ -9170,9 +9246,9 @@
 	dev->mtu = mtu;
 	if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
 		/* NB: the rx buffers may need to be reallocated */
-		tasklet_disable(&sc->sc_rxtq);
+		netif_poll_disable(dev);
 		error = ath_reset(dev);
-		tasklet_enable(&sc->sc_rxtq);
+		netif_poll_enable(dev);
 	}
 	ATH_UNLOCK(sc);
 
diff -ur madwifi.old/ath/if_athvar.h madwifi.dev/ath/if_athvar.h
--- madwifi.old/ath/if_athvar.h	2007-06-01 11:26:04.158693360 +0200
+++ madwifi.dev/ath/if_athvar.h	2007-06-01 11:33:26.549439744 +0200
@@ -48,6 +48,10 @@
 #include "if_athioctl.h"
 #include "net80211/ieee80211.h"		/* XXX for WME_NUM_AC */
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
 /*
  * Deduce if tasklets are available.  If not then
  * fall back to using the immediate work queue.
@@ -621,7 +625,6 @@
 	struct ath_buf *sc_rxbufcur;		/* current rx buffer */
 	u_int32_t *sc_rxlink;			/* link ptr in last RX desc */
 	spinlock_t sc_rxbuflock;
-	struct ATH_TQ_STRUCT sc_rxtq;		/* rx intr tasklet */
 	struct ATH_TQ_STRUCT sc_rxorntq;	/* rxorn intr tasklet */
 	u_int8_t sc_defant;			/* current default antenna */
 	u_int8_t sc_rxotherant;			/* rx's on non-default antenna*/
@@ -634,6 +637,7 @@
 	u_int sc_txintrperiod;			/* tx interrupt batching */
 	struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
 	struct ath_txq *sc_ac2q[WME_NUM_AC];	/* WME AC -> h/w qnum */
+	HAL_INT sc_isr;				/* unmasked ISR state */
 	struct ATH_TQ_STRUCT sc_txtq;		/* tx intr tasklet */
 	u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
 	struct ath_descdma sc_bdma;		/* beacon descriptors */
@@ -714,6 +718,8 @@
 #define ATH_TXBUF_LOCK_ASSERT(_sc) \
 	KASSERT(spin_is_locked(&(_sc)->sc_txbuflock), ("txbuf not locked!"))
 
+#define ATH_DISABLE_INTR	local_irq_disable
+#define ATH_ENABLE_INTR		local_irq_enable
 
 #define ATH_RXBUF_LOCK_INIT(_sc)	spin_lock_init(&(_sc)->sc_rxbuflock)
 #define ATH_RXBUF_LOCK_DESTROY(_sc)
diff -ur madwifi.old/net80211/ieee80211_input.c madwifi.dev/net80211/ieee80211_input.c
--- madwifi.old/net80211/ieee80211_input.c	2007-06-01 11:26:04.183689560 +0200
+++ madwifi.dev/net80211/ieee80211_input.c	2007-06-01 11:31:46.931583960 +0200
@@ -1144,8 +1144,9 @@
 		if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
 			/* attach vlan tag */
 			vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
-		} else
-			netif_rx(skb);
+		} else {
+			netif_receive_skb(skb);
+		}
 		dev->last_rx = jiffies;
 	}
 }