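madwifi: convert the receive path to NAPI polling (300-napi_polling.patch)

Replace the sc_rxtq receive tasklet with the (pre-2.6.24) NAPI polling
interface: ath_rx_poll() is registered as dev->poll with a weight of 64, the
interrupt handler records the unmasked status in the new sc_isr field, masks
the RX/TX interrupts and schedules polling through netif_rx_schedule_prep()/
__netif_rx_schedule(), and the rx/tx completion paths re-check sc_isr and only
re-enable the interrupts once their queues are drained. Since frames are now
delivered from poll (softirq) context, the net80211 delivery paths and the skb
debugging wrappers are switched from netif_rx() to netif_receive_skb().

For reference only, a minimal sketch of the driver-side contract that an
old-style dev->poll handler such as ath_rx_poll() follows; the my_* helpers
are placeholders, not madwifi functions:

	/* Old-style NAPI poll: process at most dev->quota frames, charge the
	 * work against *budget, and re-enable the RX interrupt only after the
	 * ring has been drained and the device has left the poll list. */
	static int my_poll(struct net_device *dev, int *budget)
	{
		int limit = dev->quota < *budget ? dev->quota : *budget;
		int done = 0;

		while (done < limit && my_rx_ring_nonempty(dev)) {
			my_deliver_one_frame(dev);	/* ends in netif_receive_skb() */
			done++;
		}
		*budget -= done;
		dev->quota -= done;

		if (my_rx_ring_nonempty(dev))
			return 1;		/* more work: stay on the poll list */

		netif_rx_complete(dev);		/* remove from poll list, then... */
		my_enable_rx_irq(dev);		/* ...unmask the RX interrupt */
		return 0;
	}

ath_rx_poll() follows the same pattern, using dev->quota as its per-poll limit
and subtracting the number of processed descriptors from *budget.
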
Index: madwifi-dfs-r3246/ath/if_ath.c
===================================================================
--- madwifi-dfs-r3246.orig/ath/if_ath.c 2008-01-25 16:28:31.352327279 +0100
+++ madwifi-dfs-r3246/ath/if_ath.c 2008-01-25 16:30:04.921659489 +0100
@@ -188,7 +188,7 @@
 	struct sk_buff *, int, int, u_int64_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
@@ -545,7 +545,6 @@
 	ATH_TXBUF_LOCK_INIT(sc);
 	ATH_RXBUF_LOCK_INIT(sc);

-	ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
 	ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
@@ -822,6 +821,8 @@
 	dev->set_mac_address = ath_set_mac_address;
 	dev->change_mtu = ath_change_mtu;
 	dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
+	dev->poll = ath_rx_poll;
+	dev->weight = 64;
 #ifdef USE_HEADERLEN_RESV
 	dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
 				sizeof(struct llc) +
@@ -2233,6 +2234,7 @@
 		(status & HAL_INT_GLOBAL) ? " HAL_INT_GLOBAL" : ""
 		);

+	sc->sc_isr = status;
 	status &= sc->sc_imask; /* discard unasked for bits */
 	if (status & HAL_INT_FATAL) {
 		sc->sc_stats.ast_hardware++;
@@ -2284,7 +2286,14 @@
 	}
 	if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
 		ath_uapsd_processtriggers(sc);
-		ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+		sc->sc_isr &= ~HAL_INT_RX;
+		if (netif_rx_schedule_prep(dev)) {
+#ifndef ATH_PRECISE_TSF
+			sc->sc_imask &= ~HAL_INT_RX;
+			ath_hal_intrset(ah, sc->sc_imask);
+#endif
+			__netif_rx_schedule(dev);
+		}
 	}
 	if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
@@ -2310,6 +2319,11 @@
 			}
 		}
 #endif
+		/* disable transmit interrupt */
+		sc->sc_isr &= ~HAL_INT_TX;
+		ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+		sc->sc_imask &= ~HAL_INT_TX;
+
 		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
 	}
 	if (status & HAL_INT_BMISS) {
@@ -4017,10 +4031,10 @@
 	 *
 	 * XXX Using in_softirq is not right since we might
 	 * be called from other soft irq contexts than
-	 * ath_rx_tasklet.
+	 * ath_rx_poll
 	 */
 	if (!in_softirq())
-		tasklet_disable(&sc->sc_rxtq);
+		netif_poll_disable(dev);
 	netif_stop_queue(dev);
 }

@@ -4033,7 +4047,7 @@
 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
 	netif_start_queue(dev);
 	if (!in_softirq()) /* NB: see above */
-		tasklet_enable(&sc->sc_rxtq);
+		netif_poll_enable(dev);
 }

 /*
@@ -6358,13 +6372,12 @@
 	sc->sc_rxotherant = 0;
 }

-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
 {
 #define PA2DESC(_sc, _pa) \
 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
-	struct net_device *dev = (struct net_device *)data;
 	struct ath_buf *bf;
 	struct ath_softc *sc = dev->priv;
 	struct ieee80211com *ic = &sc->sc_ic;
@@ -6376,8 +6389,11 @@
 	unsigned int len;
 	int type;
 	u_int phyerr;
+	u_int processed = 0, early_stop = 0;
+	u_int rx_limit = dev->quota;

 	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+process_rx_again:
 	do {
 		bf = STAILQ_FIRST(&sc->sc_rxbuf);
 		if (bf == NULL) { /* XXX ??? can this happen */
@@ -6401,6 +6417,13 @@
 			/* NB: never process the self-linked entry at the end */
 			break;
 		}
+
+		processed++;
+		if (rx_limit-- < 0) {
+			early_stop = 1;
+			break;
+		}
+
 		skb = bf->bf_skb;
 		if (skb == NULL) {
 			printk("%s: no skbuff (%s)\n", DEV_NAME(dev), __func__);
@@ -6448,6 +6471,7 @@
 			sc->sc_stats.ast_rx_phyerr++;
 			phyerr = rs->rs_phyerr & 0x1f;
 			sc->sc_stats.ast_rx_phy[phyerr]++;
+			goto rx_next;
 		}
 		if (rs->rs_status & HAL_RXERR_DECRYPT) {
 			/*
@@ -6658,9 +6682,33 @@
 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 		ATH_RXBUF_UNLOCK_IRQ(sc);
 	} while (ath_rxbuf_init(sc, bf) == 0);
+	if (!early_stop) {
+		/* Check if more data is received while we were
+		 * processing the descriptor chain.
+		 */
+#ifndef ATH_PRECISE_TSF
+		ATH_DISABLE_INTR();
+		if (sc->sc_isr & HAL_INT_RX) {
+			sc->sc_isr &= ~HAL_INT_RX;
+			ATH_ENABLE_INTR();
+			ath_uapsd_processtriggers(sc);
+			goto process_rx_again;
+		}
+#endif
+		netif_rx_complete(dev);
+
+#ifndef ATH_PRECISE_TSF
+		sc->sc_imask |= HAL_INT_RX;
+		ath_hal_intrset(ah, sc->sc_imask);
+		ATH_ENABLE_INTR();
+#endif
+	}
+
+	*budget -= processed;

 	/* rx signal state monitoring */
 	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+	return early_stop;
 #undef PA2DESC
 }

@@ -8308,11 +8356,22 @@
 	struct net_device *dev = (struct net_device *)data;
 	struct ath_softc *sc = dev->priv;

+process_tx_again:
 	if (txqactive(sc->sc_ah, 0))
 		ath_tx_processq(sc, &sc->sc_txq[0]);
 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
 		ath_tx_processq(sc, sc->sc_cabq);

+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);

 	if (sc->sc_softled)
@@ -8329,6 +8388,7 @@
 	struct net_device *dev = (struct net_device *)data;
 	struct ath_softc *sc = dev->priv;

+process_tx_again:
 	/*
 	 * Process each active queue.
 	 */
@@ -8349,6 +8409,16 @@
 	if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
 		ath_tx_processq(sc, sc->sc_uapsdq);

+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);

 	if (sc->sc_softled)
@@ -8367,10 +8437,21 @@

 	/* Process each active queue. This includes sc_cabq, sc_xrtq and
 	 * sc_uapsdq */
+process_tx_again:
 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
 			ath_tx_processq(sc, &sc->sc_txq[i]);

+	ATH_DISABLE_INTR();
+	if (sc->sc_isr & HAL_INT_TX) {
+		sc->sc_isr &= ~HAL_INT_TX;
+		ATH_ENABLE_INTR();
+		goto process_tx_again;
+	}
+	sc->sc_imask |= HAL_INT_TX;
+	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+	ATH_ENABLE_INTR();
+
 	netif_wake_queue(dev);

 	if (sc->sc_softled)
@@ -8445,6 +8526,7 @@
 ath_draintxq(struct ath_softc *sc)
 {
 	struct ath_hal *ah = sc->sc_ah;
+	int npend = 0;
 	unsigned int i;

 	/* XXX return value */
@@ -10310,9 +10392,9 @@
 		dev->mtu = mtu;
 		if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
 			/* NB: the rx buffers may need to be reallocated */
-			tasklet_disable(&sc->sc_rxtq);
+			netif_poll_disable(dev);
 			error = ath_reset(dev);
-			tasklet_enable(&sc->sc_rxtq);
+			netif_poll_enable(dev);
 		}
 		ATH_UNLOCK(sc);

Index: madwifi-dfs-r3246/ath/if_athvar.h
===================================================================
--- madwifi-dfs-r3246.orig/ath/if_athvar.h 2008-01-25 16:28:31.304324541 +0100
+++ madwifi-dfs-r3246/ath/if_athvar.h 2008-01-25 16:28:36.740634339 +0100
@@ -50,6 +50,10 @@
 #include <asm/io.h>
 #include <linux/list.h>

+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
 /*
  * Deduce if tasklets are available. If not then
  * fall back to using the immediate work queue.
@@ -728,7 +732,6 @@
 	struct ath_buf *sc_rxbufcur; /* current rx buffer */
 	u_int32_t *sc_rxlink; /* link ptr in last RX desc */
 	spinlock_t sc_rxbuflock;
-	struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
 	struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
 	u_int8_t sc_defant; /* current default antenna */
 	u_int8_t sc_rxotherant; /* RXs on non-default antenna */
@@ -741,6 +744,7 @@
 	u_int sc_txintrperiod; /* tx interrupt batching */
 	struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
 	struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+	HAL_INT sc_isr; /* unmasked ISR state */
 	struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
 	u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
 	struct ath_descdma sc_bdma; /* beacon descriptors */
@@ -854,6 +858,8 @@
 #define ATH_TXBUF_LOCK_CHECK(_sc)
 #endif

+#define ATH_DISABLE_INTR local_irq_disable
+#define ATH_ENABLE_INTR local_irq_enable

 #define ATH_RXBUF_LOCK_INIT(_sc) spin_lock_init(&(_sc)->sc_rxbuflock)
 #define ATH_RXBUF_LOCK_DESTROY(_sc)
Index: madwifi-dfs-r3246/net80211/ieee80211_input.c
===================================================================
--- madwifi-dfs-r3246.orig/net80211/ieee80211_input.c 2008-01-25 16:27:28.000000000 +0100
+++ madwifi-dfs-r3246/net80211/ieee80211_input.c 2008-01-25 16:32:53.171247487 +0100
@@ -1196,7 +1196,7 @@
 			/* attach vlan tag */
 			struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
 			if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
-				/* If netif_rx dropped the packet because
+				/* If netif_receive_skb dropped the packet because
 				 * device was too busy */
 				if (ni_tmp != NULL) {
 					/* node reference was leaked */
@@ -1207,8 +1207,8 @@
 			skb = NULL; /* SKB is no longer ours */
 		} else {
 			struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
-			if (netif_rx(skb) == NET_RX_DROP) {
-				/* If netif_rx dropped the packet because
+			if (netif_receive_skb(skb) == NET_RX_DROP) {
+				/* If netif_receive_skb dropped the packet because
 				 * device was too busy */
 				if (ni_tmp != NULL) {
 					/* node reference was leaked */
@@ -2313,8 +2313,8 @@
 		skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */

 		ni_tmp = SKB_CB(skb1)->ni;
-		if (netif_rx(skb1) == NET_RX_DROP) {
-			/* If netif_rx dropped the packet because
+		if (netif_receive_skb(skb1) == NET_RX_DROP) {
+			/* If netif_receive_skb dropped the packet because
 			 * device was too busy */
 			if (ni_tmp != NULL) {
 				/* node reference was leaked */
Index: madwifi-dfs-r3246/net80211/ieee80211_monitor.c
===================================================================
--- madwifi-dfs-r3246.orig/net80211/ieee80211_monitor.c 2008-01-25 16:27:28.000000000 +0100
+++ madwifi-dfs-r3246/net80211/ieee80211_monitor.c 2008-01-25 16:32:05.436527235 +0100
@@ -586,8 +586,8 @@
 			skb1->protocol =
 				__constant_htons(0x0019); /* ETH_P_80211_RAW */

-			if (netif_rx(skb1) == NET_RX_DROP) {
-				/* If netif_rx dropped the packet because
+			if (netif_receive_skb(skb1) == NET_RX_DROP) {
+				/* If netif_receive_skb dropped the packet because
 				 * device was too busy, reclaim the ref. in
 				 * the skb. */
 				if (SKB_CB(skb1)->ni != NULL)
Index: madwifi-dfs-r3246/net80211/ieee80211_skb.c
===================================================================
--- madwifi-dfs-r3246.orig/net80211/ieee80211_skb.c 2008-01-25 16:31:47.167486151 +0100
+++ madwifi-dfs-r3246/net80211/ieee80211_skb.c 2008-01-25 16:31:55.695972162 +0100
@@ -73,7 +73,7 @@
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -627,8 +627,8 @@
 			grp, vlan_tag);
 }

-int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
-	return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
+	return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
 }

 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
@@ -749,7 +749,7 @@
 }

 EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
-EXPORT_SYMBOL(netif_rx_debug);
+EXPORT_SYMBOL(netif_receive_skb_debug);
 EXPORT_SYMBOL(alloc_skb_debug);
 EXPORT_SYMBOL(dev_alloc_skb_debug);
 EXPORT_SYMBOL(skb_clone_debug);
Index: madwifi-dfs-r3246/net80211/ieee80211_skb.h
===================================================================
--- madwifi-dfs-r3246.orig/net80211/ieee80211_skb.h 2008-01-25 16:31:20.845986170 +0100
+++ madwifi-dfs-r3246/net80211/ieee80211_skb.h 2008-01-25 16:31:36.982905763 +0100
@@ -116,7 +116,7 @@
 int vlan_hwaccel_receive_skb_debug(struct sk_buff *skb,
 	struct vlan_group *grp, unsigned short vlan_tag,
 	const char* func, int line);
-int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
 	const char *func, int line);
 struct sk_buff * dev_alloc_skb_debug(unsigned int length,
@@ -151,7 +151,7 @@
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -168,8 +168,8 @@
 	skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
 #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
 	vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
-#define netif_rx(_skb) \
-	netif_rx_debug(_skb, __func__, __LINE__)
+#define netif_receive_skb(_skb) \
+	netif_receive_skb_debug(_skb, __func__, __LINE__)
 #define alloc_skb(_length, _gfp_mask) \
 	alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
 #define dev_alloc_skb(_length) \