diff -urN madwifi-ng-refcount-r2313-20070505.old/ath/if_ath.c madwifi-ng-refcount-r2313-20070505.dev/ath/if_ath.c
--- madwifi-ng-refcount-r2313-20070505.old/ath/if_ath.c 2007-05-13 18:17:56.576968032 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/ath/if_ath.c 2007-05-13 18:17:56.594965296 +0200
@@ -170,7 +170,7 @@
int, u_int32_t);
static void ath_setdefantenna(struct ath_softc *, u_int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
static int ath_hardstart(struct sk_buff *, struct net_device *);
static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
#ifdef ATH_SUPERG_COMP
@@ -420,7 +420,6 @@
ATH_TXBUF_LOCK_INIT(sc);
ATH_RXBUF_LOCK_INIT(sc);

- ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
@@ -674,6 +673,8 @@
dev->set_mac_address = ath_set_mac_address;
dev->change_mtu = ath_change_mtu;
dev->tx_queue_len = ATH_TXBUF - 1; /* 1 for mgmt frame */
+ dev->poll = ath_rx_poll;
+ dev->weight = 64;
#ifdef USE_HEADERLEN_RESV
dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
sizeof(struct llc) +
@@ -1645,6 +1646,7 @@
*/
ath_hal_getisr(ah, &status); /* NB: clears ISR too */
DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+ sc->sc_isr = status;
status &= sc->sc_imask; /* discard unasked for bits */
if (status & HAL_INT_FATAL) {
sc->sc_stats.ast_hardware++;
@@ -1684,7 +1686,12 @@
* might take too long to fire */
ath_hal_process_noisefloor(ah);
sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
- ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+ sc->sc_isr &= ~HAL_INT_RX;
+ if (netif_rx_schedule_prep(dev)) {
+ sc->sc_imask &= ~HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+ __netif_rx_schedule(dev);
+ }
}
if (status & HAL_INT_TX) {
#ifdef ATH_SUPERG_DYNTURBO
@@ -1710,6 +1717,11 @@
}
}
#endif
+ /* disable transmit interrupt */
+ sc->sc_isr &= ~HAL_INT_TX;
+ ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+ sc->sc_imask &= ~HAL_INT_TX;
+
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
sc->sc_tx_start = 0;
}
@@ -2221,12 +2233,13 @@
* Insert the frame on the outbound list and
* pass it on to the hardware.
*/
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_BH(txq);
if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
/*
* The CAB queue is started from the SWBA handler since
* frames only go out on DTIM and to avoid possible races.
*/
+ sc->sc_imask &= ~HAL_INT_SWBA;
ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
@@ -2242,6 +2255,7 @@
ito64(bf->bf_daddr), bf->bf_desc);
}
txq->axq_link = &lastds->ds_link;
+ sc->sc_imask |= HAL_INT_SWBA;
ath_hal_intrset(ah, sc->sc_imask);
} else {
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
@@ -2275,7 +2289,7 @@
sc->sc_tx_start = jiffies;
}
}
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

sc->sc_devstats.tx_packets++;
sc->sc_devstats.tx_bytes += framelen;
@@ -2426,8 +2440,14 @@
unsigned int pktlen;
int framecnt;

+ /*
+ * NB: using _BH style locking even though this function may be called
+ * at interrupt time (within tasklet or bh). This should be harmless
+ * and this function calls others (i.e., ath_tx_start()) which do
+ * the same.
+ */
for (;;) {
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_BH(txq);

bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff)) {
@@ -2441,7 +2461,7 @@
ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);

- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

/* encap and xmit */
bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2462,15 +2482,16 @@
}
bf_ff->bf_node = NULL;

- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}
+ ATH_TXQ_UNLOCK_BH(txq);
}
#endif

#define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK \
- ATH_TXBUF_LOCK_IRQ(sc); \
+ ATH_TXBUF_LOCK_BH(sc); \
bf = STAILQ_FIRST(&sc->sc_txbuf); \
if (bf != NULL) { \
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); \
@@ -2485,10 +2506,21 @@
sc->sc_devstopped = 1; \
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL); \
} \
- ATH_TXBUF_UNLOCK_IRQ(sc); \
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF \
+ ATH_TXBUF_UNLOCK_BH(sc); \
+ if (bf == NULL) { /* NB: should not happen */ \
+ DPRINTF(sc,ATH_DEBUG_XMIT,"%s: discard, no xmit buf\n", __func__); \
+ sc->sc_stats.ast_tx_nobuf++; \
+ goto hardstart_fail; \
+ }
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON \
+ ATH_TXBUF_UNLOCK_BH(sc); \
if (bf == NULL) { /* NB: should not happen */ \
DPRINTF(sc,ATH_DEBUG_XMIT, \
"%s: discard, no xmit buf\n", __func__); \
+ ATH_TXQ_UNLOCK_BH(txq); \
sc->sc_stats.ast_tx_nobuf++; \
goto hardstart_fail; \
}
@@ -2490,6 +2490,7 @@
DPRINTF(sc,ATH_DEBUG_XMIT, \
"%s: discard, no xmit buf\n", __func__); \
sc->sc_stats.ast_tx_nobuf++; \
+ goto hardstart_fail; \
}

/*
@@ -2552,6 +2584,7 @@
if (M_FLAG_GET(skb, M_UAPSD)) {
/* bypass FF handling */
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
if (bf == NULL)
goto hardstart_fail;
goto ff_bypass;
@@ -2573,7 +2606,7 @@
/* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
* call too.
*/
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_BH(txq);
if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {

if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
@@ -2583,7 +2616,7 @@
TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
an->an_tx_ffbuf[skb->priority] = NULL;

- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

/*
* chain skbs and add FF magic
@@ -2610,6 +2643,7 @@
* to give the buffer back.
*/
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON;
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
goto hardstart_fail;
@@ -2624,7 +2658,7 @@

TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);

- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

return 0;
}
@@ -2635,7 +2669,7 @@
TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
an->an_tx_ffbuf[skb->priority] = NULL;

- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

/* encap and xmit */
bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2665,9 +2699,9 @@
}
bf_ff->bf_node = NULL;

- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
goto ff_flushdone;
}
/*
@@ -2676,14 +2677,13 @@
else if (an->an_tx_ffbuf[skb->priority]) {
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
"%s: Out-Of-Order fast-frame\n", __func__);
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
} else
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);

ff_flushdone:
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
- if (bf == NULL)
- goto hardstart_fail;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
}

ff_bypass:
@@ -2691,6 +2725,7 @@
#else /* ATH_SUPERG_FF */

ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;

#endif /* ATH_SUPERG_FF */

@@ -2712,7 +2747,7 @@
* Allocate 1 ath_buf for each frame given 1 was
* already alloc'd
*/
- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2733,11 +2768,11 @@
STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
}
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
STAILQ_INIT(&bf_head);
goto hardstart_fail;
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);

while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
unsigned int nextfraglen = 0;
@@ -2773,7 +2808,7 @@

hardstart_fail:
if (!STAILQ_EMPTY(&bf_head)) {
- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
tbf->bf_skb = NULL;
tbf->bf_node = NULL;
@@ -2783,7 +2818,7 @@

STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}

/* free sk_buffs */
@@ -2826,7 +2861,7 @@
/*
* Grab a TX buffer and associated resources.
*/
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
bf = STAILQ_FIRST(&sc->sc_txbuf);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2837,7 +2872,7 @@
sc->sc_devstopped=1;
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
}
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
if (bf == NULL) {
printk("ath_mgtstart: discard, no xmit buf\n");
sc->sc_stats.ast_tx_nobufmgt++;
@@ -2866,9 +2901,9 @@
bf->bf_skb = NULL;
bf->bf_node = NULL;

- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}
dev_kfree_skb_any(skb);
skb = NULL;
@@ -3336,10 +3371,10 @@
*
* XXX Using in_softirq is not right since we might
* be called from other soft irq contexts than
- * ath_rx_tasklet.
+ * ath_rx_poll
*/
if (!in_softirq())
- tasklet_disable(&sc->sc_rxtq);
+ netif_poll_disable(dev);
netif_stop_queue(dev);
}

@@ -3352,7 +3387,7 @@
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
netif_start_queue(dev);
if (!in_softirq()) /* NB: see above */
- tasklet_enable(&sc->sc_rxtq);
+ netif_poll_enable(dev);
}

/*
@@ -4912,9 +4947,9 @@
bf->bf_node = NULL;
bf->bf_desc->ds_link = 0;

- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);

an->an_uapsd_overflowqdepth--;
}
@@ -5585,13 +5620,12 @@
sc->sc_rxotherant = 0;
}

-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
- struct net_device *dev = (struct net_device *)data;
struct ath_buf *bf;
struct ath_softc *sc = dev->priv;
struct ieee80211com *ic = &sc->sc_ic;
@@ -5602,11 +5636,15 @@
unsigned int len;
int type;
u_int phyerr;
+ int processed = 0, early_stop = 0;
+ int rx_limit = dev->quota;

/* Let the 802.11 layer know about the new noise floor */
ic->ic_channoise = sc->sc_channoise;

DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+
+process_rx_again:
do {
bf = STAILQ_FIRST(&sc->sc_rxbuf);
if (bf == NULL) { /* XXX ??? can this happen */
@@ -5630,6 +5668,13 @@
/* NB: never process the self-linked entry at the end */
break;
}
+
+ processed++;
+ if (rx_limit-- < 0) {
+ early_stop = 1;
+ break;
+ }
+
skb = bf->bf_skb;
if (skb == NULL) { /* XXX ??? can this happen */
printk("%s: no skbuff (%s)\n", dev->name, __func__);
@@ -5668,6 +5668,7 @@
sc->sc_stats.ast_rx_phyerr++;
phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
sc->sc_stats.ast_rx_phy[phyerr]++;
+ goto rx_next;
}
if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
/*
@@ -5878,6 +5923,25 @@
STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
ATH_RXBUF_UNLOCK_IRQ(sc);
} while (ath_rxbuf_init(sc, bf) == 0);
+ if (!early_stop) {
+ /* Check if more data is received while we were
+ * processing the descriptor chain.
+ */
+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_RX) {
+ sc->sc_isr &= ~HAL_INT_RX;
+ ATH_ENABLE_INTR();
+ ath_uapsd_processtriggers(sc);
+ goto process_rx_again;
+ }
+ netif_rx_complete(dev);
+
+ sc->sc_imask |= HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+ }
+
+ *budget -= processed;

/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
@@ -5885,6 +5949,7 @@
sc->sc_rtasksched = 1;
schedule_work(&sc->sc_radartask);
}
+ return early_stop;
#undef PA2DESC
}

@@ -6160,22 +6225,22 @@
}
}

- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
bf = STAILQ_FIRST(&sc->sc_grppollbuf);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
else {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
return;
}
/* XXX use a counter and leave at least one for mgmt frames */
if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
return;
}
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);

bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
skb->data, skb->len, BUS_DMA_TODEVICE);
@@ -6641,9 +6706,9 @@
dev_kfree_skb(lastbuf->bf_skb);
lastbuf->bf_skb = NULL;
ieee80211_unref_node(&lastbuf->bf_node);
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);

/*
* move oldest from overflow to delivery
@@ -7462,9 +7527,6 @@
if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
if (!sc->sc_dfswait)
netif_start_queue(sc->sc_dev);
- DPRINTF(sc, ATH_DEBUG_TX_PROC,
- "%s: tx tasklet restart the queue\n",
- __func__);
sc->sc_reapcount = 0;
sc->sc_devstopped = 0;
} else
@@ -7499,11 +7561,22 @@
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;

+process_tx_again:
if (txqactive(sc->sc_ah, 0))
ath_tx_processq(sc, &sc->sc_txq[0]);
if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
ath_tx_processq(sc, sc->sc_cabq);

+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);

if (sc->sc_softled)
@@ -7520,6 +7593,7 @@
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;

+process_tx_again:
/*
* Process each active queue.
*/
@@ -7540,6 +7614,16 @@
if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
ath_tx_processq(sc, sc->sc_uapsdq);

+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);

if (sc->sc_softled)
@@ -7557,6 +7641,7 @@
unsigned int i;

/* Process each active queue. */
+process_tx_again:
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
ath_tx_processq(sc, &sc->sc_txq[i]);
@@ -7565,6 +7650,16 @@
ath_tx_processq(sc, sc->sc_xrtxq);
#endif

+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);

if (sc->sc_softled)
@@ -7662,6 +7663,7 @@
ath_draintxq(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
+ int npend = 0;
unsigned int i;

/* XXX return value */
@@ -9221,9 +9316,9 @@
dev->mtu = mtu;
if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
/* NB: the rx buffers may need to be reallocated */
- tasklet_disable(&sc->sc_rxtq);
+ netif_poll_disable(dev);
error = ath_reset(dev);
- tasklet_enable(&sc->sc_rxtq);
+ netif_poll_enable(dev);
}
ATH_UNLOCK(sc);

diff -urN madwifi-ng-refcount-r2313-20070505.old/ath/if_athvar.h madwifi-ng-refcount-r2313-20070505.dev/ath/if_athvar.h
--- madwifi-ng-refcount-r2313-20070505.old/ath/if_athvar.h 2007-05-13 18:17:56.363000560 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/ath/if_athvar.h 2007-05-13 18:17:56.595965144 +0200
@@ -47,6 +47,10 @@
#include "if_athioctl.h"
#include "net80211/ieee80211.h" /* XXX for WME_NUM_AC */

+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
/*
* Deduce if tasklets are available. If not then
* fall back to using the immediate work queue.
@@ -477,8 +481,12 @@
#define ATH_TXQ_LOCK_DESTROY(_tq)
#define ATH_TXQ_LOCK(_tq) spin_lock(&(_tq)->axq_lock)
#define ATH_TXQ_UNLOCK(_tq) spin_unlock(&(_tq)->axq_lock)
-#define ATH_TXQ_LOCK_BH(_tq) spin_lock_bh(&(_tq)->axq_lock)
-#define ATH_TXQ_UNLOCK_BH(_tq) spin_unlock_bh(&(_tq)->axq_lock)
+#define ATH_TXQ_LOCK_BH(_tq) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_tq)->axq_lock)
+#define ATH_TXQ_UNLOCK_BH(_tq) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_IRQ(_tq) do { \
unsigned long __axq_lockflags; \
spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
@@ -627,7 +635,6 @@
struct ath_buf *sc_rxbufcur; /* current rx buffer */
u_int32_t *sc_rxlink; /* link ptr in last RX desc */
spinlock_t sc_rxbuflock;
- struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
u_int8_t sc_defant; /* current default antenna */
u_int8_t sc_rxotherant; /* rx's on non-default antenna*/
@@ -640,6 +647,7 @@
u_int sc_txintrperiod; /* tx interrupt batching */
struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+ HAL_INT sc_isr; /* unmasked ISR state */
struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
struct ath_descdma sc_bdma; /* beacon descriptors */
@@ -706,8 +714,12 @@
#define ATH_TXBUF_LOCK_DESTROY(_sc)
#define ATH_TXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_txbuflock)
-#define ATH_TXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_txbuflock)
-#define ATH_TXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_txbuflock)
+#define ATH_TXBUF_LOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_sc)->sc_txbuflock)
+#define ATH_TXBUF_UNLOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_LOCK_IRQ(_sc) do { \
unsigned long __txbuflockflags; \
spin_lock_irqsave(&(_sc)->sc_txbuflock, __txbuflockflags);
@@ -725,8 +737,12 @@
#define ATH_RXBUF_LOCK_DESTROY(_sc)
#define ATH_RXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_rxbuflock)
#define ATH_RXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_rxbuflock)
-#define ATH_RXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_rxbuflock)
-#define ATH_RXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_rxbuflock)
+#define ATH_RXBUF_LOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_sc)->sc_rxbuflock)
+#define ATH_RXBUF_UNLOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_sc)->sc_rxbuflock)
#define ATH_RXBUF_LOCK_IRQ(_sc) do { \
unsigned long __rxbuflockflags; \
spin_lock_irqsave(&(_sc)->sc_rxbuflock, __rxbuflockflags);
@@ -736,6 +752,8 @@
#define ATH_RXBUF_UNLOCK_IRQ_EARLY(_sc) \
spin_unlock_irqrestore(&(_sc)->sc_rxbuflock, __rxbuflockflags);

+#define ATH_DISABLE_INTR local_irq_disable
+#define ATH_ENABLE_INTR local_irq_enable

/* Protects the device from concurrent accesses */
#define ATH_LOCK_INIT(_sc) init_MUTEX(&(_sc)->sc_lock)
diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_beacon.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_beacon.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_beacon.c 2007-01-30 05:01:29.000000000 +0100
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_beacon.c 2007-05-13 18:17:56.596964992 +0200
@@ -286,7 +286,7 @@
int len_changed = 0;
u_int16_t capinfo;

- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);

if ((ic->ic_flags & IEEE80211_F_DOTH) &&
(vap->iv_flags & IEEE80211_F_CHANSWITCH) &&
@@ -547,7 +547,7 @@
vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
}

- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);

return len_changed;
}
diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_input.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_input.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_input.c 2007-05-13 18:17:56.106039624 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_input.c 2007-05-13 18:17:56.597964840 +0200
@@ -1148,8 +1148,9 @@
if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
/* attach vlan tag */
vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
- } else
- netif_rx(skb);
+ } else {
+ netif_receive_skb(skb);
+ }
dev->last_rx = jiffies;
}
}
@@ -3623,9 +3624,9 @@
}

/* Okay, take the first queued packet and put it out... */
- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
"%s", "recv ps-poll, but queue empty");
diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_linux.h madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_linux.h
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_linux.h 2007-05-04 02:10:06.000000000 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_linux.h 2007-05-13 18:17:56.598964688 +0200
@@ -31,6 +31,10 @@

#include <linux/wireless.h>

+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
/*
* Task deferral
*
@@ -86,8 +90,12 @@
} while (0)
#define IEEE80211_UNLOCK_IRQ_EARLY(_ic) \
spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
-#define IEEE80211_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_comlock)
-#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_LOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_ic)->ic_comlock)
#define IEEE80211_LOCK(_ic) spin_lock(&(_ic)->ic_comlock)
#define IEEE80211_UNLOCK(_ic) spin_unlock(&(_ic)->ic_comlock)

@@ -104,15 +112,22 @@
#define IEEE80211_VAPS_LOCK_DESTROY(_ic)
#define IEEE80211_VAPS_LOCK(_ic) spin_lock(&(_ic)->ic_vapslock);
#define IEEE80211_VAPS_UNLOCK(_ic) spin_unlock(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
- int _vaps_lockflags; \
- spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
-#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
- spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
-} while (0)
-#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
+#define IEEE80211_VAPS_LOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
+ unsigned long __vlockflags=0; \
+ unsigned int __vlocked=0; \
+ __vlocked=spin_is_locked(&(_ic)->ic_vapslock); \
+ if(!__vlocked) spin_lock_irqsave(&(_ic)->ic_vapslock, __vlockflags);
+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
+ if(!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags); \
+} while (0);
+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) \
+ if (!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
@@ -122,6 +137,11 @@
#define IEEE80211_VAPS_LOCK_ASSERT(_ic)
#endif

+/*
+ * Beacon locking definitions; piggyback on com lock.
+ */
+#define IEEE80211_BEACON_LOCK(_ic) IEEE80211_LOCK_IRQ(_ic)
+#define IEEE80211_BEACON_UNLOCK(_ic) IEEE80211_UNLOCK_IRQ(_ic)

/*
* Node locking definitions.
@@ -191,8 +211,12 @@
typedef spinlock_t ieee80211_scan_lock_t;
#define IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
#define IEEE80211_SCAN_LOCK_DESTROY(_nt)
-#define IEEE80211_SCAN_LOCK_BH(_nt) spin_lock_bh(&(_nt)->nt_scanlock)
-#define IEEE80211_SCAN_UNLOCK_BH(_nt) spin_unlock_bh(&(_nt)->nt_scanlock)
+#define IEEE80211_SCAN_LOCK_BH(_nt) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_nt)->nt_scanlock)
+#define IEEE80211_SCAN_UNLOCK_BH(_nt) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_nt)->nt_scanlock)
#define IEEE80211_SCAN_LOCK_IRQ(_nt) do { \
unsigned long __scan_lockflags; \
spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
@@ -217,8 +241,12 @@
#define ACL_LOCK_DESTROY(_as)
#define ACL_LOCK(_as) spin_lock(&(_as)->as_lock)
#define ACL_UNLOCK(_as) spin_unlock(&(_as)->as_lock)
-#define ACL_LOCK_BH(_as) spin_lock_bh(&(_as)->as_lock)
-#define ACL_UNLOCK_BH(_as) spin_unlock_bh(&(_as)->as_lock)
+#define ACL_LOCK_BH(_as) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_as)->as_lock)
+#define ACL_UNLOCK_BH(_as) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_as)->as_lock)

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define ACL_LOCK_ASSERT(_as) \
diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_node.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_node.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_node.c 2007-05-13 18:17:56.273014240 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_node.c 2007-05-13 18:17:56.599964536 +0200
@@ -1567,7 +1567,7 @@
struct ieee80211_node *ni;
u_int gen;

- IEEE80211_SCAN_LOCK_IRQ(nt);
+ IEEE80211_SCAN_LOCK_BH(nt);
gen = ++nt->nt_scangen;

restart:
@@ -1587,7 +1587,7 @@
}
IEEE80211_NODE_TABLE_UNLOCK_IRQ(nt);

- IEEE80211_SCAN_UNLOCK_IRQ(nt);
+ IEEE80211_SCAN_UNLOCK_BH(nt);
}
EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);

diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_power.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_power.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_power.c 2007-04-25 22:29:55.000000000 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_power.c 2007-05-13 18:17:56.599964536 +0200
@@ -147,7 +147,7 @@
#endif
struct sk_buff *skb;

- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
@@ -159,7 +159,7 @@
}
if (skb != NULL)
M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);

IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"discard %u frames for age", discard);
@@ -185,7 +185,7 @@
KASSERT(aid < vap->iv_max_aid,
("bogus aid %u, max %u", aid, vap->iv_max_aid));

- IEEE80211_LOCK(ni->ni_ic);
+ IEEE80211_BEACON_LOCK(ni->ni_ic);
if (set != (isset(vap->iv_tim_bitmap, aid) != 0)) {
if (set) {
setbit(vap->iv_tim_bitmap, aid);
@@ -196,7 +196,7 @@
}
vap->iv_flags |= IEEE80211_F_TIMUPDATE;
}
- IEEE80211_UNLOCK(ni->ni_ic);
+ IEEE80211_BEACON_UNLOCK(ni->ni_ic);
}

/*
@@ -297,9 +297,9 @@
struct sk_buff *skb;
int qlen;

- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL)
break;
/*
@@ -363,9 +363,9 @@
for (;;) {
struct sk_buff *skb;

- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
skb = __skb_dequeue(&ni->ni_savedq);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL)
break;
ieee80211_parent_queue_xmit(skb);
diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_proto.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_proto.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_proto.c 2007-05-13 18:17:56.578967728 +0200
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_proto.c 2007-05-13 18:17:56.600964384 +0200
@@ -635,9 +635,9 @@
{
struct ieee80211com *ic = vap->iv_ic;

- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);
ieee80211_wme_initparams_locked(vap);
- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);
}

void
@@ -920,9 +920,9 @@
struct ieee80211com *ic = vap->iv_ic;

if (ic->ic_caps & IEEE80211_C_WME) {
- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);
ieee80211_wme_updateparams_locked(vap);
- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);
}
}

diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_scan_sta.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_scan_sta.c
--- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_scan_sta.c 2007-02-01 21:49:37.000000000 +0100
+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_scan_sta.c 2007-05-13 18:17:56.601964232 +0200
@@ -163,9 +163,11 @@
{
struct sta_table *st = ss->ss_priv;

- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
sta_flush_table(st);
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);
ss->ss_last = 0;
return 0;
}
@@ -215,7 +217,8 @@
int hash;

hash = STA_HASH(macaddr);
- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
LIST_FOREACH(se, &st->st_hash[hash], se_hash)
if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
sp->ssid[1] == se->base.se_ssid[1] &&
@@ -225,7 +228,7 @@
MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
M_80211_SCAN, M_NOWAIT | M_ZERO);
if (se == NULL) {
- spin_unlock(&st->st_lock);
+ spin_unlock_bh(&st->st_lock);
return 0;
}
se->se_scangen = st->st_scangen-1;
@@ -287,7 +290,8 @@
se->se_seen = 1;
se->se_notseen = 0;

- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);

/*
* If looking for a quick choice and nothing's
@@ -1063,7 +1067,8 @@
u_int gen;
int res = 0;

- spin_lock(&st->st_scanlock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_scanlock);
gen = st->st_scangen++;
restart:
spin_lock(&st->st_lock);
@@ -1086,7 +1091,8 @@
spin_unlock(&st->st_lock);

done:
- spin_unlock(&st->st_scanlock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_scanlock);

return res;
}
@@ -1235,7 +1241,8 @@
bestchan = NULL;
bestrssi = -1;

- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
for (i = 0; i < ss->ss_last; i++) {
c = ss->ss_chans[i];
maxrssi = 0;
@@ -1248,7 +1255,8 @@
if (bestchan == NULL || maxrssi < bestrssi)
bestchan = c;
}
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);

return bestchan;
}