madwifi: put an upper limit on the amount of sampling that minstrel should do when
[openwrt/svn-archive/archive.git] / package / madwifi / patches / 300-napi_polling.patch
1 Index: madwifi-trunk-r3314/ath/if_ath.c
2 ===================================================================
3 --- madwifi-trunk-r3314.orig/ath/if_ath.c 2008-03-06 23:49:24.344018025 +0100
4 +++ madwifi-trunk-r3314/ath/if_ath.c 2008-03-07 00:38:01.010996975 +0100
5 @@ -184,7 +184,11 @@
6 struct sk_buff *, int, int, u_int64_t);
7 static void ath_setdefantenna(struct ath_softc *, u_int);
8 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
9 -static void ath_rx_tasklet(TQUEUE_ARG);
10 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
11 +static int ath_rx_poll(struct napi_struct *napi, int budget);
12 +#else
13 +static int ath_rx_poll(struct net_device *dev, int *budget);
14 +#endif
15 static int ath_hardstart(struct sk_buff *, struct net_device *);
16 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
17 #ifdef ATH_SUPERG_COMP
18 @@ -374,6 +378,9 @@
19 u_int32_t new_clamped_maxtxpower);
20 static u_int32_t ath_get_real_maxtxpower(struct ath_softc *sc);
21
22 +static void ath_poll_disable(struct net_device *dev);
23 +static void ath_poll_enable(struct net_device *dev);
24 +
25 /* calibrate every 30 secs in steady state but check every second at first. */
26 static int ath_calinterval = ATH_SHORT_CALINTERVAL;
27 static int ath_countrycode = CTRY_DEFAULT; /* country code */
28 @@ -545,7 +552,6 @@
29
30 atomic_set(&sc->sc_txbuf_counter, 0);
31
32 - ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
33 ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
34 ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
35 ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
36 @@ -819,6 +825,12 @@
37 dev->set_mac_address = ath_set_mac_address;
38 dev->change_mtu = ath_change_mtu;
39 dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
40 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
41 + netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
42 +#else
43 + dev->poll = ath_rx_poll;
44 + dev->weight = 64;
45 +#endif
46 #ifdef USE_HEADERLEN_RESV
47 dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
48 sizeof(struct llc) +
49 @@ -2213,6 +2225,7 @@
50 (status & HAL_INT_GLOBAL) ? " HAL_INT_GLOBAL" : ""
51 );
52
53 + sc->sc_isr = status;
54 status &= sc->sc_imask; /* discard unasked for bits */
55 /* As soon as we know we have a real interrupt we intend to service,
56 * we will check to see if we need an initial hardware TSF reading.
57 @@ -2270,7 +2283,23 @@
58 }
59 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
60 ath_uapsd_processtriggers(sc, hw_tsf);
61 - ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
62 + sc->sc_isr &= ~HAL_INT_RX;
63 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
64 + if (netif_rx_schedule_prep(dev, &sc->sc_napi))
65 +#else
66 + if (netif_rx_schedule_prep(dev))
67 +#endif
68 + {
69 +#ifndef ATH_PRECISE_TSF
70 + sc->sc_imask &= ~HAL_INT_RX;
71 + ath_hal_intrset(ah, sc->sc_imask);
72 +#endif
73 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
74 + __netif_rx_schedule(dev, &sc->sc_napi);
75 +#else
76 + __netif_rx_schedule(dev);
77 +#endif
78 + }
79 }
80 if (status & HAL_INT_TX) {
81 #ifdef ATH_SUPERG_DYNTURBO
82 @@ -2296,6 +2325,11 @@
83 }
84 }
85 #endif
86 + /* disable transmit interrupt */
87 + sc->sc_isr &= ~HAL_INT_TX;
88 + ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
89 + sc->sc_imask &= ~HAL_INT_TX;
90 +
91 ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
92 }
93 if (status & HAL_INT_BMISS) {
94 @@ -2508,6 +2542,7 @@
95 if (sc->sc_tx99 != NULL)
96 sc->sc_tx99->start(sc->sc_tx99);
97 #endif
98 + ath_poll_enable(dev);
99
100 done:
101 ATH_UNLOCK(sc);
102 @@ -2548,6 +2583,9 @@
103 if (sc->sc_tx99 != NULL)
104 sc->sc_tx99->stop(sc->sc_tx99);
105 #endif
106 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
107 + ath_poll_disable(dev);
108 +#endif
109 netif_stop_queue(dev); /* XXX re-enabled by ath_newstate */
110 dev->flags &= ~IFF_RUNNING; /* NB: avoid recursion */
111 ieee80211_stop_running(ic); /* stop all VAPs */
112 @@ -4006,6 +4044,39 @@
113 return ath_keyset(sc, k, mac, vap->iv_bss);
114 }
115
116 +static void ath_poll_disable(struct net_device *dev)
117 +{
118 + struct ath_softc *sc = dev->priv;
119 +
120 + /*
121 + * XXX Using in_softirq is not right since we might
122 + * be called from other soft irq contexts than
123 + * ath_rx_poll
124 + */
125 + if (!in_softirq()) {
126 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
127 + napi_disable(&sc->sc_napi);
128 +#else
129 + netif_poll_disable(dev);
130 +#endif
131 + }
132 +}
133 +
134 +static void ath_poll_enable(struct net_device *dev)
135 +{
136 + struct ath_softc *sc = dev->priv;
137 +
138 + /* NB: see above */
139 + if (!in_softirq()) {
140 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
141 + napi_enable(&sc->sc_napi);
142 +#else
143 + netif_poll_enable(dev);
144 +#endif
145 + }
146 +}
147 +
148 +
149 /*
150 * Block/unblock tx+rx processing while a key change is done.
151 * We assume the caller serializes key management operations
152 @@ -4023,13 +4094,7 @@
153 * When called from the rx tasklet we cannot use
154 * tasklet_disable because it will block waiting
155 * for us to complete execution.
156 - *
157 - * XXX Using in_softirq is not right since we might
158 - * be called from other soft irq contexts than
159 - * ath_rx_tasklet.
160 */
161 - if (!in_softirq())
162 - tasklet_disable(&sc->sc_rxtq);
163 netif_stop_queue(dev);
164 }
165
166 @@ -4040,9 +4105,9 @@
167 struct ath_softc *sc = dev->priv;
168
169 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
170 - netif_wake_queue(dev);
171 - if (!in_softirq()) /* NB: see above */
172 - tasklet_enable(&sc->sc_rxtq);
173 +
174 + if (dev->flags&IFF_RUNNING)
175 + netif_wake_queue(dev);
176 }
177
178 /*
179 @@ -6347,15 +6412,25 @@
180 sc->sc_rxotherant = 0;
181 }
182
183 -static void
184 -ath_rx_tasklet(TQUEUE_ARG data)
185 +static int
186 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
187 +ath_rx_poll(struct napi_struct *napi, int budget)
188 +#else
189 +ath_rx_poll(struct net_device *dev, int *budget)
190 +#endif
191 {
192 #define PA2DESC(_sc, _pa) \
193 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
194 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
195 - struct net_device *dev = (struct net_device *)data;
196 - struct ath_buf *bf;
197 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
198 + struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
199 + struct net_device *dev = sc->sc_dev;
200 + u_int rx_limit = budget;
201 +#else
202 struct ath_softc *sc = dev->priv;
203 + u_int rx_limit = min(dev->quota, *budget);
204 +#endif
205 + struct ath_buf *bf;
206 struct ieee80211com *ic = &sc->sc_ic;
207 struct ath_hal *ah = sc ? sc->sc_ah : NULL;
208 struct ath_desc *ds;
209 @@ -6365,8 +6440,10 @@
210 unsigned int len;
211 int type;
212 u_int phyerr;
213 + u_int processed = 0, early_stop = 0;
214
215 DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
216 +process_rx_again:
217 do {
218 bf = STAILQ_FIRST(&sc->sc_rxbuf);
219 if (bf == NULL) { /* XXX ??? can this happen */
220 @@ -6390,6 +6467,15 @@
221 /* NB: never process the self-linked entry at the end */
222 break;
223 }
224 +
225 +		if (rx_limit-- == 0) {
226 +			early_stop = 1;
227 +			break;
228 +		}
229 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
230 +		processed++;
231 +#endif
232 +
233 skb = bf->bf_skb;
234 if (skb == NULL) {
235 EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
236 @@ -6437,6 +6523,7 @@
237 sc->sc_stats.ast_rx_phyerr++;
238 phyerr = rs->rs_phyerr & 0x1f;
239 sc->sc_stats.ast_rx_phy[phyerr]++;
240 + goto rx_next;
241 }
242 if (rs->rs_status & HAL_RXERR_DECRYPT) {
243 /*
244 @@ -6632,9 +6719,43 @@
245 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
246 ATH_RXBUF_UNLOCK_IRQ(sc);
247 } while (ath_rxbuf_init(sc, bf) == 0);
248 +	if (!early_stop) {
249 +		unsigned long flags;
250 +		/* Check if more data is received while we were
251 +		 * processing the descriptor chain.
252 +		 */
253 +#ifndef ATH_PRECISE_TSF
254 +		local_irq_save(flags);
255 +		if (sc->sc_isr & HAL_INT_RX) {
256 +			u_int64_t hw_tsf = ath_hal_gettsf64(ah);
257 +			sc->sc_isr &= ~HAL_INT_RX;
258 +			local_irq_restore(flags);
259 +			ath_uapsd_processtriggers(sc, hw_tsf);
260 +			goto process_rx_again;
261 +		}
262 +#endif
263 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
264 +		netif_rx_complete(dev, napi);
265 +#else
266 +		netif_rx_complete(dev);
267 +		*budget -= processed;
268 +		dev->quota -= processed;
269 +#endif
270 +#ifndef ATH_PRECISE_TSF
271 +		sc->sc_imask |= HAL_INT_RX;
272 +		ath_hal_intrset(ah, sc->sc_imask);
273 +		local_irq_restore(flags);
274 +#endif
275 +	}
276 +
277
278 /* rx signal state monitoring */
279 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
280 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
281 + return processed;
282 +#else
283 + return early_stop;
284 +#endif
285 #undef PA2DESC
286 }
287
288 @@ -8285,12 +8406,24 @@
289 {
290 struct net_device *dev = (struct net_device *)data;
291 struct ath_softc *sc = dev->priv;
292 + unsigned long flags;
293
294 +process_tx_again:
295 if (txqactive(sc->sc_ah, 0))
296 ath_tx_processq(sc, &sc->sc_txq[0]);
297 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
298 ath_tx_processq(sc, sc->sc_cabq);
299
300 + local_irq_save(flags);
301 + if (sc->sc_isr & HAL_INT_TX) {
302 + sc->sc_isr &= ~HAL_INT_TX;
303 + local_irq_restore(flags);
304 + goto process_tx_again;
305 + }
306 + sc->sc_imask |= HAL_INT_TX;
307 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
308 + local_irq_restore(flags);
309 +
310 netif_wake_queue(dev);
311
312 if (sc->sc_softled)
313 @@ -8306,7 +8439,9 @@
314 {
315 struct net_device *dev = (struct net_device *)data;
316 struct ath_softc *sc = dev->priv;
317 + unsigned long flags;
318
319 +process_tx_again:
320 /*
321 * Process each active queue.
322 */
323 @@ -8327,6 +8462,16 @@
324 if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
325 ath_tx_processq(sc, sc->sc_uapsdq);
326
327 + local_irq_save(flags);
328 + if (sc->sc_isr & HAL_INT_TX) {
329 + sc->sc_isr &= ~HAL_INT_TX;
330 + local_irq_restore(flags);
331 + goto process_tx_again;
332 + }
333 + sc->sc_imask |= HAL_INT_TX;
334 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
335 + local_irq_restore(flags);
336 +
337 netif_wake_queue(dev);
338
339 if (sc->sc_softled)
340 @@ -8342,13 +8487,25 @@
341 struct net_device *dev = (struct net_device *)data;
342 struct ath_softc *sc = dev->priv;
343 unsigned int i;
344 + unsigned long flags;
345
346 /* Process each active queue. This includes sc_cabq, sc_xrtq and
347 * sc_uapsdq */
348 +process_tx_again:
349 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
350 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
351 ath_tx_processq(sc, &sc->sc_txq[i]);
352
353 + local_irq_save(flags);
354 + if (sc->sc_isr & HAL_INT_TX) {
355 + sc->sc_isr &= ~HAL_INT_TX;
356 + local_irq_restore(flags);
357 + goto process_tx_again;
358 + }
359 + sc->sc_imask |= HAL_INT_TX;
360 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
361 + local_irq_restore(flags);
362 +
363 netif_wake_queue(dev);
364
365 if (sc->sc_softled)
366 @@ -8423,6 +8580,7 @@
367 ath_draintxq(struct ath_softc *sc)
368 {
369 struct ath_hal *ah = sc->sc_ah;
370 + int npend = 0;
371 unsigned int i;
372
373 /* XXX return value */
374 @@ -10281,9 +10439,9 @@
375 dev->mtu = mtu;
376 if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
377 /* NB: the rx buffers may need to be reallocated */
378 - tasklet_disable(&sc->sc_rxtq);
379 + ath_poll_disable(dev);
380 error = ath_reset(dev);
381 - tasklet_enable(&sc->sc_rxtq);
382 + ath_poll_enable(dev);
383 }
384 ATH_UNLOCK(sc);
385
386 Index: madwifi-trunk-r3314/ath/if_athvar.h
387 ===================================================================
388 --- madwifi-trunk-r3314.orig/ath/if_athvar.h 2008-03-06 23:49:24.296015306 +0100
389 +++ madwifi-trunk-r3314/ath/if_athvar.h 2008-03-07 00:34:32.792958008 +0100
390 @@ -53,6 +53,10 @@
391 # include <asm/bitops.h>
392 #endif
393
394 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
395 +#define irqs_disabled() 0
396 +#endif
397 +
398 /*
399 * Deduce if tasklets are available. If not then
400 * fall back to using the immediate work queue.
401 @@ -616,6 +620,9 @@
402 struct ath_softc {
403 struct ieee80211com sc_ic; /* NB: must be first */
404 struct net_device *sc_dev;
405 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
406 + struct napi_struct sc_napi;
407 +#endif
408 void __iomem *sc_iobase; /* address of the device */
409 struct semaphore sc_lock; /* dev-level lock */
410 struct net_device_stats sc_devstats; /* device statistics */
411 @@ -730,7 +737,6 @@
412 struct ath_buf *sc_rxbufcur; /* current rx buffer */
413 u_int32_t *sc_rxlink; /* link ptr in last RX desc */
414 spinlock_t sc_rxbuflock;
415 - struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
416 struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
417 u_int8_t sc_defant; /* current default antenna */
418 u_int8_t sc_rxotherant; /* RXs on non-default antenna */
419 @@ -745,6 +751,7 @@
420 u_int sc_txintrperiod; /* tx interrupt batching */
421 struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
422 struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
423 + HAL_INT sc_isr; /* unmasked ISR state */
424 struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
425 u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
426 struct ath_descdma sc_bdma; /* beacon descriptors */
427 @@ -858,6 +865,8 @@
428 #define ATH_TXBUF_LOCK_CHECK(_sc)
429 #endif
430
431 +#define ATH_DISABLE_INTR local_irq_disable
432 +#define ATH_ENABLE_INTR local_irq_enable
433
434 #define ATH_RXBUF_LOCK_INIT(_sc) spin_lock_init(&(_sc)->sc_rxbuflock)
435 #define ATH_RXBUF_LOCK_DESTROY(_sc)
436 Index: madwifi-trunk-r3314/net80211/ieee80211_input.c
437 ===================================================================
438 --- madwifi-trunk-r3314.orig/net80211/ieee80211_input.c 2008-03-06 23:49:11.027259154 +0100
439 +++ madwifi-trunk-r3314/net80211/ieee80211_input.c 2008-03-07 00:34:41.589459294 +0100
440 @@ -1198,7 +1198,7 @@
441 /* attach vlan tag */
442 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
443 if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
444 - /* If netif_rx dropped the packet because
445 + /* If netif_receive_skb dropped the packet because
446 * device was too busy */
447 if (ni_tmp != NULL) {
448 /* node reference was leaked */
449 @@ -1209,8 +1209,8 @@
450 skb = NULL; /* SKB is no longer ours */
451 } else {
452 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
453 - if (netif_rx(skb) == NET_RX_DROP) {
454 - /* If netif_rx dropped the packet because
455 + if (netif_receive_skb(skb) == NET_RX_DROP) {
456 + /* If netif_receive_skb dropped the packet because
457 * device was too busy */
458 if (ni_tmp != NULL) {
459 /* node reference was leaked */
460 @@ -2322,8 +2322,8 @@
461 skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
462
463 ni_tmp = SKB_CB(skb1)->ni;
464 - if (netif_rx(skb1) == NET_RX_DROP) {
465 - /* If netif_rx dropped the packet because
466 + if (netif_receive_skb(skb1) == NET_RX_DROP) {
467 + /* If netif_receive_skb dropped the packet because
468 * device was too busy */
469 if (ni_tmp != NULL) {
470 /* node reference was leaked */
471 Index: madwifi-trunk-r3314/net80211/ieee80211_monitor.c
472 ===================================================================
473 --- madwifi-trunk-r3314.orig/net80211/ieee80211_monitor.c 2008-03-06 23:49:11.031259383 +0100
474 +++ madwifi-trunk-r3314/net80211/ieee80211_monitor.c 2008-03-07 00:33:59.235045655 +0100
475 @@ -584,8 +584,8 @@
476 skb1->protocol =
477 __constant_htons(0x0019); /* ETH_P_80211_RAW */
478
479 - if (netif_rx(skb1) == NET_RX_DROP) {
480 - /* If netif_rx dropped the packet because
481 + if (netif_receive_skb(skb1) == NET_RX_DROP) {
482 + /* If netif_receive_skb dropped the packet because
483 * device was too busy, reclaim the ref. in
484 * the skb. */
485 if (SKB_CB(skb1)->ni != NULL)
486 Index: madwifi-trunk-r3314/net80211/ieee80211_skb.c
487 ===================================================================
488 --- madwifi-trunk-r3314.orig/net80211/ieee80211_skb.c 2008-03-06 23:49:11.039259840 +0100
489 +++ madwifi-trunk-r3314/net80211/ieee80211_skb.c 2008-03-06 23:49:24.384020316 +0100
490 @@ -73,7 +73,7 @@
491 #undef dev_queue_xmit
492 #undef kfree_skb
493 #undef kfree_skb_fast
494 -#undef netif_rx
495 +#undef netif_receive_skb
496 #undef pskb_copy
497 #undef skb_clone
498 #undef skb_copy
499 @@ -638,8 +638,8 @@
500 grp, vlan_tag);
501 }
502
503 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
504 - return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
505 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
506 + return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
507 }
508
509 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
510 @@ -760,7 +760,7 @@
511 }
512
513 EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
514 -EXPORT_SYMBOL(netif_rx_debug);
515 +EXPORT_SYMBOL(netif_receive_skb_debug);
516 EXPORT_SYMBOL(alloc_skb_debug);
517 EXPORT_SYMBOL(dev_alloc_skb_debug);
518 EXPORT_SYMBOL(skb_clone_debug);
519 Index: madwifi-trunk-r3314/net80211/ieee80211_skb.h
520 ===================================================================
521 --- madwifi-trunk-r3314.orig/net80211/ieee80211_skb.h 2008-03-06 23:49:11.047260294 +0100
522 +++ madwifi-trunk-r3314/net80211/ieee80211_skb.h 2008-03-06 23:49:24.384020316 +0100
523 @@ -116,7 +116,7 @@
524 int vlan_hwaccel_receive_skb_debug(struct sk_buff *skb,
525 struct vlan_group *grp, unsigned short vlan_tag,
526 const char* func, int line);
527 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
528 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
529 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
530 const char *func, int line);
531 struct sk_buff * dev_alloc_skb_debug(unsigned int length,
532 @@ -151,7 +151,7 @@
533 #undef dev_queue_xmit
534 #undef kfree_skb
535 #undef kfree_skb_fast
536 -#undef netif_rx
537 +#undef netif_receive_skb
538 #undef pskb_copy
539 #undef skb_clone
540 #undef skb_copy
541 @@ -168,8 +168,8 @@
542 skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
543 #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
544 vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
545 -#define netif_rx(_skb) \
546 - netif_rx_debug(_skb, __func__, __LINE__)
547 +#define netif_receive_skb(_skb) \
548 + netif_receive_skb_debug(_skb, __func__, __LINE__)
549 #define alloc_skb(_length, _gfp_mask) \
550 alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
551 #define dev_alloc_skb(_length) \