177678b988d8e807fea8fd53b902e9cb291e711c
[openwrt/svn-archive/archive.git] / target / linux / rb532 / patches-2.6.27 / 010-korina_rework_korina_rx.patch
1 This function needs an early exit condition to function properly;
2 otherwise the caller assumes the NAPI budget wasn't enough to handle
3 all received packets, and korina_rx is called again (and again and ...).
4
5 Signed-off-by: Phil Sutter <n0-1@freewrt.org>
6 ---
7 --- a/drivers/net/korina.c 2009-01-19 23:19:10.000000000 +0100
8 +++ b/drivers/net/korina.c 2009-01-19 23:25:31.000000000 +0100
9 @@ -353,15 +353,20 @@
10 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
11 struct sk_buff *skb, *skb_new;
12 u8 *pkt_buf;
13 - u32 devcs, pkt_len, dmas, rx_free_desc;
14 + u32 devcs, pkt_len, dmas;
15 int count;
16
17 dma_cache_inv((u32)rd, sizeof(*rd));
18
19 for (count = 0; count < limit; count++) {
20 + skb = lp->rx_skb[lp->rx_next_done];
21 + skb_new = NULL;
22
23 devcs = rd->devcs;
24
25 + if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
26 + break;
27 +
28 /* Update statistics counters */
29 if (devcs & ETH_RX_CRC)
30 dev->stats.rx_crc_errors++;
31 @@ -384,64 +389,53 @@
32 * in Rc32434 (errata ref #077) */
33 dev->stats.rx_errors++;
34 dev->stats.rx_dropped++;
35 - }
36 -
37 - while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
38 - /* init the var. used for the later
39 - * operations within the while loop */
40 - skb_new = NULL;
41 + } else if ((devcs & ETH_RX_ROK)) {
42 pkt_len = RCVPKT_LENGTH(devcs);
43 - skb = lp->rx_skb[lp->rx_next_done];
44 + /* must be the (first and) last
45 + * descriptor then */
46 + pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
47 +
48 + /* invalidate the cache */
49 + dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
50 +
51 + /* Malloc up new buffer. */
52 + skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
53 +
54 + if (!skb_new)
55 + break;
56 + /* Do not count the CRC */
57 + skb_put(skb, pkt_len - 4);
58 + skb->protocol = eth_type_trans(skb, dev);
59 +
60 + /* Pass the packet to upper layers */
61 + netif_receive_skb(skb);
62 + dev->stats.rx_packets++;
63 + dev->stats.rx_bytes += pkt_len;
64 +
65 + /* Update the mcast stats */
66 + if (devcs & ETH_RX_MP)
67 + dev->stats.multicast++;
68
69 - if ((devcs & ETH_RX_ROK)) {
70 - /* must be the (first and) last
71 - * descriptor then */
72 - pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
73 -
74 - /* invalidate the cache */
75 - dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
76 -
77 - /* Malloc up new buffer. */
78 - skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
79 -
80 - if (!skb_new)
81 - break;
82 - /* Do not count the CRC */
83 - skb_put(skb, pkt_len - 4);
84 - skb->protocol = eth_type_trans(skb, dev);
85 -
86 - /* Pass the packet to upper layers */
87 - netif_receive_skb(skb);
88 - dev->last_rx = jiffies;
89 - dev->stats.rx_packets++;
90 - dev->stats.rx_bytes += pkt_len;
91 -
92 - /* Update the mcast stats */
93 - if (devcs & ETH_RX_MP)
94 - dev->stats.multicast++;
95 -
96 - lp->rx_skb[lp->rx_next_done] = skb_new;
97 - }
98 -
99 - rd->devcs = 0;
100 -
101 - /* Restore descriptor's curr_addr */
102 - if (skb_new)
103 - rd->ca = CPHYSADDR(skb_new->data);
104 - else
105 - rd->ca = CPHYSADDR(skb->data);
106 + lp->rx_skb[lp->rx_next_done] = skb_new;
107 + }
108 + rd->devcs = 0;
109 +
110 + /* Restore descriptor's curr_addr */
111 + if (skb_new)
112 + rd->ca = CPHYSADDR(skb_new->data);
113 + else
114 + rd->ca = CPHYSADDR(skb->data);
115
116 - rd->control = DMA_COUNT(KORINA_RBSIZE) |
117 + rd->control = DMA_COUNT(KORINA_RBSIZE) |
118 DMA_DESC_COD | DMA_DESC_IOD;
119 - lp->rd_ring[(lp->rx_next_done - 1) &
120 - KORINA_RDS_MASK].control &=
121 - ~DMA_DESC_COD;
122 -
123 - lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
124 - dma_cache_wback((u32)rd, sizeof(*rd));
125 - rd = &lp->rd_ring[lp->rx_next_done];
126 - writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
127 - }
128 + lp->rd_ring[(lp->rx_next_done - 1) &
129 + KORINA_RDS_MASK].control &=
130 + ~DMA_DESC_COD;
131 +
132 + lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
133 + dma_cache_wback((u32)rd, sizeof(*rd));
134 + rd = &lp->rd_ring[lp->rx_next_done];
135 + writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
136 }
137
138 dmas = readl(&lp->rx_dma_regs->dmas);