ar7: 2.6.25.1 support
target/linux/ar7/patches-2.6.25/160-cpmac-rx-ring-use-eoq.diff
Index: linux-2.6.25.1/drivers/net/cpmac.c
===================================================================
--- linux-2.6.25.1.orig/drivers/net/cpmac.c	2008-05-03 16:24:08.000000000 +0200
+++ linux-2.6.25.1/drivers/net/cpmac.c	2008-05-03 16:24:37.000000000 +0200
@@ -187,6 +187,7 @@
 #define CPMAC_EOQ 0x1000
         struct sk_buff *skb;
         struct cpmac_desc *next;
+        struct cpmac_desc *prev;
         dma_addr_t mapping;
         dma_addr_t data_mapping;
 };
@@ -242,6 +243,16 @@
         printk("\n");
 }
 
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+        struct cpmac_priv *priv = netdev_priv(dev);
+        struct cpmac_desc *dump = priv->rx_head;
+        do {
+                cpmac_dump_desc(dev, dump);
+                dump = dump->next;
+        } while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
         int i;
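
As the two hunks above show, the patch turns the rx descriptor chain into a circular doubly linked list: the new prev pointer makes the ring tail (rx_head->prev) reachable in O(1), and cpmac_dump_all_desc walks the circle exactly once with a do/while. Here is a minimal user-space model of that shape; every name in it (ring_desc, RING_SIZE) is illustrative, not taken from the driver.

/* Standalone sketch of a circular, doubly linked descriptor ring,
 * modeled on the next/prev links the patch adds to struct cpmac_desc.
 * Purely illustrative; this is not driver code. */
#include <stdio.h>

struct ring_desc {
        int index;
        struct ring_desc *next;
        struct ring_desc *prev;
};

int main(void)
{
        enum { RING_SIZE = 4 };
        struct ring_desc ring[RING_SIZE];

        /* Link the ring the same way the cpmac_open hunk below does:
         * next by modular arithmetic, prev as the new back-link. */
        for (int i = 0; i < RING_SIZE; i++) {
                ring[i].index = i;
                ring[i].next = &ring[(i + 1) % RING_SIZE];
                ring[i].next->prev = &ring[i];
        }

        /* Visit every descriptor once, as cpmac_dump_all_desc does. */
        struct ring_desc *d = &ring[0];
        do {
                printf("desc %d (prev %d, next %d)\n",
                       d->index, d->prev->index, d->next->index);
                d = d->next;
        } while (d != &ring[0]);

        /* The tail is one hop back from the head: */
        printf("tail of ring headed at 0: desc %d\n", ring[0].prev->index);
        return 0;
}
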
@@ -413,21 +424,40 @@
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
         struct sk_buff *skb;
-        struct cpmac_desc *desc;
-        int received = 0;
+        struct cpmac_desc *desc, *restart;
         struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+        int received = 0, processed = 0;
 
         spin_lock(&priv->rx_lock);
         if (unlikely(!priv->rx_head)) {
                 if (netif_msg_rx_err(priv) && net_ratelimit())
                         printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                                priv->dev->name);
+                spin_unlock(&priv->rx_lock);
                 netif_rx_complete(priv->dev, napi);
                 return 0;
         }
 
         desc = priv->rx_head;
+        restart = NULL;
         while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+                processed++;
+
+                if ((desc->dataflags & CPMAC_EOQ) != 0) {
+                        /* The last update to eoq->hw_next didn't happen soon
+                         * enough, and the receiver stopped here. Remember this
+                         * descriptor so we can restart it after freeing space.
+                         */
+                        if (unlikely(restart)) {
+                                if (netif_msg_rx_err(priv))
+                                        printk(KERN_ERR "%s: poll found a duplicate EOQ: %p and %p\n",
+                                               priv->dev->name, restart, desc);
+                                goto fatal_error;
+                        }
+
+                        restart = desc->next;
+                }
+
                 skb = cpmac_rx_one(priv, desc);
                 if (likely(skb)) {
                         netif_receive_skb(skb);
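
It helps to see the EOQ scan in isolation: the hardware raises EOQ on the last descriptor it wrote before stalling, the loop above records desc->next as the place to restart, and a second EOQ in a single pass can only mean a corrupted ring. The following standalone user-space sketch models that logic; the flag values and every name in it are illustrative, not taken from the driver.

/* Sketch of the EOQ scan in the rewritten poll loop: walk descriptors
 * the hardware has released (OWN clear), remember the restart point at
 * the first EOQ, and treat a second EOQ in the same pass as fatal. */
#include <stdbool.h>
#include <stdio.h>

#define DESC_OWN 0x8000 /* set while the hardware owns the descriptor */
#define DESC_EOQ 0x1000 /* hardware stopped after this descriptor */

struct desc {
        unsigned flags;
        struct desc *next;
};

/* Returns the descriptor to restart rx from, or NULL; sets *fatal on
 * a duplicate EOQ, which cannot happen on a consistent ring. */
static struct desc *scan_for_restart(struct desc *head, int budget,
                                     bool *fatal)
{
        struct desc *d = head, *restart = NULL;
        int received = 0;

        *fatal = false;
        while (!(d->flags & DESC_OWN) && received < budget) {
                if (d->flags & DESC_EOQ) {
                        if (restart) {
                                *fatal = true; /* two EOQs in one pass */
                                return NULL;
                        }
                        /* Receiver stalled here; resume at the next slot. */
                        restart = d->next;
                }
                received++;
                d = d->next;
        }
        return restart;
}

int main(void)
{
        struct desc ring[3] = {
                { 0, &ring[1] },
                { DESC_EOQ, &ring[2] },  /* hardware stopped here */
                { DESC_OWN, &ring[0] },  /* still owned by hardware */
        };
        bool fatal;
        struct desc *r = scan_for_restart(&ring[0], 8, &fatal);

        printf("restart at desc %td, fatal=%d\n", r - ring, fatal);
        return 0;
}
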
@@ -436,19 +466,81 @@
                 desc = desc->next;
         }
 
+        if (desc != priv->rx_head) {
+                /* We freed some buffers but not the whole ring; chain what we freed back onto the rx list */
+                desc->prev->hw_next = (u32)0;
+                priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+        }
+
+        /* Optimization: If we did not actually process an EOQ (perhaps because
+         * of quota limits), check whether the tail of the queue has EOQ set. If
+         * so, restart immediately so the receiver runs in parallel with further
+         * packet processing. This lets us handle slightly larger bursts before
+         * running out of ring space (assuming dev->weight < ring_size).
+         */
+        if (!restart &&
+            (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) == CPMAC_EOQ &&
+            (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+                /* reset EOQ so the poll loop (above) doesn't try to restart
+                 * this when it eventually gets to this descriptor.
+                 */
+                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+                restart = priv->rx_head;
+        }
+
+        if (restart) {
+                priv->dev->stats.rx_errors++;
+                priv->dev->stats.rx_fifo_errors++;
+                if (netif_msg_rx_err(priv) && net_ratelimit())
+                        printk(KERN_WARNING "%s: rx dma ring overrun\n", priv->dev->name);
+
+                if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+                        if (netif_msg_drv(priv))
+                                printk(KERN_ERR "%s: cpmac_poll is trying to restart rx from a descriptor that's not free: %p\n",
+                                       priv->dev->name, restart);
+                        goto fatal_error;
+                }
+
+                cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+        }
+
         priv->rx_head = desc;
         spin_unlock(&priv->rx_lock);
         if (unlikely(netif_msg_rx_status(priv)))
                 printk(KERN_DEBUG "%s: poll processed %d packets\n",
                        priv->dev->name, received);
-        if (desc->dataflags & CPMAC_OWN) {
+        if (processed == 0) {
+                /* we ran out of packets to read; revert to interrupt-driven mode */
                 netif_rx_complete(priv->dev, napi);
-                cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
                 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                 return 0;
         }
 
         return 1;
+
+fatal_error:
+        /* Something went horribly wrong. Reset hardware to try to recover rather than wedging. */
+
+        if (netif_msg_drv(priv)) {
+                printk(KERN_ERR "%s: cpmac_poll is confused. Resetting hardware\n", priv->dev->name);
+                cpmac_dump_all_desc(priv->dev);
+                printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+                       priv->dev->name,
+                       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+                       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+        }
+
+        spin_unlock(&priv->rx_lock);
+        netif_rx_complete(priv->dev, napi);
+        netif_stop_queue(priv->dev);
+        napi_disable(&priv->napi);
+
+        atomic_inc(&priv->reset_pending);
+        cpmac_hw_stop(priv->dev);
+        if (!schedule_work(&priv->reset_work))
+                atomic_dec(&priv->reset_pending);
+        return 0;
+
 }
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
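
The first added block in the hunk above is the heart of the scheme: descriptors freed this pass are spliced back onto the hardware chain behind the ring tail, and the chain is re-terminated at the first still-pending slot. Below is a self-contained sketch of that splice, with hw_next modeled as a plain pointer rather than a DMA address; all names are illustrative.

/* Sketch of the ring maintenance at the end of the new poll pass:
 * the slice between the old rx_head and the current position has
 * been freed, so terminate the chain at the new tail and append
 * the freed slice after the old tail. */
#include <stdio.h>
#include <stddef.h>

struct desc {
        int index;
        struct desc *next, *prev;
        struct desc *hw_next;   /* what the DMA engine follows */
};

static void requeue_freed(struct desc **rx_head, struct desc *desc)
{
        if (desc == *rx_head)
                return;         /* nothing was freed this pass */

        /* Terminate the hardware chain at the slot before the first
         * still-pending descriptor... */
        desc->prev->hw_next = NULL;
        /* ...and splice the freed slice back in after the ring tail. */
        (*rx_head)->prev->hw_next = *rx_head;
        *rx_head = desc;
}

int main(void)
{
        struct desc ring[4];
        for (int i = 0; i < 4; i++) {
                ring[i].index = i;
                ring[i].next = &ring[(i + 1) % 4];
                ring[i].next->prev = &ring[i];
                ring[i].hw_next = ring[i].next;
        }
        ring[3].hw_next = NULL; /* tail terminates the chain */

        struct desc *rx_head = &ring[0];
        requeue_freed(&rx_head, &ring[2]);      /* freed descs 0 and 1 */

        printf("new head: %d, chain ends after %d, tail now points at %d\n",
               rx_head->index, ring[1].index,
               ring[3].hw_next ? ring[3].hw_next->index : -1);
        return 0;
}

Keeping the chain terminated rather than circular is the design choice the whole patch rests on: the receiver stalls visibly with EOQ instead of silently overwriting buffers the CPU has not read yet.
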
@@ -625,8 +717,10 @@
                         desc->dataflags = CPMAC_OWN;
                         dev->stats.rx_dropped++;
                 }
+                desc->hw_next = desc->next->mapping;
                 desc = desc->next;
         }
+        priv->rx_head->prev->hw_next = 0;
 }
 
 static void cpmac_clear_tx(struct net_device *dev)
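
For the clear-path hunk above, the point is that hw_next is rebuilt link by link while every buffer is handed back to the hardware, and only then is the circle broken at the tail. A toy user-space version of that walk, with illustrative names and flag values only:

/* Sketch of what the cpmac_clear_rx hunk does: return each buffer to
 * the hardware, re-chain hw_next unconditionally, then cut the chain
 * at the tail so the DMA engine stops instead of looping forever. */
#include <stdio.h>

#define OWN 0x8000

struct desc {
        unsigned dataflags;
        struct desc *next, *prev;
        struct desc *hw_next;
};

static void clear_rx_ring(struct desc *rx_head, int ring_size)
{
        struct desc *d = rx_head;

        for (int i = 0; i < ring_size; i++) {
                d->dataflags = OWN;     /* give buffer back to hardware */
                d->hw_next = d->next;   /* re-chain link by link */
                d = d->next;
        }
        rx_head->prev->hw_next = NULL;  /* terminate at the tail */
}

int main(void)
{
        struct desc ring[3];
        for (int i = 0; i < 3; i++) {
                ring[i].next = &ring[(i + 1) % 3];
                ring[i].next->prev = &ring[i];
        }
        clear_rx_ring(&ring[0], 3);
        printf("tail hw_next is %s\n", ring[2].hw_next ? "set" : "NULL");
        return 0;
}
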
@@ -928,9 +1022,12 @@
                 desc->buflen = CPMAC_SKB_SIZE;
                 desc->dataflags = CPMAC_OWN;
                 desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+                desc->next->prev = desc;
                 desc->hw_next = (u32)desc->next->mapping;
         }
 
+        priv->rx_head->prev->hw_next = (u32)0;
+
         if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
                                dev->name, dev))) {
                 if (netif_msg_drv(priv))
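
This last hunk is where the ring is first cut: hw_next is chained at setup and the tail's link is zeroed, so the receiver stops with EOQ rather than wrapping over unread buffers. The following standalone sketch models that behavioral difference with a toy rx engine; every name and flag value is made up for illustration.

/* Model DMA engine that follows hw_next until it is NULL.  With the
 * old fully circular chain it would overwrite unread buffers forever;
 * with the chain cut at rx_head->prev it stops at the tail and flags
 * EOQ, which the new poll code then detects and restarts. */
#include <stdio.h>
#include <stddef.h>

#define OWN 0x8000
#define EOQ 0x1000

struct desc {
        unsigned flags;
        struct desc *hw_next;
};

/* Consume up to npkts free descriptors, setting EOQ where the chain
 * runs out, mirroring how the hardware reports a stall. */
static int rx_engine(struct desc *d, int npkts)
{
        int filled = 0;

        while (d && npkts--) {
                d->flags &= ~OWN;       /* hand the filled buffer to the CPU */
                filled++;
                if (!d->hw_next)
                        d->flags |= EOQ;        /* stopped: end of queue */
                d = d->hw_next;
        }
        return filled;
}

int main(void)
{
        struct desc ring[3];
        for (int i = 0; i < 3; i++) {
                ring[i].flags = OWN;
                ring[i].hw_next = (i + 1 < 3) ? &ring[i + 1] : NULL;
        }

        /* 5 packets arrive but only 3 buffers are chained: the engine
         * fills 3 and marks EOQ on the last one instead of wrapping. */
        int filled = rx_engine(&ring[0], 5);
        printf("filled %d buffers, EOQ on tail: %s\n", filled,
               (ring[2].flags & EOQ) ? "yes" : "no");
        return 0;
}
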