target/linux/cns3xxx/patches-3.3/410-ethernet_fix_jumbo_frame.patch
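This patch moves the RX path to fixed 4 KiB segments handed to build_skb() and
reassembled into fraglist skbs, and sizes TX by walking both the page frags and
the frag list. As an illustration of how the segment macros below carve up one
kzalloc'd buffer, here is a minimal standalone sketch (illustration only; the
NET_SKB_PAD, NET_IP_ALIGN and skb_shared_info figures are placeholder
assumptions, not the driver's actual build-time values):

/*
 * Standalone sketch (illustration only): how one RX_SEGMENT_ALLOC_SIZE
 * buffer is carved up for build_skb().  The first four constants are
 * placeholder assumptions standing in for the kernel-provided values.
 */
#include <stdio.h>

#define PAGE_SIZE		4096	/* placeholder */
#define NET_SKB_PAD		64	/* placeholder */
#define NET_IP_ALIGN		2	/* placeholder */
#define SHINFO_OVERHEAD		320	/* placeholder for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

#define RX_BUFFER_ALIGN		64
#define RX_BUFFER_ALIGN_MASK	(~(RX_BUFFER_ALIGN - 1))
#define SKB_HEAD_ALIGN		(((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
#define RX_SEGMENT_ALLOC_SIZE	4096
#define RX_SEGMENT_BUFSIZE	(RX_SEGMENT_ALLOC_SIZE - SHINFO_OVERHEAD)	/* stand-in for SKB_WITH_OVERHEAD() */
#define RX_SEGMENT_MRU		(((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)

int main(void)
{
	/* layout: [SKB_HEAD_ALIGN pad][RX_SEGMENT_MRU DMA area][skb_shared_info tail] */
	printf("SKB_HEAD_ALIGN     = %d\n", SKB_HEAD_ALIGN);
	printf("RX_SEGMENT_BUFSIZE = %d\n", RX_SEGMENT_BUFSIZE);
	printf("RX_SEGMENT_MRU     = %d\n", RX_SEGMENT_MRU);
	return 0;
}

With those placeholder values the sketch prints SKB_HEAD_ALIGN=66,
RX_SEGMENT_BUFSIZE=3776 and RX_SEGMENT_MRU=3646; the real per-segment MRU
depends on the kernel's actual constants at build time.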
--- a/drivers/net/ethernet/cavium/cns3xxx_eth.c
+++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
@@ -26,15 +26,21 @@
 
 #define DRV_NAME "cns3xxx_eth"
 
-#define RX_DESCS 512
-#define TX_DESCS 512
-#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+#define RX_DESCS 128
+#define TX_DESCS 128
 
 #define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
 #define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
 #define REGS_SIZE 336
-#define MAX_MRU (1536 + SKB_DMA_REALIGN)
-#define CNS3XXX_MAX_MTU (1536)
+
+#define RX_BUFFER_ALIGN 64
+#define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))
+
+#define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
+#define RX_SEGMENT_ALLOC_SIZE 4096
+#define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
+#define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)
+#define MAX_MTU 9500
 
 #define NAPI_WEIGHT 64
 
@@ -266,7 +272,7 @@ struct _rx_ring {
 	struct rx_desc *desc;
 	dma_addr_t phys_addr;
 	struct rx_desc *cur_addr;
-	struct sk_buff *buff_tab[RX_DESCS];
+	void *buff_tab[RX_DESCS];
 	unsigned int phys_tab[RX_DESCS];
 	u32 cur_index;
 	u32 alloc_index;
@@ -280,6 +286,8 @@ struct sw {
 	struct cns3xxx_plat_info *plat;
 	struct _tx_ring *tx_ring;
 	struct _rx_ring *rx_ring;
+	struct sk_buff *frag_first;
+	struct sk_buff *frag_last;
 };
 
 struct port {
@@ -500,37 +508,35 @@ static void cns3xxx_alloc_rx_buf(struct
 	struct _rx_ring *rx_ring = sw->rx_ring;
 	unsigned int i = rx_ring->alloc_index;
 	struct rx_desc *desc = &(rx_ring)->desc[i];
-	struct sk_buff *skb;
+	void *buf;
 	unsigned int phys;
 
 	for (received += rx_ring->alloc_count; received > 0; received--) {
-		if ((skb = dev_alloc_skb(MAX_MRU))) {
-			if (SKB_DMA_REALIGN)
-				skb_reserve(skb, SKB_DMA_REALIGN);
-			skb_reserve(skb, NET_IP_ALIGN);
-			phys = dma_map_single(NULL, skb->data,
-					      CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-			if (dma_mapping_error(NULL, phys)) {
-				dev_kfree_skb(skb);
-				/* Failed to map, better luck next time */
-				goto out;;
-			}
-			desc->sdp = phys;
-		} else {
-			/* Failed to allocate skb, try again next time */
+		buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
+		if (!buf)
+			goto out;
+
+		phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
+				      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, phys)) {
+			kfree(buf);
 			goto out;
 		}
 
+		desc->sdl = RX_SEGMENT_MRU;
+		desc->sdp = phys;
+
 		/* put the new buffer on RX-free queue */
-		rx_ring->buff_tab[i] = skb;
+		rx_ring->buff_tab[i] = buf;
 		rx_ring->phys_tab[i] = phys;
 		if (i == RX_DESCS - 1) {
 			i = 0;
 			desc->config0 = END_OF_RING | FIRST_SEGMENT |
-					LAST_SEGMENT | CNS3XXX_MAX_MTU;
+					LAST_SEGMENT | RX_SEGMENT_MRU;
 			desc = &(rx_ring)->desc[i];
 		} else {
-			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | CNS3XXX_MAX_MTU;
+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+					RX_SEGMENT_MRU;
 			i++;
 			desc++;
 		}
@@ -579,7 +585,6 @@ static void clear_tx_desc(struct sw *sw)
 static int eth_poll(struct napi_struct *napi, int budget)
 {
 	struct sw *sw = container_of(napi, struct sw, napi);
-	struct net_device *dev;
 	struct _rx_ring *rx_ring = sw->rx_ring;
 	int received = 0;
 	unsigned int length;
@@ -588,49 +593,82 @@ static int eth_poll(struct napi_struct *
 
 	while (desc->cown) {
 		struct sk_buff *skb;
+		int reserve = SKB_HEAD_ALIGN;
 
 		if (received >= budget)
 			break;
 
-		skb = rx_ring->buff_tab[i];
+		/* process received frame */
+		dma_unmap_single(NULL, rx_ring->phys_tab[i],
+				 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+
+		skb = build_skb(rx_ring->buff_tab[i]);
+		if (!skb)
+			break;
 
-		dev = switch_port_tab[desc->sp]->netdev;
+		skb->dev = switch_port_tab[desc->sp]->netdev;
 
 		length = desc->sdl;
-		/* process received frame */
-		dma_unmap_single(&dev->dev, rx_ring->phys_tab[i],
-				 length, DMA_FROM_DEVICE);
+		if (desc->fsd && !desc->lsd)
+			length = RX_SEGMENT_MRU;
+
+		if (!desc->fsd) {
+			reserve -= NET_IP_ALIGN;
+			if (!desc->lsd)
+				length += NET_IP_ALIGN;
+		}
 
+		skb_reserve(skb, reserve);
 		skb_put(skb, length);
 
-		skb->dev = dev;
-		skb->protocol = eth_type_trans(skb, dev);
+		if (!sw->frag_first)
+			sw->frag_first = skb;
+		else {
+			if (sw->frag_first == sw->frag_last)
+				skb_frag_add_head(sw->frag_first, skb);
+			else
+				sw->frag_last->next = skb;
+			sw->frag_first->len += skb->len;
+			sw->frag_first->data_len += skb->len;
+			sw->frag_first->truesize += skb->truesize;
+		}
+		sw->frag_last = skb;
 
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += length;
+		if (desc->lsd) {
+			struct net_device *dev;
+
+			skb = sw->frag_first;
+			dev = skb->dev;
+			skb->protocol = eth_type_trans(skb, dev);
+
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			/* RX Hardware checksum offload */
+			skb->ip_summed = CHECKSUM_NONE;
+			switch (desc->prot) {
+			case 1:
+			case 2:
+			case 5:
+			case 6:
+			case 13:
+			case 14:
+				if (desc->l4f)
+					break;
 
-		/* RX Hardware checksum offload */
-		switch (desc->prot) {
-		case 1:
-		case 2:
-		case 5:
-		case 6:
-		case 13:
-		case 14:
-			if (desc->l4f)
-				skb->ip_summed = CHECKSUM_NONE;
-			else
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-			break;
-		default:
-			skb->ip_summed = CHECKSUM_NONE;
-			break;
-		}
+				break;
+			default:
+				break;
+			}
 
-		napi_gro_receive(napi, skb);
+			napi_gro_receive(napi, skb);
 
-		received++;
+			sw->frag_first = NULL;
+			sw->frag_last = NULL;
+		}
 
+		received++;
 		if (++i == RX_DESCS) {
 			i = 0;
 			desc = &(rx_ring)->desc[i];
@@ -653,42 +691,60 @@ static int eth_poll(struct napi_struct *
 	return received;
 }
 
+static void eth_set_desc(struct _tx_ring *tx_ring, int index, int index_last,
+			 void *data, int len, u32 config0, u32 pmap)
+{
+	struct tx_desc *tx_desc = &(tx_ring)->desc[index];
+	unsigned int phys;
+
+	phys = dma_map_single(NULL, data, len, DMA_TO_DEVICE);
+	tx_desc->sdp = phys;
+	tx_desc->pmap = pmap;
+	tx_ring->phys_tab[index] = phys;
+
+	config0 |= len;
+	if (index == TX_DESCS - 1)
+		config0 |= END_OF_RING;
+	if (index == index_last)
+		config0 |= LAST_SEGMENT;
+
+	mb();
+	tx_desc->config0 = config0;
+}
+
 static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct port *port = netdev_priv(dev);
 	struct sw *sw = port->sw;
 	struct _tx_ring *tx_ring = sw->tx_ring;
-	struct tx_desc *tx_desc;
-	int index;
-	int len;
+	struct sk_buff *skb1;
 	char pmap = (1 << port->id);
-	unsigned int phys;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
-	struct skb_frag_struct *frag;
+	int nr_desc = nr_frags;
+	int index0, index, index_last;
+	int len0;
 	unsigned int i;
-	u32 config0 = 0;
+	u32 config0;
 
 	if (pmap == 8)
 		pmap = (1 << 4);
 
-	if (skb->len > CNS3XXX_MAX_MTU) {
-		dev_kfree_skb(skb);
-		dev->stats.tx_errors++;
-		return NETDEV_TX_OK;
-	}
+	skb_walk_frags(skb, skb1)
+		nr_desc++;
 
 	spin_lock(&tx_lock);
 
-	if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
+	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
 		clear_tx_desc(sw);
-		if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
+		if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
 			spin_unlock(&tx_lock);
 			return NETDEV_TX_BUSY;
 		}
 	}
 
-	index = tx_ring->cur_index;
-	tx_ring->cur_index = ((tx_ring->cur_index + nr_frags + 1) % TX_DESCS);
+	index = index0 = tx_ring->cur_index;
+	index_last = (index0 + nr_desc) % TX_DESCS;
+	tx_ring->cur_index = (index_last + 1) % TX_DESCS;
 
 	spin_unlock(&tx_lock);
 
@@ -696,79 +752,41 @@ static int eth_xmit(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		config0 |= UDP_CHECKSUM | TCP_CHECKSUM;
 
-	if (!nr_frags) {
-		tx_desc = &(tx_ring)->desc[index];
-
-		len = skb->len;
+	len0 = skb->len;
 
-		phys = dma_map_single(NULL, skb->data, len,
-				      DMA_TO_DEVICE);
+	/* fragments */
+	for (i = 0; i < nr_frags; i++) {
+		struct skb_frag_struct *frag;
+		void *addr;
 
-		tx_desc->sdp = phys;
-		tx_desc->pmap = pmap;
-		tx_ring->phys_tab[index] = phys;
+		index = (index + 1) % TX_DESCS;
 
-		tx_ring->buff_tab[index] = skb;
-		config0 |= FIRST_SEGMENT | LAST_SEGMENT;
-	} else {
-		index = ((index + nr_frags) % TX_DESCS);
-		tx_desc = &(tx_ring)->desc[index];
+		frag = &skb_shinfo(skb)->frags[i];
+		addr = page_address(skb_frag_page(frag)) + frag->page_offset;
 
-		/* fragments */
-		for (i = nr_frags; i > 0; i--) {
-			u32 config;
-			void *addr;
-
-			frag = &skb_shinfo(skb)->frags[i-1];
-			len = frag->size;
-
-			addr = page_address(skb_frag_page(frag)) +
-				frag->page_offset;
-			phys = dma_map_single(NULL, addr, len, DMA_TO_DEVICE);
-
-			tx_desc->sdp = phys;
-
-			tx_desc->pmap = pmap;
-			tx_ring->phys_tab[index] = phys;
-
-			config = config0 | len;
-			if (i == nr_frags) {
-				config |= LAST_SEGMENT;
-				tx_ring->buff_tab[index] = skb;
-			}
-			if (index == TX_DESCS - 1)
-				config |= END_OF_RING;
-			tx_desc->config0 = config;
-
-			if (index == 0) {
-				index = TX_DESCS - 1;
-				tx_desc = &(tx_ring)->desc[index];
-			} else {
-				index--;
-				tx_desc--;
-			}
-		}
+		eth_set_desc(tx_ring, index, index_last, addr, frag->size,
+			     config0, pmap);
+	}
 
-		/* header */
-		len = skb->len - skb->data_len;
+	if (nr_frags)
+		len0 = skb->len - skb->data_len;
 
-		phys = dma_map_single(NULL, skb->data, len, DMA_TO_DEVICE);
+	skb_walk_frags(skb, skb1) {
+		index = (index + 1) % TX_DESCS;
+		len0 -= skb1->len;
 
-		tx_desc->sdp = phys;
-		tx_desc->pmap = pmap;
-		tx_ring->phys_tab[index] = phys;
-		config0 |= FIRST_SEGMENT;
+		eth_set_desc(tx_ring, index, index_last, skb1->data, skb1->len,
+			     config0, pmap);
 	}
 
-	if (index == TX_DESCS - 1)
-		config0 |= END_OF_RING;
-
-	tx_desc->config0 = config0 | len;
+	tx_ring->buff_tab[index0] = skb;
+	eth_set_desc(tx_ring, index0, index_last, skb->data, len0,
+		     config0 | FIRST_SEGMENT, pmap);
 
 	mb();
 
 	spin_lock(&tx_lock);
-	tx_ring->num_used += nr_frags + 1;
+	tx_ring->num_used += nr_desc + 1;
 	spin_unlock(&tx_lock);
 
 	dev->stats.tx_packets++;
@@ -849,24 +867,24 @@ static int init_rings(struct sw *sw)
 	/* Setup RX buffers */
 	for (i = 0; i < RX_DESCS; i++) {
 		struct rx_desc *desc = &(rx_ring)->desc[i];
-		struct sk_buff *skb;
-		if (!(skb = dev_alloc_skb(MAX_MRU)))
+		void *buf;
+
+		buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);
+		if (!buf)
 			return -ENOMEM;
-		if (SKB_DMA_REALIGN)
-			skb_reserve(skb, SKB_DMA_REALIGN);
-		skb_reserve(skb, NET_IP_ALIGN);
-		desc->sdl = CNS3XXX_MAX_MTU;
+
+		desc->sdl = RX_SEGMENT_MRU;
 		if (i == (RX_DESCS - 1))
 			desc->eor = 1;
 		desc->fsd = 1;
 		desc->lsd = 1;
 
-		desc->sdp = dma_map_single(NULL, skb->data,
-					   CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, desc->sdp)) {
+		desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
+					   RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, desc->sdp))
 			return -EIO;
-		}
-		rx_ring->buff_tab[i] = skb;
+
+		rx_ring->buff_tab[i] = buf;
 		rx_ring->phys_tab[i] = desc->sdp;
 		desc->cown = 0;
 	}
@@ -905,12 +923,13 @@ static void destroy_rings(struct sw *sw)
 			struct _rx_ring *rx_ring = sw->rx_ring;
 			struct rx_desc *desc = &(rx_ring)->desc[i];
 			struct sk_buff *skb = sw->rx_ring->buff_tab[i];
-			if (skb) {
-				dma_unmap_single(NULL,
-						 desc->sdp,
-						 CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-				dev_kfree_skb(skb);
-			}
+
+			if (!skb)
+				continue;
+
+			dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
 		}
 		dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
 		dma_pool_destroy(rx_dma_pool);
@@ -1085,13 +1104,22 @@ static int eth_set_mac(struct net_device
 	return 0;
 }
 
+static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu > MAX_MTU)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static const struct net_device_ops cns3xxx_netdev_ops = {
 	.ndo_open = eth_open,
 	.ndo_stop = eth_close,
 	.ndo_start_xmit = eth_xmit,
 	.ndo_set_rx_mode = eth_rx_mode,
 	.ndo_do_ioctl = eth_ioctl,
-	.ndo_change_mtu = eth_change_mtu,
+	.ndo_change_mtu = cns3xxx_change_mtu,
 	.ndo_set_mac_address = eth_set_mac,
 	.ndo_validate_addr = eth_validate_addr,
 };
@@ -1111,7 +1139,7 @@ static int __devinit eth_init_one(struct
 	if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
 		return -ENOMEM;
 	strcpy(napi_dev->name, "switch%d");
-	napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+	napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;
 
 	SET_NETDEV_DEV(napi_dev, &pdev->dev);
 	sw = netdev_priv(napi_dev);
@@ -1124,6 +1152,10 @@ static int __devinit eth_init_one(struct
 		goto err_free;
 	}
 
+	temp = __raw_readl(&sw->regs->phy_auto_addr);
+	temp |= (3 << 30); /* maximum frame length: 9600 bytes */
+	__raw_writel(temp, &sw->regs->phy_auto_addr);
+
 	for (i = 0; i < 4; i++) {
 		temp = __raw_readl(&sw->regs->mac_cfg[i]);
 		temp |= (PORT_DISABLE);
@@ -1185,7 +1217,7 @@ static int __devinit eth_init_one(struct
 	dev->netdev_ops = &cns3xxx_netdev_ops;
 	dev->ethtool_ops = &cns3xxx_ethtool_ops;
 	dev->tx_queue_len = 1000;
-	dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+	dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;
 
 	switch_port_tab[port->id] = port;
 	memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);