--- a/drivers/net/ethernet/cavium/cns3xxx_eth.c
+++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
 #define DRV_NAME "cns3xxx_eth"
 
-#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
 #define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
 #define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
 
-#define MAX_MRU (1536 + SKB_DMA_REALIGN)
-#define CNS3XXX_MAX_MTU (1536)
+#define RX_BUFFER_ALIGN 64
+#define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))
+
+#define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
+#define RX_SEGMENT_ALLOC_SIZE 4096
+#define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
+#define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)
+#define MAX_MTU 9500
 
 #define NAPI_WEIGHT 64
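
The sizing macros have to satisfy two constraints: the DMA target (buf + SKB_HEAD_ALIGN) must sit NET_SKB_PAD bytes into the buffer with the IP header 4-byte aligned, and headroom plus MRU must stay inside SKB_WITH_OVERHEAD(4096) so that build_skb() can place its skb_shared_info at the tail of the allocation. A worked check, assuming PAGE_SIZE 4096, NET_SKB_PAD 64, NET_IP_ALIGN 2 and a 320-byte aligned skb_shared_info (all of these are config-dependent, so the numbers are illustrative only):

	SKB_HEAD_ALIGN     = ((4096 - 64) % 64) + 64 + 2 =   66
	RX_SEGMENT_BUFSIZE = 4096 - 320                  = 3776
	RX_SEGMENT_MRU     = ((3776 - 66) & ~63) - 2     = 3646

	headroom + MRU = 66 + 3646 = 3712 <= 3776, so the shared info always fits.
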
@@ -266,7 +272,7 @@ struct _rx_ring {
 	struct rx_desc *cur_addr;
-	struct sk_buff *buff_tab[RX_DESCS];
+	void *buff_tab[RX_DESCS];
 	unsigned int phys_tab[RX_DESCS];
@@ -280,6 +286,8 @@ struct sw {
 	struct cns3xxx_plat_info *plat;
 	struct _tx_ring *tx_ring;
 	struct _rx_ring *rx_ring;
+	struct sk_buff *frag_first;
+	struct sk_buff *frag_last;
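
frag_first and frag_last track a frame that is still being reassembled across RX descriptors: the first segment becomes the head skb, later segments are chained onto its frag_list, and the descriptor's last-segment bit triggers delivery and a reset of both pointers. In outline (fsd/lsd are the first/last-segment descriptor bits used by eth_poll() below):

	/* fsd && lsd   -> whole frame in one segment, deliver it
	 * fsd && !lsd  -> start reassembly: frag_first = skb
	 * !fsd && !lsd -> middle segment: chain behind frag_last
	 * !fsd && lsd  -> final segment: deliver frag_first, clear both
	 */
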
@@ -500,37 +508,35 @@ static void cns3xxx_alloc_rx_buf(struct
 	struct _rx_ring *rx_ring = sw->rx_ring;
 	unsigned int i = rx_ring->alloc_index;
 	struct rx_desc *desc = &(rx_ring)->desc[i];
-	struct sk_buff *skb;
+	void *buf;
 	unsigned int phys;
 
 	for (received += rx_ring->alloc_count; received > 0; received--) {
-		if ((skb = dev_alloc_skb(MAX_MRU))) {
-			if (SKB_DMA_REALIGN)
-				skb_reserve(skb, SKB_DMA_REALIGN);
-			skb_reserve(skb, NET_IP_ALIGN);
-			phys = dma_map_single(NULL, skb->data,
-					      CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-			if (dma_mapping_error(NULL, phys)) {
-				dev_kfree_skb(skb);
-				/* Failed to map, better luck next time */
-				break;
-			}
-		} else {
-			/* Failed to allocate skb, try again next time */
-			break;
-		}
+		buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
+		if (!buf)
+			break;
+
+		phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
+				      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, phys)) {
+			kfree(buf);
+			break;
+		}
+
+		desc->sdl = RX_SEGMENT_MRU;
 
 		/* put the new buffer on RX-free queue */
-		rx_ring->buff_tab[i] = skb;
+		rx_ring->buff_tab[i] = buf;
 		rx_ring->phys_tab[i] = phys;
 		if (i == RX_DESCS - 1) {
 			i = 0;
 			desc->config0 = END_OF_RING | FIRST_SEGMENT |
-					LAST_SEGMENT | CNS3XXX_MAX_MTU;
+					LAST_SEGMENT | RX_SEGMENT_MRU;
 			desc = &(rx_ring)->desc[i];
 		} else {
-			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | CNS3XXX_MAX_MTU;
+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+					RX_SEGMENT_MRU;
 			i++;
 			desc++;
 		}
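
Note that the driver maps with a NULL struct device throughout, which kernels of this vintage tolerated on this platform. Later kernels dereference the device inside dma_map_single(), so a forward port would need to pass a real device; a sketch of the same refill mapping under that assumption (the sw->dev field is hypothetical, this driver as written has no such member):

	phys = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN,
			      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
	if (dma_mapping_error(sw->dev, phys)) {
		kfree(buf);	/* retry on the next refill pass */
		break;
	}
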
@@ -579,7 +585,6 @@ static void clear_tx_desc(struct sw *sw)
 static int eth_poll(struct napi_struct *napi, int budget)
 {
 	struct sw *sw = container_of(napi, struct sw, napi);
-	struct net_device *dev;
 	struct _rx_ring *rx_ring = sw->rx_ring;
@@ -588,49 +593,82 @@ static int eth_poll(struct napi_struct *
 		struct sk_buff *skb;
+		int reserve = SKB_HEAD_ALIGN;
 
 		if (received >= budget)
 			break;
 
-		skb = rx_ring->buff_tab[i];
+		/* process received frame */
+		dma_unmap_single(NULL, rx_ring->phys_tab[i],
+				 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
 
-		dev = switch_port_tab[desc->sp]->netdev;
+		skb = build_skb(rx_ring->buff_tab[i]);
+		skb->dev = switch_port_tab[desc->sp]->netdev;
 
 		length = desc->sdl;
-		/* process received frame */
-		dma_unmap_single(&dev->dev, rx_ring->phys_tab[i],
-				 length, DMA_FROM_DEVICE);
+		if (desc->fsd && !desc->lsd)
+			length = RX_SEGMENT_MRU;
+
+		if (!desc->fsd) {
+			reserve -= NET_IP_ALIGN;
+			if (!desc->lsd)
+				length += NET_IP_ALIGN;
+		}
 
+		skb_reserve(skb, reserve);
 		skb_put(skb, length);
 
-		skb->protocol = eth_type_trans(skb, dev);
-
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += length;
-
-		/* RX Hardware checksum offload */
-		switch (desc->prot) {
-			case 1:
-			case 2:
-			case 5:
-			case 6:
-			case 13:
-			case 14:
-				if (desc->l4f)
-					skb->ip_summed = CHECKSUM_NONE;
-				else
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				break;
-			default:
-				skb->ip_summed = CHECKSUM_NONE;
-				break;
-		}
-
-		napi_gro_receive(napi, skb);
+		if (!sw->frag_first)
+			sw->frag_first = skb;
+		else {
+			if (sw->frag_first == sw->frag_last)
+				skb_frag_add_head(sw->frag_first, skb);
+			else
+				sw->frag_last->next = skb;
+			sw->frag_first->len += skb->len;
+			sw->frag_first->data_len += skb->len;
+			sw->frag_first->truesize += skb->truesize;
+		}
+		sw->frag_last = skb;
+
+		if (desc->lsd) {
+			struct net_device *dev;
+
+			skb = sw->frag_first;
+			dev = skb->dev;
+			skb->protocol = eth_type_trans(skb, dev);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			/* RX Hardware checksum offload */
+			skb->ip_summed = CHECKSUM_NONE;
+			switch (desc->prot) {
+			case 1:
+			case 2:
+			case 5:
+			case 6:
+			case 13:
+			case 14:
+				if (!desc->l4f)
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			}
+
+			napi_gro_receive(napi, skb);
+			sw->frag_first = NULL;
+			sw->frag_last = NULL;
+		}
 
 		received++;
 		if (++i == RX_DESCS) {
 			i = 0;
 			desc = &(rx_ring)->desc[i];
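
Two notes on the RX loop. First, build_skb() here is the single-argument form of this kernel generation; later kernels take build_skb(data, frag_size), where frag_size 0 denotes a kmalloc'd head, which is what these buffers are. It can also return NULL under memory pressure, which the loop as written does not check for. Second, skb_frag_add_head() is not a stock kernel export; trees carrying this patch are expected to provide it. A minimal helper consistent with how the loop uses it (the frag_list is empty at the moment it is called, so prepending and appending are equivalent):

	static inline void skb_frag_add_head(struct sk_buff *skb,
					     struct sk_buff *frag)
	{
		/* make frag the first (and so far only) frag_list entry */
		frag->next = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = frag;
	}
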
@@ -671,12 +709,6 @@ static int eth_xmit(struct sk_buff *skb,
 
-	if (skb->len > CNS3XXX_MAX_MTU) {
-		dev_kfree_skb(skb);
-		dev->stats.tx_errors++;
-		return NETDEV_TX_OK;
-	}
-
 	if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
@@ -701,8 +733,7 @@ static int eth_xmit(struct sk_buff *skb,
 
-	phys = dma_map_single(NULL, skb->data, len,
-			      DMA_TO_DEVICE);
+	phys = dma_map_single(NULL, skb->data, len, DMA_TO_DEVICE);
 
 	tx_desc->pmap = pmap;
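
The per-packet CNS3XXX_MAX_MTU check disappears because frames larger than 1536 bytes are now legitimate on this path; the upper bound is enforced once at configuration time by cns3xxx_change_mtu() below, so by the time a packet reaches eth_xmit() the stack has already sized it within dev->mtu.
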
@@ -849,24 +880,24 @@ static int init_rings(struct sw *sw)
 	/* Setup RX buffers */
 	for (i = 0; i < RX_DESCS; i++) {
 		struct rx_desc *desc = &(rx_ring)->desc[i];
-		struct sk_buff *skb;
-		if (!(skb = dev_alloc_skb(MAX_MRU)))
+		void *buf;
+
+		buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);
+		if (!buf)
 			return -ENOMEM;
-		if (SKB_DMA_REALIGN)
-			skb_reserve(skb, SKB_DMA_REALIGN);
-		skb_reserve(skb, NET_IP_ALIGN);
-		desc->sdl = CNS3XXX_MAX_MTU;
+
+		desc->sdl = RX_SEGMENT_MRU;
 		if (i == (RX_DESCS - 1))
 			desc->eor = 1;
 		desc->fsd = 1;
 		desc->lsd = 1;
-		desc->sdp = dma_map_single(NULL, skb->data,
-					   CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, desc->sdp)) {
+		desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
+					   RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, desc->sdp))
 			return -EIO;
-		}
-		rx_ring->buff_tab[i] = skb;
+
+		rx_ring->buff_tab[i] = buf;
 		rx_ring->phys_tab[i] = desc->sdp;
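
This is the same buffer recipe as the cns3xxx_alloc_rx_buf() refill path; the only difference is allocation context. init_rings() runs from process context and may sleep, hence GFP_KERNEL, while the refill runs under the NAPI softirq and must use GFP_ATOMIC. Using kzalloc() in both places also hands the hardware a zeroed buffer, so no stale slab data can surface through padding.
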
@@ -905,12 +936,13 @@ static void destroy_rings(struct sw *sw)
 			struct _rx_ring *rx_ring = sw->rx_ring;
 			struct rx_desc *desc = &(rx_ring)->desc[i];
-			struct sk_buff *skb = sw->rx_ring->buff_tab[i];
-			if (skb) {
-				dma_unmap_single(NULL,
-						 desc->sdp,
-						 CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
-				dev_kfree_skb(skb);
-			}
+			void *buf = sw->rx_ring->buff_tab[i];
+
+			if (!buf)
+				continue;
+
+			dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,
+					 DMA_FROM_DEVICE);
+			kfree(buf);
 		}
 		dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
 		dma_pool_destroy(rx_dma_pool);
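
The teardown frees with kfree() rather than dev_kfree_skb() because buff_tab[] now holds raw kzalloc() buffers that were never promoted to skbs. Ownership flips once build_skb() succeeds in eth_poll(); from then on the normal skb free path releases the head as well, roughly:

	void *buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
	struct sk_buff *skb = build_skb(buf);	/* skb now owns buf */
	/* ... skb travels up the stack ... */
	dev_kfree_skb(skb);			/* frees buf as well */

Only buffers still parked in the ring at teardown take the kfree() path.
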
@@ -1085,13 +1117,22 @@ static int eth_set_mac(struct net_device
 	return 0;
 }
 
+static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu > MAX_MTU)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static const struct net_device_ops cns3xxx_netdev_ops = {
 	.ndo_open = eth_open,
 	.ndo_stop = eth_close,
 	.ndo_start_xmit = eth_xmit,
 	.ndo_set_rx_mode = eth_rx_mode,
 	.ndo_do_ioctl = eth_ioctl,
-	.ndo_change_mtu = eth_change_mtu,
+	.ndo_change_mtu = cns3xxx_change_mtu,
 	.ndo_set_mac_address = eth_set_mac,
 	.ndo_validate_addr = eth_validate_addr,
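
As written, the new callback only clamps the upper bound, while the eth_change_mtu() helper it replaces also rejected MTUs below 68 bytes. A variant that keeps that lower bound (a sketch, assuming the classic IPv4 minimum is still wanted):

	static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
	{
		if (new_mtu < 68 || new_mtu > MAX_MTU)
			return -EINVAL;

		dev->mtu = new_mtu;
		return 0;
	}

With the callback in place, something like ip link set dev eth0 mtu 9000 now succeeds anywhere up to MAX_MTU.
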
@@ -1124,6 +1165,10 @@ static int __devinit eth_init_one(struct
 
+	temp = __raw_readl(&sw->regs->phy_auto_addr);
+	temp |= (3 << 30); /* maximum frame length: 9600 bytes */
+	__raw_writel(temp, &sw->regs->phy_auto_addr);
+
 	for (i = 0; i < 4; i++) {
 		temp = __raw_readl(&sw->regs->mac_cfg[i]);
 		temp |= (PORT_DISABLE);
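
Bits 31:30 of phy_auto_addr select the MAC's maximum receive frame length, with 3 being the largest (9600-byte) setting per the comment; the exact encoding of the smaller values is one for the CNS3xxx datasheet. The plain OR relies on the field being zero out of reset; a masked update is the more defensive form:

	temp = __raw_readl(&sw->regs->phy_auto_addr);
	temp &= ~(3 << 30);	/* clear the frame-length field first */
	temp |= (3 << 30);	/* select the 9600-byte maximum */
	__raw_writel(temp, &sw->regs->phy_auto_addr);
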