1 /*
2 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/platform_device.h>
17 #include <linux/if_vlan.h>
18 #include "ess_edma.h"
19 #include "edma.h"
20
21 extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
22 bool edma_stp_rstp;
23 u16 edma_ath_eth_type;
24
25 /* edma_skb_priority_offset()
26 * get edma skb priority
27 */
28 static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
29 {
30 return (skb->priority >> 2) & 1;
31 }
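/* Only bit 2 of skb->priority is consulted here, so the returned offset is
 * either 0 or 1; edma_tx_queue_get() adds it to adapter->tx_start_offset[]
 * to select the hardware Tx queue.
 */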
32
33 /* edma_alloc_tx_ring()
34 * Allocate Tx descriptors ring
35 */
36 static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
37 struct edma_tx_desc_ring *etdr)
38 {
39 struct platform_device *pdev = edma_cinfo->pdev;
40
41 /* Initialize ring */
42 etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
43 etdr->sw_next_to_fill = 0;
44 etdr->sw_next_to_clean = 0;
45
46 /* Allocate SW descriptors */
47 etdr->sw_desc = vzalloc(etdr->size);
48 if (!etdr->sw_desc) {
49 dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
50 return -ENOMEM;
51 }
52
53 /* Allocate HW descriptors */
54 etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
55 GFP_KERNEL);
56 if (!etdr->hw_desc) {
57 dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
58 vfree(etdr->sw_desc);
59 return -ENOMEM;
60 }
61
62 return 0;
63 }
64
65 /* edma_free_tx_ring()
66 * Free Tx descriptor ring allocated by edma_alloc_tx_ring
67 */
68 static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
69 struct edma_tx_desc_ring *etdr)
70 {
71 struct platform_device *pdev = edma_cinfo->pdev;
72
73 if (likely(etdr->dma))
74 dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
75 etdr->dma);
76
77 vfree(etdr->sw_desc);
78 etdr->sw_desc = NULL;
79 }
80
81 /* edma_alloc_rx_ring()
82 * allocate rx descriptor ring
83 */
84 static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
85 struct edma_rfd_desc_ring *erxd)
86 {
87 struct platform_device *pdev = edma_cinfo->pdev;
88
89 erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
90 erxd->sw_next_to_fill = 0;
91 erxd->sw_next_to_clean = 0;
92
93 /* Allocate SW descriptors */
94 erxd->sw_desc = vzalloc(erxd->size);
95 if (!erxd->sw_desc)
96 return -ENOMEM;
97
98 /* Alloc HW descriptors */
99 erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
100 GFP_KERNEL);
101 if (!erxd->hw_desc) {
102 vfree(erxd->sw_desc);
103 return -ENOMEM;
104 }
105
106 /* Initialize pending_fill */
107 erxd->pending_fill = 0;
108
109 return 0;
110 }
111
112 /* edma_free_rx_ring()
113 * Free Rx ring allocated by edma_alloc_rx_ring
114 */
115 static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
116 struct edma_rfd_desc_ring *rxdr)
117 {
118 struct platform_device *pdev = edma_cinfo->pdev;
119
120 if (likely(rxdr->dma))
121 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
122 rxdr->dma);
123
124 vfree(rxdr->sw_desc);
125 rxdr->sw_desc = NULL;
126 }
127
128 /* edma_configure_tx()
129 * Configure transmission control data
130 */
131 static void edma_configure_tx(struct edma_common_info *edma_cinfo)
132 {
133 u32 txq_ctrl_data;
134
135 txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
136 txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
137 txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
138 edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
139 }
140
141
142 /* edma_configure_rx()
143 * configure reception control data
144 */
145 static void edma_configure_rx(struct edma_common_info *edma_cinfo)
146 {
147 struct edma_hw *hw = &edma_cinfo->hw;
148 u32 rss_type, rx_desc1, rxq_ctrl_data;
149
150 /* Set RSS type */
151 rss_type = hw->rss_type;
152 edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
153
154 /* Set RFD burst number */
155 rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
156
157 /* Set RFD prefetch threshold */
158 rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
159
160 /* Set RFD in host ring low threshold to generate interrupt */
161 rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
162 edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
163
164 /* Set Rx FIFO threshold at which to start DMA of data to host */
165 rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
166
167 /* Set RX remove vlan bit */
168 rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
169
170 edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
171 }
172
173 /* edma_alloc_rx_buf()
174 * Allocate skbs (and DMA buffers) for received packets.
175 */
176 static int edma_alloc_rx_buf(struct edma_common_info
177 *edma_cinfo,
178 struct edma_rfd_desc_ring *erdr,
179 int cleaned_count, int queue_id)
180 {
181 struct platform_device *pdev = edma_cinfo->pdev;
182 struct edma_rx_free_desc *rx_desc;
183 struct edma_sw_desc *sw_desc;
184 struct sk_buff *skb;
185 unsigned int i;
186 u16 prod_idx, length;
187 u32 reg_data;
188
189 if (cleaned_count > erdr->count)
190 cleaned_count = erdr->count - 1;
191
192 i = erdr->sw_next_to_fill;
193
194 while (cleaned_count) {
195 sw_desc = &erdr->sw_desc[i];
196 length = edma_cinfo->rx_head_buffer_len;
197
198 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
199 skb = sw_desc->skb;
200
201 /* Clear REUSE Flag */
202 sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
203 } else {
204 /* alloc skb */
205 skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
206 if (!skb) {
207 /* Better luck next round */
208 break;
209 }
210 }
211
212 if (edma_cinfo->page_mode) {
213 struct page *pg = alloc_page(GFP_ATOMIC);
214
215 if (!pg) {
216 dev_kfree_skb_any(skb);
217 break;
218 }
219
220 sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
221 edma_cinfo->rx_page_buffer_len,
222 DMA_FROM_DEVICE);
223 if (dma_mapping_error(&pdev->dev,
224 sw_desc->dma)) {
225 __free_page(pg);
226 dev_kfree_skb_any(skb);
227 break;
228 }
229
230 skb_fill_page_desc(skb, 0, pg, 0,
231 edma_cinfo->rx_page_buffer_len);
232 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
233 sw_desc->length = edma_cinfo->rx_page_buffer_len;
234 } else {
235 sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
236 length, DMA_FROM_DEVICE);
237 if (dma_mapping_error(&pdev->dev,
238 sw_desc->dma)) {
239 dev_kfree_skb_any(skb);
240 break;
241 }
242
243 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
244 sw_desc->length = length;
245 }
246
247 /* Update the buffer info */
248 sw_desc->skb = skb;
249 rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
250 rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
251 if (++i == erdr->count)
252 i = 0;
253 cleaned_count--;
254 }
255
256 erdr->sw_next_to_fill = i;
257
258 if (i == 0)
259 prod_idx = erdr->count - 1;
260 else
261 prod_idx = i - 1;
262
263 /* Update the producer index */
264 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
265 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
266 reg_data |= prod_idx;
267 edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
268
269 /* If we couldn't allocate all the buffers
270 * we increment the alloc failure counters
271 */
272 if (cleaned_count)
273 edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
274
275 return cleaned_count;
276 }
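/* Refill protocol used above: the producer index written to
 * EDMA_REG_RFD_IDX_Q points at the last descriptor handed to hardware (one
 * slot behind sw_next_to_fill), and the return value is the number of
 * buffers that could not be replenished, which edma_rx_complete() stores in
 * pending_fill for a later retry.
 */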
277
278 /* edma_init_desc()
279 * update descriptor ring size, buffer and producer/consumer index
280 */
281 static void edma_init_desc(struct edma_common_info *edma_cinfo)
282 {
283 struct edma_rfd_desc_ring *rfd_ring;
284 struct edma_tx_desc_ring *etdr;
285 int i = 0, j = 0;
286 u32 data = 0;
287 u16 hw_cons_idx = 0;
288
289 /* Set the base address of every TPD ring. */
290 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
291 etdr = edma_cinfo->tpd_ring[i];
292
293 /* Update descriptor ring base address */
294 edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
295 edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
296
297 /* Calculate hardware consumer index */
298 hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
299 etdr->sw_next_to_fill = hw_cons_idx;
300 etdr->sw_next_to_clean = hw_cons_idx;
301 data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
302 data |= hw_cons_idx;
303
304 /* update producer index */
305 edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
306
307 /* update SW consumer index register */
308 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
309
310 /* Set TPD ring size */
311 edma_write_reg(EDMA_REG_TPD_RING_SIZE,
312 edma_cinfo->tx_ring_count &
313 EDMA_TPD_RING_SIZE_MASK);
314 }
315
316 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
317 rfd_ring = edma_cinfo->rfd_ring[j];
318 /* Update Receive Free descriptor ring base address */
319 edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
320 (u32)(rfd_ring->dma));
321 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
322 }
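	/* With four Rx queues only every second hardware RFD ring (0, 2, 4, ...)
	 * is programmed, hence j advances by two in that case; otherwise the
	 * rings are used consecutively. The same stride appears wherever this
	 * driver walks the Rx rings.
	 */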
323
324 data = edma_cinfo->rx_head_buffer_len;
325 if (edma_cinfo->page_mode)
326 data = edma_cinfo->rx_page_buffer_len;
327
328 data &= EDMA_RX_BUF_SIZE_MASK;
329 data <<= EDMA_RX_BUF_SIZE_SHIFT;
330
331 /* Update RFD ring size and RX buffer size */
332 data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
333 << EDMA_RFD_RING_SIZE_SHIFT;
334
335 edma_write_reg(EDMA_REG_RX_DESC0, data);
336
337 /* Disable TX FIFO low watermark and high watermark */
338 edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
339
340 /* Load all of base address above */
341 edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
342 data |= 1 << EDMA_LOAD_PTR_SHIFT;
343 edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
344 }
345
346 /* edma_receive_checksum
347 * API to check the checksum on received packets
348 */
349 static void edma_receive_checksum(struct edma_rx_return_desc *rd,
350 struct sk_buff *skb)
351 {
352 skb_checksum_none_assert(skb);
353
354 /* check the RRD IP/L4 checksum bit to see if
355 * it is set, which in turn indicates checksum
356 * failure.
357 */
358 if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
359 return;
360
361 skb->ip_summed = CHECKSUM_UNNECESSARY;
362 }
363
364 /* edma_clean_rfd()
365 * Clean up Rx resources on error
366 */
367 static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
368 {
369 struct edma_rx_free_desc *rx_desc;
370 struct edma_sw_desc *sw_desc;
371
372 rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
373 sw_desc = &erdr->sw_desc[index];
374 if (sw_desc->skb) {
375 dev_kfree_skb_any(sw_desc->skb);
376 sw_desc->skb = NULL;
377 }
378
379 memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
380 }
381
382 /* edma_rx_complete_stp_rstp()
383 * Complete STP/RSTP processing for received packets
384 */
385 static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
386 {
387 int i;
388 u32 priority;
389 u16 port_type;
390 u8 mac_addr[EDMA_ETH_HDR_LEN];
391
392 port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
393 & EDMA_RRD_PORT_TYPE_MASK;
394 /* Proceed with the STP/RSTP handling only if the
395 * port type is 0x4 (RSTP port type)
396 */
397 if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
398 u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
399
400 /* calculate the frame priority */
401 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
402 & EDMA_RRD_PRIORITY_MASK;
403
404 for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
405 mac_addr[i] = skb->data[i];
406
407 /* Check if destination mac addr is bpdu addr */
408 if (!memcmp(mac_addr, bpdu_mac, 6)) {
409 /* The destination MAC address is the BPDU
410 * address, so prepend the Atheros header
411 * to the packet.
412 */
413 u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
414 (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
415 (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
416 skb_push(skb, 4);
417 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
418 *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
419 *(uint16_t *)&skb->data[14] = htons(athr_hdr);
420 }
421 }
422 }
423
424 /*
425 * edma_rx_complete_fraglist()
426 * Complete Rx processing for fraglist skbs
427 */
428 static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
429 u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
430 {
431 struct platform_device *pdev = edma_cinfo->pdev;
432 struct edma_hw *hw = &edma_cinfo->hw;
433 struct sk_buff *skb_temp;
434 struct edma_sw_desc *sw_desc;
435 int i;
436 u16 size_remaining;
struct sk_buff *skb_prev = NULL;	/* previous skb in the frag_list chain */
437
438 skb->data_len = 0;
439 skb->tail += (hw->rx_head_buff_size - 16);
440 skb->len = skb->truesize = length;
441 size_remaining = length - (hw->rx_head_buff_size - 16);
442
443 /* clean-up all related sw_descs */
444 for (i = 1; i < num_rfds; i++) {
446 sw_desc = &erdr->sw_desc[sw_next_to_clean];
447 skb_temp = sw_desc->skb;
448
449 dma_unmap_single(&pdev->dev, sw_desc->dma,
450 sw_desc->length, DMA_FROM_DEVICE);
451
452 if (size_remaining < hw->rx_head_buff_size)
453 skb_put(skb_temp, size_remaining);
454 else
455 skb_put(skb_temp, hw->rx_head_buff_size);
456
457 /*
458 * For the first chained RFD we attach its skb to the
459 * head skb via skb_shinfo(skb)->frag_list; subsequent
460 * RFDs are chained through skb->next
461 */
462 if (i == 1)
463 skb_shinfo(skb)->frag_list = skb_temp;
464 else
465 skb_prev->next = skb_temp;
466 skb_prev = skb_temp;
467 skb_temp->next = NULL;
468
469 skb->data_len += skb_temp->len;
470 size_remaining -= skb_temp->len;
471
472 /* Increment SW index */
473 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
474 (*cleaned_count)++;
475 }
476
477 return sw_next_to_clean;
478 }
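/* Summary: the head skb keeps the first (rx_head_buff_size - 16) bytes (the
 * leading 16 bytes are the RRD), the remaining RFDs of the packet are
 * chained behind it through skb_shinfo(skb)->frag_list, and the updated
 * software clean index is returned.
 */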
479
480 /* edma_rx_complete_paged()
481 * Complete Rx processing for paged skbs
482 */
483 static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
484 u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
485 {
486 struct platform_device *pdev = edma_cinfo->pdev;
487 struct sk_buff *skb_temp;
488 struct edma_sw_desc *sw_desc;
489 int i;
490 u16 size_remaining;
491
492 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
493
494 /* Setup skbuff fields */
495 skb->len = length;
496
497 if (likely(num_rfds <= 1)) {
498 skb->data_len = length;
499 skb->truesize += edma_cinfo->rx_page_buffer_len;
500 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
501 16, length);
502 } else {
503 skb_frag_size_sub(frag, 16);
504 skb->data_len = skb_frag_size(frag);
505 skb->truesize += edma_cinfo->rx_page_buffer_len;
506 size_remaining = length - skb_frag_size(frag);
507
508 skb_fill_page_desc(skb, 0, skb_frag_page(frag),
509 16, skb_frag_size(frag));
510
511 /* clean-up all related sw_descs */
512 for (i = 1; i < num_rfds; i++) {
513 sw_desc = &erdr->sw_desc[sw_next_to_clean];
514 skb_temp = sw_desc->skb;
515 frag = &skb_shinfo(skb_temp)->frags[0];
516 dma_unmap_page(&pdev->dev, sw_desc->dma,
517 sw_desc->length, DMA_FROM_DEVICE);
518
519 if (size_remaining < edma_cinfo->rx_page_buffer_len)
520 skb_frag_size_set(frag, size_remaining);
521
522 skb_fill_page_desc(skb, i, skb_frag_page(frag),
523 0, skb_frag_size(frag));
524
525 skb_shinfo(skb_temp)->nr_frags = 0;
526 dev_kfree_skb_any(skb_temp);
527
528 skb->data_len += skb_frag_size(frag);
529 skb->truesize += edma_cinfo->rx_page_buffer_len;
530 size_remaining -= skb_frag_size(frag);
531
532 /* Increment SW index */
533 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
534 (*cleaned_count)++;
535 }
536 }
537
538 return sw_next_to_clean;
539 }
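/* Summary: in page mode each RFD carries one page fragment; the first
 * fragment is shrunk by the 16-byte RRD, and any additional RFDs of the same
 * packet are folded into the head skb as further page fragments while their
 * temporary skbs are released.
 */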
540
541 /*
542 * edma_rx_complete()
543 * Main api called from the poll function to process rx packets.
544 */
545 static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
546 int *work_done, int work_to_do, int queue_id,
547 struct napi_struct *napi)
548 {
549 struct platform_device *pdev = edma_cinfo->pdev;
550 struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
551 struct net_device *netdev;
552 struct edma_adapter *adapter;
553 struct edma_sw_desc *sw_desc;
554 struct sk_buff *skb;
555 struct edma_rx_return_desc *rd;
556 u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
557 sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
558 u32 data = 0;
559 u8 *vaddr;
560 int port_id, i, drop_count = 0;
561 u32 priority;
562 u16 count = erdr->count, rfd_avail;
563 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
564
565 cleaned_count = erdr->pending_fill;
566 sw_next_to_clean = erdr->sw_next_to_clean;
567
568 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
569 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
570 EDMA_RFD_CONS_IDX_MASK;
571
572 do {
573 while (sw_next_to_clean != hw_next_to_clean) {
574 if (!work_to_do)
575 break;
576
577 sw_desc = &erdr->sw_desc[sw_next_to_clean];
578 skb = sw_desc->skb;
579
580 /* Unmap the allocated buffer */
581 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
582 dma_unmap_single(&pdev->dev, sw_desc->dma,
583 sw_desc->length, DMA_FROM_DEVICE);
584 else
585 dma_unmap_page(&pdev->dev, sw_desc->dma,
586 sw_desc->length, DMA_FROM_DEVICE);
587
588 /* Get RRD */
589 if (edma_cinfo->page_mode) {
590 vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
591 memcpy((uint8_t *)&rrd[0], vaddr, 16);
592 rd = (struct edma_rx_return_desc *)rrd;
593 kunmap_atomic(vaddr);
594 } else {
595 rd = (struct edma_rx_return_desc *)skb->data;
596 }
597
598 /* Check if RRD is valid */
599 if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
600 edma_clean_rfd(erdr, sw_next_to_clean);
601 sw_next_to_clean = (sw_next_to_clean + 1) &
602 (erdr->count - 1);
603 cleaned_count++;
604 continue;
605 }
606
607 /* Get the number of RFDs from RRD */
608 num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
609
610 /* Get Rx port ID from switch */
611 port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
612 if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
613 dev_err(&pdev->dev, "Invalid RRD source port bit set");
614 for (i = 0; i < num_rfds; i++) {
615 edma_clean_rfd(erdr, sw_next_to_clean);
616 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
617 cleaned_count++;
618 }
619 continue;
620 }
621
622 /* check if we have a sink for the data we receive.
623 * If the interface isn't set up, we have to drop the
624 * incoming data for now.
625 */
626 netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
627 if (!netdev) {
628 edma_clean_rfd(erdr, sw_next_to_clean);
629 sw_next_to_clean = (sw_next_to_clean + 1) &
630 (erdr->count - 1);
631 cleaned_count++;
632 continue;
633 }
634 adapter = netdev_priv(netdev);
635
636 /* This code is added to handle a usecase where high
637 * priority stream and a low priority stream are
638 * received simultaneously on DUT. The problem occurs
639 * if one of the Rx rings is full and the corresponding
640 * core is busy with other stuff. This causes ESS CPU
641 * port to backpressure all incoming traffic including
642 * high priority one. We monitor free descriptor count
643 * on each CPU and whenever it reaches threshold (< 80),
644 * we drop all low priority traffic and let only high
645 * priority traffic pass through. We can hence avoid
646 * the ESS CPU port asserting backpressure on the high priority
647 * stream.
648 */
649 priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
650 & EDMA_RRD_PRIORITY_MASK;
651 if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
652 rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
653 if (rfd_avail < EDMA_RFD_AVAIL_THR) {
654 sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
655 sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
656 adapter->stats.rx_dropped++;
657 cleaned_count++;
658 drop_count++;
659 if (drop_count == 3) {
660 work_to_do--;
661 (*work_done)++;
662 drop_count = 0;
663 }
664 if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
665 /* If buffer clean count reaches 16, we replenish HW buffers. */
666 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
667 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
668 sw_next_to_clean);
669 cleaned_count = ret_count;
670 erdr->pending_fill = ret_count;
671 }
672 continue;
673 }
674 }
675
676 work_to_do--;
677 (*work_done)++;
678
679 /* Increment SW index */
680 sw_next_to_clean = (sw_next_to_clean + 1) &
681 (erdr->count - 1);
682
683 cleaned_count++;
684
685 /* Get the packet size and allocate buffer */
686 length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
687
688 if (edma_cinfo->page_mode) {
689 /* paged skb */
690 sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
691 if (!pskb_may_pull(skb, ETH_HLEN)) {
692 dev_kfree_skb_any(skb);
693 continue;
694 }
695 } else {
696 /* single or fraglist skb */
697
698 /* The first 16 bytes of the buffer hold the RRD
699 * (Rx return descriptor), so the actual packet data
700 * starts at an offset of 16.
701 */
702 skb_reserve(skb, 16);
703 if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
704 skb_put(skb, length);
705 } else {
706 sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
707 }
708 }
709
710 if (edma_stp_rstp) {
711 edma_rx_complete_stp_rstp(skb, port_id, rd);
712 }
713
714 skb->protocol = eth_type_trans(skb, netdev);
715
716 /* Record Rx queue for RFS/RPS and fill flow hash from HW */
717 skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
718 if (netdev->features & NETIF_F_RXHASH) {
719 hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
720 if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
721 skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
722 }
723
724 #ifdef CONFIG_NF_FLOW_COOKIE
725 skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
726 #endif
727 edma_receive_checksum(rd, skb);
728
729 /* Process VLAN HW acceleration indication provided by HW */
730 if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
731 vlan = rd->rrd4;
732 if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
733 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
734 else if (rd->rrd1 & EDMA_RRD_SVLAN)
735 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
736 }
737
738 /* Update rx statistics */
739 adapter->stats.rx_packets++;
740 adapter->stats.rx_bytes += length;
741
742 /* Check if we reached refill threshold */
743 if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
744 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
745 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
746 sw_next_to_clean);
747 cleaned_count = ret_count;
748 erdr->pending_fill = ret_count;
749 }
750
751 /* At this point skb should go to stack */
752 napi_gro_receive(napi, skb);
753 }
754
755 /* Check if we still have NAPI budget */
756 if (!work_to_do)
757 break;
758
759 /* Read index once again since we still have NAPI budget */
760 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
761 hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
762 EDMA_RFD_CONS_IDX_MASK;
763 } while (hw_next_to_clean != sw_next_to_clean);
764
765 erdr->sw_next_to_clean = sw_next_to_clean;
766
767 /* Refill here in case refill threshold wasn't reached */
768 if (likely(cleaned_count)) {
769 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
770 erdr->pending_fill = ret_count;
771 if (ret_count) {
772 if (net_ratelimit())
773 dev_dbg(&pdev->dev, "Not all buffers were reallocated");
774 }
775
776 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
777 erdr->sw_next_to_clean);
778 }
779
780 return erdr->pending_fill;
781 }
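/* edma_rx_complete() returns erdr->pending_fill, the number of Rx buffers
 * that still could not be replenished; the caller (the NAPI poll routine,
 * which lives outside this file) is expected to retry the refill based on
 * this value.
 */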
782
783 /* edma_delete_rfs_filter()
784 * Remove RFS filter from switch
785 */
786 static int edma_delete_rfs_filter(struct edma_adapter *adapter,
787 struct edma_rfs_filter_node *filter_node)
788 {
789 int res = -1;
790
791 struct flow_keys *keys = &filter_node->keys;
792
793 if (likely(adapter->set_rfs_rule))
794 res = (*adapter->set_rfs_rule)(adapter->netdev,
795 flow_get_u32_src(keys), flow_get_u32_dst(keys),
796 keys->ports.src, keys->ports.dst,
797 keys->basic.ip_proto, filter_node->rq_id, 0);
798
799 return res;
800 }
801
802 /* edma_add_rfs_filter()
803 * Add RFS filter to switch
804 */
805 static int edma_add_rfs_filter(struct edma_adapter *adapter,
806 struct flow_keys *keys, u16 rq,
807 struct edma_rfs_filter_node *filter_node)
808 {
809 int res = -1;
810
811 struct flow_keys *dest_keys = &filter_node->keys;
812
813 memcpy(dest_keys, keys, sizeof(*dest_keys));
814 /*
815 dest_keys->control = keys->control;
816 dest_keys->basic = keys->basic;
817 dest_keys->addrs = keys->addrs;
818 dest_keys->ports = keys->ports;
819 dest_keys.ip_proto = keys->ip_proto;
820 */
821 /* Call callback registered by ESS driver */
822 if (likely(adapter->set_rfs_rule))
823 res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
824 flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
825 keys->basic.ip_proto, rq, 1);
826
827 return res;
828 }
829
830 /* edma_rfs_key_search()
831 * Look for existing RFS entry
832 */
833 static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
834 struct flow_keys *key)
835 {
836 struct edma_rfs_filter_node *p;
837
838 hlist_for_each_entry(p, h, node)
839 if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
840 flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
841 p->keys.ports.src == key->ports.src &&
842 p->keys.ports.dst == key->ports.dst &&
843 p->keys.basic.ip_proto == key->basic.ip_proto)
844 return p;
845 return NULL;
846 }
847
848 /* edma_initialise_rfs_flow_table()
849 * Initialise EDMA RFS flow table
850 */
851 static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
852 {
853 int i;
854
855 spin_lock_init(&adapter->rfs.rfs_ftab_lock);
856
857 /* Initialize EDMA flow hash table */
858 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
859 INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
860
861 adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
862 adapter->rfs.filter_available = adapter->rfs.max_num_filter;
863 adapter->rfs.hashtoclean = 0;
864
865 /* Add timer to get periodic RFS updates from OS */
866 timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
867 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
868 }
869
870 /* edma_free_rfs_flow_table()
871 * Free EDMA RFS flow table
872 */
873 static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
874 {
875 int i;
876
877 /* Remove sync timer */
878 del_timer_sync(&adapter->rfs.expire_rfs);
879 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
880
881 /* Free EDMA RFS table entries */
882 adapter->rfs.filter_available = 0;
883
884 /* Clean-up EDMA flow hash table */
885 for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
886 struct hlist_head *hhead;
887 struct hlist_node *tmp;
888 struct edma_rfs_filter_node *filter_node;
889 int res;
890
891 hhead = &adapter->rfs.hlist_head[i];
892 hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
893 res = edma_delete_rfs_filter(adapter, filter_node);
894 if (res < 0)
895 dev_warn(&adapter->netdev->dev,
896 "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
897 filter_node->flow_id);
898 hlist_del(&filter_node->node);
899 kfree(filter_node);
900 }
901 }
902 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
903 }
904
905 /* edma_tx_unmap_and_free()
906 * clean TX buffer
907 */
908 static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
909 struct edma_sw_desc *sw_desc)
910 {
911 struct sk_buff *skb = sw_desc->skb;
912
913 if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
914 (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
915 /* unmap_single for skb head area */
916 dma_unmap_single(&pdev->dev, sw_desc->dma,
917 sw_desc->length, DMA_TO_DEVICE);
918 else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
919 /* unmap page for paged fragments */
920 dma_unmap_page(&pdev->dev, sw_desc->dma,
921 sw_desc->length, DMA_TO_DEVICE);
922
923 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
924 dev_kfree_skb_any(skb);
925
926 sw_desc->flags = 0;
927 }
928
929 /* edma_tx_complete()
930 * Used to clean tx queues and update hardware and consumer index
931 */
932 static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
933 {
934 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
935 struct edma_sw_desc *sw_desc;
936 struct platform_device *pdev = edma_cinfo->pdev;
937 int i;
938
939 u16 sw_next_to_clean = etdr->sw_next_to_clean;
940 u16 hw_next_to_clean;
941 u32 data = 0;
942
943 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
944 hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
945
946 /* clean the buffer here */
947 while (sw_next_to_clean != hw_next_to_clean) {
948 sw_desc = &etdr->sw_desc[sw_next_to_clean];
949 edma_tx_unmap_and_free(pdev, sw_desc);
950 sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
951 }
952
953 etdr->sw_next_to_clean = sw_next_to_clean;
954
955 /* update the TPD consumer index register */
956 edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
957
958 /* Wake the queue if queue is stopped and netdev link is up */
959 for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
960 if (netif_tx_queue_stopped(etdr->nq[i])) {
961 if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
962 netif_tx_wake_queue(etdr->nq[i]);
963 }
964 }
965 }
966
967 /* edma_get_tx_buffer()
968 * Get sw_desc corresponding to the TPD
969 */
970 static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
971 struct edma_tx_desc *tpd, int queue_id)
972 {
973 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
974 return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
975 }
976
977 /* edma_get_next_tpd()
978 * Return a TPD descriptor for transfer
979 */
980 static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
981 int queue_id)
982 {
983 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
984 u16 sw_next_to_fill = etdr->sw_next_to_fill;
985 struct edma_tx_desc *tpd_desc =
986 (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
987
988 etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
989
990 return tpd_desc;
991 }
992
993 /* edma_tpd_available()
994 * Check number of free TPDs
995 */
996 static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
997 int queue_id)
998 {
999 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1000
1001 u16 sw_next_to_fill;
1002 u16 sw_next_to_clean;
1003 u16 count = 0;
1004
1005 sw_next_to_clean = etdr->sw_next_to_clean;
1006 sw_next_to_fill = etdr->sw_next_to_fill;
1007
1008 if (likely(sw_next_to_clean <= sw_next_to_fill))
1009 count = etdr->count;
1010
1011 return count + sw_next_to_clean - sw_next_to_fill - 1;
1012 }
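/* Occupancy arithmetic above: when the clean index has not wrapped past the
 * fill index the full ring size is added first, and the trailing -1 keeps
 * one TPD slot permanently unused so a completely full ring is never
 * mistaken for an empty one (fill == clean).
 */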
1013
1014 /* edma_tx_queue_get()
1015 * Get the hardware Tx queue index for this skb
1016 */
1017 static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1018 struct sk_buff *skb, int txq_id)
1019 {
1020 /* skb->priority is used as an index into the skb priority table
1021 * and, based on the packet priority, the corresponding queue is assigned.
1022 */
1023 return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1024 }
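/* Worked example with hypothetical offsets: if tx_start_offset for netdev
 * queue 1 were 4, a packet on that queue with bit 2 of skb->priority set
 * would be placed on hardware Tx queue 4 + 1 = 5.
 */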
1025
1026 /* edma_tx_update_hw_idx()
1027 * update the producer index for the ring transmitted
1028 */
1029 static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1030 struct sk_buff *skb, int queue_id)
1031 {
1032 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1033 u32 tpd_idx_data;
1034
1035 /* Read and update the producer index */
1036 edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1037 tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1038 tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1039 << EDMA_TPD_PROD_IDX_SHIFT;
1040
1041 edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1042 }
1043
1044 /* edma_rollback_tx()
1045 * Function to retrieve tx resources in case of error
1046 */
1047 static void edma_rollback_tx(struct edma_adapter *adapter,
1048 struct edma_tx_desc *start_tpd, int queue_id)
1049 {
1050 struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1051 struct edma_sw_desc *sw_desc;
1052 struct edma_tx_desc *tpd = NULL;
1053 u16 start_index, index;
1054
1055 start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1056
1057 index = start_index;
1058 while (index != etdr->sw_next_to_fill) {
1059 tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1060 sw_desc = &etdr->sw_desc[index];
1061 edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1062 memset(tpd, 0, sizeof(struct edma_tx_desc));
1063 if (++index == etdr->count)
1064 index = 0;
1065 }
1066 etdr->sw_next_to_fill = start_index;
1067 }
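/* Everything from start_tpd up to (but not including) sw_next_to_fill is
 * unmapped, its TPD cleared and the fill pointer rewound, so a failed
 * edma_tx_map_and_fill() leaves the ring exactly as it found it.
 */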
1068
1069 /* edma_tx_map_and_fill()
1070 * gets called from edma_xmit_frame
1071 *
1072 * This is where the dma of the buffer to be transmitted
1073 * gets mapped
1074 */
1075 static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1076 struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
1077 unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
1078 bool packet_is_rstp, int nr_frags)
1079 {
1080 struct edma_sw_desc *sw_desc = NULL;
1081 struct platform_device *pdev = edma_cinfo->pdev;
1082 struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
1083 struct sk_buff *iter_skb;
1084 int i = 0;
1085 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1086 u16 buf_len, lso_desc_len = 0;
1087
1088 /* It should either be a nr_frags skb or fraglist skb but not both */
1089 BUG_ON(nr_frags && skb_has_frag_list(skb));
1090
1091 if (skb_is_gso(skb)) {
1092 /* TODO: What additional checks need to be performed here */
1093 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1094 lso_word1 |= EDMA_TPD_IPV4_EN;
1095 ip_hdr(skb)->check = 0;
1096 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1097 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1098 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1099 lso_word1 |= EDMA_TPD_LSO_V2_EN;
1100 ipv6_hdr(skb)->payload_len = 0;
1101 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1102 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1103 } else
1104 return -EINVAL;
1105
1106 lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1107 (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1108 } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1109 u8 css, cso;
1110 cso = skb_checksum_start_offset(skb);
1111 css = cso + skb->csum_offset;
1112
1113 word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1114 word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1115 word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1116 }
1117
1118 if (skb->protocol == htons(ETH_P_PPP_SES))
1119 word1 |= EDMA_TPD_PPPOE_EN;
1120
1121 if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1122 switch(skb->vlan_proto) {
1123 case htons(ETH_P_8021Q):
1124 word3 |= (1 << EDMA_TX_INS_CVLAN);
1125 word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1126 break;
1127 case htons(ETH_P_8021AD):
1128 word1 |= (1 << EDMA_TX_INS_SVLAN);
1129 svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1130 break;
1131 default:
1132 dev_err(&pdev->dev, "no ctag or stag present\n");
1133 goto vlan_tag_error;
1134 }
1135 } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1136 word3 |= (1 << EDMA_TX_INS_CVLAN);
1137 word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1138 }
1139
1140 if (packet_is_rstp) {
1141 word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1142 word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1143 } else {
1144 word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1145 }
1146
1147 buf_len = skb_headlen(skb);
1148
1149 if (lso_word1) {
1150 if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1151
1152 /* IPv6 LSOv2 descriptor */
1153 start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1154 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1155 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1156
1157 /* LSOv2 descriptor overrides addr field to pass length */
1158 tpd->addr = cpu_to_le16(skb->len);
1159 tpd->svlan_tag = svlan_tag;
1160 tpd->word1 = word1 | lso_word1;
1161 tpd->word3 = word3;
1162 }
1163
1164 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1165 if (!start_tpd)
1166 start_tpd = tpd;
1167 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1168
1169 /* The last buffer info contains the skb address,
1170 * so the skb will be freed after unmap
1171 */
1172 sw_desc->length = lso_desc_len;
1173 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1174
1175 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1176 skb->data, buf_len, DMA_TO_DEVICE);
1177 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1178 goto dma_error;
1179
1180 tpd->addr = cpu_to_le32(sw_desc->dma);
1181 tpd->len = cpu_to_le16(buf_len);
1182
1183 tpd->svlan_tag = svlan_tag;
1184 tpd->word1 = word1 | lso_word1;
1185 tpd->word3 = word3;
1186
1187 /* The last buffer info contains the skb address,
1188 * so it will be freed after unmap
1189 */
1190 sw_desc->length = lso_desc_len;
1191 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1192
1193 buf_len = 0;
1194 }
1195
1196 if (likely(buf_len)) {
1197
1198 /* TODO Do not dequeue descriptor if there is a potential error */
1199 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1200
1201 if (!start_tpd)
1202 start_tpd = tpd;
1203
1204 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1205
1206 /* The last buffer info contains the skb address,
1207 * so it will be freed after unmap
1208 */
1209 sw_desc->length = buf_len;
1210 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1211 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1212 skb->data, buf_len, DMA_TO_DEVICE);
1213 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1214 goto dma_error;
1215
1216 tpd->addr = cpu_to_le32(sw_desc->dma);
1217 tpd->len = cpu_to_le16(buf_len);
1218
1219 tpd->svlan_tag = svlan_tag;
1220 tpd->word1 = word1 | lso_word1;
1221 tpd->word3 = word3;
1222 }
1223
1224 /* Walk through all paged fragments */
1225 while (nr_frags--) {
1226 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1227 buf_len = skb_frag_size(frag);
1228 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1229 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1230 sw_desc->length = buf_len;
1231 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1232
1233 sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1234
1235 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1236 goto dma_error;
1237
1238 tpd->addr = cpu_to_le32(sw_desc->dma);
1239 tpd->len = cpu_to_le16(buf_len);
1240
1241 tpd->svlan_tag = svlan_tag;
1242 tpd->word1 = word1 | lso_word1;
1243 tpd->word3 = word3;
1244 i++;
1245 }
1246
1247 /* Walk through all fraglist skbs */
1248 skb_walk_frags(skb, iter_skb) {
1249 buf_len = iter_skb->len;
1250 tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1251 sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1252 sw_desc->length = buf_len;
1253 sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1254 iter_skb->data, buf_len, DMA_TO_DEVICE);
1255
1256 if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1257 goto dma_error;
1258
1259 tpd->addr = cpu_to_le32(sw_desc->dma);
1260 tpd->len = cpu_to_le16(buf_len);
1261 tpd->svlan_tag = svlan_tag;
1262 tpd->word1 = word1 | lso_word1;
1263 tpd->word3 = word3;
1264 sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1265 }
1266
1267 if (tpd)
1268 tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1269
1270 sw_desc->skb = skb;
1271 sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1272
1273 return 0;
1274
1275 dma_error:
1276 edma_rollback_tx(adapter, start_tpd, queue_id);
1277 dev_err(&pdev->dev, "TX DMA map failed\n");
1278 vlan_tag_error:
1279 return -ENOMEM;
1280 }
1281
1282 /* edma_check_link()
1283 * check Link status
1284 */
1285 static int edma_check_link(struct edma_adapter *adapter)
1286 {
1287 struct phy_device *phydev = adapter->phydev;
1288
1289 if (!(adapter->poll_required))
1290 return __EDMA_LINKUP;
1291
1292 if (phydev->link)
1293 return __EDMA_LINKUP;
1294
1295 return __EDMA_LINKDOWN;
1296 }
1297
1298 /* edma_adjust_link()
1299 * check for edma link status
1300 */
1301 void edma_adjust_link(struct net_device *netdev)
1302 {
1303 int status;
1304 struct edma_adapter *adapter = netdev_priv(netdev);
1305 struct phy_device *phydev = adapter->phydev;
1306
1307 if (!test_bit(__EDMA_UP, &adapter->state_flags))
1308 return;
1309
1310 status = edma_check_link(adapter);
1311
1312 if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1313 dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1314 adapter->link_state = __EDMA_LINKUP;
1315 if (adapter->edma_cinfo->is_single_phy) {
1316 ess_set_port_status_speed(adapter->edma_cinfo, phydev,
1317 ffs(adapter->dp_bitmap) - 1);
1318 }
1319 netif_carrier_on(netdev);
1320 if (netif_running(netdev))
1321 netif_tx_wake_all_queues(netdev);
1322 } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1323 dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1324 adapter->link_state = __EDMA_LINKDOWN;
1325 netif_carrier_off(netdev);
1326 netif_tx_stop_all_queues(netdev);
1327 }
1328 }
1329
1330 /* edma_get_stats()
1331 * Statistics API used to retrieve the tx/rx statistics
1332 */
1333 struct net_device_stats *edma_get_stats(struct net_device *netdev)
1334 {
1335 struct edma_adapter *adapter = netdev_priv(netdev);
1336
1337 return &adapter->stats;
1338 }
1339
1340 /* edma_xmit()
1341 * Main api to be called by the core for packet transmission
1342 */
1343 netdev_tx_t edma_xmit(struct sk_buff *skb,
1344 struct net_device *net_dev)
1345 {
1346 struct edma_adapter *adapter = netdev_priv(net_dev);
1347 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1348 struct edma_tx_desc_ring *etdr;
1349 u16 from_cpu = 0, dp_bitmap = 0, txq_id;
1350 int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
1351 unsigned int flags_transmit = 0;
1352 bool packet_is_rstp = false;
1353 struct netdev_queue *nq = NULL;
1354
1355 if (skb_shinfo(skb)->nr_frags) {
1356 nr_frags = skb_shinfo(skb)->nr_frags;
1357 num_tpds_needed += nr_frags;
1358 } else if (skb_has_frag_list(skb)) {
1359 struct sk_buff *iter_skb;
1360
1361 skb_walk_frags(skb, iter_skb)
1362 num_tpds_needed++;
1363 }
1364
1365 if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
1366 dev_err(&net_dev->dev,
1367 "skb received with fragments %d which is more than %lu",
1368 num_tpds_needed, EDMA_MAX_SKB_FRAGS);
1369 dev_kfree_skb_any(skb);
1370 adapter->stats.tx_errors++;
1371 return NETDEV_TX_OK;
1372 }
1373
1374 if (edma_stp_rstp) {
1375 u16 ath_hdr, ath_eth_type;
1376 u8 mac_addr[EDMA_ETH_HDR_LEN];
1377 ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1378 if (ath_eth_type == edma_ath_eth_type) {
1379 packet_is_rstp = true;
1380 ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1381 dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1382 from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1383 memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1384
1385 skb_pull(skb, 4);
1386
1387 memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1388 }
1389 }
1390
1391 /* this will be one of the 4 TX queues exposed to linux kernel */
1392 txq_id = skb_get_queue_mapping(skb);
1393 queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1394 etdr = edma_cinfo->tpd_ring[queue_id];
1395 nq = netdev_get_tx_queue(net_dev, txq_id);
1396
1397 local_bh_disable();
1398 /* Tx is not handled in bottom half context. Hence, we need to protect
1399 * Tx from tasks and bottom half
1400 */
1401
1402 if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1403 /* not enough descriptor, just stop queue */
1404 netif_tx_stop_queue(nq);
1405 local_bh_enable();
1406 dev_dbg(&net_dev->dev, "Not enough descriptors available");
1407 edma_cinfo->edma_ethstats.tx_desc_error++;
1408 return NETDEV_TX_BUSY;
1409 }
1410
1411 /* Check and mark VLAN tag offload */
1412 if (unlikely(skb_vlan_tag_present(skb)))
1413 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1414 else if (!adapter->edma_cinfo->is_single_phy && adapter->default_vlan_tag)
1415 flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1416
1417 /* Check and mark checksum offload */
1418 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1419 flags_transmit |= EDMA_HW_CHECKSUM;
1420
1421 /* Map and fill descriptor for Tx */
1422 ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1423 flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
1424 if (ret) {
1425 dev_kfree_skb_any(skb);
1426 adapter->stats.tx_errors++;
1427 goto netdev_okay;
1428 }
1429
1430 /* Update SW producer index */
1431 edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1432
1433 /* update tx statistics */
1434 adapter->stats.tx_packets++;
1435 adapter->stats.tx_bytes += skb->len;
1436
1437 netdev_okay:
1438 local_bh_enable();
1439 return NETDEV_TX_OK;
1440 }
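/* Transmit path summary: count the TPDs needed (head plus page frags or
 * fraglist members), optionally strip the 4-byte Atheros header for STP/RSTP
 * frames, pick the hardware queue from the netdev queue and skb priority,
 * and, with bottom halves disabled, map the buffers and advance the producer
 * index. NETDEV_TX_BUSY is returned only when the ring lacks enough free
 * TPDs.
 */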
1441
1442 /*
1443 * edma_flow_may_expire()
1444 * Timer function called periodically to expire stale RFS flow nodes
1445 */
1446 void edma_flow_may_expire(struct timer_list *t)
1447 {
1448 struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
1449 struct edma_adapter *adapter =
1450 container_of(table, typeof(*adapter), rfs);
1451 int j;
1452
1453 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1454 for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1455 struct hlist_head *hhead;
1456 struct hlist_node *tmp;
1457 struct edma_rfs_filter_node *n;
1458 bool res;
1459
1460 hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1461 hlist_for_each_entry_safe(n, tmp, hhead, node) {
1462 res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1463 n->flow_id, n->filter_id);
1464 if (res) {
1465 int ret;
1466 ret = edma_delete_rfs_filter(adapter, n);
1467 if (ret < 0)
1468 dev_dbg(&adapter->netdev->dev,
1469 "RFS entry %d not allowed to be flushed by Switch",
1470 n->flow_id);
1471 else {
1472 hlist_del(&n->node);
1473 kfree(n);
1474 adapter->rfs.filter_available++;
1475 }
1476 }
1477 }
1478 }
1479
1480 adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1481 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1482 mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
1483 }
1484
1485 /* edma_rx_flow_steer()
1486 * Called by the core to steer the flow to a CPU
1487 */
1488 int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1489 u16 rxq, u32 flow_id)
1490 {
1491 struct flow_keys keys;
1492 struct edma_rfs_filter_node *filter_node;
1493 struct edma_adapter *adapter = netdev_priv(dev);
1494 u16 hash_tblid;
1495 int res;
1496
1497 if (skb->protocol == htons(ETH_P_IPV6)) {
1498 dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
1499 res = -EINVAL;
1500 goto no_protocol_err;
1501 }
1502
1503 /* Dissect flow parameters
1504 * We only support IPv4 + TCP/UDP
1505 */
1506 res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1507 if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1508 res = -EPROTONOSUPPORT;
1509 goto no_protocol_err;
1510 }
1511
1512 /* Check if table entry exists */
1513 hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1514
1515 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1516 filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1517
1518 if (filter_node) {
1519 if (rxq == filter_node->rq_id) {
1520 res = -EEXIST;
1521 goto out;
1522 } else {
1523 res = edma_delete_rfs_filter(adapter, filter_node);
1524 if (res < 0)
1525 dev_warn(&adapter->netdev->dev,
1526 "Cannot steer flow %d to different queue",
1527 filter_node->flow_id);
1528 else {
1529 adapter->rfs.filter_available++;
1530 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1531 if (res < 0) {
1532 dev_warn(&adapter->netdev->dev,
1533 "Cannot steer flow %d to different queue",
1534 filter_node->flow_id);
1535 } else {
1536 adapter->rfs.filter_available--;
1537 filter_node->rq_id = rxq;
1538 filter_node->filter_id = res;
1539 }
1540 }
1541 }
1542 } else {
1543 if (adapter->rfs.filter_available == 0) {
1544 res = -EBUSY;
1545 goto out;
1546 }
1547
1548 filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1549 if (!filter_node) {
1550 res = -ENOMEM;
1551 goto out;
1552 }
1553
1554 res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1555 if (res < 0) {
1556 kfree(filter_node);
1557 goto out;
1558 }
1559
1560 adapter->rfs.filter_available--;
1561 filter_node->rq_id = rxq;
1562 filter_node->filter_id = res;
1563 filter_node->flow_id = flow_id;
1564 filter_node->keys = keys;
1565 INIT_HLIST_NODE(&filter_node->node);
1566 hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1567 }
1568
1569 out:
1570 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1571 no_protocol_err:
1572 return res;
1573 }
1574
1575 /* edma_register_rfs_filter()
1576 * Add RFS filter callback
1577 */
1578 int edma_register_rfs_filter(struct net_device *netdev,
1579 set_rfs_filter_callback_t set_filter)
1580 {
1581 struct edma_adapter *adapter = netdev_priv(netdev);
1582
1583 spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1584
1585 if (adapter->set_rfs_rule) {
1586 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1587 return -1;
1588 }
1589
1590 adapter->set_rfs_rule = set_filter;
1591 spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1592
1593 return 0;
1594 }
1595
1596 /* edma_alloc_tx_rings()
1597 * Allocate Tx rings
1598 */
1599 int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1600 {
1601 struct platform_device *pdev = edma_cinfo->pdev;
1602 int i, err = 0;
1603
1604 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1605 err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1606 if (err) {
1607 dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1608 return err;
1609 }
1610 }
1611
1612 return 0;
1613 }
1614
1615 /* edma_free_tx_rings()
1616 * Free tx rings
1617 */
1618 void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1619 {
1620 int i;
1621
1622 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1623 edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1624 }
1625
1626 /* edma_free_tx_resources()
1627 * Free buffers associated with tx rings
1628 */
1629 void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1630 {
1631 struct edma_tx_desc_ring *etdr;
1632 struct edma_sw_desc *sw_desc;
1633 struct platform_device *pdev = edma_cinfo->pdev;
1634 int i, j;
1635
1636 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1637 etdr = edma_cinfo->tpd_ring[i];
1638 for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1639 sw_desc = &etdr->sw_desc[j];
1640 if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1641 EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1642 edma_tx_unmap_and_free(pdev, sw_desc);
1643 }
1644 }
1645 }
1646
1647 /* edma_alloc_rx_rings()
1648 * Allocate rx rings
1649 */
1650 int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1651 {
1652 struct platform_device *pdev = edma_cinfo->pdev;
1653 int i, j, err = 0;
1654
1655 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1656 err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1657 if (err) {
1658 dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
1659 return err;
1660 }
1661 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1662 }
1663
1664 return 0;
1665 }
1666
1667 /* edma_free_rx_rings()
1668 * free rx rings
1669 */
1670 void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1671 {
1672 int i, j;
1673
1674 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1675 edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1676 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1677 }
1678 }
1679
1680 /* edma_free_queues()
1681 * Free the queues allocated
1682 */
1683 void edma_free_queues(struct edma_common_info *edma_cinfo)
1684 {
1685 int i, j;
1686
1687 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1688 if (edma_cinfo->tpd_ring[i])
1689 kfree(edma_cinfo->tpd_ring[i]);
1690 edma_cinfo->tpd_ring[i] = NULL;
1691 }
1692
1693 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1694 if (edma_cinfo->rfd_ring[j])
1695 kfree(edma_cinfo->rfd_ring[j]);
1696 edma_cinfo->rfd_ring[j] = NULL;
1697 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1698 }
1699
1700 edma_cinfo->num_rx_queues = 0;
1701 edma_cinfo->num_tx_queues = 0;
1702
1703 return;
1704 }
1705
1706 /* edma_free_rx_resources()
1707 * Free buffers associated with rx rings
1708 */
1709 void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1710 {
1711 struct edma_rfd_desc_ring *erdr;
1712 struct edma_sw_desc *sw_desc;
1713 struct platform_device *pdev = edma_cinfo->pdev;
1714 int i, j, k;
1715
1716 for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1717 erdr = edma_cinfo->rfd_ring[k];
1718 for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1719 sw_desc = &erdr->sw_desc[j];
1720 if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
1721 dma_unmap_single(&pdev->dev, sw_desc->dma,
1722 sw_desc->length, DMA_FROM_DEVICE);
1723 edma_clean_rfd(erdr, j);
1724 } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
1725 dma_unmap_page(&pdev->dev, sw_desc->dma,
1726 sw_desc->length, DMA_FROM_DEVICE);
1727 edma_clean_rfd(erdr, j);
1728 }
1729 }
1730 k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1731
1732 }
1733 }
1734
1735 /* edma_alloc_queues_tx()
1736 * Allocate memory for all Tx rings
1737 */
1738 int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1739 {
1740 int i;
1741
1742 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1743 struct edma_tx_desc_ring *etdr;
1744 etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
1745 if (!etdr)
1746 goto err;
1747 etdr->count = edma_cinfo->tx_ring_count;
1748 edma_cinfo->tpd_ring[i] = etdr;
1749 }
1750
1751 return 0;
1752 err:
1753 edma_free_queues(edma_cinfo);
1754 return -1;
1755 }
1756
1757 /* edma_alloc_queues_rx()
1758 * Allocate memory for all rings
1759 */
1760 int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
1761 {
1762 int i, j;
1763
1764 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1765 struct edma_rfd_desc_ring *rfd_ring;
1766 rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
1767 GFP_KERNEL);
1768 if (!rfd_ring)
1769 goto err;
1770 rfd_ring->count = edma_cinfo->rx_ring_count;
1771 edma_cinfo->rfd_ring[j] = rfd_ring;
1772 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1773 }
1774 return 0;
1775 err:
1776 edma_free_queues(edma_cinfo);
1777 return -1;
1778 }
1779
1780 /* edma_clear_irq_status()
1781 * Clear interrupt status
1782 */
1783 void edma_clear_irq_status(void)
1784 {
1785 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1786 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1787 edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
1788 edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
1789 }
1790
1791 /* edma_configure()
1792 * Configure skb, edma interrupts and control register.
1793 */
1794 int edma_configure(struct edma_common_info *edma_cinfo)
1795 {
1796 struct edma_hw *hw = &edma_cinfo->hw;
1797 u32 intr_modrt_data;
1798 u32 intr_ctrl_data = 0;
1799 int i, j, ret_count;
1800
1801 edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
1802 intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
1803 intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
1804 edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
1805
1806 edma_clear_irq_status();
1807
1808 /* Clear any WOL status */
1809 edma_write_reg(EDMA_REG_WOL_CTRL, 0);
1810 intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
1811 intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
1812 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1813 edma_configure_tx(edma_cinfo);
1814 edma_configure_rx(edma_cinfo);
1815
1816 /* Allocate the RX buffer */
1817 for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1818 struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
1819 ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
1820 if (ret_count) {
1821 dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
1822 }
1823 j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1824 }
1825
1826 /* Configure descriptor Ring */
1827 edma_init_desc(edma_cinfo);
1828 return 0;
1829 }
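/* Bring-up order used by edma_configure(): interrupt type and moderation
 * timers first, then the Tx/Rx queue control registers, then the Rx buffers
 * themselves, and finally the descriptor ring base addresses and sizes via
 * edma_init_desc().
 */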
1830
/* edma_irq_enable()
 * Enable default interrupt generation settings
 */
void edma_irq_enable(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;
	int i, j;

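	/* Ack any stale RX status, then unmask only the RX queues that are
	 * actually in use (note the same ring-index stride as elsewhere).
	 */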
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
	}
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
}

/* edma_irq_disable()
 * Disable all interrupts
 */
void edma_irq_disable(struct edma_common_info *edma_cinfo)
{
	int i;

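	/* Mask every possible RX/TX queue interrupt (not just the queues in
	 * use) as well as the MISC and WOL interrupts.
	 */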
	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
	edma_write_reg(EDMA_REG_MISC_IMR, 0);
	edma_write_reg(EDMA_REG_WOL_IMR, 0);
}

/* edma_free_irqs()
 * Free all IRQs
 */
void edma_free_irqs(struct edma_adapter *adapter)
{
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	int i, j;
	int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);

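	/* Each core owns 4 TX IRQs starting at tx_start and k RX IRQs starting
	 * at rx_start (k = 1 when 4 RX queues are configured, otherwise 2);
	 * free them per core.
	 */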
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
			free_irq(edma_cinfo->tx_irq[j],
				 &edma_cinfo->edma_percpu_info[i]);

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
			free_irq(edma_cinfo->rx_irq[j],
				 &edma_cinfo->edma_percpu_info[i]);
	}
}

/* edma_enable_rx_ctrl()
 * Enable RX queue control
 */
void edma_enable_rx_ctrl(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
	data |= EDMA_RXQ_CTRL_EN;
	edma_write_reg(EDMA_REG_RXQ_CTRL, data);
}

/* edma_enable_tx_ctrl()
 * Enable TX queue control
 */
void edma_enable_tx_ctrl(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
	data |= EDMA_TXQ_CTRL_TXQ_EN;
	edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}

/* edma_stop_rx_tx()
 * Disable RX/TX queue control
 */
void edma_stop_rx_tx(struct edma_hw *hw)
{
	u32 data;

	edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
	data &= ~EDMA_RXQ_CTRL_EN;
	edma_write_reg(EDMA_REG_RXQ_CTRL, data);
	edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
	data &= ~EDMA_TXQ_CTRL_TXQ_EN;
	edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}

/* edma_reset()
 * Reset the EDMA
 */
int edma_reset(struct edma_common_info *edma_cinfo)
{
	struct edma_hw *hw = &edma_cinfo->hw;

	edma_irq_disable(edma_cinfo);

	edma_clear_irq_status();

	edma_stop_rx_tx(hw);

	return 0;
}

/* edma_fill_netdev()
 * Attach a netdev and its TX queue to the given TPD ring
 */
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
		     int dev, int txq_id)
{
	struct edma_tx_desc_ring *etdr;
	int i = 0;

	etdr = edma_cinfo->tpd_ring[queue_id];

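	/* Find the first free netdev slot on this TPD ring; several GMAC
	 * netdevs may share one hardware TX ring, each in its own slot.
	 */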
	while (i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->netdev[i])
		i++;

	if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
		return -1;

	/* Populate the netdev associated with the tpd ring */
	etdr->netdev[i] = edma_netdev[dev];
	etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);

	return 0;
}

/* edma_set_mac_addr()
 * Change the Ethernet address of the NIC
 */
int edma_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

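	/* Refuse to change the MAC while the interface is running. */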
	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
}

/* edma_set_stp_rstp()
 * set stp/rstp
 */
void edma_set_stp_rstp(bool rstp)
{
	edma_stp_rstp = rstp;
}

/* edma_assign_ath_hdr_type()
 * assign atheros header eth type
 */
void edma_assign_ath_hdr_type(int eth_type)
{
	edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
}

/* edma_get_default_vlan_tag()
 * Used by other modules to get the default vlan tag
 */
int edma_get_default_vlan_tag(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	if (adapter->default_vlan_tag)
		return adapter->default_vlan_tag;

	return 0;
}

/* edma_open()
 * Called when the netdevice is brought up; starts the queues.
 */
int edma_open(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);
	struct platform_device *pdev = adapter->edma_cinfo->pdev;

	netif_tx_start_all_queues(netdev);
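	/* Set up the RFS (receive flow steering) flow table for this adapter;
	 * the matching teardown happens in edma_close().
	 */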
	edma_initialise_rfs_flow_table(adapter);
	set_bit(__EDMA_UP, &adapter->state_flags);

	/* If link polling is enabled (in our case, for the WAN port), start
	 * the PHY and autonegotiation; otherwise force the link UP.
	 */
	if (adapter->poll_required) {
		if (!IS_ERR(adapter->phydev)) {
			phy_start(adapter->phydev);
			phy_start_aneg(adapter->phydev);
			adapter->link_state = __EDMA_LINKDOWN;
		} else {
			dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
		}
	} else {
		adapter->link_state = __EDMA_LINKUP;
		netif_carrier_on(netdev);
	}

	return 0;
}

/* edma_close()
 * Called when the netdevice is brought down; stops the queues.
 */
int edma_close(struct net_device *netdev)
{
	struct edma_adapter *adapter = netdev_priv(netdev);

	edma_free_rfs_flow_table(adapter);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	if (adapter->poll_required) {
		if (!IS_ERR(adapter->phydev))
			phy_stop(adapter->phydev);
	}

	adapter->link_state = __EDMA_LINKDOWN;

	/* Clear the UP state bit before the link state is checked again */
	clear_bit(__EDMA_UP, &adapter->state_flags);

	return 0;
}

/* edma_poll()
 * Polling function that gets called when NAPI is scheduled.
 *
 * The main sequence of work performed here is:
 * read IRQ status -> clean TX -> clean RX -> ack status -> re-enable
 * interrupts.
 */
int edma_poll(struct napi_struct *napi, int budget)
{
	struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
			struct edma_per_cpu_queues_info, napi);
	struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
	u32 reg_data;
	u32 shadow_rx_status, shadow_tx_status;
	int queue_id;
	int i, work_done = 0;
	u16 rx_pending_fill;

	/* Latch the RX/TX interrupt status for this core by ANDing the
	 * ISR value with the appropriate per-CPU RX/TX mask
	 */
	edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
	edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
	shadow_rx_status = edma_percpu_info->rx_status;
	edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
	edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
	shadow_tx_status = edma_percpu_info->tx_status;

	/* Every core is assigned a TX queue range starting at tx_start,
	 * computed in probe and stored in edma_percpu_info->tx_start. There
	 * are 4 TX queues per core, and the per-core tx_mask applied above
	 * leaves only this core's TX status bits set, so loop with ffs()
	 * until every pending TX queue has been cleaned.
	 */
	while (edma_percpu_info->tx_status) {
		queue_id = ffs(edma_percpu_info->tx_status) - 1;
		edma_tx_complete(edma_cinfo, queue_id);
		edma_percpu_info->tx_status &= ~(1 << queue_id);
	}

	/* Likewise, rx_start and the per-core rx_mask select this core's RX
	 * queues; clean each pending RX queue until the budget is exhausted
	 * or the RX buffer refill falls behind.
	 */
	while (edma_percpu_info->rx_status) {
		queue_id = ffs(edma_percpu_info->rx_status) - 1;
		rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
						   budget, queue_id, napi);

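		/* A non-zero rx_pending_fill appears to mean edma_rx_complete()
		 * could not fully replenish the RX ring (buffer allocation
		 * deficit), so polling must continue until it is refilled.
		 */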
		if (likely(work_done < budget)) {
			if (rx_pending_fill) {
				/* Reschedule poll() to refill the RX buffer deficit */
				work_done = budget;
				break;
			}
			edma_percpu_info->rx_status &= ~(1 << queue_id);
		} else {
			break;
		}
	}

	/* Clear the status registers so the handled interrupts do not fire
	 * again. This is done only here, once the producer/consumer indices
	 * have been updated to reflect that transmission/reception completed
	 * successfully.
	 */
	edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
	edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);

	/* If the budget was not fully consumed, exit polling mode */
	if (likely(work_done < budget)) {
		napi_complete(napi);

		/* Re-enable the interrupts */
		for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
			edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
		for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
			edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
	}

	return work_done;
}

/* edma_interrupt()
 * Interrupt handler
 */
irqreturn_t edma_interrupt(int irq, void *dev)
{
	struct edma_per_cpu_queues_info *edma_percpu_info =
			(struct edma_per_cpu_queues_info *)dev;
	struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
	int i;

	/* Mask this core's TX/RX queue interrupts until NAPI has done its work */
	for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
		edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);

	for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
		edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);

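	/* Hand the rest of the work to NAPI; edma_poll() re-enables the
	 * per-queue interrupt masks once the budgeted work is done.
	 */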
	napi_schedule(&edma_percpu_info->napi);

	return IRQ_HANDLED;
}