From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Thu, 19 Jan 2017 02:01:31 +0100
Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver

Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
 drivers/net/ethernet/qualcomm/Kconfig  | 9 +++++++++
 drivers/net/ethernet/qualcomm/Makefile | 1 +
 2 files changed, 10 insertions(+)

--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -61,4 +61,13 @@ config QCOM_EMAC

 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+
+config ESSEDMA
+ tristate "Qualcomm Atheros ESS EDMA support"
+ ---help---
+  This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
+  Say Y to build this driver.
+
+  To compile this driver as a module, choose M here. The module
+  will be called essedma.ko.
 endif # NET_VENDOR_QUALCOMM
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o
 qcauart-objs := qca_uart.o
+obj-$(CONFIG_ESSEDMA) += essedma/
 obj-$(CONFIG_RMNET) += rmnet/
+++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
+## Makefile for the Qualcomm Atheros ethernet edma driver
+obj-$(CONFIG_ESSEDMA) += essedma.o
+essedma-objs := edma_axi.o edma.o edma_ethtool.o
+++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+#include "ess_edma.h"
+extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
+u16 edma_ath_eth_type;
+/* edma_skb_priority_offset()
+ * Get the skb priority offset used for Tx queue selection
+static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
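+ /* bit 2 of skb->priority selects one of the two per-port
+ * priority levels (priorities 0-3 map to offset 0, 4-7 to 1).
+ */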
+ return (skb->priority >> 2) & 1;
+/* edma_alloc_tx_ring()
+ * Allocate the Tx descriptor ring
+static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
+ struct edma_tx_desc_ring *etdr)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ /* Initialize ring */
+ etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
+ etdr->sw_next_to_fill = 0;
+ etdr->sw_next_to_clean = 0;
+ /* Allocate SW descriptors */
+ etdr->sw_desc = vzalloc(etdr->size);
+ if (!etdr->sw_desc) {
+ dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
+ /* Allocate HW descriptors */
+ etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
+ if (!etdr->hw_desc) {
+ dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
+ vfree(etdr->sw_desc);
+/* edma_free_tx_ring()
+ * Free tx rings allocated by edma_alloc_tx_rings
+static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
+ struct edma_tx_desc_ring *etdr)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ if (likely(etdr->dma))
+ dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
+ vfree(etdr->sw_desc);
+ etdr->sw_desc = NULL;
+/* edma_alloc_rx_ring()
+ * allocate rx descriptor ring
+static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
+ struct edma_rfd_desc_ring *erxd)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
+ erxd->sw_next_to_fill = 0;
+ erxd->sw_next_to_clean = 0;
+ /* Allocate SW descriptors */
+ erxd->sw_desc = vzalloc(erxd->size);
+ if (!erxd->sw_desc)
+ /* Alloc HW descriptors */
+ erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
+ if (!erxd->hw_desc) {
+ vfree(erxd->sw_desc);
+/* edma_free_rx_ring()
+ * Free rx ring allocated by alloc_rx_ring
+static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
+ struct edma_rfd_desc_ring *rxdr)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ if (likely(rxdr->dma))
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
+ vfree(rxdr->sw_desc);
+ rxdr->sw_desc = NULL;
+/* edma_configure_tx()
+ * Configure transmission control data
+static void edma_configure_tx(struct edma_common_info *edma_cinfo)
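+ /* pack the TPD and TXF burst sizes plus the burst-enable flag
+ * into a single TXQ control word and program it in one write.
+ */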
+ txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
+ txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
+ txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
+ edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
+/* edma_configure_rx()
+ * configure reception control data
+static void edma_configure_rx(struct edma_common_info *edma_cinfo)
+ struct edma_hw *hw = &edma_cinfo->hw;
+ u32 rss_type, rx_desc1, rxq_ctrl_data;
+ rss_type = hw->rss_type;
+ edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
+ /* Set RFD burst number */
+ rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
+ /* Set RFD prefetch threshold */
+ rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
+ /* Set RFD in host ring low threshold to generate an interrupt */
+ rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
+ edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
+ /* Set the Rx FIFO threshold at which to start DMAing data to the host */
+ rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
+ /* Set RX remove vlan bit */
+ rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
+ edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
+/* edma_alloc_rx_buf()
+ * does skb allocation for the received packets.
+static int edma_alloc_rx_buf(struct edma_common_info
+ struct edma_rfd_desc_ring *erdr,
+ int cleaned_count, int queue_id)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ struct edma_rx_free_desc *rx_desc;
+ struct edma_sw_desc *sw_desc;
+ struct sk_buff *skb;
+ u16 prod_idx, length;
+ if (cleaned_count > erdr->count) {
+ dev_err(&pdev->dev, "Incorrect cleaned_count %d",
+ i = erdr->sw_next_to_fill;
+ while (cleaned_count) {
+ sw_desc = &erdr->sw_desc[i];
+ length = edma_cinfo->rx_head_buffer_len;
+ if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
+ skb = sw_desc->skb;
+ skb = netdev_alloc_skb(edma_netdev[0], length);
+ /* Better luck next round */
+ if (edma_cinfo->page_mode) {
+ struct page *pg = alloc_page(GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
+ edma_cinfo->rx_page_buffer_len,
+ if (dma_mapping_error(&pdev->dev,
+ dev_kfree_skb_any(skb);
+ skb_fill_page_desc(skb, 0, pg, 0,
+ edma_cinfo->rx_page_buffer_len);
+ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
+ sw_desc->length = edma_cinfo->rx_page_buffer_len;
+ sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
+ length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ dev_kfree_skb_any(skb);
+ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
+ sw_desc->length = length;
+ /* Update the buffer info */
+ sw_desc->skb = skb;
+ rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
+ rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
+ if (++i == erdr->count)
+ erdr->sw_next_to_fill = i;
+ prod_idx = erdr->count - 1;
+ /* Update the producer index */
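+ /* prod_idx appears to trail sw_next_to_fill by one slot (see the
+ * wrap-around handling above), so the hardware never overtakes
+ * the software fill pointer.
+ */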
+ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
+ reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
+ reg_data |= prod_idx;
+ edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
+ return cleaned_count;
+ * update descriptor ring size, buffer and producer/consumer index
+static void edma_init_desc(struct edma_common_info *edma_cinfo)
+ struct edma_rfd_desc_ring *rfd_ring;
+ struct edma_tx_desc_ring *etdr;
+ u16 hw_cons_idx = 0;
+ /* Set the base address of every TPD ring. */
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+ etdr = edma_cinfo->tpd_ring[i];
+ /* Update descriptor ring base address */
+ edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
+ edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
+ /* Calculate hardware consumer index */
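+ /* seed both software ring pointers from the hardware consumer
+ * index so the TPD ring starts out empty after (re)configuration.
+ */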
+ hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
+ etdr->sw_next_to_fill = hw_cons_idx;
+ etdr->sw_next_to_clean = hw_cons_idx;
+ data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
+ data |= hw_cons_idx;
+ /* update producer index */
+ edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
+ /* update SW consumer index register */
+ edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
+ /* Set TPD ring size */
+ edma_write_reg(EDMA_REG_TPD_RING_SIZE,
+ edma_cinfo->tx_ring_count &
+ EDMA_TPD_RING_SIZE_MASK);
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ rfd_ring = edma_cinfo->rfd_ring[j];
+ /* Update Receive Free descriptor ring base address */
+ edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
+ (u32)(rfd_ring->dma));
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
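+ /* when only 4 Rx queues are configured, every other hardware
+ * queue index is used, hence the stride of 2; this pattern
+ * repeats wherever rfd_ring[] is walked.
+ */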
+ data = edma_cinfo->rx_head_buffer_len;
+ if (edma_cinfo->page_mode)
+ data = edma_cinfo->rx_page_buffer_len;
+ data &= EDMA_RX_BUF_SIZE_MASK;
+ data <<= EDMA_RX_BUF_SIZE_SHIFT;
+ /* Update RFD ring size and RX buffer size */
+ data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
+ << EDMA_RFD_RING_SIZE_SHIFT;
+ edma_write_reg(EDMA_REG_RX_DESC0, data);
+ /* Disable TX FIFO low watermark and high watermark */
+ edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
+ /* Load all of base address above */
+ edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
+ data |= 1 << EDMA_LOAD_PTR_SHIFT;
+ edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
+/* edma_receive_checksum
+ * API to check the checksum of received packets
+static void edma_receive_checksum(struct edma_rx_return_desc *rd,
+ struct sk_buff *skb)
+ skb_checksum_none_assert(skb);
+ /* check the RRD IP/L4 checksum bit to see if
+ * it's set, which in turn indicates checksum
+ if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ * clean up rx resources on error
+static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
+ struct edma_rx_free_desc *rx_desc;
+ struct edma_sw_desc *sw_desc;
+ rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
+ sw_desc = &erdr->sw_desc[index];
+ if (sw_desc->skb) {
+ dev_kfree_skb_any(sw_desc->skb);
+ sw_desc->skb = NULL;
+ memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
+/* edma_rx_complete_stp_rstp()
+ * Complete Rx processing for STP/RSTP packets
+static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
+ u8 mac_addr[EDMA_ETH_HDR_LEN];
+ port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
+ & EDMA_RRD_PORT_TYPE_MASK;
+ /* proceed with the stp/rstp calculation only if the
+ * port type is 0x4
+ if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
+ u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
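+ /* 01:80:c2:00:00:00 is the IEEE 802.1D bridge group address
+ * that STP/RSTP BPDUs are addressed to.
+ */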
+ /* calculate the frame priority */
+ priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+ & EDMA_RRD_PRIORITY_MASK;
+ for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
+ mac_addr[i] = skb->data[i];
+ /* Check if destination mac addr is bpdu addr */
+ if (!memcmp(mac_addr, bpdu_mac, 6)) {
+ /* the destination mac address matches the
+ * BPDU address, so insert the atheros
+ * header into the packet.
+ u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
+ (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
+ (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
+ memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
+ *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
+ *(uint16_t *)&skb->data[14] = htons(athr_hdr);
+ * edma_rx_complete_fraglist()
+ * Complete Rx processing for fraglist skbs
+static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
+ u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ struct edma_hw *hw = &edma_cinfo->hw;
+ struct sk_buff *skb_temp;
+ struct edma_sw_desc *sw_desc;
+ u16 size_remaining;
+ skb->tail += (hw->rx_head_buff_size - 16);
+ skb->len = skb->truesize = length;
+ size_remaining = length - (hw->rx_head_buff_size - 16);
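+ /* the first 16 bytes of the head buffer hold the RRD, so only
+ * rx_head_buff_size - 16 bytes of payload live in this segment.
+ */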
+ /* clean-up all related sw_descs */
+ for (i = 1; i < num_rfds; i++) {
+ struct sk_buff *skb_prev;
+ sw_desc = &erdr->sw_desc[sw_next_to_clean];
+ skb_temp = sw_desc->skb;
+ dma_unmap_single(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ if (size_remaining < hw->rx_head_buff_size)
+ skb_put(skb_temp, size_remaining);
+ skb_put(skb_temp, hw->rx_head_buff_size);
+ * If we are processing the first rfd, we link
+ * skb->frag_list to the skb corresponding to the
+ skb_shinfo(skb)->frag_list = skb_temp;
+ skb_prev->next = skb_temp;
+ skb_prev = skb_temp;
+ skb_temp->next = NULL;
+ skb->data_len += skb_temp->len;
+ size_remaining -= skb_temp->len;
+ /* Increment SW index */
+ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+ (*cleaned_count)++;
+ return sw_next_to_clean;
+/* edma_rx_complete_paged()
+ * Complete Rx processing for paged skbs
+static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
+ u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ struct sk_buff *skb_temp;
+ struct edma_sw_desc *sw_desc;
+ u16 size_remaining;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ /* Setup skbuff fields */
+ if (likely(num_rfds <= 1)) {
+ skb->data_len = length;
+ skb->truesize += edma_cinfo->rx_page_buffer_len;
+ skb_fill_page_desc(skb, 0, skb_frag_page(frag),
+ skb->data_len = frag->size;
+ skb->truesize += edma_cinfo->rx_page_buffer_len;
+ size_remaining = length - frag->size;
+ skb_fill_page_desc(skb, 0, skb_frag_page(frag),
+ /* clean-up all related sw_descs */
+ for (i = 1; i < num_rfds; i++) {
+ sw_desc = &erdr->sw_desc[sw_next_to_clean];
+ skb_temp = sw_desc->skb;
+ frag = &skb_shinfo(skb_temp)->frags[0];
+ dma_unmap_page(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ if (size_remaining < edma_cinfo->rx_page_buffer_len)
+ frag->size = size_remaining;
+ skb_fill_page_desc(skb, i, skb_frag_page(frag),
+ skb_shinfo(skb_temp)->nr_frags = 0;
+ dev_kfree_skb_any(skb_temp);
+ skb->data_len += frag->size;
+ skb->truesize += edma_cinfo->rx_page_buffer_len;
+ size_remaining -= frag->size;
+ /* Increment SW index */
+ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+ (*cleaned_count)++;
+ return sw_next_to_clean;
+ * edma_rx_complete()
+ * Main API called from the poll function to process Rx packets.
+static void edma_rx_complete(struct edma_common_info *edma_cinfo,
+ int *work_done, int work_to_do, int queue_id,
+ struct napi_struct *napi)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
+ struct net_device *netdev;
+ struct edma_adapter *adapter;
+ struct edma_sw_desc *sw_desc;
+ struct sk_buff *skb;
+ struct edma_rx_return_desc *rd;
+ u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
+ sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
+ int port_id, i, drop_count = 0;
+ u16 count = erdr->count, rfd_avail;
+ u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
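+ /* maps the eight hardware Rx queues onto the four Linux Rx
+ * queue ids, two hardware queues per id.
+ */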
+ sw_next_to_clean = erdr->sw_next_to_clean;
+ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
+ hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
+ EDMA_RFD_CONS_IDX_MASK;
+ while (sw_next_to_clean != hw_next_to_clean) {
+ sw_desc = &erdr->sw_desc[sw_next_to_clean];
+ skb = sw_desc->skb;
+ /* Unmap the allocated buffer */
+ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
+ dma_unmap_single(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ dma_unmap_page(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ if (edma_cinfo->page_mode) {
+ vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+ memcpy((uint8_t *)&rrd[0], vaddr, 16);
+ rd = (struct edma_rx_return_desc *)rrd;
+ kunmap_atomic(vaddr);
+ rd = (struct edma_rx_return_desc *)skb->data;
+ /* Check if RRD is valid */
+ if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
+ edma_clean_rfd(erdr, sw_next_to_clean);
+ sw_next_to_clean = (sw_next_to_clean + 1) &
+ /* Get the number of RFDs from RRD */
+ num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
+ /* Get Rx port ID from switch */
+ port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
+ if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
+ dev_err(&pdev->dev, "Invalid RRD source port bit set");
+ for (i = 0; i < num_rfds; i++) {
+ edma_clean_rfd(erdr, sw_next_to_clean);
+ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+ /* check if we have a sink for the data we receive.
+ * If the interface isn't setup, we have to drop the
+ * incoming data for now.
+ netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
+ edma_clean_rfd(erdr, sw_next_to_clean);
+ sw_next_to_clean = (sw_next_to_clean + 1) &
+ adapter = netdev_priv(netdev);
+ /* This code handles the use case where a high
+ * priority stream and a low priority stream are
+ * received simultaneously on the DUT. The problem occurs
+ * if one of the Rx rings is full and the corresponding
+ * core is busy with other work. This causes the ESS CPU
+ * port to backpressure all incoming traffic, including
+ * the high priority stream. We monitor the free descriptor
+ * count on each CPU and whenever it drops below a
+ * threshold (< 80), we drop all low priority traffic and
+ * let only high priority traffic pass through. We can
+ * hence avoid the ESS CPU port backpressuring high priority
+ priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+ & EDMA_RRD_PRIORITY_MASK;
+ if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
+ rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
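+ /* free RFDs = ring size minus outstanding descriptors minus
+ * one; the mask relies on the ring size being a power of two.
+ */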
+ if (rfd_avail < EDMA_RFD_AVAIL_THR) {
+ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
+ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+ adapter->stats.rx_dropped++;
+ if (drop_count == 3) {
+ if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+ /* If buffer clean count reaches 16, we replenish HW buffers. */
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ cleaned_count = ret_count;
+ /* Increment SW index */
+ sw_next_to_clean = (sw_next_to_clean + 1) &
+ /* Get the packet size and allocate buffer */
+ length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
+ if (edma_cinfo->page_mode) {
+ sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ dev_kfree_skb_any(skb);
+ /* single or fraglist skb */
+ /* The first 16 bytes of the packet are the RRD
+ * descriptor, so the actual data starts at an
+ * offset of 16.
+ skb_reserve(skb, 16);
+ if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
+ skb_put(skb, length);
+ sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
+ if (edma_stp_rstp) {
+ edma_rx_complete_stp_rstp(skb, port_id, rd);
+ skb->protocol = eth_type_trans(skb, netdev);
+ /* Record Rx queue for RFS/RPS and fill flow hash from HW */
+ skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
+ if (netdev->features & NETIF_F_RXHASH) {
+ hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
+ if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
+ skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
+#ifdef CONFIG_NF_FLOW_COOKIE
+ skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
+ edma_receive_checksum(rd, skb);
+ /* Process VLAN HW acceleration indication provided by HW */
+ if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
+ if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ else if (rd->rrd1 & EDMA_RRD_SVLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
+ /* Update rx statistics */
+ adapter->stats.rx_packets++;
+ adapter->stats.rx_bytes += length;
+ /* Check if we reached refill threshold */
+ if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ cleaned_count = ret_count;
+ /* At this point skb should go to stack */
+ napi_gro_receive(napi, skb);
+ /* Check if we still have NAPI budget */
+ /* Read index once again since we still have NAPI budget */
+ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
+ hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
+ EDMA_RFD_CONS_IDX_MASK;
+ } while (hw_next_to_clean != sw_next_to_clean);
+ erdr->sw_next_to_clean = sw_next_to_clean;
+ /* Refill here in case refill threshold wasn't reached */
+ if (likely(cleaned_count)) {
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
809 + dev_dbg(&pdev->dev, "Not all buffers was reallocated");
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ erdr->sw_next_to_clean);
+/* edma_delete_rfs_filter()
+ * Remove RFS filter from switch
+static int edma_delete_rfs_filter(struct edma_adapter *adapter,
+ struct edma_rfs_filter_node *filter_node)
+ struct flow_keys *keys = &filter_node->keys;
+ if (likely(adapter->set_rfs_rule))
+ res = (*adapter->set_rfs_rule)(adapter->netdev,
+ flow_get_u32_src(keys), flow_get_u32_dst(keys),
+ keys->ports.src, keys->ports.dst,
+ keys->basic.ip_proto, filter_node->rq_id, 0);
+/* edma_add_rfs_filter()
+ * Add RFS filter to switch
+static int edma_add_rfs_filter(struct edma_adapter *adapter,
+ struct flow_keys *keys, u16 rq,
+ struct edma_rfs_filter_node *filter_node)
+ struct flow_keys *dest_keys = &filter_node->keys;
+ memcpy(dest_keys, keys, sizeof(*dest_keys));
+ dest_keys->control = keys->control;
+ dest_keys->basic = keys->basic;
+ dest_keys->addrs = keys->addrs;
+ dest_keys->ports = keys->ports;
+ dest_keys->basic.ip_proto = keys->basic.ip_proto;
+ /* Call callback registered by ESS driver */
+ if (likely(adapter->set_rfs_rule))
+ res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
+ flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
+ keys->basic.ip_proto, rq, 1);
+/* edma_rfs_key_search()
+ * Look for existing RFS entry
+static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
+ struct flow_keys *key)
+ struct edma_rfs_filter_node *p;
+ hlist_for_each_entry(p, h, node)
+ if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
+ flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
+ p->keys.ports.src == key->ports.src &&
+ p->keys.ports.dst == key->ports.dst &&
+ p->keys.basic.ip_proto == key->basic.ip_proto)
+/* edma_initialise_rfs_flow_table()
+ * Initialise EDMA RFS flow table
+static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
+ spin_lock_init(&adapter->rfs.rfs_ftab_lock);
+ /* Initialize EDMA flow hash table */
+ for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
+ INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
+ adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
+ adapter->rfs.filter_available = adapter->rfs.max_num_filter;
+ adapter->rfs.hashtoclean = 0;
+ /* Add timer to get periodic RFS updates from OS */
+ timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
+ mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
+/* edma_free_rfs_flow_table()
+ * Free EDMA RFS flow table
+static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
+ /* Remove sync timer */
+ del_timer_sync(&adapter->rfs.expire_rfs);
+ spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
+ /* Free EDMA RFS table entries */
+ adapter->rfs.filter_available = 0;
+ /* Clean-up EDMA flow hash table */
+ for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
+ struct hlist_head *hhead;
+ struct hlist_node *tmp;
+ struct edma_rfs_filter_node *filter_node;
+ hhead = &adapter->rfs.hlist_head[i];
+ hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
+ res = edma_delete_rfs_filter(adapter, filter_node);
+ dev_warn(&adapter->netdev->dev,
+ "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
+ filter_node->flow_id);
+ hlist_del(&filter_node->node);
+ kfree(filter_node);
+ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
+/* edma_tx_unmap_and_free()
+static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
+ struct edma_sw_desc *sw_desc)
+ struct sk_buff *skb = sw_desc->skb;
+ if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
+ (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
+ /* unmap_single for skb head area */
+ dma_unmap_single(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_TO_DEVICE);
+ else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
+ /* unmap page for paged fragments */
+ dma_unmap_page(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_TO_DEVICE);
+ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
+ dev_kfree_skb_any(skb);
+ sw_desc->flags = 0;
+/* edma_tx_complete()
+ * Used to clean the Tx queues and update the SW consumer index register
+static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
+ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
+ struct edma_sw_desc *sw_desc;
+ struct platform_device *pdev = edma_cinfo->pdev;
+ u16 sw_next_to_clean = etdr->sw_next_to_clean;
+ u16 hw_next_to_clean;
+ edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
+ hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
+ /* clean the buffer here */
+ while (sw_next_to_clean != hw_next_to_clean) {
+ sw_desc = &etdr->sw_desc[sw_next_to_clean];
+ edma_tx_unmap_and_free(pdev, sw_desc);
+ sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
+ etdr->sw_next_to_clean = sw_next_to_clean;
+ /* update the TPD consumer index register */
+ edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
+ /* Wake the queue if queue is stopped and netdev link is up */
+ for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
+ if (netif_tx_queue_stopped(etdr->nq[i])) {
+ if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
+ netif_tx_wake_queue(etdr->nq[i]);
+/* edma_get_tx_buffer()
+ * Get sw_desc corresponding to the TPD
+static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
+ struct edma_tx_desc *tpd, int queue_id)
+ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
+ return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
+/* edma_get_next_tpd()
+ * Return a TPD descriptor for transfer
+static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
+ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
+ u16 sw_next_to_fill = etdr->sw_next_to_fill;
+ struct edma_tx_desc *tpd_desc =
+ (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
+ etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
+/* edma_tpd_available()
+ * Check number of free TPDs
+static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
+ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
+ u16 sw_next_to_fill;
+ u16 sw_next_to_clean;
+ sw_next_to_clean = etdr->sw_next_to_clean;
+ sw_next_to_fill = etdr->sw_next_to_fill;
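+ /* one slot is always left unused (the trailing -1 below) so a
+ * completely full ring can be told apart from an empty one.
+ */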
+ if (likely(sw_next_to_clean <= sw_next_to_fill))
+ count = etdr->count;
+ return count + sw_next_to_clean - sw_next_to_fill - 1;
+/* edma_tx_queue_get()
+ * Get the starting number of the queue
+static inline int edma_tx_queue_get(struct edma_adapter *adapter,
+ struct sk_buff *skb, int txq_id)
+ /* skb->priority is used as an index to skb priority table
+ * and based on packet priority, the corresponding queue is assigned.
+ return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
+/* edma_tx_update_hw_idx()
+ * update the producer index of the Tx ring
+static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
+ struct sk_buff *skb, int queue_id)
+ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
+ /* Read and update the producer index */
+ edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
+ tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
+ tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
+ << EDMA_TPD_PROD_IDX_SHIFT;
+ edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
+/* edma_rollback_tx()
+ * Function to reclaim tx resources in case of error
+static void edma_rollback_tx(struct edma_adapter *adapter,
+ struct edma_tx_desc *start_tpd, int queue_id)
+ struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
+ struct edma_sw_desc *sw_desc;
+ struct edma_tx_desc *tpd = NULL;
+ u16 start_index, index;
+ start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
+ index = start_index;
+ while (index != etdr->sw_next_to_fill) {
+ tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
+ sw_desc = &etdr->sw_desc[index];
+ edma_tx_unmap_and_free(adapter->pdev, sw_desc);
+ memset(tpd, 0, sizeof(struct edma_tx_desc));
+ if (++index == etdr->count)
+ etdr->sw_next_to_fill = start_index;
+/* edma_tx_map_and_fill()
+ * gets called from edma_xmit()
+ * This is where the dma of the buffer to be transmitted
+static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
+ struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
+ unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
+ bool packet_is_rstp, int nr_frags)
+ struct edma_sw_desc *sw_desc = NULL;
+ struct platform_device *pdev = edma_cinfo->pdev;
+ struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
+ struct sk_buff *iter_skb;
+ u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
+ u16 buf_len, lso_desc_len = 0;
+ /* It should either be a nr_frags skb or fraglist skb but not both */
+ BUG_ON(nr_frags && skb_has_frag_list(skb));
+ if (skb_is_gso(skb)) {
+ /* TODO: What additional checks need to be performed here */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ lso_word1 |= EDMA_TPD_IPV4_EN;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ lso_word1 |= EDMA_TPD_LSO_V2_EN;
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
+ (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
+ } else if (flags_transmit & EDMA_HW_CHECKSUM) {
+ cso = skb_checksum_start_offset(skb);
+ css = cso + skb->csum_offset;
+ word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
+ word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
+ word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
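+ /* the checksum start and offset are presumably programmed to
+ * the hardware in units of 16-bit words, hence the right
+ * shifts by one above.
+ */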
+ if (skb->protocol == htons(ETH_P_PPP_SES))
+ word1 |= EDMA_TPD_PPPOE_EN;
+ if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
+ switch (skb->vlan_proto) {
+ case htons(ETH_P_8021Q):
+ word3 |= (1 << EDMA_TX_INS_CVLAN);
+ word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
+ case htons(ETH_P_8021AD):
+ word1 |= (1 << EDMA_TX_INS_SVLAN);
+ svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
+ dev_err(&pdev->dev, "no ctag or stag present\n");
+ goto vlan_tag_error;
+ } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
+ word3 |= (1 << EDMA_TX_INS_CVLAN);
+ word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
+ if (packet_is_rstp) {
+ word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
+ word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
+ word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
+ buf_len = skb_headlen(skb);
+ if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
+ /* IPv6 LSOv2 descriptor */
+ start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
+ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
+ /* LSOv2 descriptor overrides addr field to pass length */
+ tpd->addr = cpu_to_le16(skb->len);
+ tpd->svlan_tag = svlan_tag;
+ tpd->word1 = word1 | lso_word1;
+ tpd->word3 = word3;
+ tpd = edma_get_next_tpd(edma_cinfo, queue_id);
+ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
+ /* The last buffer info contains the skb address,
+ * so the skb will be freed after unmap
+ sw_desc->length = lso_desc_len;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
+ sw_desc->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, sw_desc->dma))
+ tpd->addr = cpu_to_le32(sw_desc->dma);
+ tpd->len = cpu_to_le16(buf_len);
+ tpd->svlan_tag = svlan_tag;
+ tpd->word1 = word1 | lso_word1;
+ tpd->word3 = word3;
+ /* The last buffer info contains the skb address,
+ * so it will be freed after unmap
+ sw_desc->length = lso_desc_len;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
+ if (likely(buf_len)) {
+ /* TODO Do not dequeue descriptor if there is a potential error */
+ tpd = edma_get_next_tpd(edma_cinfo, queue_id);
+ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
+ /* The last buffer info contains the skb address,
+ * so it will be freed after unmap
+ sw_desc->length = buf_len;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
+ sw_desc->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, sw_desc->dma))
+ tpd->addr = cpu_to_le32(sw_desc->dma);
+ tpd->len = cpu_to_le16(buf_len);
+ tpd->svlan_tag = svlan_tag;
+ tpd->word1 = word1 | lso_word1;
+ tpd->word3 = word3;
+ /* Walk through all paged fragments */
+ while (nr_frags--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ buf_len = skb_frag_size(frag);
+ tpd = edma_get_next_tpd(edma_cinfo, queue_id);
+ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
+ sw_desc->length = buf_len;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
+ sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, sw_desc->dma))
+ tpd->addr = cpu_to_le32(sw_desc->dma);
+ tpd->len = cpu_to_le16(buf_len);
+ tpd->svlan_tag = svlan_tag;
+ tpd->word1 = word1 | lso_word1;
+ tpd->word3 = word3;
+ /* Walk through all fraglist skbs */
+ skb_walk_frags(skb, iter_skb) {
+ buf_len = iter_skb->len;
+ tpd = edma_get_next_tpd(edma_cinfo, queue_id);
+ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
+ sw_desc->length = buf_len;
+ sw_desc->dma = dma_map_single(&adapter->pdev->dev,
+ iter_skb->data, buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, sw_desc->dma))
+ tpd->addr = cpu_to_le32(sw_desc->dma);
+ tpd->len = cpu_to_le16(buf_len);
+ tpd->svlan_tag = svlan_tag;
+ tpd->word1 = word1 | lso_word1;
+ tpd->word3 = word3;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
+ tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
+ sw_desc->skb = skb;
+ sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
+ edma_rollback_tx(adapter, start_tpd, queue_id);
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+/* edma_check_link()
+ * check Link status
+static int edma_check_link(struct edma_adapter *adapter)
+ struct phy_device *phydev = adapter->phydev;
+ if (!(adapter->poll_required))
+ return __EDMA_LINKUP;
+ return __EDMA_LINKUP;
+ return __EDMA_LINKDOWN;
+/* edma_adjust_link()
+ * check for edma link status
+void edma_adjust_link(struct net_device *netdev)
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = adapter->phydev;
+ if (!test_bit(__EDMA_UP, &adapter->state_flags))
+ status = edma_check_link(adapter);
+ if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
+ dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
+ adapter->link_state = __EDMA_LINKUP;
+ netif_carrier_on(netdev);
+ if (netif_running(netdev))
+ netif_tx_wake_all_queues(netdev);
+ } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
+ dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
+ adapter->link_state = __EDMA_LINKDOWN;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+/* edma_get_stats()
+ * Statistics API used to retrieve the tx/rx statistics
+struct net_device_stats *edma_get_stats(struct net_device *netdev)
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ return &adapter->stats;
+ * Main API called by the core for packet transmission
+netdev_tx_t edma_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+ struct edma_adapter *adapter = netdev_priv(net_dev);
+ struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+ struct edma_tx_desc_ring *etdr;
+ u16 from_cpu, dp_bitmap, txq_id;
+ int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
+ unsigned int flags_transmit = 0;
+ bool packet_is_rstp = false;
+ struct netdev_queue *nq = NULL;
+ if (skb_shinfo(skb)->nr_frags) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ num_tpds_needed += nr_frags;
+ } else if (skb_has_frag_list(skb)) {
+ struct sk_buff *iter_skb;
+ skb_walk_frags(skb, iter_skb)
+ num_tpds_needed++;
+ if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
+ dev_err(&net_dev->dev,
+ "skb received with fragments %d which is more than %lu",
+ num_tpds_needed, EDMA_MAX_SKB_FRAGS);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ if (edma_stp_rstp) {
+ u16 ath_hdr, ath_eth_type;
+ u8 mac_addr[EDMA_ETH_HDR_LEN];
+ ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
+ if (ath_eth_type == edma_ath_eth_type) {
+ packet_is_rstp = true;
+ ath_hdr = ntohs(*(uint16_t *)&skb->data[14]);
+ dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
+ from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
+ memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
+ memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
+ /* this will be one of the 4 TX queues exposed to linux kernel */
+ txq_id = skb_get_queue_mapping(skb);
+ queue_id = edma_tx_queue_get(adapter, skb, txq_id);
+ etdr = edma_cinfo->tpd_ring[queue_id];
+ nq = netdev_get_tx_queue(net_dev, txq_id);
+ local_bh_disable();
+ /* Tx is not handled in bottom half context. Hence, we need to protect
+ * Tx from tasks and bottom half
+ if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
+ /* not enough descriptor, just stop queue */
+ netif_tx_stop_queue(nq);
+ local_bh_enable();
+ dev_dbg(&net_dev->dev, "Not enough descriptors available");
+ edma_cinfo->edma_ethstats.tx_desc_error++;
+ return NETDEV_TX_BUSY;
+ /* Check and mark VLAN tag offload */
+ if (skb_vlan_tag_present(skb))
+ flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
+ else if (adapter->default_vlan_tag)
+ flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
+ /* Check and mark checksum offload */
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ flags_transmit |= EDMA_HW_CHECKSUM;
+ /* Map and fill descriptor for Tx */
+ ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
+ flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_errors++;
+ /* Update SW producer index */
+ edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
+ /* update tx statistics */
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ local_bh_enable();
+ return NETDEV_TX_OK;
+ * edma_flow_may_expire()
+ * Timer function called periodically to expire stale RFS flow nodes
+void edma_flow_may_expire(struct timer_list *t)
+ struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
+ struct edma_adapter *adapter =
+ container_of(table, typeof(*adapter), rfs);
+ spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
+ for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
+ struct hlist_head *hhead;
+ struct hlist_node *tmp;
+ struct edma_rfs_filter_node *n;
+ hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
+ hlist_for_each_entry_safe(n, tmp, hhead, node) {
+ res = rps_may_expire_flow(adapter->netdev, n->rq_id,
+ n->flow_id, n->filter_id);
+ ret = edma_delete_rfs_filter(adapter, n);
+ dev_dbg(&adapter->netdev->dev,
+ "RFS entry %d not allowed to be flushed by Switch",
+ hlist_del(&n->node);
+ adapter->rfs.filter_available++;
+ adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
+ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
+ mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
+/* edma_rx_flow_steer()
+ * Called by core to steer the flow to CPU
+int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq, u32 flow_id)
+ struct flow_keys keys;
+ struct edma_rfs_filter_node *filter_node;
+ struct edma_adapter *adapter = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
+ goto no_protocol_err;
+ /* Dissect flow parameters
+ * We only support IPv4 + TCP/UDP
+ res = skb_flow_dissect_flow_keys(skb, &keys, 0);
+ if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
+ res = -EPROTONOSUPPORT;
+ goto no_protocol_err;
+ /* Check if table entry exists */
+ hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
+ spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
+ filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
+ if (filter_node) {
+ if (rxq == filter_node->rq_id) {
+ res = edma_delete_rfs_filter(adapter, filter_node);
+ dev_warn(&adapter->netdev->dev,
+ "Cannot steer flow %d to different queue",
+ filter_node->flow_id);
+ adapter->rfs.filter_available++;
+ res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
+ dev_warn(&adapter->netdev->dev,
+ "Cannot steer flow %d to different queue",
+ filter_node->flow_id);
+ adapter->rfs.filter_available--;
+ filter_node->rq_id = rxq;
+ filter_node->filter_id = res;
+ if (adapter->rfs.filter_available == 0) {
+ filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
+ if (!filter_node) {
+ res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
+ kfree(filter_node);
+ adapter->rfs.filter_available--;
+ filter_node->rq_id = rxq;
+ filter_node->filter_id = res;
+ filter_node->flow_id = flow_id;
+ filter_node->keys = keys;
+ INIT_HLIST_NODE(&filter_node->node);
+ hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
+ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
+/* edma_register_rfs_filter()
+ * Add RFS filter callback
+int edma_register_rfs_filter(struct net_device *netdev,
+ set_rfs_filter_callback_t set_filter)
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
+ if (adapter->set_rfs_rule) {
+ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
+ adapter->set_rfs_rule = set_filter;
+ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
+/* edma_alloc_tx_rings()
+ * Allocate tx rings
+int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+ err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
+ dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
+/* edma_free_tx_rings()
+void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++)
+ edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
+/* edma_free_tx_resources()
+ * Free buffers associated with tx rings
+void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
+ struct edma_tx_desc_ring *etdr;
+ struct edma_sw_desc *sw_desc;
+ struct platform_device *pdev = edma_cinfo->pdev;
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+ etdr = edma_cinfo->tpd_ring[i];
+ for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
+ sw_desc = &etdr->sw_desc[j];
+ if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
+ EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
+ edma_tx_unmap_and_free(pdev, sw_desc);
+/* edma_alloc_rx_rings()
+ * Allocate rx rings
+int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
+ struct platform_device *pdev = edma_cinfo->pdev;
+ int i, j, err = 0;
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1686 + dev_err(&pdev->dev, "Rx Queue alloc%u failed\n", i);
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+/* edma_free_rx_rings()
+void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+/* edma_free_queues()
+ * Free the queues allocated
+void edma_free_queues(struct edma_common_info *edma_cinfo)
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+ if (edma_cinfo->tpd_ring[i])
+ kfree(edma_cinfo->tpd_ring[i]);
+ edma_cinfo->tpd_ring[i] = NULL;
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ if (edma_cinfo->rfd_ring[j])
+ kfree(edma_cinfo->rfd_ring[j]);
+ edma_cinfo->rfd_ring[j] = NULL;
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+ edma_cinfo->num_rx_queues = 0;
+ edma_cinfo->num_tx_queues = 0;
+/* edma_free_rx_resources()
+ * Free buffers associated with rx rings
+void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
+ struct edma_rfd_desc_ring *erdr;
+ struct edma_sw_desc *sw_desc;
+ struct platform_device *pdev = edma_cinfo->pdev;
+ for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
+ erdr = edma_cinfo->rfd_ring[k];
+ for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
+ sw_desc = &erdr->sw_desc[j];
+ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
+ dma_unmap_single(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ edma_clean_rfd(erdr, j);
+ } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
+ dma_unmap_page(&pdev->dev, sw_desc->dma,
+ sw_desc->length, DMA_FROM_DEVICE);
+ edma_clean_rfd(erdr, j);
+ k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+/* edma_alloc_queues_tx()
+ * Allocate memory for all the Tx rings
+int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+ struct edma_tx_desc_ring *etdr;
+ etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
+ etdr->count = edma_cinfo->tx_ring_count;
+ edma_cinfo->tpd_ring[i] = etdr;
+ edma_free_queues(edma_cinfo);
+/* edma_alloc_queues_rx()
+ * Allocate memory for all the Rx rings
+int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ struct edma_rfd_desc_ring *rfd_ring;
+ rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
+ rfd_ring->count = edma_cinfo->rx_ring_count;
+ edma_cinfo->rfd_ring[j] = rfd_ring;
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+ edma_free_queues(edma_cinfo);
+/* edma_clear_irq_status()
+ * Clear interrupt status
+void edma_clear_irq_status(void)
+ edma_write_reg(EDMA_REG_RX_ISR, 0xff);
+ edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
+ edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
+ edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
+/* edma_configure()
+ * Configure skb, edma interrupts and control register.
+int edma_configure(struct edma_common_info *edma_cinfo)
+ struct edma_hw *hw = &edma_cinfo->hw;
+ u32 intr_modrt_data;
+ u32 intr_ctrl_data = 0;
+ int i, j, ret_count;
+ edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
+ intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
+ intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
+ edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
+ edma_clear_irq_status();
+ /* Clear any WOL status */
+ edma_write_reg(EDMA_REG_WOL_CTRL, 0);
+ intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
+ intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
+ edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
+ edma_configure_tx(edma_cinfo);
+ edma_configure_rx(edma_cinfo);
+ /* Allocate the RX buffer */
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
+ ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
+ dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+ /* Configure descriptor Ring */
+ edma_init_desc(edma_cinfo);
+/* edma_irq_enable()
+ * Enable default interrupt generation settings
+void edma_irq_enable(struct edma_common_info *edma_cinfo)
+ struct edma_hw *hw = &edma_cinfo->hw;
+ edma_write_reg(EDMA_REG_RX_ISR, 0xff);
+ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
+ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+ edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
+ for (i = 0; i < edma_cinfo->num_tx_queues; i++)
+ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
+/* edma_irq_disable()
+ * Disable Interrupt
+void edma_irq_disable(struct edma_common_info *edma_cinfo)
+ for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
+ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
+ for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
+ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
+ edma_write_reg(EDMA_REG_MISC_IMR, 0);
+ edma_write_reg(EDMA_REG_WOL_IMR, 0);
+/* edma_free_irqs()
+void edma_free_irqs(struct edma_adapter *adapter)
+ struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+ int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
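+ /* each core services four Tx IRQs, while the number of Rx IRQs
+ * per core depends on the Rx queue layout: k is 1 when four Rx
+ * queues are used, otherwise 2 (see the loops below).
+ */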
+ for (i = 0; i < CONFIG_NR_CPUS; i++) {
+ for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
+ free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
+ for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
+ free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
+/* edma_enable_rx_ctrl()
+ * Enable RX queue control
+void edma_enable_rx_ctrl(struct edma_hw *hw)
+ edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
+ data |= EDMA_RXQ_CTRL_EN;
+ edma_write_reg(EDMA_REG_RXQ_CTRL, data);
+/* edma_enable_tx_ctrl()
+ * Enable TX queue control
+void edma_enable_tx_ctrl(struct edma_hw *hw)
+ edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
+ data |= EDMA_TXQ_CTRL_TXQ_EN;
+ edma_write_reg(EDMA_REG_TXQ_CTRL, data);
+/* edma_stop_rx_tx()
+ * Disable RX/TX queue control
+void edma_stop_rx_tx(struct edma_hw *hw)
+ edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
+ data &= ~EDMA_RXQ_CTRL_EN;
+ edma_write_reg(EDMA_REG_RXQ_CTRL, data);
+ edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
+ data &= ~EDMA_TXQ_CTRL_TXQ_EN;
+ edma_write_reg(EDMA_REG_TXQ_CTRL, data);
+int edma_reset(struct edma_common_info *edma_cinfo)
+ struct edma_hw *hw = &edma_cinfo->hw;
+ edma_irq_disable(edma_cinfo);
+ edma_clear_irq_status();
+ edma_stop_rx_tx(hw);
+/* edma_fill_netdev()
+ * Fill netdev for each etdr
+int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
+ int dev, int txq_id)
+ struct edma_tx_desc_ring *etdr;
+ etdr = edma_cinfo->tpd_ring[queue_id];
+ while (etdr->netdev[i])
+ if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
+ /* Populate the netdev associated with the tpd ring */
+ etdr->netdev[i] = edma_netdev[dev];
+ etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
+ * Change the Ethernet Address of the NIC
+int edma_set_mac_addr(struct net_device *netdev, void *p)
+ struct sockaddr *addr = p;
+ if (!is_valid_ether_addr(addr->sa_data))
+ if (netif_running(netdev))
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+/* edma_set_stp_rstp()
+void edma_set_stp_rstp(bool rstp)
+ edma_stp_rstp = rstp;
+/* edma_assign_ath_hdr_type()
+ * assign atheros header eth type
+void edma_assign_ath_hdr_type(int eth_type)
+ edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
+/* edma_get_default_vlan_tag()
+ * Used by other modules to get the default vlan tag
+int edma_get_default_vlan_tag(struct net_device *netdev)
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ if (adapter->default_vlan_tag)
+ return adapter->default_vlan_tag;
2038 + * gets called when netdevice is up, start the queue.
2040 +int edma_open(struct net_device *netdev)
2042 + struct edma_adapter *adapter = netdev_priv(netdev);
2043 + struct platform_device *pdev = adapter->edma_cinfo->pdev;
2045 + netif_tx_start_all_queues(netdev);
2046 + edma_initialise_rfs_flow_table(adapter);
2047 + set_bit(__EDMA_UP, &adapter->state_flags);
2049 + /* If link polling is enabled (in our case, for WAN), do a
2050 + * phy start; otherwise always report the link as UP.
2052 + if (adapter->poll_required) {
2053 + if (!IS_ERR(adapter->phydev)) {
2054 + phy_start(adapter->phydev);
2055 + phy_start_aneg(adapter->phydev);
2056 + adapter->link_state = __EDMA_LINKDOWN;
2058 + dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2061 + adapter->link_state = __EDMA_LINKUP;
2062 + netif_carrier_on(netdev);
2070 + * gets called when netdevice is down, stops the queue.
2072 +int edma_close(struct net_device *netdev)
2074 + struct edma_adapter *adapter = netdev_priv(netdev);
2076 + edma_free_rfs_flow_table(adapter);
2077 + netif_carrier_off(netdev);
2078 + netif_tx_stop_all_queues(netdev);
2080 + if (adapter->poll_required) {
2081 + if (!IS_ERR(adapter->phydev))
2082 + phy_stop(adapter->phydev);
2085 + adapter->link_state = __EDMA_LINKDOWN;
2087 + /* Clear the GMAC UP state before the link state is next checked
2089 + clear_bit(__EDMA_UP, &adapter->state_flags);
2095 + * polling function that gets called when napi is scheduled.
2097 + * The main sequence of tasks performed in this api is:
2098 + * read irq status -> clean tx rings -> clean rx rings ->
2099 + * clear irq status -> re-enable interrupts.
2101 +int edma_poll(struct napi_struct *napi, int budget)
2103 + struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2104 + struct edma_per_cpu_queues_info, napi);
2105 + struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2107 + u32 shadow_rx_status, shadow_tx_status;
2109 + int i, work_done = 0;
2111 + /* Store the Rx/Tx status by ANDing it with
2112 + * the appropriate per-CPU RX/TX mask
2114 + edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2115 + edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2116 + shadow_rx_status = edma_percpu_info->rx_status;
2117 + edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2118 + edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2119 + shadow_tx_status = edma_percpu_info->tx_status;
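+ /* The shadow copies are written back to the ISR registers further
+ * down, once the producer/consumer indices reflect the completed work.
+ */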
2121 + /* Every core will have a start, which will be computed
2122 + * in probe and stored in edma_percpu_info->tx_start variable.
2123 + * We will shift the status bit by tx_start to obtain
2124 + * status bits for the core on which the current processing
2125 + * is happening. Since, there are 4 tx queues per core,
2126 + * we will run the loop till we get the correct queue to clear.
2128 + while (edma_percpu_info->tx_status) {
2129 + queue_id = ffs(edma_percpu_info->tx_status) - 1;
2130 + edma_tx_complete(edma_cinfo, queue_id);
2131 + edma_percpu_info->tx_status &= ~(1 << queue_id);
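+ /* Illustrative: a tx_status of 0x0500 makes ffs() pick queues 8 and
+ * 10 in turn, each completed and then cleared from the status word.
+ */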
2134 + /* Every core will have a start, which will be computed
2135 + * in probe and stored in edma_percpu_info->rx_start variable.
2136 + * We will shift the status bit by rx_start to obtain
2137 + * status bits for the core on which the current processing
2138 + * is happening. Since there are 1 or 2 rx queues per core, we
2139 + * will run the loop till we get the correct queue to clear.
2141 + while (edma_percpu_info->rx_status) {
2142 + queue_id = ffs(edma_percpu_info->rx_status) - 1;
2143 + edma_rx_complete(edma_cinfo, &work_done,
2144 + budget, queue_id, napi);
2146 + if (likely(work_done < budget))
2147 + edma_percpu_info->rx_status &= ~(1 << queue_id);
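+ /* If the budget was exhausted the queue bit is deliberately left
+ * set, so this rx queue is serviced first on the next poll.
+ */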
2152 + /* Clear the status register to keep the interrupts from
2153 + * recurring. This clearing of the interrupt status register is
2154 + * done here as writing to status register only takes place
2155 + * once the producer/consumer index has been updated to
2156 + * reflect that the packet transmission/reception went fine.
2158 + edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2159 + edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2161 + /* If budget not fully consumed, exit the polling mode */
2162 + if (likely(work_done < budget)) {
2163 + napi_complete(napi);
2165 + /* re-enable the interrupts */
2166 + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2167 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2168 + for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2169 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2175 +/* edma_interrupt()
2176 + * interrupt handler
2178 +irqreturn_t edma_interrupt(int irq, void *dev)
2180 + struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2181 + struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2184 + /* Mask (disable) the TX/RX interrupts for this core until NAPI completes */
2185 + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2186 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2188 + for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2189 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2191 + napi_schedule(&edma_percpu_info->napi);
2193 + return IRQ_HANDLED;
2196 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
2199 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2201 + * Permission to use, copy, modify, and/or distribute this software for
2202 + * any purpose with or without fee is hereby granted, provided that the
2203 + * above copyright notice and this permission notice appear in all copies.
2204 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2205 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2206 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2207 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2208 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2209 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2210 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2216 +#include <linux/init.h>
2217 +#include <linux/interrupt.h>
2218 +#include <linux/types.h>
2219 +#include <linux/errno.h>
2220 +#include <linux/module.h>
2221 +#include <linux/netdevice.h>
2222 +#include <linux/etherdevice.h>
2223 +#include <linux/skbuff.h>
2224 +#include <linux/io.h>
2225 +#include <linux/vmalloc.h>
2226 +#include <linux/pagemap.h>
2227 +#include <linux/smp.h>
2228 +#include <linux/platform_device.h>
2229 +#include <linux/of.h>
2230 +#include <linux/of_device.h>
2231 +#include <linux/kernel.h>
2232 +#include <linux/device.h>
2233 +#include <linux/sysctl.h>
2234 +#include <linux/phy.h>
2235 +#include <linux/of_net.h>
2236 +#include <net/checksum.h>
2237 +#include <net/ip6_checksum.h>
2238 +#include <asm-generic/bug.h>
2239 +#include "ess_edma.h"
2241 +#define EDMA_CPU_CORES_SUPPORTED 4
2242 +#define EDMA_MAX_PORTID_SUPPORTED 5
2243 +#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED
2244 +#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
2245 +#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
2246 +#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */
2248 +#define EDMA_MAX_RECEIVE_QUEUE 8
2249 +#define EDMA_MAX_TRANSMIT_QUEUE 16
2251 +/* WAN/LAN adapter number */
2256 +#define EDMA_LAN_DEFAULT_VLAN 1
2257 +#define EDMA_WAN_DEFAULT_VLAN 2
2259 +#define EDMA_DEFAULT_GROUP1_VLAN 1
2260 +#define EDMA_DEFAULT_GROUP2_VLAN 2
2261 +#define EDMA_DEFAULT_GROUP3_VLAN 3
2262 +#define EDMA_DEFAULT_GROUP4_VLAN 4
2263 +#define EDMA_DEFAULT_GROUP5_VLAN 5
2265 +/* Queues exposed to linux kernel */
2266 +#define EDMA_NETDEV_TX_QUEUE 4
2267 +#define EDMA_NETDEV_RX_QUEUE 4
2269 +/* Number of queues per core */
2270 +#define EDMA_NUM_TXQ_PER_CORE 4
2271 +#define EDMA_NUM_RXQ_PER_CORE 2
2273 +#define EDMA_TPD_EOP_SHIFT 31
2275 +#define EDMA_PORT_ID_SHIFT 12
2276 +#define EDMA_PORT_ID_MASK 0x7
2278 +/* tpd word 3 bit 18-28 */
2279 +#define EDMA_TPD_PORT_BITMAP_SHIFT 18
2281 +#define EDMA_TPD_FROM_CPU_SHIFT 25
2283 +#define EDMA_FROM_CPU_MASK 0x80
2284 +#define EDMA_SKB_PRIORITY_MASK 0x38
2286 +/* TX/RX descriptor ring count */
2287 +/* should be a power of 2 */
2288 +#define EDMA_RX_RING_SIZE 128
2289 +#define EDMA_TX_RING_SIZE 128
2291 +/* Rx head buffer sizes used in paged/non paged mode */
2292 +#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
2293 +#define EDMA_RX_HEAD_BUFF_SIZE 1540
2295 +/* MAX frame size supported by switch */
2296 +#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
2298 +/* Configurations */
2299 +#define EDMA_INTR_CLEAR_TYPE 0
2300 +#define EDMA_INTR_SW_IDX_W_TYPE 0
2301 +#define EDMA_FIFO_THRESH_TYPE 0
2302 +#define EDMA_RSS_TYPE 0
2303 +#define EDMA_RX_IMT 0x0020
2304 +#define EDMA_TX_IMT 0x0050
2305 +#define EDMA_TPD_BURST 5
2306 +#define EDMA_TXF_BURST 0x100
2307 +#define EDMA_RFD_BURST 8
2308 +#define EDMA_RFD_THR 16
2309 +#define EDMA_RFD_LTHR 0
2311 +/* RX/TX per CPU based mask/shift */
2312 +#define EDMA_TX_PER_CPU_MASK 0xF
2313 +#define EDMA_RX_PER_CPU_MASK 0x3
2314 +#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
2315 +#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
2316 +#define EDMA_TX_CPU_START_SHIFT 0x2
2317 +#define EDMA_RX_CPU_START_SHIFT 0x1
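+/* Illustrative: core i services rx queues starting at (i << 1) and its
+ * per-core rx status mask is 0x3 << (i << 1), e.g. core 2 -> queues 4-5,
+ * mask 0x30.
+ */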
2319 +/* Flags used in transmit direction */
2320 +#define EDMA_HW_CHECKSUM 0x00000001
2321 +#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
2322 +#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
2324 +#define EDMA_SW_DESC_FLAG_LAST 0x1
2325 +#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
2326 +#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
2327 +#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
2328 +#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
2329 +#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
2332 +#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
2334 +/* Ethtool specific list of EDMA supported features */
2335 +#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
2336 + | SUPPORTED_10baseT_Full \
2337 + | SUPPORTED_100baseT_Half \
2338 + | SUPPORTED_100baseT_Full \
2339 + | SUPPORTED_1000baseT_Full)
2341 +/* Receive side Atheros header */
2342 +#define EDMA_RX_ATH_HDR_VERSION 0x2
2343 +#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
2344 +#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
2345 +#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
2346 +#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
2348 +/* Transmit side Atheros header */
2349 +#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
2350 +#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
2351 +#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
2353 +#define EDMA_TXQ_START_CORE0 8
2354 +#define EDMA_TXQ_START_CORE1 12
2355 +#define EDMA_TXQ_START_CORE2 0
2356 +#define EDMA_TXQ_START_CORE3 4
2358 +#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
2359 +#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
2360 +#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
2361 +#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
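+/* Note the deliberate core-to-queue scrambling above: core 0 owns tx
+ * queues 8-11 (mask 0x0F00) while core 2 owns queues 0-3 (mask 0x000F).
+ */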
2363 +#define EDMA_ETH_HDR_LEN 12
2364 +#define EDMA_ETH_TYPE_MASK 0xFFFF
2366 +#define EDMA_RX_BUFFER_WRITE 16
2367 +#define EDMA_RFD_AVAIL_THR 80
2369 +#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR
2371 +extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
2372 + __be16 sport, __be16 dport,
2373 + uint8_t proto, u16 loadbalance, bool action);
2374 +struct edma_ethtool_statistics {
2423 + u32 tx_desc_error;
2426 +struct edma_mdio_data {
2427 + struct mii_bus *mii_bus;
2428 + void __iomem *membase;
2429 + int phy_irq[PHY_MAX_ADDR];
2432 +/* EDMA LINK state */
2433 +enum edma_link_state {
2434 + __EDMA_LINKUP, /* Indicate link is UP */
2435 + __EDMA_LINKDOWN /* Indicate link is down */
2438 +/* EDMA GMAC state */
2439 +enum edma_gmac_state {
2440 + __EDMA_UP /* used to indicate GMAC is up */
2443 +/* edma transmit descriptor */
2444 +struct edma_tx_desc {
2445 + __le16 len; /* full packet including CRC */
2446 + __le16 svlan_tag; /* vlan tag */
2447 + __le32 word1; /* byte 4-7 */
2448 + __le32 addr; /* address of buffer */
2449 + __le32 word3; /* bytes 12-15 */
2452 +/* edma receive return descriptor */
2453 +struct edma_rx_return_desc {
2464 +/* RFD descriptor */
2465 +struct edma_rx_free_desc {
2466 + __le32 buffer_addr; /* buffer address */
2469 +/* edma hw specific data */
2471 + u32 __iomem *hw_addr; /* inner register address */
2472 + struct edma_adapter *adapter; /* netdevice adapter */
2473 + u32 rx_intr_mask; /* rx interrupt mask */
2474 + u32 tx_intr_mask; /* tx interrupt mask */
2475 + u32 misc_intr_mask; /* misc interrupt mask */
2476 + u32 wol_intr_mask; /* wake on lan interrupt mask */
2477 + bool intr_clear_type; /* interrupt clear */
2478 + bool intr_sw_idx_w; /* interrupt software index */
2479 + u32 rx_head_buff_size; /* Rx buffer size */
2480 + u8 rss_type; /* rss protocol type */
2483 +/* edma_sw_desc stores software descriptor
2484 + * SW descriptor has 1:1 map with HW descriptor
2486 +struct edma_sw_desc {
2487 + struct sk_buff *skb;
2488 + dma_addr_t dma; /* dma address */
2489 + u16 length; /* Tx/Rx buffer length */
2493 +/* per core related information */
2494 +struct edma_per_cpu_queues_info {
2495 + struct napi_struct napi; /* napi associated with the core */
2496 + u32 tx_mask; /* tx interrupt mask */
2497 + u32 rx_mask; /* rx interrupt mask */
2498 + u32 tx_status; /* tx interrupt status */
2499 + u32 rx_status; /* rx interrupt status */
2500 + u32 tx_start; /* tx queue start */
2501 + u32 rx_start; /* rx queue start */
2502 + struct edma_common_info *edma_cinfo; /* edma common info */
2505 +/* edma specific common info */
2506 +struct edma_common_info {
2507 + struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
2508 + struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
2509 + struct platform_device *pdev; /* device structure */
2510 + struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
2511 + struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
2512 + struct ctl_table_header *edma_ctl_table_hdr;
2514 + struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
2515 + int num_rx_queues; /* number of rx queue */
2516 + u32 num_tx_queues; /* number of tx queue */
2517 + u32 tx_irq[16]; /* tx irq numbers */
2518 + u32 rx_irq[8]; /* rx irq numbers */
2519 + u32 from_cpu; /* from CPU TPD field */
2520 + u32 num_rxq_per_core; /* Rx queues per core */
2521 + u32 num_txq_per_core; /* Tx queues per core */
2522 + u16 tx_ring_count; /* Tx ring count */
2523 + u16 rx_ring_count; /* Rx ring count */
2524 + u16 rx_head_buffer_len; /* rx buffer length */
2525 + u16 rx_page_buffer_len; /* rx buffer length */
2526 + u32 page_mode; /* Jumbo frame supported flag */
2527 + u32 fraglist_mode; /* fraglist supported flag */
2528 + struct edma_hw hw; /* edma hw specific structure */
2529 + struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
2530 + spinlock_t stats_lock; /* protects updates to the edma stats area */
2531 + struct timer_list edma_stats_timer;
2534 +/* transmit packet descriptor (tpd) ring */
2535 +struct edma_tx_desc_ring {
2536 + struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
2537 + struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
2538 + /* Array of netdevs associated with the tpd ring */
2539 + void *hw_desc; /* descriptor ring virtual address */
2540 + struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2541 + int netdev_bmp; /* Bitmap for per-ring netdevs */
2542 + u32 size; /* descriptor ring length in bytes */
2543 + u16 count; /* number of descriptors in the ring */
2544 + dma_addr_t dma; /* descriptor ring physical address */
2545 + u16 sw_next_to_fill; /* next Tx descriptor to fill */
2546 + u16 sw_next_to_clean; /* next Tx descriptor to clean */
2549 +/* receive free descriptor (rfd) ring */
2550 +struct edma_rfd_desc_ring {
2551 + void *hw_desc; /* descriptor ring virtual address */
2552 + struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2553 + u16 size; /* bytes allocated to sw_desc */
2554 + u16 count; /* number of descriptors in the ring */
2555 + dma_addr_t dma; /* descriptor ring physical address */
2556 + u16 sw_next_to_fill; /* next descriptor to fill */
2557 + u16 sw_next_to_clean; /* next descriptor to clean */
2560 +/* edma_rfs_filter_node - rfs filter node in hash table */
2561 +struct edma_rfs_filter_node {
2562 + struct flow_keys keys;
2563 + u32 flow_id; /* flow_id of filter provided by kernel */
2564 + u16 filter_id; /* filter id of filter returned by adapter */
2565 + u16 rq_id; /* desired rq index */
2566 + struct hlist_node node; /* edma rfs list node */
2569 +/* edma_rfs_flow_tbl - rfs flow table */
2570 +struct edma_rfs_flow_table {
2571 + u16 max_num_filter; /* Maximum number of filters edma supports */
2572 + u16 hashtoclean; /* hash table index to clean next */
2573 + int filter_available; /* Number of free filters available */
2574 + struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
2575 + spinlock_t rfs_ftab_lock;
2576 + struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
2579 +/* EDMA net device structure */
2580 +struct edma_adapter {
2581 + struct net_device *netdev; /* netdevice */
2582 + struct platform_device *pdev; /* platform device */
2583 + struct edma_common_info *edma_cinfo; /* edma common info */
2584 + struct phy_device *phydev; /* Phy device */
2585 + struct edma_rfs_flow_table rfs; /* edma rfs flow table */
2586 + struct net_device_stats stats; /* netdev statistics */
2587 + set_rfs_filter_callback_t set_rfs_rule;
2588 + u32 flags;/* status flags */
2589 + unsigned long state_flags; /* GMAC up/down flags */
2590 + u32 forced_speed; /* link force speed */
2591 + u32 forced_duplex; /* link force duplex */
2592 + u32 link_state; /* phy link state */
2593 + u32 phy_mdio_addr; /* PHY device address on MII interface */
2594 + u32 poll_required; /* check if link polling is required */
2595 + u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
2596 + u32 default_vlan_tag; /* vlan tag */
2598 + uint8_t phy_id[MII_BUS_ID_SIZE + 3];
2601 +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
2602 +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
2603 +int edma_open(struct net_device *netdev);
2604 +int edma_close(struct net_device *netdev);
2605 +void edma_free_tx_resources(struct edma_common_info *edma_c_info);
2606 +void edma_free_rx_resources(struct edma_common_info *edma_c_info);
2607 +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
2608 +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
2609 +void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
2610 +void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
2611 +void edma_free_queues(struct edma_common_info *edma_cinfo);
2612 +void edma_irq_disable(struct edma_common_info *edma_cinfo);
2613 +int edma_reset(struct edma_common_info *edma_cinfo);
2614 +int edma_poll(struct napi_struct *napi, int budget);
2615 +netdev_tx_t edma_xmit(struct sk_buff *skb,
2616 + struct net_device *netdev);
2617 +int edma_configure(struct edma_common_info *edma_cinfo);
2618 +void edma_irq_enable(struct edma_common_info *edma_cinfo);
2619 +void edma_enable_tx_ctrl(struct edma_hw *hw);
2620 +void edma_enable_rx_ctrl(struct edma_hw *hw);
2621 +void edma_stop_rx_tx(struct edma_hw *hw);
2622 +void edma_free_irqs(struct edma_adapter *adapter);
2623 +irqreturn_t edma_interrupt(int irq, void *dev);
2624 +void edma_write_reg(u16 reg_addr, u32 reg_value);
2625 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
2626 +struct net_device_stats *edma_get_stats(struct net_device *netdev);
2627 +int edma_set_mac_addr(struct net_device *netdev, void *p);
2628 +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2629 + u16 rxq, u32 flow_id);
2630 +int edma_register_rfs_filter(struct net_device *netdev,
2631 + set_rfs_filter_callback_t set_filter);
2632 +void edma_flow_may_expire(struct timer_list *t);
2633 +void edma_set_ethtool_ops(struct net_device *netdev);
2634 +void edma_set_stp_rstp(bool tag);
2635 +void edma_assign_ath_hdr_type(int tag);
2636 +int edma_get_default_vlan_tag(struct net_device *netdev);
2637 +void edma_adjust_link(struct net_device *netdev);
2638 +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
2639 +void edma_read_append_stats(struct edma_common_info *edma_cinfo);
2640 +void edma_change_tx_coalesce(int usecs);
2641 +void edma_change_rx_coalesce(int usecs);
2642 +void edma_get_tx_rx_coalesce(u32 *reg_val);
2643 +void edma_clear_irq_status(void);
2644 +#endif /* _EDMA_H_ */
2646 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
2649 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2651 + * Permission to use, copy, modify, and/or distribute this software for
2652 + * any purpose with or without fee is hereby granted, provided that the
2653 + * above copyright notice and this permission notice appear in all copies.
2654 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2655 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2656 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2657 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2658 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2659 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2660 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2663 +#include <linux/cpu_rmap.h>
2664 +#include <linux/of.h>
2665 +#include <linux/of_net.h>
2666 +#include <linux/timer.h>
2668 +#include "ess_edma.h"
2670 +/* Weight round robin and virtual QID mask */
2671 +#define EDMA_WRR_VID_SCTL_MASK 0xffff
2673 +/* Weight round robin and virtual QID shift */
2674 +#define EDMA_WRR_VID_SCTL_SHIFT 16
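+/* The WRR and virtual-queue sysctls below take one packed integer: the
+ * low 16 bits select the queue id and the high 16 bits carry the weight
+ * or the virtual queue id.
+ */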
2676 +char edma_axi_driver_name[] = "ess_edma";
2677 +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2678 + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
2680 +static u32 edma_hw_addr;
2682 +char edma_tx_irq[16][64];
2683 +char edma_rx_irq[8][64];
2684 +struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
2685 +static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
2686 + EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
2687 +static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
2688 + EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
2690 +static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
2691 +static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
2692 +static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
2693 +static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
2694 +static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
2695 +static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
2696 +static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
2697 +static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
2698 +static u32 edma_rss_idt_idx;
2700 +static int edma_weight_assigned_to_q __read_mostly;
2701 +static int edma_queue_to_virtual_q __read_mostly;
2702 +static bool edma_enable_rstp __read_mostly;
2703 +static int edma_athr_hdr_eth_type __read_mostly;
2705 +static int page_mode;
2706 +module_param(page_mode, int, 0);
2707 +MODULE_PARM_DESC(page_mode, "enable page mode");
2709 +static int overwrite_mode;
2710 +module_param(overwrite_mode, int, 0);
2711 +MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
2713 +static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
2714 +module_param(jumbo_mru, int, 0);
2715 +MODULE_PARM_DESC(jumbo_mru, "rx head buffer size (MRU) used in fraglist mode");
2717 +static int num_rxq = 4;
2718 +module_param(num_rxq, int, 0);
2719 +MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
2721 +void edma_write_reg(u16 reg_addr, u32 reg_value)
2723 + writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
2726 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
2728 + *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
2731 +/* edma_change_tx_coalesce()
2732 + * change tx interrupt moderation timer
2734 +void edma_change_tx_coalesce(int usecs)
2738 + /* The value from the user is right-shifted by 1 because the
2739 + * IMT timer resolution is 2 usecs: one count of this
2740 + * register corresponds to 2 usecs.
2742 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2743 + reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
2744 + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
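+ /* e.g. usecs = 100 stores 50 timer counts in bits 16-31, i.e. a
+ * 100 usec tx moderation interval.
+ */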
2747 +/* edma_change_rx_coalesce()
2748 + * change rx interrupt moderation timer
2750 +void edma_change_rx_coalesce(int usecs)
2754 + /* The value from the user is right-shifted by 1 because the
2755 + * IMT timer resolution is 2 usecs: one count of this
2756 + * register corresponds to 2 usecs.
2758 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2759 + reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
2760 + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
2763 +/* edma_get_tx_rx_coalesce()
2764 + * Get tx/rx interrupt moderation value
2766 +void edma_get_tx_rx_coalesce(u32 *reg_val)
2768 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
2771 +void edma_read_append_stats(struct edma_common_info *edma_cinfo)
2777 + spin_lock_bh(&edma_cinfo->stats_lock);
2778 + p = (uint32_t *)&(edma_cinfo->edma_ethstats);
2780 + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2781 + edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
2786 + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2787 + edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
2792 + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2793 + edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
2798 + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2799 + edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
2804 + spin_unlock_bh(&edma_cinfo->stats_lock);
2807 +static void edma_statistics_timer(struct timer_list *t)
2809 + struct edma_common_info *edma_cinfo =
2810 + from_timer(edma_cinfo, t, edma_stats_timer);
2812 + edma_read_append_stats(edma_cinfo);
2814 + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
2817 +static int edma_enable_stp_rstp(struct ctl_table *table, int write,
2818 + void __user *buffer, size_t *lenp,
2823 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2825 + edma_set_stp_rstp(edma_enable_rstp);
2830 +static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
2831 + void __user *buffer, size_t *lenp,
2836 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2838 + edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
2843 +static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
2844 + void __user *buffer, size_t *lenp,
2847 + struct edma_adapter *adapter;
2850 + if (!edma_netdev[1]) {
2851 + pr_err("Netdevice for default_lan does not exist\n");
2855 + adapter = netdev_priv(edma_netdev[1]);
2857 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2860 + adapter->default_vlan_tag = edma_default_ltag;
2865 +static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
2866 + void __user *buffer, size_t *lenp,
2869 + struct edma_adapter *adapter;
2872 + if (!edma_netdev[0]) {
2873 + pr_err("Netdevice for default_wan does not exist\n");
2877 + adapter = netdev_priv(edma_netdev[0]);
2879 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2882 + adapter->default_vlan_tag = edma_default_wtag;
2887 +static int edma_change_group1_vtag(struct ctl_table *table, int write,
2888 + void __user *buffer, size_t *lenp,
2891 + struct edma_adapter *adapter;
2892 + struct edma_common_info *edma_cinfo;
2895 + if (!edma_netdev[0]) {
2896 + pr_err("Netdevice for Group 1 does not exist\n");
2900 + adapter = netdev_priv(edma_netdev[0]);
2901 + edma_cinfo = adapter->edma_cinfo;
2903 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2906 + adapter->default_vlan_tag = edma_default_group1_vtag;
2911 +static int edma_change_group2_vtag(struct ctl_table *table, int write,
2912 + void __user *buffer, size_t *lenp,
2915 + struct edma_adapter *adapter;
2916 + struct edma_common_info *edma_cinfo;
2919 + if (!edma_netdev[1]) {
2920 + pr_err("Netdevice for Group 2 does not exist\n");
2924 + adapter = netdev_priv(edma_netdev[1]);
2925 + edma_cinfo = adapter->edma_cinfo;
2927 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2930 + adapter->default_vlan_tag = edma_default_group2_vtag;
2935 +static int edma_change_group3_vtag(struct ctl_table *table, int write,
2936 + void __user *buffer, size_t *lenp,
2939 + struct edma_adapter *adapter;
2940 + struct edma_common_info *edma_cinfo;
2943 + if (!edma_netdev[2]) {
2944 + pr_err("Netdevice for Group 3 does not exist\n");
2948 + adapter = netdev_priv(edma_netdev[2]);
2949 + edma_cinfo = adapter->edma_cinfo;
2951 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2954 + adapter->default_vlan_tag = edma_default_group3_vtag;
2959 +static int edma_change_group4_vtag(struct ctl_table *table, int write,
2960 + void __user *buffer, size_t *lenp,
2963 + struct edma_adapter *adapter;
2964 + struct edma_common_info *edma_cinfo;
2967 + if (!edma_netdev[3]) {
2968 + pr_err("Netdevice for Group 4 does not exist\n");
2972 + adapter = netdev_priv(edma_netdev[3]);
2973 + edma_cinfo = adapter->edma_cinfo;
2975 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2978 + adapter->default_vlan_tag = edma_default_group4_vtag;
2983 +static int edma_change_group5_vtag(struct ctl_table *table, int write,
2984 + void __user *buffer, size_t *lenp,
2987 + struct edma_adapter *adapter;
2988 + struct edma_common_info *edma_cinfo;
2991 + if (!edma_netdev[4]) {
2992 + pr_err("Netdevice for Group 5 does not exist\n");
2996 + adapter = netdev_priv(edma_netdev[4]);
2997 + edma_cinfo = adapter->edma_cinfo;
2999 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3002 + adapter->default_vlan_tag = edma_default_group5_vtag;
3007 +static int edma_set_rss_idt_value(struct ctl_table *table, int write,
3008 + void __user *buffer, size_t *lenp,
3013 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3014 + if (write && !ret)
3015 + edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
3016 + edma_rss_idt_val);
3020 +static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
3021 + void __user *buffer, size_t *lenp,
3025 + u32 old_value = edma_rss_idt_idx;
3027 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3028 + if (!write || ret)
3031 + if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
3032 + pr_err("Invalid RSS indirection table index %d\n",
3033 + edma_rss_idt_idx);
3034 + edma_rss_idt_idx = old_value;
3040 +static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
3041 + void __user *buffer, size_t *lenp,
3044 + int ret, queue_id, weight;
3045 + u32 reg_data, data, reg_addr;
3047 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3049 + queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
3050 + if (queue_id < 0 || queue_id > 15) {
3051 + pr_err("queue_id not within desired range\n");
3055 + weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
3056 + if (weight < 0 || weight > 0xF) {
3057 + pr_err("weight not within desired range\n");
3061 + data = weight << EDMA_WRR_SHIFT(queue_id);
3063 + reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
3064 + edma_read_reg(reg_addr, &reg_data);
3065 + reg_data &= ~(0xF << EDMA_WRR_SHIFT(queue_id)); /* clear the whole 4-bit weight field */
3066 + edma_write_reg(reg_addr, data | reg_data);
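+ /* Illustrative: writing (3 << 16) | 5 to this sysctl assigns weight 3
+ * to queue 5, in the WRR control word covering queues 4-7 (assuming
+ * consecutive 4-byte control registers).
+ */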
3072 +static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
3073 + void __user *buffer, size_t *lenp,
3076 + int ret, queue_id, virtual_qid;
3077 + u32 reg_data, data, reg_addr;
3079 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3081 + queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
3082 + if (queue_id < 0 || queue_id > 15) {
3083 + pr_err("queue_id not within desired range\n");
3087 + virtual_qid = edma_queue_to_virtual_q >>
3088 + EDMA_WRR_VID_SCTL_SHIFT;
3089 + if (virtual_qid < 0 || virtual_qid > 8) {
3090 + pr_err("virtual queue id not within desired range\n");
3094 + data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
3096 + reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
3097 + edma_read_reg(reg_addr, &reg_data);
3098 + reg_data &= ~(0xF << EDMA_VQ_ID_SHIFT(queue_id)); /* clear the virtual queue field (assumed 4 bits) */
3099 + edma_write_reg(reg_addr, data | reg_data);
3105 +static struct ctl_table edma_table[] = {
3107 + .procname = "default_lan_tag",
3108 + .data = &edma_default_ltag,
3109 + .maxlen = sizeof(int),
3111 + .proc_handler = edma_change_default_lan_vlan
3114 + .procname = "default_wan_tag",
3115 + .data = &edma_default_wtag,
3116 + .maxlen = sizeof(int),
3118 + .proc_handler = edma_change_default_wan_vlan
3121 + .procname = "weight_assigned_to_queues",
3122 + .data = &edma_weight_assigned_to_q,
3123 + .maxlen = sizeof(int),
3125 + .proc_handler = edma_weight_assigned_to_queues
3128 + .procname = "queue_to_virtual_queue_map",
3129 + .data = &edma_queue_to_virtual_q,
3130 + .maxlen = sizeof(int),
3132 + .proc_handler = edma_queue_to_virtual_queue_map
3135 + .procname = "enable_stp_rstp",
3136 + .data = &edma_enable_rstp,
3137 + .maxlen = sizeof(int),
3139 + .proc_handler = edma_enable_stp_rstp
3142 + .procname = "athr_hdr_eth_type",
3143 + .data = &edma_athr_hdr_eth_type,
3144 + .maxlen = sizeof(int),
3146 + .proc_handler = edma_ath_hdr_eth_type
3149 + .procname = "default_group1_vlan_tag",
3150 + .data = &edma_default_group1_vtag,
3151 + .maxlen = sizeof(int),
3153 + .proc_handler = edma_change_group1_vtag
3156 + .procname = "default_group2_vlan_tag",
3157 + .data = &edma_default_group2_vtag,
3158 + .maxlen = sizeof(int),
3160 + .proc_handler = edma_change_group2_vtag
3163 + .procname = "default_group3_vlan_tag",
3164 + .data = &edma_default_group3_vtag,
3165 + .maxlen = sizeof(int),
3167 + .proc_handler = edma_change_group3_vtag
3170 + .procname = "default_group4_vlan_tag",
3171 + .data = &edma_default_group4_vtag,
3172 + .maxlen = sizeof(int),
3174 + .proc_handler = edma_change_group4_vtag
3177 + .procname = "default_group5_vlan_tag",
3178 + .data = &edma_default_group5_vtag,
3179 + .maxlen = sizeof(int),
3181 + .proc_handler = edma_change_group5_vtag
3184 + .procname = "edma_rss_idt_value",
3185 + .data = &edma_rss_idt_val,
3186 + .maxlen = sizeof(int),
3188 + .proc_handler = edma_set_rss_idt_value
3191 + .procname = "edma_rss_idt_idx",
3192 + .data = &edma_rss_idt_idx,
3193 + .maxlen = sizeof(int),
3195 + .proc_handler = edma_set_rss_idt_idx
3200 +/* edma_axi_netdev_ops
3201 + * Describe the operations supported by registered netdevices
3202 + */
3210 +static const struct net_device_ops edma_axi_netdev_ops = {
3211 + .ndo_open = edma_open,
3212 + .ndo_stop = edma_close,
3213 + .ndo_start_xmit = edma_xmit,
3214 + .ndo_set_mac_address = edma_set_mac_addr,
3215 +#ifdef CONFIG_RFS_ACCEL
3216 + .ndo_rx_flow_steer = edma_rx_flow_steer,
3217 + .ndo_register_rfs_filter = edma_register_rfs_filter,
3218 + .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
3220 + .ndo_get_stats = edma_get_stats,
3223 +/* edma_axi_probe()
3224 + * Initialise an adapter identified by a platform_device structure.
3226 + * The OS initialization, configuring of the adapter private structure,
3227 + * and a hardware reset occur in the probe.
3229 +static int edma_axi_probe(struct platform_device *pdev)
3231 + struct edma_common_info *edma_cinfo;
3232 + struct edma_hw *hw;
3233 + struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
3234 + struct resource *res;
3235 + struct device_node *np = pdev->dev.of_node;
3236 + struct device_node *pnp;
3237 + struct device_node *mdio_node = NULL;
3238 + struct platform_device *mdio_plat = NULL;
3239 + struct mii_bus *miibus = NULL;
3240 + struct edma_mdio_data *mdio_data = NULL;
3241 + int i, j, k, err = 0;
3243 + int idx = 0, idx_mac = 0;
3245 + if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
3246 + dev_err(&pdev->dev, "Invalid CPU Cores\n");
3250 + if ((num_rxq != 4) && (num_rxq != 8)) {
3251 + dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
3254 + edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
3255 + if (!edma_cinfo) {
3260 + edma_cinfo->pdev = pdev;
3262 + of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
3263 + if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
3264 + pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
3269 + /* Initialize the netdev array before allocation
3270 + * to avoid double free
3272 + for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
3273 + edma_netdev[i] = NULL;
3275 + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
3276 + edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
3277 + EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
3279 + if (!edma_netdev[i]) {
3280 + dev_err(&pdev->dev,
3281 + "net device alloc fails for index=%d\n", i);
3286 + SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
3287 + platform_set_drvdata(pdev, edma_netdev[i]);
3288 + edma_cinfo->netdev[i] = edma_netdev[i];
3291 + /* Fill ring details */
3292 + edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
3293 + edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
3294 + edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
3296 + /* Update num rx queues based on module parameter */
3297 + edma_cinfo->num_rx_queues = num_rxq;
3298 + edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
3300 + edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
3302 + hw = &edma_cinfo->hw;
3304 + /* Fill HW defaults */
3305 + hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
3306 + hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
3308 + of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
3309 + of_property_read_u32(np, "qcom,rx_head_buf_size",
3310 + &hw->rx_head_buff_size);
3312 + if (overwrite_mode) {
3313 + dev_info(&pdev->dev, "page mode overwritten");
3314 + edma_cinfo->page_mode = page_mode;
3318 + edma_cinfo->fraglist_mode = 1;
3320 + if (edma_cinfo->page_mode)
3321 + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
3322 + else if (edma_cinfo->fraglist_mode)
3323 + hw->rx_head_buff_size = jumbo_mru;
3324 + else if (!hw->rx_head_buff_size)
3325 + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
3327 + hw->misc_intr_mask = 0;
3328 + hw->wol_intr_mask = 0;
3330 + hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
3331 + hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
3333 + /* Configure RSS type for the different protocols that can be
3336 + hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
3337 + EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
3338 + EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
3340 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3342 + edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
3343 + if (IS_ERR(edma_cinfo->hw.hw_addr)) {
3344 + err = PTR_ERR(edma_cinfo->hw.hw_addr);
3348 + edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
3350 + /* Parse tx queue interrupt number from device tree */
3351 + for (i = 0; i < edma_cinfo->num_tx_queues; i++)
3352 + edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
3354 + /* Parse rx queue interrupt number from device tree
3355 + * Here j starts where tx interrupt parsing left off
3356 + * (i.e. 16) and the loop runs from 0 to 7 to parse the
3357 + * rx interrupt numbers.
3359 + for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
3360 + i < edma_cinfo->num_rx_queues; i++) {
3361 + edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
3362 + k += ((num_rxq == 4) ? 2 : 1);
3363 + j += ((num_rxq == 4) ? 2 : 1);
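+ /* With only 4 rx queues every other hardware queue is used, so both
+ * the IRQ resource index (j) and the rx queue index (k) advance in
+ * steps of 2.
+ */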
3366 + edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
3367 + edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
3369 + err = edma_alloc_queues_tx(edma_cinfo);
3371 + dev_err(&pdev->dev, "Allocation of TX queue failed\n");
3372 + goto err_tx_qinit;
3375 + err = edma_alloc_queues_rx(edma_cinfo);
3377 + dev_err(&pdev->dev, "Allocation of RX queue failed\n");
3378 + goto err_rx_qinit;
3381 + err = edma_alloc_tx_rings(edma_cinfo);
3383 + dev_err(&pdev->dev, "Allocation of TX resources failed\n");
3384 + goto err_tx_rinit;
3387 + err = edma_alloc_rx_rings(edma_cinfo);
3389 + dev_err(&pdev->dev, "Allocation of RX resources failed\n");
3390 + goto err_rx_rinit;
3393 + /* Initialize netdev and netdev bitmap for transmit descriptor rings */
3394 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
3395 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
3398 + etdr->netdev_bmp = 0;
3399 + for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
3400 + etdr->netdev[j] = NULL;
3401 + etdr->nq[j] = NULL;
3405 + if (of_property_read_bool(np, "qcom,mdio_supported")) {
3406 + mdio_node = of_find_compatible_node(NULL, NULL,
3407 + "qcom,ipq4019-mdio");
3409 + dev_err(&pdev->dev, "cannot find compatible mdio node");
3411 + goto err_mdiobus_init_fail;
3414 + mdio_plat = of_find_device_by_node(mdio_node);
3416 + dev_err(&pdev->dev,
3417 + "cannot find platform device from mdio node");
3418 + of_node_put(mdio_node);
3420 + goto err_mdiobus_init_fail;
3423 + mdio_data = dev_get_drvdata(&mdio_plat->dev);
3425 + dev_err(&pdev->dev,
3426 + "cannot get mii bus reference from device data");
3427 + of_node_put(mdio_node);
3429 + goto err_mdiobus_init_fail;
3432 + miibus = mdio_data->mii_bus;
3435 + for_each_available_child_of_node(np, pnp) {
3436 + const char *mac_addr;
3438 + /* this check is needed if parent and daughter dts have
3439 + * different number of gmac nodes
3441 + if (idx_mac == edma_cinfo->num_gmac) {
3446 + mac_addr = of_get_mac_address(pnp);
3448 + memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
3453 + /* Populate the adapter structure and register the netdevice */
3454 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3457 + adapter[i] = netdev_priv(edma_netdev[i]);
3458 + adapter[i]->netdev = edma_netdev[i];
3459 + adapter[i]->pdev = pdev;
3460 + for (j = 0; j < CONFIG_NR_CPUS; j++) {
3462 + adapter[i]->tx_start_offset[j] =
3463 + ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
3464 + /* Share the queues with available net-devices.
3465 + * For instance, with 5 net-devices
3466 + * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
3467 + * and eth1/eth3 will get the remaining.
3469 + for (k = adapter[i]->tx_start_offset[j]; k <
3470 + (adapter[i]->tx_start_offset[j] + 2); k++) {
3471 + if (edma_fill_netdev(edma_cinfo, k, i, j)) {
3472 + pr_err("Netdev overflow Error\n");
3473 + goto err_register;
3478 + adapter[i]->edma_cinfo = edma_cinfo;
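+ /* Per the sharing scheme above, each netdev gets a pair of
+ * consecutive tx queues per core; even-indexed netdevs take one set
+ * of pairs and odd-indexed netdevs the remaining ones.
+ */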
3479 + edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
3480 + edma_netdev[i]->max_mtu = 9000;
3481 + edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
3482 + | NETIF_F_HW_VLAN_CTAG_TX
3483 + | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
3484 + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
3485 + edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
3486 + NETIF_F_HW_VLAN_CTAG_RX
3487 + | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3489 + edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3490 + NETIF_F_TSO | NETIF_F_TSO6 |
3492 + edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3493 + NETIF_F_TSO | NETIF_F_TSO6 |
3496 +#ifdef CONFIG_RFS_ACCEL
3497 + edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3498 + edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3499 + edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3500 + edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3502 + edma_set_ethtool_ops(edma_netdev[i]);
3504 + /* This just fills in a default random MAC address */
3506 + if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
3507 + random_ether_addr(edma_netdev[i]->dev_addr);
3508 + pr_info("EDMA using random MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
3510 + *(edma_netdev[i]->dev_addr),
3511 + *(edma_netdev[i]->dev_addr + 1),
3512 + *(edma_netdev[i]->dev_addr + 2),
3513 + *(edma_netdev[i]->dev_addr + 3),
3514 + *(edma_netdev[i]->dev_addr + 4),
3515 + *(edma_netdev[i]->dev_addr + 5));
3518 + err = register_netdev(edma_netdev[i]);
3520 + goto err_register;
3522 + /* carrier off reporting is important to
3523 + * ethtool even BEFORE open
3525 + netif_carrier_off(edma_netdev[i]);
3527 + /* Allocate reverse irq cpu mapping structure for the rx queues */
3530 +#ifdef CONFIG_RFS_ACCEL
3531 + edma_netdev[i]->rx_cpu_rmap =
3532 + alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
3533 + if (!edma_netdev[i]->rx_cpu_rmap) {
3535 + goto err_rmap_alloc_fail;
3540 + for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
3541 + edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
3543 + for_each_available_child_of_node(np, pnp) {
3544 + const uint32_t *vlan_tag = NULL;
3547 + /* this check is needed if parent and daughter dts have
3548 + * different number of gmac nodes
3550 + if (idx == edma_cinfo->num_gmac)
3553 + /* Populate port-id to netdev lookup table */
3554 + vlan_tag = of_get_property(pnp, "vlan_tag", &len);
3556 + pr_err("Vlan tag parsing Failed.\n");
3557 + goto err_rmap_alloc_fail;
3560 + adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
3562 + portid_bmp = of_read_number(vlan_tag, 1);
3563 + adapter[idx]->dp_bitmap = portid_bmp;
3565 + portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
3566 + while (portid_bmp) {
3567 + int port_bit = ffs(portid_bmp);
3569 + if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
3570 + goto err_rmap_alloc_fail;
3571 + edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
3573 + portid_bmp &= ~(1 << (port_bit - 1));
3576 + if (!of_property_read_u32(pnp, "qcom,poll_required",
3577 + &adapter[idx]->poll_required)) {
3578 + if (adapter[idx]->poll_required) {
3579 + of_property_read_u32(pnp, "qcom,phy_mdio_addr",
3580 + &adapter[idx]->phy_mdio_addr);
3581 + of_property_read_u32(pnp, "qcom,forced_speed",
3582 + &adapter[idx]->forced_speed);
3583 + of_property_read_u32(pnp, "qcom,forced_duplex",
3584 + &adapter[idx]->forced_duplex);
3586 + /* create a phyid using MDIO bus id
3587 + * and MDIO bus address
3589 + snprintf(adapter[idx]->phy_id,
3590 + MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
3592 + adapter[idx]->phy_mdio_addr);
3595 + adapter[idx]->poll_required = 0;
3596 + adapter[idx]->forced_speed = SPEED_1000;
3597 + adapter[idx]->forced_duplex = DUPLEX_FULL;
3603 + edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
3606 + if (!edma_cinfo->edma_ctl_table_hdr) {
3607 + dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
3608 + goto err_unregister_sysctl_tbl;
3611 + /* Disable all 16 Tx and 8 rx irqs */
3612 + edma_irq_disable(edma_cinfo);
3614 + err = edma_reset(edma_cinfo);
3620 + /* populate per_core_info, do a napi_add, request 16 TX irqs,
3621 + * 8 RX irqs, do a napi enable
3623 + for (i = 0; i < CONFIG_NR_CPUS; i++) {
3626 + edma_cinfo->edma_percpu_info[i].napi.state = 0;
3628 + netif_napi_add(edma_netdev[0],
3629 + &edma_cinfo->edma_percpu_info[i].napi,
3631 + napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
3632 + edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
3633 + edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
3634 + << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
3635 + edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
3636 + edma_cinfo->edma_percpu_info[i].rx_start =
3637 + i << EDMA_RX_CPU_START_SHIFT;
3638 + rx_start = i << EDMA_RX_CPU_START_SHIFT;
3639 + edma_cinfo->edma_percpu_info[i].tx_status = 0;
3640 + edma_cinfo->edma_percpu_info[i].rx_status = 0;
3641 + edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
3643 + /* Request irq per core */
3644 + for (j = edma_cinfo->edma_percpu_info[i].tx_start;
3645 + j < tx_start[i] + 4; j++) {
3646 + sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
3647 + err = request_irq(edma_cinfo->tx_irq[j],
3650 + &edma_tx_irq[j][0],
3651 + &edma_cinfo->edma_percpu_info[i]);
3656 + for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3658 + ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
3660 + sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
3661 + err = request_irq(edma_cinfo->rx_irq[j],
3664 + &edma_rx_irq[j][0],
3665 + &edma_cinfo->edma_percpu_info[i]);
3670 +#ifdef CONFIG_RFS_ACCEL
3671 + for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3672 + j < rx_start + 2; j += 2) {
3673 + err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
3674 + edma_cinfo->rx_irq[j]);
3676 + goto err_rmap_add_fail;
3681 + /* Used to clear interrupt status, allocate rx buffer,
3682 + * configure edma descriptors registers
3684 + err = edma_configure(edma_cinfo);
3687 + goto err_configure;
3690 + /* Configure RSS indirection table.
3691 + * 128 hash entries will be configured in the following
3692 + * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
3695 + for (i = 0; i < EDMA_NUM_IDT; i++)
3696 + edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
3698 + /* Configure load balance mapping table.
3699 + * 4 table entries will be configured according to the
3700 + * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
3703 + edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
3705 + /* Configure Virtual queue for Tx rings
3706 + * User can also change this value runtime through
3709 + edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
3710 + edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
3712 + /* Configure max AXI burst write size to 128 bytes */
3713 + edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
3714 + EDMA_AXIW_MAXWRSIZE_VALUE);
3716 + /* Enable all 16 tx and 8 rx irq masks */
3717 + edma_irq_enable(edma_cinfo);
3718 + edma_enable_tx_ctrl(&edma_cinfo->hw);
3719 + edma_enable_rx_ctrl(&edma_cinfo->hw);
3721 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3722 + if (adapter[i]->poll_required) {
3723 + adapter[i]->phydev =
3724 + phy_connect(edma_netdev[i],
3725 + (const char *)adapter[i]->phy_id,
3726 + &edma_adjust_link,
3727 + PHY_INTERFACE_MODE_SGMII);
3728 + if (IS_ERR(adapter[i]->phydev)) {
3729 + dev_dbg(&pdev->dev, "PHY attach FAIL");
3731 + goto edma_phy_attach_fail;
3733 + adapter[i]->phydev->advertising |=
3734 + ADVERTISED_Pause |
3735 + ADVERTISED_Asym_Pause;
3736 + adapter[i]->phydev->supported |=
3738 + SUPPORTED_Asym_Pause;
3741 + adapter[i]->phydev = NULL;
3745 + spin_lock_init(&edma_cinfo->stats_lock);
3747 + timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
3748 + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
3752 +edma_phy_attach_fail:
3755 +#ifdef CONFIG_RFS_ACCEL
3756 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3757 + free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
3758 + adapter[i]->netdev->rx_cpu_rmap = NULL;
3762 + edma_free_irqs(adapter[0]);
3763 + for (i = 0; i < CONFIG_NR_CPUS; i++)
3764 + napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3766 +err_unregister_sysctl_tbl:
3767 +err_rmap_alloc_fail:
3768 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3769 + unregister_netdev(edma_netdev[i]);
3771 +err_mdiobus_init_fail:
3772 + edma_free_rx_rings(edma_cinfo);
3774 + edma_free_tx_rings(edma_cinfo);
3776 + edma_free_queues(edma_cinfo);
3779 + iounmap(edma_cinfo->hw.hw_addr);
3781 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3782 + if (edma_netdev[i])
3783 + free_netdev(edma_netdev[i]);
3786 + kfree(edma_cinfo);
3791 +/* edma_axi_remove()
3792 + * Device Removal Routine
3794 + * edma_axi_remove is called by the platform subsystem to alert the driver
3795 + * that it should release a platform device.
3797 +static int edma_axi_remove(struct platform_device *pdev)
3799 + struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
3800 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
3801 + struct edma_hw *hw = &edma_cinfo->hw;
3804 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3805 + unregister_netdev(edma_netdev[i]);
3807 + edma_stop_rx_tx(hw);
3808 + for (i = 0; i < CONFIG_NR_CPUS; i++)
3809 + napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3811 + edma_irq_disable(edma_cinfo);
3812 + edma_write_reg(EDMA_REG_RX_ISR, 0xff);
3813 + edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
3814 +#ifdef CONFIG_RFS_ACCEL
3815 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3816 + free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
3817 + edma_netdev[i]->rx_cpu_rmap = NULL;
3821 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3822 + struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
3824 + if (adapter->phydev)
3825 + phy_disconnect(adapter->phydev);
3828 + del_timer_sync(&edma_cinfo->edma_stats_timer);
3829 + edma_free_irqs(adapter);
3830 + unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
3831 + edma_free_tx_resources(edma_cinfo);
3832 + edma_free_rx_resources(edma_cinfo);
3833 + edma_free_tx_rings(edma_cinfo);
3834 + edma_free_rx_rings(edma_cinfo);
3835 + edma_free_queues(edma_cinfo);
3836 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3837 + free_netdev(edma_netdev[i]);
3839 + kfree(edma_cinfo);
3844 +static const struct of_device_id edma_of_mtable[] = {
3845 + {.compatible = "qcom,ess-edma" },
3848 +MODULE_DEVICE_TABLE(of, edma_of_mtable);
3850 +static struct platform_driver edma_axi_driver = {
3852 + .name = edma_axi_driver_name,
3853 + .of_match_table = edma_of_mtable,
3855 + .probe = edma_axi_probe,
3856 + .remove = edma_axi_remove,
3859 +module_platform_driver(edma_axi_driver);
3861 +MODULE_AUTHOR("Qualcomm Atheros Inc");
3862 +MODULE_DESCRIPTION("QCA ESS EDMA driver");
3863 +MODULE_LICENSE("GPL");
3865 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
3868 + * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
3870 + * Permission to use, copy, modify, and/or distribute this software for
3871 + * any purpose with or without fee is hereby granted, provided that the
3872 + * above copyright notice and this permission notice appear in all copies.
3873 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
3874 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
3875 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
3876 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
3877 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
3878 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
3879 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
3882 +#include <linux/ethtool.h>
3883 +#include <linux/netdevice.h>
3884 +#include <linux/string.h>
3887 +struct edma_ethtool_stats {
3888 + uint8_t stat_string[ETH_GSTRING_LEN];
3889 + uint32_t stat_offset;
3892 +#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m)
3893 +#define DRVINFO_LEN 32
3895 +/* Array of strings describing statistics */
3897 +static const struct edma_ethtool_stats edma_gstrings_stats[] = {
3898 + {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
3899 + {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
3900 + {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
3901 + {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
3902 + {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
3903 + {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
3904 + {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
3905 + {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
3906 + {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
3907 + {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
3908 + {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
3909 + {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
3910 + {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
3911 + {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
3912 + {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
3913 + {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
3914 + {"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
3915 + {"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
3916 + {"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
3917 + {"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
3918 + {"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
3919 + {"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
3920 + {"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
3921 + {"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
3922 + {"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
3923 + {"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
3924 + {"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
3925 + {"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
3926 + {"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
3927 + {"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
3928 + {"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
3929 + {"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
3930 + {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
3931 + {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
3932 + {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
3933 + {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
3934 + {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
3935 + {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
3936 + {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
3937 + {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
3938 + {"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
3939 + {"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
3940 + {"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
3941 + {"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
3942 + {"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
3943 + {"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
3944 + {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
3945 + {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
3946 + {"tx_desc_error", EDMA_STAT(tx_desc_error)},
3949 +#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
+/* edma_get_strset_count()
+ *	Get string set count
+ */
+static int edma_get_strset_count(struct net_device *netdev,
+				 int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return EDMA_STATS_LEN;
+	default:
+		netdev_dbg(netdev, "%s: Invalid string set", __func__);
+		return -EOPNOTSUPP;
+	}
+}
+
+/* edma_get_strings()
+ *	get stats strings
+ */
+static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
+			     uint8_t *data)
+{
+	uint8_t *p = data;
+	uint32_t i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < EDMA_STATS_LEN; i++) {
+			memcpy(p, edma_gstrings_stats[i].stat_string,
+			       min((size_t)ETH_GSTRING_LEN,
+			       strlen(edma_gstrings_stats[i].stat_string)
+			       + 1));
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+/* edma_get_ethtool_stats()
+ *	Get ethtool statistics
+ */
+static void edma_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, uint64_t *data)
+{
+	struct edma_adapter *adapter = netdev_priv(netdev);
+	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+	int i;
+	uint8_t *p = NULL;
+
+	edma_read_append_stats(edma_cinfo);
+
+	for (i = 0; i < EDMA_STATS_LEN; i++) {
+		p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
+		    edma_gstrings_stats[i].stat_offset;
+		data[i] = *(uint32_t *)p;
+	}
+}
+
+/* edma_get_drvinfo()
+ *	get edma driver info
+ */
+static void edma_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
+	strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
+}
+
+/* edma_nway_reset()
+ *	Reset the PHY, if available.
+ */
+static int edma_nway_reset(struct net_device *netdev)
+{
+	return -EINVAL;
+}
+
+/* edma_get_wol()
+ *	get wake on lan info
+ */
+static void edma_get_wol(struct net_device *netdev,
+			 struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+}
+
+/* edma_get_msglevel()
+ *	get message level.
+ */
+static uint32_t edma_get_msglevel(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* edma_get_settings()
+ *	Get edma settings
+ */
+static int edma_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct edma_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->poll_required) {
+		struct phy_device *phydev = NULL;
+		uint16_t phyreg;
+
+		if ((adapter->forced_speed != SPEED_UNKNOWN)
+		    && !(adapter->poll_required))
+			return -EPERM;
+
+		phydev = adapter->phydev;
+
+		ecmd->advertising = phydev->advertising;
+		ecmd->autoneg = phydev->autoneg;
+
+		if (adapter->link_state == __EDMA_LINKDOWN) {
+			ecmd->speed = SPEED_UNKNOWN;
+			ecmd->duplex = DUPLEX_UNKNOWN;
+		} else {
+			ecmd->speed = phydev->speed;
+			ecmd->duplex = phydev->duplex;
+		}
+
+		ecmd->phy_address = adapter->phy_mdio_addr;
+
+		phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
+		if (phyreg & LPA_10HALF)
+			ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
+
+		if (phyreg & LPA_10FULL)
+			ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
+
+		if (phyreg & LPA_100HALF)
+			ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
+
+		if (phyreg & LPA_100FULL)
+			ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
+
+		phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
+		if (phyreg & LPA_1000HALF)
+			ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+
+		if (phyreg & LPA_1000FULL)
+			ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+	} else {
+		/* If the speed/duplex for this GMAC is forced and we
+		 * are not polling for link state changes, return the
+		 * values specified by the platform. This is the case
+		 * for GMACs connected to a switch, and for interfaces
+		 * that do not use a PHY.
+		 */
+		if (!(adapter->poll_required)) {
+			if (adapter->forced_speed != SPEED_UNKNOWN) {
+				/* set speed and duplex */
+				ethtool_cmd_speed_set(ecmd, SPEED_1000);
+				ecmd->duplex = DUPLEX_FULL;
+
+				/* Populate capabilities advertised by self */
+				ecmd->advertising = 0;
+				ecmd->autoneg = 0;
+				ecmd->port = PORT_TP;
+				ecmd->transceiver = XCVR_EXTERNAL;
+			} else {
+				/* neither link-polled nor forced
+				 * speed/duplex interface
+				 */
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* edma_set_settings()
+ *	Set EDMA settings
+ */
+static int edma_set_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct edma_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev = NULL;
+
+	if ((adapter->forced_speed != SPEED_UNKNOWN) &&
+	    !adapter->poll_required)
+		return -EPERM;
+
+	phydev = adapter->phydev;
+	phydev->advertising = ecmd->advertising;
+	phydev->autoneg = ecmd->autoneg;
+	phydev->speed = ethtool_cmd_speed(ecmd);
+	phydev->duplex = ecmd->duplex;
+
+	genphy_config_aneg(phydev);
+
+	return 0;
+}
+
+/* edma_get_coalesce()
+ *	get interrupt mitigation
+ */
+static int edma_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	u32 reg_val;
+
+	edma_get_tx_rx_coalesce(&reg_val);
+
+	/* We read the Interrupt Moderation Timer (IMT) register value;
+	 * the lower 16 bits hold the Rx timer and the upper 16 bits the
+	 * Tx timer. The IMT resolution is 2 usecs, so the register value
+	 * is left-shifted by 1 (multiplied by 2) to get the actual time
+	 * in usecs.
+	 */
+	ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
+	ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
+
+	return 0;
+}
+
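+/* Worked example for edma_get_coalesce() (illustrative register value):
+ * if the IMT register reads 0x00320064, then
+ * tx_coalesce_usecs = (0x0032 << 1) = 100 usecs and
+ * rx_coalesce_usecs = (0x0064 << 1) = 200 usecs.
+ */
+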
+/* edma_set_coalesce()
+ *	set interrupt mitigation
+ */
+static int edma_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	if (ec->tx_coalesce_usecs)
+		edma_change_tx_coalesce(ec->tx_coalesce_usecs);
+	if (ec->rx_coalesce_usecs)
+		edma_change_rx_coalesce(ec->rx_coalesce_usecs);
+
+	return 0;
+}
+
+/* edma_set_priv_flags()
+ *	Set EDMA private flags
+ */
+static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	return 0;
+}
+
+/* edma_get_priv_flags()
+ *	get edma driver flags
+ */
+static u32 edma_get_priv_flags(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* edma_get_ringparam()
+ *	get ring size
+ */
+static void edma_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct edma_adapter *adapter = netdev_priv(netdev);
+	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+
+	ring->tx_max_pending = edma_cinfo->tx_ring_count;
+	ring->rx_max_pending = edma_cinfo->rx_ring_count;
+}
+
+/* Ethtool operations
+ */
+static const struct ethtool_ops edma_ethtool_ops = {
+	.get_drvinfo = &edma_get_drvinfo,
+	.get_link = &ethtool_op_get_link,
+	.get_msglevel = &edma_get_msglevel,
+	.nway_reset = &edma_nway_reset,
+	.get_wol = &edma_get_wol,
+	.get_settings = &edma_get_settings,
+	.set_settings = &edma_set_settings,
+	.get_strings = &edma_get_strings,
+	.get_sset_count = &edma_get_strset_count,
+	.get_ethtool_stats = &edma_get_ethtool_stats,
+	.get_coalesce = &edma_get_coalesce,
+	.set_coalesce = &edma_set_coalesce,
+	.get_priv_flags = edma_get_priv_flags,
+	.set_priv_flags = edma_set_priv_flags,
+	.get_ringparam = edma_get_ringparam,
+};
+
+/* edma_set_ethtool_ops()
+ *	Set ethtool operations
+ */
+void edma_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &edma_ethtool_ops;
+}
+++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ESS_EDMA_H_
+#define _ESS_EDMA_H_
+
+#include <linux/types.h>
+
+struct edma_adapter;
+struct edma_hw;
+
+/* register definition */
+#define EDMA_REG_MAS_CTRL 0x0
+#define EDMA_REG_TIMEOUT_CTRL 0x004
+#define EDMA_REG_DBG0 0x008
+#define EDMA_REG_DBG1 0x00C
+#define EDMA_REG_SW_CTRL0 0x100
+#define EDMA_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define EDMA_REG_RX_ISR 0x200
+#define EDMA_REG_TX_ISR 0x208
+#define EDMA_REG_MISC_ISR 0x210
+#define EDMA_REG_WOL_ISR 0x218
+
+#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x)
+
+#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
+#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
+#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
+#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define EDMA_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define EDMA_REG_MISC_IMR 0x214
+#define EDMA_REG_WOL_IMR 0x218
+
+#define EDMA_RX_IMR_NORMAL_MASK 0x1
+#define EDMA_TX_IMR_NORMAL_MASK 0x1
+#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define EDMA_WOL_IMR_NORMAL_MASK 0x1
+
+/* Edma receive consumer index */
+#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
+/* Edma transmit consumer index */
+#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
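+
+/* For instance (illustrative): EDMA_REG_RX_SW_CONS_IDX_Q(3) expands to
+ * 0x220 + (3 << 2) = 0x22C, i.e. each queue's consumer index register
+ * sits at a 4-byte stride from the base.
+ */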
+
+/* IRQ Moderator Initial Timer Register */
+#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define EDMA_REG_INTR_CTRL 0x284
+#define EDMA_INTR_CLR_TYP_SHIFT 0
+#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
+#define EDMA_INTR_CLEAR_TYPE_W1 0
+#define EDMA_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ * pointers.
+ */
+#define EDMA_REG_TX_SRAM_PART 0x400
+#define EDMA_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define EDMA_REG_TXQ_CTRL 0x404
+#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
+#define EDMA_TXQ_CTRL_TXQ_EN 0x20
+#define EDMA_TXQ_CTRL_ENH_MODE 0x40
+#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
+#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
+#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
+#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
+#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define EDMA_REG_TXF_WATER_MARK 0x408 /* in 8-byte units */
+#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
+#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
+#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
+#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
+#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
+#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
+#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weighted round robin (WRR): takes the queue id as input and computes
+ * the starting bit position at which the weight for that queue is
+ * written within its WRR control register.
+ */
+#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
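+
+/* For example (illustrative): queue 6 belongs to EDMA_REG_WRR_CTRL_Q4_Q7,
+ * and EDMA_WRR_SHIFT(6) = (6 * 5) % 20 = 10, so its 5-bit weight occupies
+ * bits 10-14 of that register.
+ */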
+
+/* Tx Descriptor Control Register */
+#define EDMA_REG_TPD_RING_SIZE 0x41C
+#define EDMA_TPD_RING_SIZE_SHIFT 0
+#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
+#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
+#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
+#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
+#define EDMA_TPD_PROD_IDX_SHIFT 0
+#define EDMA_TPD_CONS_IDX_SHIFT 16
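+
+/* A sketch of how these fields are typically unpacked (illustrative,
+ * assuming reg_val was read from EDMA_REG_TPD_IDX_Q(queue)):
+ *
+ *	prod_idx = (reg_val & EDMA_TPD_PROD_IDX_BITS) >> EDMA_TPD_PROD_IDX_SHIFT;
+ *	cons_idx = (reg_val & EDMA_TPD_CONS_IDX_BITS) >> EDMA_TPD_CONS_IDX_SHIFT;
+ */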
+
+/* TX Virtual Queue Mapping Control Register */
+#define EDMA_REG_VQ_CTRL0 0x4A0
+#define EDMA_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift: takes the queue id as input and computes the
+ * virtual QID position in the virtual QID control register.
+ */
+#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
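+
+/* For example (illustrative): each register carries eight 3-bit virtual
+ * QIDs, so queue 9 maps to EDMA_REG_VQ_CTRL1 with
+ * EDMA_VQ_ID_SHIFT(9) = (9 * 3) % 24 = 3, i.e. bits 3-5.
+ */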
+
+/* Virtual Queue Default Value */
+#define EDMA_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define EDMA_REG_PORT_CTRL 0x4A8
+#define EDMA_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define EDMA_REG_VLAN_CFG 0x4AC
+
+#define EDMA_TX_CVLAN 16
+#define EDMA_TX_INS_CVLAN 17
+#define EDMA_TX_CVLAN_TAG_SHIFT 0
+
+#define EDMA_TX_SVLAN 14
+#define EDMA_TX_INS_SVLAN 15
+#define EDMA_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define EDMA_REG_LB_RING 0x800
+#define EDMA_LB_RING_ENTRY_MASK 0xff
+#define EDMA_LB_RING_ID_MASK 0x7
+#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
+#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
+#define EDMA_LB_RING_ID_OFFSET 0
+#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
+#define EDMA_LB_REG_VALUE 0x6040200
+
+/* Load Balance Priority Mapping Register */
+#define EDMA_REG_LB_PRI_START 0x804
+#define EDMA_REG_LB_PRI_END 0x810
+#define EDMA_LB_PRI_REG_INC 4
+#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
+#define EDMA_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define EDMA_REG_RSS_PRI 0x820
+#define EDMA_RSS_PRI_ENTRY_MASK 0xf
+#define EDMA_RSS_RING_ID_MASK 0x7
+#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
+#define EDMA_NUM_IDT 16
+#define EDMA_RSS_IDT_VALUE 0x64206420
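+
+/* Presumably each 32-bit IDT register packs eight 4-bit ring ids
+ * (16 registers x 8 entries = 128 table slots); the default value
+ * 0x64206420 repeats the ring sequence 0, 2, 4, 6, spreading RSS
+ * flows across four rings.
+ */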
+
+/* Default RSS Ring Register */
+#define EDMA_REG_DEF_RSS 0x890
+#define EDMA_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define EDMA_REG_RSS_TYPE 0x894
+#define EDMA_RSS_TYPE_NONE 0x01
+#define EDMA_RSS_TYPE_IPV4TCP 0x02
+#define EDMA_RSS_TYPE_IPV6_TCP 0x04
+#define EDMA_RSS_TYPE_IPV4_UDP 0x08
+#define EDMA_RSS_TYPE_IPV6UDP 0x10
+#define EDMA_RSS_TYPE_IPV4 0x20
+#define EDMA_RSS_TYPE_IPV6 0x40
+#define EDMA_RSS_HASH_MODE_MASK 0x7f
+
+#define EDMA_REG_RSS_HASH_VALUE 0x8C0
+
+#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
+
+#define EDMA_HASH_TYPE_START 0
+#define EDMA_HASH_TYPE_END 5
+#define EDMA_HASH_TYPE_SHIFT 12
+
+#define EDMA_RFS_FLOW_ENTRIES 1024
+#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
+#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
+
+/* RFD Base Address Register */
+#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
+
+/* RFD Index Register */
+#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
+
+#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
+#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
+#define EDMA_RFD_PROD_IDX_MASK 0xFFF
+#define EDMA_RFD_CONS_IDX_MASK 0xFFF
+#define EDMA_RFD_PROD_IDX_SHIFT 0
+#define EDMA_RFD_CONS_IDX_SHIFT 16
+
+/* Rx Descriptor Control Register */
+#define EDMA_REG_RX_DESC0 0xA10
+#define EDMA_RFD_RING_SIZE_MASK 0xFFF
+#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
+#define EDMA_RFD_RING_SIZE_SHIFT 0
+#define EDMA_RX_BUF_SIZE_SHIFT 16
+
+#define EDMA_REG_RX_DESC1 0xA14
+#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
+#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
+#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define EDMA_REG_RXQ_CTRL 0xA18
+#define EDMA_FIFO_THRESH_TYPE_SHIF 0
+#define EDMA_FIFO_THRESH_128_BYTE 0x0
+#define EDMA_FIFO_THRESH_64_BYTE 0x1
+#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
+#define EDMA_RXQ_CTRL_EN 0x0000FF00
+
+/* AXI Burst Size Config */
+#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
+#define EDMA_WOL_PT_LEN_MASK 0xFF
+#define EDMA_WOL_PT0_LEN_SHIFT 0
+#define EDMA_WOL_PT1_LEN_SHIFT 8
+#define EDMA_WOL_PT2_LEN_SHIFT 16
+#define EDMA_WOL_PT3_LEN_SHIFT 24
+
+#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
+#define EDMA_WOL_PT4_LEN_SHIFT 0
+#define EDMA_WOL_PT5_LEN_SHIFT 8
+#define EDMA_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define EDMA_REG_WOL_CTRL 0xC08
+#define EDMA_WOL_WK_EN 0x00000001
+#define EDMA_WOL_MG_EN 0x00000002
+#define EDMA_WOL_PT0_EN 0x00000004
+#define EDMA_WOL_PT1_EN 0x00000008
+#define EDMA_WOL_PT2_EN 0x00000010
+#define EDMA_WOL_PT3_EN 0x00000020
+#define EDMA_WOL_PT4_EN 0x00000040
+#define EDMA_WOL_PT5_EN 0x00000080
+#define EDMA_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define EDMA_REG_MAC_CTRL0 0xC20
+#define EDMA_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define EDMA_REG_WOL_PATTERN_START 0x5000
+#define EDMA_PATTERN_PART_REG_OFFSET 0x40
+
+/* TX descriptor fields */
+#define EDMA_TPD_HDR_SHIFT 0
+#define EDMA_TPD_PPPOE_EN 0x00000100
+#define EDMA_TPD_IP_CSUM_EN 0x00000200
+#define EDMA_TPD_TCP_CSUM_EN 0x00000400
+#define EDMA_TPD_UDP_CSUM_EN 0x00000800
+#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define EDMA_TPD_LSO_EN 0x00001000
+#define EDMA_TPD_LSO_V2_EN 0x00002000
+#define EDMA_TPD_IPV4_EN 0x00010000
+#define EDMA_TPD_MSS_MASK 0x1FFF
+#define EDMA_TPD_MSS_SHIFT 18
+#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
+
+/* RRD descriptor fields */
+#define EDMA_RRD_NUM_RFD_MASK 0x000F
+#define EDMA_RRD_SVLAN 0x8000
+#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
+
+#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
+#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
+#define EDMA_RRD_CVLAN 0x0001
+#define EDMA_RRD_DESC_VALID 0x8000
+
+#define EDMA_RRD_PRIORITY_SHIFT 4
+#define EDMA_RRD_PRIORITY_MASK 0x7
+#define EDMA_RRD_PORT_TYPE_SHIFT 7
+#define EDMA_RRD_PORT_TYPE_MASK 0x1F
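+
+/* A sketch of how these RRD fields are typically decoded (illustrative,
+ * assuming rrd holds the relevant 16-bit word of the Rx return descriptor):
+ *
+ *	prio = (rrd >> EDMA_RRD_PRIORITY_SHIFT) & EDMA_RRD_PRIORITY_MASK;
+ *	port = (rrd >> EDMA_RRD_PORT_TYPE_SHIFT) & EDMA_RRD_PORT_TYPE_MASK;
+ */
+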
+#endif /* _ESS_EDMA_H_ */