ipq40xx: add target
[openwrt/staging/mkresin.git] / target / linux / ipq40xx / patches-4.14 / 710-net-add-qualcomm-essedma-ethernet-driver.patch
1 From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
2 From: Christian Lamparter <chunkeey@googlemail.com>
3 Date: Thu, 19 Jan 2017 02:01:31 +0100
4 Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver
5
6 Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
7 ---
8 drivers/net/ethernet/qualcomm/Kconfig | 9 +++++++++
9 drivers/net/ethernet/qualcomm/Makefile | 1 +
10 2 files changed, 10 insertions(+)
11
12 --- a/drivers/net/ethernet/qualcomm/Kconfig
13 +++ b/drivers/net/ethernet/qualcomm/Kconfig
14 @@ -61,4 +61,13 @@ config QCOM_EMAC
15
16 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
17
18 +config ESSEDMA
19 + tristate "Qualcomm Atheros ESS Edma support"
20 + ---help---
21 +	  This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
22 + Say Y to build this driver.
23 +
24 + To compile this driver as a module, choose M here. The module
25 + will be called essedma.ko.
26 +
27 endif # NET_VENDOR_QUALCOMM
28 --- a/drivers/net/ethernet/qualcomm/Makefile
29 +++ b/drivers/net/ethernet/qualcomm/Makefile
30 @@ -10,5 +10,6 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o
31 qcauart-objs := qca_uart.o
32
33 obj-y += emac/
34 +obj-$(CONFIG_ESSEDMA) += essedma/
35
36 obj-$(CONFIG_RMNET) += rmnet/
37 --- /dev/null
38 +++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
39 @@ -0,0 +1,9 @@
40 +#
41 +# Makefile for the Qualcomm Atheros ethernet edma driver
42 +#
43 +
44 +
45 +obj-$(CONFIG_ESSEDMA) += essedma.o
46 +
47 +essedma-objs := edma_axi.o edma.o edma_ethtool.o
48 +
49 --- /dev/null
50 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
51 @@ -0,0 +1,2143 @@
52 +/*
53 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
54 + *
55 + * Permission to use, copy, modify, and/or distribute this software for
56 + * any purpose with or without fee is hereby granted, provided that the
57 + * above copyright notice and this permission notice appear in all copies.
58 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
59 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
60 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
61 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
62 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
63 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
64 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
65 + */
66 +
67 +#include <linux/platform_device.h>
68 +#include <linux/if_vlan.h>
69 +#include "ess_edma.h"
70 +#include "edma.h"
71 +
72 +extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
73 +bool edma_stp_rstp;
74 +u16 edma_ath_eth_type;
75 +
76 +/* edma_skb_priority_offset()
77 + * get edma skb priority
78 + */
79 +static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
80 +{
81 + return (skb->priority >> 2) & 1;
82 +}
83 +
84 +/* edma_alloc_tx_ring()
85 + * Allocate Tx descriptors ring
86 + */
87 +static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
88 + struct edma_tx_desc_ring *etdr)
89 +{
90 + struct platform_device *pdev = edma_cinfo->pdev;
91 +
92 + /* Initialize ring */
93 + etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
94 + etdr->sw_next_to_fill = 0;
95 + etdr->sw_next_to_clean = 0;
96 +
97 + /* Allocate SW descriptors */
98 + etdr->sw_desc = vzalloc(etdr->size);
99 + if (!etdr->sw_desc) {
100 + dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
101 + return -ENOMEM;
102 + }
103 +
104 + /* Allocate HW descriptors */
105 + etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
106 + GFP_KERNEL);
107 + if (!etdr->hw_desc) {
108 + dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
109 + vfree(etdr->sw_desc);
110 + return -ENOMEM;
111 + }
112 +
113 + return 0;
114 +}
115 +
116 +/* edma_free_tx_ring()
117 + * Free a tx ring allocated by edma_alloc_tx_ring
118 + */
119 +static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
120 + struct edma_tx_desc_ring *etdr)
121 +{
122 + struct platform_device *pdev = edma_cinfo->pdev;
123 +
124 + if (likely(etdr->dma))
125 + dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
126 + etdr->dma);
127 +
128 + vfree(etdr->sw_desc);
129 + etdr->sw_desc = NULL;
130 +}
131 +
132 +/* edma_alloc_rx_ring()
133 + * allocate rx descriptor ring
134 + */
135 +static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
136 + struct edma_rfd_desc_ring *erxd)
137 +{
138 + struct platform_device *pdev = edma_cinfo->pdev;
139 +
140 + erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
141 + erxd->sw_next_to_fill = 0;
142 + erxd->sw_next_to_clean = 0;
143 +
144 + /* Allocate SW descriptors */
145 + erxd->sw_desc = vzalloc(erxd->size);
146 + if (!erxd->sw_desc)
147 + return -ENOMEM;
148 +
149 + /* Alloc HW descriptors */
150 + erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
151 + GFP_KERNEL);
152 + if (!erxd->hw_desc) {
153 + vfree(erxd->sw_desc);
154 + return -ENOMEM;
155 + }
156 +
157 + return 0;
158 +}
159 +
160 +/* edma_free_rx_ring()
161 + * Free rx ring allocated by edma_alloc_rx_ring
162 + */
163 +static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
164 + struct edma_rfd_desc_ring *rxdr)
165 +{
166 + struct platform_device *pdev = edma_cinfo->pdev;
167 +
168 + if (likely(rxdr->dma))
169 + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
170 + rxdr->dma);
171 +
172 + vfree(rxdr->sw_desc);
173 + rxdr->sw_desc = NULL;
174 +}
175 +
176 +/* edma_configure_tx()
177 + * Configure transmission control data
178 + */
179 +static void edma_configure_tx(struct edma_common_info *edma_cinfo)
180 +{
181 + u32 txq_ctrl_data;
182 +
183 + txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
184 + txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
185 + txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
186 + edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
187 +}
188 +
189 +
190 +/* edma_configure_rx()
191 + * configure reception control data
192 + */
193 +static void edma_configure_rx(struct edma_common_info *edma_cinfo)
194 +{
195 + struct edma_hw *hw = &edma_cinfo->hw;
196 + u32 rss_type, rx_desc1, rxq_ctrl_data;
197 +
198 + /* Set RSS type */
199 + rss_type = hw->rss_type;
200 + edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
201 +
202 + /* Set RFD burst number */
203 + rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
204 +
205 + /* Set RFD prefetch threshold */
206 + rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
207 +
208 +	/* Set RFD in host ring low threshold to generate interrupt */
209 + rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
210 + edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
211 +
212 + /* Set Rx FIFO threshold to start to DMA data to host */
213 + rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
214 +
215 + /* Set RX remove vlan bit */
216 + rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
217 +
218 + edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
219 +}
220 +
221 +/* edma_alloc_rx_buf()
222 + * does skb allocation for the received packets.
223 + */
224 +static int edma_alloc_rx_buf(struct edma_common_info
225 + *edma_cinfo,
226 + struct edma_rfd_desc_ring *erdr,
227 + int cleaned_count, int queue_id)
228 +{
229 + struct platform_device *pdev = edma_cinfo->pdev;
230 + struct edma_rx_free_desc *rx_desc;
231 + struct edma_sw_desc *sw_desc;
232 + struct sk_buff *skb;
233 + unsigned int i;
234 + u16 prod_idx, length;
235 + u32 reg_data;
236 +
237 + if (cleaned_count > erdr->count) {
238 + dev_err(&pdev->dev, "Incorrect cleaned_count %d",
239 + cleaned_count);
240 + return -1;
241 + }
242 +
243 + i = erdr->sw_next_to_fill;
244 +
245 + while (cleaned_count) {
246 + sw_desc = &erdr->sw_desc[i];
247 + length = edma_cinfo->rx_head_buffer_len;
248 +
249 + if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
250 + skb = sw_desc->skb;
251 + } else {
252 + /* alloc skb */
253 + skb = netdev_alloc_skb(edma_netdev[0], length);
254 + if (!skb) {
255 + /* Better luck next round */
256 + break;
257 + }
258 + }
259 +
260 + if (edma_cinfo->page_mode) {
261 + struct page *pg = alloc_page(GFP_ATOMIC);
262 +
263 + if (!pg) {
264 + dev_kfree_skb_any(skb);
265 + break;
266 + }
267 +
268 + sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
269 + edma_cinfo->rx_page_buffer_len,
270 + DMA_FROM_DEVICE);
271 + if (dma_mapping_error(&pdev->dev,
272 + sw_desc->dma)) {
273 + __free_page(pg);
274 + dev_kfree_skb_any(skb);
275 + break;
276 + }
277 +
278 + skb_fill_page_desc(skb, 0, pg, 0,
279 + edma_cinfo->rx_page_buffer_len);
280 + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
281 + sw_desc->length = edma_cinfo->rx_page_buffer_len;
282 + } else {
283 + sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
284 + length, DMA_FROM_DEVICE);
285 + if (dma_mapping_error(&pdev->dev,
286 + sw_desc->dma)) {
287 + dev_kfree_skb_any(skb);
288 + break;
289 + }
290 +
291 + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
292 + sw_desc->length = length;
293 + }
294 +
295 + /* Update the buffer info */
296 + sw_desc->skb = skb;
297 + rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
298 + rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
299 + if (++i == erdr->count)
300 + i = 0;
301 + cleaned_count--;
302 + }
303 +
304 + erdr->sw_next_to_fill = i;
305 +
306 + if (i == 0)
307 + prod_idx = erdr->count - 1;
308 + else
309 + prod_idx = i - 1;
310 +
311 + /* Update the producer index */
312 + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
313 + reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
314 + reg_data |= prod_idx;
315 + edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
316 + return cleaned_count;
317 +}
318 +
319 +/* edma_init_desc()
320 + * update descriptor ring size, buffer and producer/consumer index
321 + */
322 +static void edma_init_desc(struct edma_common_info *edma_cinfo)
323 +{
324 + struct edma_rfd_desc_ring *rfd_ring;
325 + struct edma_tx_desc_ring *etdr;
326 + int i = 0, j = 0;
327 + u32 data = 0;
328 + u16 hw_cons_idx = 0;
329 +
330 + /* Set the base address of every TPD ring. */
331 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
332 + etdr = edma_cinfo->tpd_ring[i];
333 +
334 + /* Update descriptor ring base address */
335 + edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
336 + edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
337 +
338 + /* Calculate hardware consumer index */
339 + hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
340 + etdr->sw_next_to_fill = hw_cons_idx;
341 + etdr->sw_next_to_clean = hw_cons_idx;
342 + data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
343 + data |= hw_cons_idx;
344 +
345 + /* update producer index */
346 + edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
347 +
348 + /* update SW consumer index register */
349 + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
350 +
351 + /* Set TPD ring size */
352 + edma_write_reg(EDMA_REG_TPD_RING_SIZE,
353 + edma_cinfo->tx_ring_count &
354 + EDMA_TPD_RING_SIZE_MASK);
355 + }
356 +
357 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
358 + rfd_ring = edma_cinfo->rfd_ring[j];
359 + /* Update Receive Free descriptor ring base address */
360 + edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
361 + (u32)(rfd_ring->dma));
362 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
363 + }
364 +
365 + data = edma_cinfo->rx_head_buffer_len;
366 + if (edma_cinfo->page_mode)
367 + data = edma_cinfo->rx_page_buffer_len;
368 +
369 + data &= EDMA_RX_BUF_SIZE_MASK;
370 + data <<= EDMA_RX_BUF_SIZE_SHIFT;
371 +
372 + /* Update RFD ring size and RX buffer size */
373 + data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
374 + << EDMA_RFD_RING_SIZE_SHIFT;
375 +
376 + edma_write_reg(EDMA_REG_RX_DESC0, data);
377 +
378 + /* Disable TX FIFO low watermark and high watermark */
379 + edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
380 +
381 +	/* Load all of the base addresses above */
382 + edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
383 + data |= 1 << EDMA_LOAD_PTR_SHIFT;
384 + edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
385 +}
386 +
387 +/* edma_receive_checksum()
388 + * API to check the checksum of received packets
389 + */
390 +static void edma_receive_checksum(struct edma_rx_return_desc *rd,
391 + struct sk_buff *skb)
392 +{
393 + skb_checksum_none_assert(skb);
394 +
395 + /* check the RRD IP/L4 checksum bit to see if
396 +	 * it is set, which in turn indicates checksum
397 + * failure.
398 + */
399 + if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
400 + return;
401 +
402 + skb->ip_summed = CHECKSUM_UNNECESSARY;
403 +}
404 +
405 +/* edma_clean_rfd()
406 + * clean up rx resources on error
407 + */
408 +static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
409 +{
410 + struct edma_rx_free_desc *rx_desc;
411 + struct edma_sw_desc *sw_desc;
412 +
413 + rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
414 + sw_desc = &erdr->sw_desc[index];
415 + if (sw_desc->skb) {
416 + dev_kfree_skb_any(sw_desc->skb);
417 + sw_desc->skb = NULL;
418 + }
419 +
420 + memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
421 +}
422 +
423 +/* edma_rx_complete_stp_rstp()
424 + * Complete Rx processing for stp/rstp packets
425 + */
426 +static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
427 +{
428 + int i;
429 + u32 priority;
430 + u16 port_type;
431 + u8 mac_addr[EDMA_ETH_HDR_LEN];
432 +
433 + port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
434 + & EDMA_RRD_PORT_TYPE_MASK;
435 +	/* proceed with the stp/rstp calculation only if
436 +	 * the port type is 0x4
437 + */
438 + if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
439 + u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
440 +
441 + /* calculate the frame priority */
442 + priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
443 + & EDMA_RRD_PRIORITY_MASK;
444 +
445 + for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
446 + mac_addr[i] = skb->data[i];
447 +
448 + /* Check if destination mac addr is bpdu addr */
449 + if (!memcmp(mac_addr, bpdu_mac, 6)) {
450 +			/* the destination mac address is the BPDU
451 +			 * address, so add the atheros header
452 +			 * to the packet.
453 + */
454 + u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
455 + (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
456 + (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
457 + skb_push(skb, 4);
458 + memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
459 + *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
460 + *(uint16_t *)&skb->data[14] = htons(athr_hdr);
461 + }
462 + }
463 +}
464 +
465 +/*
466 + * edma_rx_complete_fraglist()
467 + * Complete Rx processing for fraglist skbs
468 + */
469 +static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
470 + u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
471 +{
472 + struct platform_device *pdev = edma_cinfo->pdev;
473 + struct edma_hw *hw = &edma_cinfo->hw;
474 + struct sk_buff *skb_temp;
475 + struct edma_sw_desc *sw_desc;
476 + int i;
477 + u16 size_remaining;
478 +
479 + skb->data_len = 0;
480 + skb->tail += (hw->rx_head_buff_size - 16);
481 + skb->len = skb->truesize = length;
482 + size_remaining = length - (hw->rx_head_buff_size - 16);
483 +
484 + /* clean-up all related sw_descs */
485 + for (i = 1; i < num_rfds; i++) {
486 + struct sk_buff *skb_prev;
487 + sw_desc = &erdr->sw_desc[sw_next_to_clean];
488 + skb_temp = sw_desc->skb;
489 +
490 + dma_unmap_single(&pdev->dev, sw_desc->dma,
491 + sw_desc->length, DMA_FROM_DEVICE);
492 +
493 + if (size_remaining < hw->rx_head_buff_size)
494 + skb_put(skb_temp, size_remaining);
495 + else
496 + skb_put(skb_temp, hw->rx_head_buff_size);
497 +
498 + /*
499 + * If we are processing the first rfd, we link
500 + * skb->frag_list to the skb corresponding to the
501 + * first RFD
502 + */
503 + if (i == 1)
504 + skb_shinfo(skb)->frag_list = skb_temp;
505 + else
506 + skb_prev->next = skb_temp;
507 + skb_prev = skb_temp;
508 + skb_temp->next = NULL;
509 +
510 + skb->data_len += skb_temp->len;
511 + size_remaining -= skb_temp->len;
512 +
513 + /* Increment SW index */
514 + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
515 + (*cleaned_count)++;
516 + }
517 +
518 + return sw_next_to_clean;
519 +}
520 +
521 +/* edma_rx_complete_paged()
522 + * Complete Rx processing for paged skbs
523 + */
524 +static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
525 + u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
526 +{
527 + struct platform_device *pdev = edma_cinfo->pdev;
528 + struct sk_buff *skb_temp;
529 + struct edma_sw_desc *sw_desc;
530 + int i;
531 + u16 size_remaining;
532 +
533 + skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
534 +
535 + /* Setup skbuff fields */
536 + skb->len = length;
537 +
538 + if (likely(num_rfds <= 1)) {
539 + skb->data_len = length;
540 + skb->truesize += edma_cinfo->rx_page_buffer_len;
541 + skb_fill_page_desc(skb, 0, skb_frag_page(frag),
542 + 16, length);
543 + } else {
544 + frag->size -= 16;
545 + skb->data_len = frag->size;
546 + skb->truesize += edma_cinfo->rx_page_buffer_len;
547 + size_remaining = length - frag->size;
548 +
549 + skb_fill_page_desc(skb, 0, skb_frag_page(frag),
550 + 16, frag->size);
551 +
552 + /* clean-up all related sw_descs */
553 + for (i = 1; i < num_rfds; i++) {
554 + sw_desc = &erdr->sw_desc[sw_next_to_clean];
555 + skb_temp = sw_desc->skb;
556 + frag = &skb_shinfo(skb_temp)->frags[0];
557 + dma_unmap_page(&pdev->dev, sw_desc->dma,
558 + sw_desc->length, DMA_FROM_DEVICE);
559 +
560 + if (size_remaining < edma_cinfo->rx_page_buffer_len)
561 + frag->size = size_remaining;
562 +
563 + skb_fill_page_desc(skb, i, skb_frag_page(frag),
564 + 0, frag->size);
565 +
566 + skb_shinfo(skb_temp)->nr_frags = 0;
567 + dev_kfree_skb_any(skb_temp);
568 +
569 + skb->data_len += frag->size;
570 + skb->truesize += edma_cinfo->rx_page_buffer_len;
571 + size_remaining -= frag->size;
572 +
573 + /* Increment SW index */
574 + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
575 + (*cleaned_count)++;
576 + }
577 + }
578 +
579 + return sw_next_to_clean;
580 +}
581 +
582 +/*
583 + * edma_rx_complete()
584 + * Main api called from the poll function to process rx packets.
585 + */
586 +static void edma_rx_complete(struct edma_common_info *edma_cinfo,
587 + int *work_done, int work_to_do, int queue_id,
588 + struct napi_struct *napi)
589 +{
590 + struct platform_device *pdev = edma_cinfo->pdev;
591 + struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
592 + struct net_device *netdev;
593 + struct edma_adapter *adapter;
594 + struct edma_sw_desc *sw_desc;
595 + struct sk_buff *skb;
596 + struct edma_rx_return_desc *rd;
597 + u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
598 + sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
599 + u32 data = 0;
600 + u8 *vaddr;
601 + int port_id, i, drop_count = 0;
602 + u32 priority;
603 + u16 count = erdr->count, rfd_avail;
604 + u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
605 +
606 + sw_next_to_clean = erdr->sw_next_to_clean;
607 +
608 + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
609 + hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
610 + EDMA_RFD_CONS_IDX_MASK;
611 +
612 + do {
613 + while (sw_next_to_clean != hw_next_to_clean) {
614 + if (!work_to_do)
615 + break;
616 +
617 + sw_desc = &erdr->sw_desc[sw_next_to_clean];
618 + skb = sw_desc->skb;
619 +
620 + /* Unmap the allocated buffer */
621 + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
622 + dma_unmap_single(&pdev->dev, sw_desc->dma,
623 + sw_desc->length, DMA_FROM_DEVICE);
624 + else
625 + dma_unmap_page(&pdev->dev, sw_desc->dma,
626 + sw_desc->length, DMA_FROM_DEVICE);
627 +
628 + /* Get RRD */
629 + if (edma_cinfo->page_mode) {
630 + vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
631 + memcpy((uint8_t *)&rrd[0], vaddr, 16);
632 + rd = (struct edma_rx_return_desc *)rrd;
633 + kunmap_atomic(vaddr);
634 + } else {
635 + rd = (struct edma_rx_return_desc *)skb->data;
636 + }
637 +
638 + /* Check if RRD is valid */
639 + if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
640 + edma_clean_rfd(erdr, sw_next_to_clean);
641 + sw_next_to_clean = (sw_next_to_clean + 1) &
642 + (erdr->count - 1);
643 + cleaned_count++;
644 + continue;
645 + }
646 +
647 + /* Get the number of RFDs from RRD */
648 + num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
649 +
650 + /* Get Rx port ID from switch */
651 + port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
652 + if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
653 + dev_err(&pdev->dev, "Invalid RRD source port bit set");
654 + for (i = 0; i < num_rfds; i++) {
655 + edma_clean_rfd(erdr, sw_next_to_clean);
656 + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
657 + cleaned_count++;
658 + }
659 + continue;
660 + }
661 +
662 + /* check if we have a sink for the data we receive.
663 +			 * If the interface isn't set up, we have to drop the
664 + * incoming data for now.
665 + */
666 + netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
667 + if (!netdev) {
668 + edma_clean_rfd(erdr, sw_next_to_clean);
669 + sw_next_to_clean = (sw_next_to_clean + 1) &
670 + (erdr->count - 1);
671 + cleaned_count++;
672 + continue;
673 + }
674 + adapter = netdev_priv(netdev);
675 +
676 +			/* This code handles the case where a high
677 +			 * priority stream and a low priority stream are
678 +			 * received simultaneously on the DUT. The problem occurs
679 +			 * if one of the Rx rings is full and the corresponding
680 +			 * core is busy with other stuff. This causes the ESS CPU
681 +			 * port to backpressure all incoming traffic, including
682 +			 * the high priority stream. We monitor the free descriptor
683 +			 * count on each CPU and whenever it reaches a threshold
684 +			 * (< 80), we drop all low priority traffic and let only
685 +			 * high priority traffic pass through. We can hence avoid
686 +			 * the ESS CPU port sending backpressure on the high
687 +			 * priority stream.
688 +			 */
689 + priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
690 + & EDMA_RRD_PRIORITY_MASK;
691 + if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
692 + rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
693 + if (rfd_avail < EDMA_RFD_AVAIL_THR) {
694 + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
695 + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
696 + adapter->stats.rx_dropped++;
697 + cleaned_count++;
698 + drop_count++;
699 + if (drop_count == 3) {
700 + work_to_do--;
701 + (*work_done)++;
702 + drop_count = 0;
703 + }
704 + if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
705 + /* If buffer clean count reaches 16, we replenish HW buffers. */
706 + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
707 + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
708 + sw_next_to_clean);
709 + cleaned_count = ret_count;
710 + }
711 + continue;
712 + }
713 + }
714 +
715 + work_to_do--;
716 + (*work_done)++;
717 +
718 + /* Increment SW index */
719 + sw_next_to_clean = (sw_next_to_clean + 1) &
720 + (erdr->count - 1);
721 +
722 + cleaned_count++;
723 +
724 + /* Get the packet size and allocate buffer */
725 + length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
726 +
727 + if (edma_cinfo->page_mode) {
728 + /* paged skb */
729 + sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
730 + if (!pskb_may_pull(skb, ETH_HLEN)) {
731 + dev_kfree_skb_any(skb);
732 + continue;
733 + }
734 + } else {
735 + /* single or fraglist skb */
736 +
737 +				/* The first 16 bytes of the packet are the rrd
738 +				 * descriptor, so the actual data starts at an
739 +				 * offset of 16 bytes.
740 + */
741 + skb_reserve(skb, 16);
742 + if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
743 + skb_put(skb, length);
744 + } else {
745 + sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
746 + }
747 + }
748 +
749 + if (edma_stp_rstp) {
750 + edma_rx_complete_stp_rstp(skb, port_id, rd);
751 + }
752 +
753 + skb->protocol = eth_type_trans(skb, netdev);
754 +
755 + /* Record Rx queue for RFS/RPS and fill flow hash from HW */
756 + skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
757 + if (netdev->features & NETIF_F_RXHASH) {
758 + hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
759 + if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
760 + skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
761 + }
762 +
763 +#ifdef CONFIG_NF_FLOW_COOKIE
764 + skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
765 +#endif
766 + edma_receive_checksum(rd, skb);
767 +
768 + /* Process VLAN HW acceleration indication provided by HW */
769 + if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
770 + vlan = rd->rrd4;
771 + if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
772 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
773 + else if (rd->rrd1 & EDMA_RRD_SVLAN)
774 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
775 + }
776 +
777 + /* Update rx statistics */
778 + adapter->stats.rx_packets++;
779 + adapter->stats.rx_bytes += length;
780 +
781 + /* Check if we reached refill threshold */
782 + if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
783 + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
784 + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
785 + sw_next_to_clean);
786 + cleaned_count = ret_count;
787 + }
788 +
789 + /* At this point skb should go to stack */
790 + napi_gro_receive(napi, skb);
791 + }
792 +
793 + /* Check if we still have NAPI budget */
794 + if (!work_to_do)
795 + break;
796 +
797 + /* Read index once again since we still have NAPI budget */
798 + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
799 + hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
800 + EDMA_RFD_CONS_IDX_MASK;
801 + } while (hw_next_to_clean != sw_next_to_clean);
802 +
803 + erdr->sw_next_to_clean = sw_next_to_clean;
804 +
805 + /* Refill here in case refill threshold wasn't reached */
806 + if (likely(cleaned_count)) {
807 + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
808 + if (ret_count)
809 +			dev_dbg(&pdev->dev, "Not all buffers were reallocated");
810 + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
811 + erdr->sw_next_to_clean);
812 + }
813 +}
814 +
815 +/* edma_delete_rfs_filter()
816 + * Remove RFS filter from switch
817 + */
818 +static int edma_delete_rfs_filter(struct edma_adapter *adapter,
819 + struct edma_rfs_filter_node *filter_node)
820 +{
821 + int res = -1;
822 +
823 + struct flow_keys *keys = &filter_node->keys;
824 +
825 + if (likely(adapter->set_rfs_rule))
826 + res = (*adapter->set_rfs_rule)(adapter->netdev,
827 + flow_get_u32_src(keys), flow_get_u32_dst(keys),
828 + keys->ports.src, keys->ports.dst,
829 + keys->basic.ip_proto, filter_node->rq_id, 0);
830 +
831 + return res;
832 +}
833 +
834 +/* edma_add_rfs_filter()
835 + * Add RFS filter to switch
836 + */
837 +static int edma_add_rfs_filter(struct edma_adapter *adapter,
838 + struct flow_keys *keys, u16 rq,
839 + struct edma_rfs_filter_node *filter_node)
840 +{
841 + int res = -1;
842 +
843 + struct flow_keys *dest_keys = &filter_node->keys;
844 +
845 +	memcpy(dest_keys, keys, sizeof(*dest_keys));
846 +/*
847 + dest_keys->control = keys->control;
848 + dest_keys->basic = keys->basic;
849 + dest_keys->addrs = keys->addrs;
850 + dest_keys->ports = keys->ports;
851 + dest_keys.ip_proto = keys->ip_proto;
852 +*/
853 + /* Call callback registered by ESS driver */
854 + if (likely(adapter->set_rfs_rule))
855 + res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
856 + flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
857 + keys->basic.ip_proto, rq, 1);
858 +
859 + return res;
860 +}
861 +
862 +/* edma_rfs_key_search()
863 + * Look for existing RFS entry
864 + */
865 +static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
866 + struct flow_keys *key)
867 +{
868 + struct edma_rfs_filter_node *p;
869 +
870 + hlist_for_each_entry(p, h, node)
871 + if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
872 + flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
873 + p->keys.ports.src == key->ports.src &&
874 + p->keys.ports.dst == key->ports.dst &&
875 + p->keys.basic.ip_proto == key->basic.ip_proto)
876 + return p;
877 + return NULL;
878 +}
879 +
880 +/* edma_initialise_rfs_flow_table()
881 + * Initialise EDMA RFS flow table
882 + */
883 +static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
884 +{
885 + int i;
886 +
887 + spin_lock_init(&adapter->rfs.rfs_ftab_lock);
888 +
889 + /* Initialize EDMA flow hash table */
890 + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
891 + INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
892 +
893 + adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
894 + adapter->rfs.filter_available = adapter->rfs.max_num_filter;
895 + adapter->rfs.hashtoclean = 0;
896 +
897 + /* Add timer to get periodic RFS updates from OS */
898 + init_timer(&adapter->rfs.expire_rfs);
899 + adapter->rfs.expire_rfs.function = edma_flow_may_expire;
900 + adapter->rfs.expire_rfs.data = (unsigned long)adapter;
901 + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
902 +}
903 +
904 +/* edma_free_rfs_flow_table()
905 + * Free EDMA RFS flow table
906 + */
907 +static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
908 +{
909 + int i;
910 +
911 + /* Remove sync timer */
912 + del_timer_sync(&adapter->rfs.expire_rfs);
913 + spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
914 +
915 + /* Free EDMA RFS table entries */
916 + adapter->rfs.filter_available = 0;
917 +
918 + /* Clean-up EDMA flow hash table */
919 + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
920 + struct hlist_head *hhead;
921 + struct hlist_node *tmp;
922 + struct edma_rfs_filter_node *filter_node;
923 + int res;
924 +
925 + hhead = &adapter->rfs.hlist_head[i];
926 + hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
927 + res = edma_delete_rfs_filter(adapter, filter_node);
928 + if (res < 0)
929 + dev_warn(&adapter->netdev->dev,
930 + "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
931 + filter_node->flow_id);
932 + hlist_del(&filter_node->node);
933 + kfree(filter_node);
934 + }
935 + }
936 + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
937 +}
938 +
939 +/* edma_tx_unmap_and_free()
940 + * clean TX buffer
941 + */
942 +static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
943 + struct edma_sw_desc *sw_desc)
944 +{
945 + struct sk_buff *skb = sw_desc->skb;
946 +
947 + if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
948 + (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
949 + /* unmap_single for skb head area */
950 + dma_unmap_single(&pdev->dev, sw_desc->dma,
951 + sw_desc->length, DMA_TO_DEVICE);
952 + else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
953 + /* unmap page for paged fragments */
954 + dma_unmap_page(&pdev->dev, sw_desc->dma,
955 + sw_desc->length, DMA_TO_DEVICE);
956 +
957 + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
958 + dev_kfree_skb_any(skb);
959 +
960 + sw_desc->flags = 0;
961 +}
962 +
963 +/* edma_tx_complete()
964 + * Used to clean tx queues and update hardware and consumer index
965 + */
966 +static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
967 +{
968 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
969 + struct edma_sw_desc *sw_desc;
970 + struct platform_device *pdev = edma_cinfo->pdev;
971 + int i;
972 +
973 + u16 sw_next_to_clean = etdr->sw_next_to_clean;
974 + u16 hw_next_to_clean;
975 + u32 data = 0;
976 +
977 + edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
978 + hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
979 +
980 + /* clean the buffer here */
981 + while (sw_next_to_clean != hw_next_to_clean) {
982 + sw_desc = &etdr->sw_desc[sw_next_to_clean];
983 + edma_tx_unmap_and_free(pdev, sw_desc);
984 + sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
985 + }
986 +
987 + etdr->sw_next_to_clean = sw_next_to_clean;
988 +
989 + /* update the TPD consumer index register */
990 + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
991 +
992 + /* Wake the queue if queue is stopped and netdev link is up */
993 + for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
994 + if (netif_tx_queue_stopped(etdr->nq[i])) {
995 + if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
996 + netif_tx_wake_queue(etdr->nq[i]);
997 + }
998 + }
999 +}
1000 +
1001 +/* edma_get_tx_buffer()
1002 + * Get sw_desc corresponding to the TPD
1003 + */
1004 +static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
1005 + struct edma_tx_desc *tpd, int queue_id)
1006 +{
1007 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1008 + return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
1009 +}
1010 +
1011 +/* edma_get_next_tpd()
1012 + * Return a TPD descriptor for transfer
1013 + */
1014 +static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
1015 + int queue_id)
1016 +{
1017 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1018 + u16 sw_next_to_fill = etdr->sw_next_to_fill;
1019 + struct edma_tx_desc *tpd_desc =
1020 + (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
1021 +
1022 + etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
1023 +
1024 + return tpd_desc;
1025 +}
1026 +
1027 +/* edma_tpd_available()
1028 + * Check number of free TPDs
1029 + */
1030 +static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
1031 + int queue_id)
1032 +{
1033 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1034 +
1035 + u16 sw_next_to_fill;
1036 + u16 sw_next_to_clean;
1037 + u16 count = 0;
1038 +
1039 + sw_next_to_clean = etdr->sw_next_to_clean;
1040 + sw_next_to_fill = etdr->sw_next_to_fill;
1041 +
1042 + if (likely(sw_next_to_clean <= sw_next_to_fill))
1043 + count = etdr->count;
1044 +
1045 + return count + sw_next_to_clean - sw_next_to_fill - 1;
1046 +}
1047 +
1048 +/* edma_tx_queue_get()
1049 + * Get the starting number of the queue
1050 + */
1051 +static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1052 + struct sk_buff *skb, int txq_id)
1053 +{
1054 + /* skb->priority is used as an index to skb priority table
1055 +	 * and based on packet priority, the corresponding queue is assigned.
1056 + */
1057 + return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1058 +}
1059 +
1060 +/* edma_tx_update_hw_idx()
1061 + * update the producer index for the ring transmitted
1062 + */
1063 +static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1064 + struct sk_buff *skb, int queue_id)
1065 +{
1066 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1067 + u32 tpd_idx_data;
1068 +
1069 + /* Read and update the producer index */
1070 + edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1071 + tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1072 + tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1073 + << EDMA_TPD_PROD_IDX_SHIFT;
1074 +
1075 + edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1076 +}
1077 +
1078 +/* edma_rollback_tx()
1079 + * Function to reclaim tx resources in case of error
1080 + */
1081 +static void edma_rollback_tx(struct edma_adapter *adapter,
1082 + struct edma_tx_desc *start_tpd, int queue_id)
1083 +{
1084 + struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1085 + struct edma_sw_desc *sw_desc;
1086 + struct edma_tx_desc *tpd = NULL;
1087 + u16 start_index, index;
1088 +
1089 + start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1090 +
1091 + index = start_index;
1092 + while (index != etdr->sw_next_to_fill) {
1093 + tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1094 + sw_desc = &etdr->sw_desc[index];
1095 + edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1096 + memset(tpd, 0, sizeof(struct edma_tx_desc));
1097 + if (++index == etdr->count)
1098 + index = 0;
1099 + }
1100 + etdr->sw_next_to_fill = start_index;
1101 +}
1102 +
1103 +/* edma_tx_map_and_fill()
1104 + * gets called from edma_xmit_frame
1105 + *
1106 + * This is where the buffer to be transmitted
1107 + * gets DMA mapped
1108 + */
1109 +static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1110 + struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
1111 + unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
1112 + bool packet_is_rstp, int nr_frags)
1113 +{
1114 + struct edma_sw_desc *sw_desc = NULL;
1115 + struct platform_device *pdev = edma_cinfo->pdev;
1116 + struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
1117 + struct sk_buff *iter_skb;
1118 + int i = 0;
1119 + u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1120 + u16 buf_len, lso_desc_len = 0;
1121 +
1122 + /* It should either be a nr_frags skb or fraglist skb but not both */
1123 + BUG_ON(nr_frags && skb_has_frag_list(skb));
1124 +
1125 + if (skb_is_gso(skb)) {
1126 + /* TODO: What additional checks need to be performed here */
1127 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1128 + lso_word1 |= EDMA_TPD_IPV4_EN;
1129 + ip_hdr(skb)->check = 0;
1130 + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1131 + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1132 + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1133 + lso_word1 |= EDMA_TPD_LSO_V2_EN;
1134 + ipv6_hdr(skb)->payload_len = 0;
1135 + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1136 + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1137 + } else
1138 + return -EINVAL;
1139 +
1140 + lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1141 + (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1142 + } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1143 + u8 css, cso;
1144 + cso = skb_checksum_start_offset(skb);
1145 + css = cso + skb->csum_offset;
1146 +
1147 + word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1148 + word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1149 + word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1150 + }
1151 +
1152 + if (skb->protocol == htons(ETH_P_PPP_SES))
1153 + word1 |= EDMA_TPD_PPPOE_EN;
1154 +
1155 + if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1156 + switch(skb->vlan_proto) {
1157 + case htons(ETH_P_8021Q):
1158 + word3 |= (1 << EDMA_TX_INS_CVLAN);
1159 + word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1160 + break;
1161 + case htons(ETH_P_8021AD):
1162 + word1 |= (1 << EDMA_TX_INS_SVLAN);
1163 + svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1164 + break;
1165 + default:
1166 + dev_err(&pdev->dev, "no ctag or stag present\n");
1167 + goto vlan_tag_error;
1168 + }
1169 + } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1170 + word3 |= (1 << EDMA_TX_INS_CVLAN);
1171 + word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1172 + }
1173 +
1174 + if (packet_is_rstp) {
1175 + word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1176 + word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1177 + } else {
1178 + word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1179 + }
1180 +
1181 + buf_len = skb_headlen(skb);
1182 +
1183 + if (lso_word1) {
1184 + if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1185 +
1186 + /* IPv6 LSOv2 descriptor */
1187 + start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1188 + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1189 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1190 +
1191 + /* LSOv2 descriptor overrides addr field to pass length */
1192 + tpd->addr = cpu_to_le16(skb->len);
1193 + tpd->svlan_tag = svlan_tag;
1194 + tpd->word1 = word1 | lso_word1;
1195 + tpd->word3 = word3;
1196 + }
1197 +
1198 + tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1199 + if (!start_tpd)
1200 + start_tpd = tpd;
1201 + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1202 +
1203 +		/* The last buffer info contains the skb address,
1204 + * so skb will be freed after unmap
1205 + */
1206 + sw_desc->length = lso_desc_len;
1207 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1208 +
1209 + sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1210 + skb->data, buf_len, DMA_TO_DEVICE);
1211 + if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1212 + goto dma_error;
1213 +
1214 + tpd->addr = cpu_to_le32(sw_desc->dma);
1215 + tpd->len = cpu_to_le16(buf_len);
1216 +
1217 + tpd->svlan_tag = svlan_tag;
1218 + tpd->word1 = word1 | lso_word1;
1219 + tpd->word3 = word3;
1220 +
1221 +		/* The last buffer info contains the skb address,
1222 + * so it will be freed after unmap
1223 + */
1224 + sw_desc->length = lso_desc_len;
1225 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1226 +
1227 + buf_len = 0;
1228 + }
1229 +
1230 + if (likely(buf_len)) {
1231 +
1232 + /* TODO Do not dequeue descriptor if there is a potential error */
1233 + tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1234 +
1235 + if (!start_tpd)
1236 + start_tpd = tpd;
1237 +
1238 + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1239 +
1240 +		/* The last buffer info contains the skb address,
1241 +		 * so it will be freed after unmap
1242 + */
1243 + sw_desc->length = buf_len;
1244 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1245 + sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1246 + skb->data, buf_len, DMA_TO_DEVICE);
1247 + if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1248 + goto dma_error;
1249 +
1250 + tpd->addr = cpu_to_le32(sw_desc->dma);
1251 + tpd->len = cpu_to_le16(buf_len);
1252 +
1253 + tpd->svlan_tag = svlan_tag;
1254 + tpd->word1 = word1 | lso_word1;
1255 + tpd->word3 = word3;
1256 + }
1257 +
1258 + /* Walk through all paged fragments */
1259 + while (nr_frags--) {
1260 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1261 + buf_len = skb_frag_size(frag);
1262 + tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1263 + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1264 + sw_desc->length = buf_len;
1265 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1266 +
1267 + sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1268 +
1269 +		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1270 + goto dma_error;
1271 +
1272 + tpd->addr = cpu_to_le32(sw_desc->dma);
1273 + tpd->len = cpu_to_le16(buf_len);
1274 +
1275 + tpd->svlan_tag = svlan_tag;
1276 + tpd->word1 = word1 | lso_word1;
1277 + tpd->word3 = word3;
1278 + i++;
1279 + }
1280 +
1281 + /* Walk through all fraglist skbs */
1282 + skb_walk_frags(skb, iter_skb) {
1283 + buf_len = iter_skb->len;
1284 + tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1285 + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1286 + sw_desc->length = buf_len;
1287 + sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1288 + iter_skb->data, buf_len, DMA_TO_DEVICE);
1289 +
1290 +		if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1291 + goto dma_error;
1292 +
1293 + tpd->addr = cpu_to_le32(sw_desc->dma);
1294 + tpd->len = cpu_to_le16(buf_len);
1295 + tpd->svlan_tag = svlan_tag;
1296 + tpd->word1 = word1 | lso_word1;
1297 + tpd->word3 = word3;
1298 + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1299 + }
1300 +
1301 + if (tpd)
1302 + tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1303 +
1304 + sw_desc->skb = skb;
1305 + sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1306 +
1307 + return 0;
1308 +
1309 +dma_error:
1310 + edma_rollback_tx(adapter, start_tpd, queue_id);
1311 + dev_err(&pdev->dev, "TX DMA map failed\n");
1312 +vlan_tag_error:
1313 + return -ENOMEM;
1314 +}
1315 +
1316 +/* edma_check_link()
1317 + * check Link status
1318 + */
1319 +static int edma_check_link(struct edma_adapter *adapter)
1320 +{
1321 + struct phy_device *phydev = adapter->phydev;
1322 +
1323 + if (!(adapter->poll_required))
1324 + return __EDMA_LINKUP;
1325 +
1326 + if (phydev->link)
1327 + return __EDMA_LINKUP;
1328 +
1329 + return __EDMA_LINKDOWN;
1330 +}
1331 +
1332 +/* edma_adjust_link()
1333 + * check for edma link status
1334 + */
1335 +void edma_adjust_link(struct net_device *netdev)
1336 +{
1337 + int status;
1338 + struct edma_adapter *adapter = netdev_priv(netdev);
1339 + struct phy_device *phydev = adapter->phydev;
1340 +
1341 + if (!test_bit(__EDMA_UP, &adapter->state_flags))
1342 + return;
1343 +
1344 + status = edma_check_link(adapter);
1345 +
1346 + if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1347 + dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1348 + adapter->link_state = __EDMA_LINKUP;
1349 + netif_carrier_on(netdev);
1350 + if (netif_running(netdev))
1351 + netif_tx_wake_all_queues(netdev);
1352 + } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1353 + dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1354 + adapter->link_state = __EDMA_LINKDOWN;
1355 + netif_carrier_off(netdev);
1356 + netif_tx_stop_all_queues(netdev);
1357 + }
1358 +}
1359 +
1360 +/* edma_get_stats()
1361 + * Statistics API used to retrieve the tx/rx statistics
1362 + */
1363 +struct net_device_stats *edma_get_stats(struct net_device *netdev)
1364 +{
1365 + struct edma_adapter *adapter = netdev_priv(netdev);
1366 +
1367 + return &adapter->stats;
1368 +}
1369 +
1370 +/* edma_xmit()
1371 + * Main api to be called by the core for packet transmission
1372 + */
1373 +netdev_tx_t edma_xmit(struct sk_buff *skb,
1374 + struct net_device *net_dev)
1375 +{
1376 + struct edma_adapter *adapter = netdev_priv(net_dev);
1377 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1378 + struct edma_tx_desc_ring *etdr;
1379 + u16 from_cpu, dp_bitmap, txq_id;
1380 + int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
1381 + unsigned int flags_transmit = 0;
1382 + bool packet_is_rstp = false;
1383 + struct netdev_queue *nq = NULL;
1384 +
1385 + if (skb_shinfo(skb)->nr_frags) {
1386 + nr_frags = skb_shinfo(skb)->nr_frags;
1387 + num_tpds_needed += nr_frags;
1388 + } else if (skb_has_frag_list(skb)) {
1389 + struct sk_buff *iter_skb;
1390 +
1391 + skb_walk_frags(skb, iter_skb)
1392 + num_tpds_needed++;
1393 + }
1394 +
1395 + if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
1396 + dev_err(&net_dev->dev,
1397 + "skb received with fragments %d which is more than %lu",
1398 + num_tpds_needed, EDMA_MAX_SKB_FRAGS);
1399 + dev_kfree_skb_any(skb);
1400 + adapter->stats.tx_errors++;
1401 + return NETDEV_TX_OK;
1402 + }
1403 +
1404 + if (edma_stp_rstp) {
1405 + u16 ath_hdr, ath_eth_type;
1406 + u8 mac_addr[EDMA_ETH_HDR_LEN];
1407 + ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1408 + if (ath_eth_type == edma_ath_eth_type) {
1409 + packet_is_rstp = true;
1410 + ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1411 + dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1412 + from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1413 + memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1414 +
1415 + skb_pull(skb, 4);
1416 +
1417 + memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1418 + }
1419 + }
1420 +
1421 +	/* this will be one of the 4 TX queues exposed to the Linux kernel */
1422 + txq_id = skb_get_queue_mapping(skb);
1423 + queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1424 + etdr = edma_cinfo->tpd_ring[queue_id];
1425 + nq = netdev_get_tx_queue(net_dev, txq_id);
1426 +
1427 + local_bh_disable();
1428 + /* Tx is not handled in bottom half context. Hence, we need to protect
1429 + * Tx from tasks and bottom half
1430 + */
1431 +
1432 + if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1433 + /* not enough descriptor, just stop queue */
1434 + netif_tx_stop_queue(nq);
1435 + local_bh_enable();
1436 + dev_dbg(&net_dev->dev, "Not enough descriptors available");
1437 + edma_cinfo->edma_ethstats.tx_desc_error++;
1438 + return NETDEV_TX_BUSY;
1439 + }
1440 +
1441 + /* Check and mark VLAN tag offload */
1442 + if (skb_vlan_tag_present(skb))
1443 + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1444 + else if (adapter->default_vlan_tag)
1445 + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1446 +
1447 + /* Check and mark checksum offload */
1448 + if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1449 + flags_transmit |= EDMA_HW_CHECKSUM;
1450 +
1451 + /* Map and fill descriptor for Tx */
1452 + ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1453 + flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
1454 + if (ret) {
1455 + dev_kfree_skb_any(skb);
1456 + adapter->stats.tx_errors++;
1457 + goto netdev_okay;
1458 + }
1459 +
1460 + /* Update SW producer index */
1461 + edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1462 +
1463 + /* update tx statistics */
1464 + adapter->stats.tx_packets++;
1465 + adapter->stats.tx_bytes += skb->len;
1466 +
1467 +netdev_okay:
1468 + local_bh_enable();
1469 + return NETDEV_TX_OK;
1470 +}
1471 +
1472 +/*
1473 + * edma_flow_may_expire()
1474 + * Timer function called periodically to delete the node
1475 + */
1476 +void edma_flow_may_expire(unsigned long data)
1477 +{
1478 + struct edma_adapter *adapter = (struct edma_adapter *)data;
1479 + int j;
1480 +
1481 + spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1482 + for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1483 + struct hlist_head *hhead;
1484 + struct hlist_node *tmp;
1485 + struct edma_rfs_filter_node *n;
1486 + bool res;
1487 +
1488 + hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1489 + hlist_for_each_entry_safe(n, tmp, hhead, node) {
1490 + res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1491 + n->flow_id, n->filter_id);
1492 + if (res) {
1493 + int ret;
1494 + ret = edma_delete_rfs_filter(adapter, n);
1495 + if (ret < 0)
1496 + dev_dbg(&adapter->netdev->dev,
1497 + "RFS entry %d not allowed to be flushed by Switch",
1498 + n->flow_id);
1499 + else {
1500 + hlist_del(&n->node);
1501 + kfree(n);
1502 + adapter->rfs.filter_available++;
1503 + }
1504 + }
1505 + }
1506 + }
1507 +
1508 + adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1509 + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1510 + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
1511 +}
1512 +
1513 +/* edma_rx_flow_steer()
1514 + * Called by the core to steer the flow to a CPU
1515 + */
1516 +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1517 + u16 rxq, u32 flow_id)
1518 +{
1519 + struct flow_keys keys;
1520 + struct edma_rfs_filter_node *filter_node;
1521 + struct edma_adapter *adapter = netdev_priv(dev);
1522 + u16 hash_tblid;
1523 + int res;
1524 +
1525 + if (skb->protocol == htons(ETH_P_IPV6)) {
1526 + dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
1527 + res = -EINVAL;
1528 + goto no_protocol_err;
1529 + }
1530 +
1531 + /* Dissect flow parameters
1532 + * We only support IPv4 + TCP/UDP
1533 + */
1534 + res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1535 + if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1536 + res = -EPROTONOSUPPORT;
1537 + goto no_protocol_err;
1538 + }
1539 +
1540 + /* Check if table entry exists */
1541 + hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1542 +
1543 + spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1544 + filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1545 +
1546 + if (filter_node) {
1547 + if (rxq == filter_node->rq_id) {
1548 + res = -EEXIST;
1549 + goto out;
1550 + } else {
1551 + res = edma_delete_rfs_filter(adapter, filter_node);
1552 + if (res < 0)
1553 + dev_warn(&adapter->netdev->dev,
1554 + "Cannot steer flow %d to different queue",
1555 + filter_node->flow_id);
1556 + else {
1557 + adapter->rfs.filter_available++;
1558 + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1559 + if (res < 0) {
1560 + dev_warn(&adapter->netdev->dev,
1561 + "Cannot steer flow %d to different queue",
1562 + filter_node->flow_id);
1563 + } else {
1564 + adapter->rfs.filter_available--;
1565 + filter_node->rq_id = rxq;
1566 + filter_node->filter_id = res;
1567 + }
1568 + }
1569 + }
1570 + } else {
1571 + if (adapter->rfs.filter_available == 0) {
1572 + res = -EBUSY;
1573 + goto out;
1574 + }
1575 +
1576 + filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1577 + if (!filter_node) {
1578 + res = -ENOMEM;
1579 + goto out;
1580 + }
1581 +
1582 + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1583 + if (res < 0) {
1584 + kfree(filter_node);
1585 + goto out;
1586 + }
1587 +
1588 + adapter->rfs.filter_available--;
1589 + filter_node->rq_id = rxq;
1590 + filter_node->filter_id = res;
1591 + filter_node->flow_id = flow_id;
1592 + filter_node->keys = keys;
1593 + INIT_HLIST_NODE(&filter_node->node);
1594 + hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1595 + }
1596 +
1597 +out:
1598 + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1599 +no_protocol_err:
1600 + return res;
1601 +}
1602 +
1603 +/* edma_register_rfs_filter()
1604 + * Add RFS filter callback
1605 + */
1606 +int edma_register_rfs_filter(struct net_device *netdev,
1607 + set_rfs_filter_callback_t set_filter)
1608 +{
1609 + struct edma_adapter *adapter = netdev_priv(netdev);
1610 +
1611 + spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1612 +
1613 + if (adapter->set_rfs_rule) {
1614 + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1615 + return -1;
1616 + }
1617 +
1618 + adapter->set_rfs_rule = set_filter;
1619 + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1620 +
1621 + return 0;
1622 +}
1623 +
1624 +/* edma_alloc_tx_rings()
1625 + * Allocate rx rings
1626 + * Allocate tx rings
1627 +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1628 +{
1629 + struct platform_device *pdev = edma_cinfo->pdev;
1630 + int i, err = 0;
1631 +
1632 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1633 + err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1634 + if (err) {
1635 + dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1636 + return err;
1637 + }
1638 + }
1639 +
1640 + return 0;
1641 +}
1642 +
1643 +/* edma_free_tx_rings()
1644 + * Free tx rings
1645 + */
1646 +void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1647 +{
1648 + int i;
1649 +
1650 + for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1651 + edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1652 +}
1653 +
1654 +/* edma_free_tx_resources()
1655 + * Free buffers associated with tx rings
1656 + */
1657 +void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1658 +{
1659 + struct edma_tx_desc_ring *etdr;
1660 + struct edma_sw_desc *sw_desc;
1661 + struct platform_device *pdev = edma_cinfo->pdev;
1662 + int i, j;
1663 +
1664 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1665 + etdr = edma_cinfo->tpd_ring[i];
1666 + for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1667 + sw_desc = &etdr->sw_desc[j];
1668 + if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1669 + EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1670 + edma_tx_unmap_and_free(pdev, sw_desc);
1671 + }
1672 + }
1673 +}
1674 +
1675 +/* edma_alloc_rx_rings()
1676 + * Allocate rx rings
1677 + */
1678 +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1679 +{
1680 + struct platform_device *pdev = edma_cinfo->pdev;
1681 + int i, j, err = 0;
1682 +
1683 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1684 + err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1685 + if (err) {
1686 +			dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
1687 + return err;
1688 + }
1689 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1690 + }
1691 +
1692 + return 0;
1693 +}
1694 +
1695 +/* edma_free_rx_rings()
1696 + * free rx rings
1697 + */
1698 +void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1699 +{
1700 + int i, j;
1701 +
1702 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1703 + edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1704 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1705 + }
1706 +}
1707 +
1708 +/* edma_free_queues()
1709 + * Free the allocated queues
1710 + */
1711 +void edma_free_queues(struct edma_common_info *edma_cinfo)
1712 +{
1713 +	int i, j;
1714 +
1715 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1716 + if (edma_cinfo->tpd_ring[i])
1717 + kfree(edma_cinfo->tpd_ring[i]);
1718 + edma_cinfo->tpd_ring[i] = NULL;
1719 + }
1720 +
1721 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1722 + if (edma_cinfo->rfd_ring[j])
1723 + kfree(edma_cinfo->rfd_ring[j]);
1724 + edma_cinfo->rfd_ring[j] = NULL;
1725 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1726 + }
1727 +
1728 + edma_cinfo->num_rx_queues = 0;
1729 + edma_cinfo->num_tx_queues = 0;
1730 +
1731 + return;
1732 +}
1733 +
1734 +/* edma_free_rx_resources()
1735 + * Free buffers associated with rx rings
1736 + */
1737 +void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1738 +{
1739 + struct edma_rfd_desc_ring *erdr;
1740 + struct edma_sw_desc *sw_desc;
1741 + struct platform_device *pdev = edma_cinfo->pdev;
1742 + int i, j, k;
1743 +
1744 + for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1745 + erdr = edma_cinfo->rfd_ring[k];
1746 + for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1747 + sw_desc = &erdr->sw_desc[j];
1748 + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
1749 + dma_unmap_single(&pdev->dev, sw_desc->dma,
1750 + sw_desc->length, DMA_FROM_DEVICE);
1751 + edma_clean_rfd(erdr, j);
1752 + } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
1753 + dma_unmap_page(&pdev->dev, sw_desc->dma,
1754 + sw_desc->length, DMA_FROM_DEVICE);
1755 + edma_clean_rfd(erdr, j);
1756 + }
1757 + }
1758 + k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1760 + }
1761 +}
1762 +
1763 +/* edma_alloc_queues_tx()
1764 + * Allocate memory for all rings
1765 + */
1766 +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1767 +{
1768 + int i;
1769 +
1770 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1771 + struct edma_tx_desc_ring *etdr;
1772 + etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
1773 + if (!etdr)
1774 + goto err;
1775 + etdr->count = edma_cinfo->tx_ring_count;
1776 + edma_cinfo->tpd_ring[i] = etdr;
1777 + }
1778 +
1779 + return 0;
1780 +err:
1781 + edma_free_queues(edma_cinfo);
1782 + return -1;
1783 +}
1784 +
1785 +/* edma_alloc_queues_rx()
1786 + * Allocate memory for all rings
1787 + */
1788 +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
1789 +{
1790 + int i, j;
1791 +
1792 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1793 + struct edma_rfd_desc_ring *rfd_ring;
1794 + rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
1795 + GFP_KERNEL);
1796 + if (!rfd_ring)
1797 + goto err;
1798 + rfd_ring->count = edma_cinfo->rx_ring_count;
1799 + edma_cinfo->rfd_ring[j] = rfd_ring;
1800 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1801 + }
1802 + return 0;
1803 +err:
1804 + edma_free_queues(edma_cinfo);
1805 + return -1;
1806 +}
1807 +
1808 +/* edma_clear_irq_status()
1809 + * Clear interrupt status
1810 + */
1811 +void edma_clear_irq_status(void)
1812 +{
1813 + edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1814 + edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1815 + edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
1816 + edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
1817 +}
1818 +
1819 +/* edma_configure()
1820 + * Configure rx buffers, edma interrupts and control registers.
1821 + */
1822 +int edma_configure(struct edma_common_info *edma_cinfo)
1823 +{
1824 + struct edma_hw *hw = &edma_cinfo->hw;
1825 + u32 intr_modrt_data;
1826 + u32 intr_ctrl_data = 0;
1827 + int i, j, ret_count;
1828 +
1829 + edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
1830 + intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
1831 + intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
1832 + edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
1833 +
1834 + edma_clear_irq_status();
1835 +
1836 + /* Clear any WOL status */
1837 + edma_write_reg(EDMA_REG_WOL_CTRL, 0);
1838 + intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
1839 + intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
1840 + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1841 + edma_configure_tx(edma_cinfo);
1842 + edma_configure_rx(edma_cinfo);
1843 +
1844 + /* Allocate the RX buffer */
1845 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1846 + struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
1847 + ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
1848 + if (ret_count) {
1849 + dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
1850 + }
1851 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1852 + }
1853 +
1854 + /* Configure descriptor Ring */
1855 + edma_init_desc(edma_cinfo);
1856 + return 0;
1857 +}
1858 +
1859 +/* edma_irq_enable()
1860 + * Enable default interrupt generation settings
1861 + */
1862 +void edma_irq_enable(struct edma_common_info *edma_cinfo)
1863 +{
1864 + struct edma_hw *hw = &edma_cinfo->hw;
1865 + int i, j;
1866 +
1867 + edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1868 + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1869 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
1870 + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1871 + }
1872 + edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1873 + for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1874 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
1875 +}
1876 +
1877 +/* edma_irq_disable()
1878 + * Disable Interrupt
1879 + */
1880 +void edma_irq_disable(struct edma_common_info *edma_cinfo)
1881 +{
1882 + int i;
1883 +
1884 + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
1885 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
1886 +
1887 + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
1888 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
1889 + edma_write_reg(EDMA_REG_MISC_IMR, 0);
1890 + edma_write_reg(EDMA_REG_WOL_IMR, 0);
1891 +}
1892 +
1893 +/* edma_free_irqs()
1894 + * Free All IRQs
1895 + */
1896 +void edma_free_irqs(struct edma_adapter *adapter)
1897 +{
1898 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1899 + int i, j;
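+	/* Number of RX IRQs serviced per core: one when 4 RX queues are
+	 * configured, two when 8 are configured (four TX IRQs per core in
+	 * either case).
+	 */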
1900 + int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
1901 +
1902 + for (i = 0; i < CONFIG_NR_CPUS; i++) {
1903 + for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
1904 + free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1905 +
1906 + for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
1907 + free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1908 + }
1909 +}
1910 +
1911 +/* edma_enable_rx_ctrl()
1912 + * Enable RX queue control
1913 + */
1914 +void edma_enable_rx_ctrl(struct edma_hw *hw)
1915 +{
1916 + u32 data;
1917 +
1918 + edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1919 + data |= EDMA_RXQ_CTRL_EN;
1920 + edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1921 +}
1922 +
1924 +/* edma_enable_tx_ctrl()
1925 + * Enable TX queue control
1926 + */
1927 +void edma_enable_tx_ctrl(struct edma_hw *hw)
1928 +{
1929 + u32 data;
1930 +
1931 + edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1932 + data |= EDMA_TXQ_CTRL_TXQ_EN;
1933 + edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1934 +}
1935 +
1936 +/* edma_stop_rx_tx()
1937 + * Disable RX/TX queue control
1938 + */
1939 +void edma_stop_rx_tx(struct edma_hw *hw)
1940 +{
1941 + u32 data;
1942 +
1943 + edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1944 + data &= ~EDMA_RXQ_CTRL_EN;
1945 + edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1946 + edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1947 + data &= ~EDMA_TXQ_CTRL_TXQ_EN;
1948 + edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1949 +}
1950 +
1951 +/* edma_reset()
1952 + * Reset the EDMA
1953 + */
1954 +int edma_reset(struct edma_common_info *edma_cinfo)
1955 +{
1956 + struct edma_hw *hw = &edma_cinfo->hw;
1957 +
1958 + edma_irq_disable(edma_cinfo);
1959 +
1960 + edma_clear_irq_status();
1961 +
1962 + edma_stop_rx_tx(hw);
1963 +
1964 + return 0;
1965 +}
1966 +
1967 +/* edma_fill_netdev()
1968 + * Fill netdev for each etdr
1969 + */
1970 +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
1971 + int dev, int txq_id)
1972 +{
1973 + struct edma_tx_desc_ring *etdr;
1974 + int i = 0;
1975 +
1976 + etdr = edma_cinfo->tpd_ring[queue_id];
1977 +
1978 + while (etdr->netdev[i])
1979 + i++;
1980 +
1981 + if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
1982 + return -1;
1983 +
1984 + /* Populate the netdev associated with the tpd ring */
1985 + etdr->netdev[i] = edma_netdev[dev];
1986 + etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
1987 +
1988 + return 0;
1989 +}
1990 +
1991 +/* edma_set_mac_addr()
1992 + * Change the Ethernet Address of the NIC
1993 + */
1994 +int edma_set_mac_addr(struct net_device *netdev, void *p)
1995 +{
1996 + struct sockaddr *addr = p;
1997 +
1998 + if (!is_valid_ether_addr(addr->sa_data))
1999 + return -EINVAL;
2000 +
2001 + if (netif_running(netdev))
2002 + return -EBUSY;
2003 +
2004 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2005 + return 0;
2006 +}
2007 +
2008 +/* edma_set_stp_rstp()
2009 + * set stp/rstp
2010 + */
2011 +void edma_set_stp_rstp(bool rstp)
2012 +{
2013 + edma_stp_rstp = rstp;
2014 +}
2015 +
2016 +/* edma_assign_ath_hdr_type()
2017 + * assign atheros header eth type
2018 + */
2019 +void edma_assign_ath_hdr_type(int eth_type)
2020 +{
2021 + edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
2022 +}
2023 +
2024 +/* edma_get_default_vlan_tag()
2025 + * Used by other modules to get the default vlan tag
2026 + */
2027 +int edma_get_default_vlan_tag(struct net_device *netdev)
2028 +{
2029 + struct edma_adapter *adapter = netdev_priv(netdev);
2030 +
2031 + if (adapter->default_vlan_tag)
2032 + return adapter->default_vlan_tag;
2033 +
2034 + return 0;
2035 +}
2036 +
2037 +/* edma_open()
2038 + * gets called when netdevice is up, start the queue.
2039 + * gets called when the netdevice is brought up; starts the queues.
2040 +int edma_open(struct net_device *netdev)
2041 +{
2042 + struct edma_adapter *adapter = netdev_priv(netdev);
2043 + struct platform_device *pdev = adapter->edma_cinfo->pdev;
2044 +
2045 + netif_tx_start_all_queues(netdev);
2046 + edma_initialise_rfs_flow_table(adapter);
2047 + set_bit(__EDMA_UP, &adapter->state_flags);
2048 +
2049 +	/* If link polling is enabled (in our case, for the WAN port),
2050 +	 * start the PHY; otherwise always mark the link as up.
2051 +	 */
2052 + if (adapter->poll_required) {
2053 + if (!IS_ERR(adapter->phydev)) {
2054 + phy_start(adapter->phydev);
2055 + phy_start_aneg(adapter->phydev);
2056 + adapter->link_state = __EDMA_LINKDOWN;
2057 + } else {
2058 + dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2059 + }
2060 + } else {
2061 + adapter->link_state = __EDMA_LINKUP;
2062 + netif_carrier_on(netdev);
2063 + }
2064 +
2065 + return 0;
2066 +}
2067 +
2069 +/* edma_close()
2070 + * gets called when netdevice is down, stops the queue.
2071 + * gets called when the netdevice is brought down; stops the queues.
2072 +int edma_close(struct net_device *netdev)
2073 +{
2074 + struct edma_adapter *adapter = netdev_priv(netdev);
2075 +
2076 + edma_free_rfs_flow_table(adapter);
2077 + netif_carrier_off(netdev);
2078 + netif_tx_stop_all_queues(netdev);
2079 +
2080 + if (adapter->poll_required) {
2081 + if (!IS_ERR(adapter->phydev))
2082 + phy_stop(adapter->phydev);
2083 + }
2084 +
2085 + adapter->link_state = __EDMA_LINKDOWN;
2086 +
2087 +	/* Clear the GMAC UP state now that the link is marked down
2088 +	 */
2089 + clear_bit(__EDMA_UP, &adapter->state_flags);
2090 +
2091 + return 0;
2092 +}
2093 +
2094 +/* edma_poll()
2095 + * Polling function that gets called when NAPI is scheduled.
2096 + *
2097 + * The main sequence of tasks performed here is:
2098 + * read irq status -> complete tx -> complete rx ->
2099 + * clear irq status -> re-enable interrupts.
2100 + */
2101 +int edma_poll(struct napi_struct *napi, int budget)
2102 +{
2103 + struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2104 + struct edma_per_cpu_queues_info, napi);
2105 + struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2106 + u32 reg_data;
2107 + u32 shadow_rx_status, shadow_tx_status;
2108 + int queue_id;
2109 + int i, work_done = 0;
2110 +
2111 +	/* Store the Rx/Tx status by ANDing it with
2112 +	 * the appropriate per-CPU RX/TX mask
2113 + */
2114 + edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2115 + edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2116 + shadow_rx_status = edma_percpu_info->rx_status;
2117 + edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2118 + edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2119 + shadow_tx_status = edma_percpu_info->tx_status;
2120 +
2121 + /* Every core will have a start, which will be computed
2122 + * in probe and stored in edma_percpu_info->tx_start variable.
2123 + * We will shift the status bit by tx_start to obtain
2124 + * status bits for the core on which the current processing
2125 +	 * is happening. Since there are 4 tx queues per core,
2126 + * we will run the loop till we get the correct queue to clear.
2127 + */
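+	/* For example, with a per-core tx_mask of 0x0f00 (queues 8-11), a
+	 * raw TX ISR value of 0x0300 leaves tx_status = 0x0300: ffs() then
+	 * selects queue 8 on the first pass and queue 9 on the second,
+	 * after which the loop exits.
+	 */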
2128 + while (edma_percpu_info->tx_status) {
2129 + queue_id = ffs(edma_percpu_info->tx_status) - 1;
2130 + edma_tx_complete(edma_cinfo, queue_id);
2131 + edma_percpu_info->tx_status &= ~(1 << queue_id);
2132 + }
2133 +
2134 + /* Every core will have a start, which will be computed
2135 +	 * in probe and stored in edma_percpu_info->rx_start variable.
2136 +	 * We will shift the status bit by rx_start to obtain
2137 +	 * status bits for the core on which the current processing
2138 +	 * is happening. Since there are up to 2 rx queues per core, we
2139 +	 * will run the loop till we get the correct queue to clear.
2140 + */
2141 + while (edma_percpu_info->rx_status) {
2142 + queue_id = ffs(edma_percpu_info->rx_status) - 1;
2143 + edma_rx_complete(edma_cinfo, &work_done,
2144 + budget, queue_id, napi);
2145 +
2146 + if (likely(work_done < budget))
2147 + edma_percpu_info->rx_status &= ~(1 << queue_id);
2148 + else
2149 + break;
2150 + }
2151 +
2152 +	/* Clear the status register to avoid the interrupts
2153 +	 * reoccurring. This clearing of the interrupt status register is
2154 + * done here as writing to status register only takes place
2155 + * once the producer/consumer index has been updated to
2156 + * reflect that the packet transmission/reception went fine.
2157 + */
2158 + edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2159 + edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2160 +
2161 + /* If budget not fully consumed, exit the polling mode */
2162 + if (likely(work_done < budget)) {
2163 + napi_complete(napi);
2164 +
2165 + /* re-enable the interrupts */
2166 + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2167 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2168 + for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2169 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2170 + }
2171 +
2172 + return work_done;
2173 +}
2174 +
2175 +/* edma_interrupt()
2176 + * interrupt handler
2177 + */
2178 +irqreturn_t edma_interrupt(int irq, void *dev)
2179 +{
2180 + struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2181 + struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2182 + int i;
2183 +
2184 + /* Unmask the TX/RX interrupt register */
2185 + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2186 + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2187 +
2188 + for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2189 + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2190 +
2191 + napi_schedule(&edma_percpu_info->napi);
2192 +
2193 + return IRQ_HANDLED;
2194 +}
2195 --- /dev/null
2196 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
2197 @@ -0,0 +1,446 @@
2198 +/*
2199 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2200 + *
2201 + * Permission to use, copy, modify, and/or distribute this software for
2202 + * any purpose with or without fee is hereby granted, provided that the
2203 + * above copyright notice and this permission notice appear in all copies.
2204 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2205 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2206 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2207 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2208 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2209 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2210 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2211 + */
2212 +
2213 +#ifndef _EDMA_H_
2214 +#define _EDMA_H_
2215 +
2216 +#include <linux/init.h>
2217 +#include <linux/interrupt.h>
2218 +#include <linux/types.h>
2219 +#include <linux/errno.h>
2220 +#include <linux/module.h>
2221 +#include <linux/netdevice.h>
2222 +#include <linux/etherdevice.h>
2223 +#include <linux/skbuff.h>
2224 +#include <linux/io.h>
2225 +#include <linux/vmalloc.h>
2226 +#include <linux/pagemap.h>
2227 +#include <linux/smp.h>
2228 +#include <linux/platform_device.h>
2229 +#include <linux/of.h>
2230 +#include <linux/of_device.h>
2231 +#include <linux/kernel.h>
2232 +#include <linux/device.h>
2233 +#include <linux/sysctl.h>
2234 +#include <linux/phy.h>
2235 +#include <linux/of_net.h>
2236 +#include <net/checksum.h>
2237 +#include <net/ip6_checksum.h>
2238 +#include <asm-generic/bug.h>
2239 +#include "ess_edma.h"
2240 +
2241 +#define EDMA_CPU_CORES_SUPPORTED 4
2242 +#define EDMA_MAX_PORTID_SUPPORTED 5
2243 +#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED
2244 +#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
2245 +#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
2246 +#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */
2247 +
2248 +#define EDMA_MAX_RECEIVE_QUEUE 8
2249 +#define EDMA_MAX_TRANSMIT_QUEUE 16
2250 +
2251 +/* WAN/LAN adapter number */
2252 +#define EDMA_WAN 0
2253 +#define EDMA_LAN 1
2254 +
2255 +/* VLAN tag */
2256 +#define EDMA_LAN_DEFAULT_VLAN 1
2257 +#define EDMA_WAN_DEFAULT_VLAN 2
2258 +
2259 +#define EDMA_DEFAULT_GROUP1_VLAN 1
2260 +#define EDMA_DEFAULT_GROUP2_VLAN 2
2261 +#define EDMA_DEFAULT_GROUP3_VLAN 3
2262 +#define EDMA_DEFAULT_GROUP4_VLAN 4
2263 +#define EDMA_DEFAULT_GROUP5_VLAN 5
2264 +
2265 +/* Queues exposed to linux kernel */
2266 +#define EDMA_NETDEV_TX_QUEUE 4
2267 +#define EDMA_NETDEV_RX_QUEUE 4
2268 +
2269 +/* Number of queues per core */
2270 +#define EDMA_NUM_TXQ_PER_CORE 4
2271 +#define EDMA_NUM_RXQ_PER_CORE 2
2272 +
2273 +#define EDMA_TPD_EOP_SHIFT 31
2274 +
2275 +#define EDMA_PORT_ID_SHIFT 12
2276 +#define EDMA_PORT_ID_MASK 0x7
2277 +
2278 +/* tpd word 3 bit 18-28 */
2279 +#define EDMA_TPD_PORT_BITMAP_SHIFT 18
2280 +
2281 +#define EDMA_TPD_FROM_CPU_SHIFT 25
2282 +
2283 +#define EDMA_FROM_CPU_MASK 0x80
2284 +#define EDMA_SKB_PRIORITY_MASK 0x38
2285 +
2286 +/* TX/RX descriptor ring count */
2287 +/* should be a power of 2 */
2288 +#define EDMA_RX_RING_SIZE 128
2289 +#define EDMA_TX_RING_SIZE 128
2290 +
2291 +/* RX head buffer sizes used in paged/non-paged mode */
2292 +#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
2293 +#define EDMA_RX_HEAD_BUFF_SIZE 1540
2294 +
2295 +/* MAX frame size supported by switch */
2296 +#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
2297 +
2298 +/* Configurations */
2299 +#define EDMA_INTR_CLEAR_TYPE 0
2300 +#define EDMA_INTR_SW_IDX_W_TYPE 0
2301 +#define EDMA_FIFO_THRESH_TYPE 0
2302 +#define EDMA_RSS_TYPE 0
2303 +#define EDMA_RX_IMT 0x0020
2304 +#define EDMA_TX_IMT 0x0050
2305 +#define EDMA_TPD_BURST 5
2306 +#define EDMA_TXF_BURST 0x100
2307 +#define EDMA_RFD_BURST 8
2308 +#define EDMA_RFD_THR 16
2309 +#define EDMA_RFD_LTHR 0
2310 +
2311 +/* RX/TX per CPU based mask/shift */
2312 +#define EDMA_TX_PER_CPU_MASK 0xF
2313 +#define EDMA_RX_PER_CPU_MASK 0x3
2314 +#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
2315 +#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
2316 +#define EDMA_TX_CPU_START_SHIFT 0x2
2317 +#define EDMA_RX_CPU_START_SHIFT 0x1
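+/* Per-core queue layout: each of the four cores services four TX queues
+ * and up to two RX queues; the masks/shifts above are presumably used to
+ * derive the per-core interrupt masks and queue start indices (core index
+ * shifted left by 2 for TX, by 1 for RX).
+ */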
2318 +
2319 +/* Flags used in transmit direction */
2320 +#define EDMA_HW_CHECKSUM 0x00000001
2321 +#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
2322 +#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
2323 +
2324 +#define EDMA_SW_DESC_FLAG_LAST 0x1
2325 +#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
2326 +#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
2327 +#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
2328 +#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
2329 +#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
2330 +
2331 +
2332 +#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
2333 +
2334 +/* Ethtool specific list of EDMA supported features */
2335 +#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
2336 + | SUPPORTED_10baseT_Full \
2337 + | SUPPORTED_100baseT_Half \
2338 + | SUPPORTED_100baseT_Full \
2339 + | SUPPORTED_1000baseT_Full)
2340 +
2341 +/* Receive side atheros Header */
2342 +#define EDMA_RX_ATH_HDR_VERSION 0x2
2343 +#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
2344 +#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
2345 +#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
2346 +#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
2347 +
2348 +/* Transmit side atheros Header */
2349 +#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
2350 +#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
2351 +#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
2352 +
2353 +#define EDMA_TXQ_START_CORE0 8
2354 +#define EDMA_TXQ_START_CORE1 12
2355 +#define EDMA_TXQ_START_CORE2 0
2356 +#define EDMA_TXQ_START_CORE3 4
2357 +
2358 +#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
2359 +#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
2360 +#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
2361 +#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
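+/* The per-core TX queue blocks are rotated: core 0 owns queues 8-11,
+ * core 1 owns 12-15, core 2 owns 0-3 and core 3 owns 4-7; the IRQ masks
+ * above select the matching bits of the TX interrupt status register.
+ */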
2362 +
2363 +#define EDMA_ETH_HDR_LEN 12
2364 +#define EDMA_ETH_TYPE_MASK 0xFFFF
2365 +
2366 +#define EDMA_RX_BUFFER_WRITE 16
2367 +#define EDMA_RFD_AVAIL_THR 80
2368 +
2369 +#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR
2370 +
2371 +extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
2372 + __be16 sport, __be16 dport,
2373 + uint8_t proto, u16 loadbalance, bool action);
2374 +struct edma_ethtool_statistics {
2375 + u32 tx_q0_pkt;
2376 + u32 tx_q1_pkt;
2377 + u32 tx_q2_pkt;
2378 + u32 tx_q3_pkt;
2379 + u32 tx_q4_pkt;
2380 + u32 tx_q5_pkt;
2381 + u32 tx_q6_pkt;
2382 + u32 tx_q7_pkt;
2383 + u32 tx_q8_pkt;
2384 + u32 tx_q9_pkt;
2385 + u32 tx_q10_pkt;
2386 + u32 tx_q11_pkt;
2387 + u32 tx_q12_pkt;
2388 + u32 tx_q13_pkt;
2389 + u32 tx_q14_pkt;
2390 + u32 tx_q15_pkt;
2391 + u32 tx_q0_byte;
2392 + u32 tx_q1_byte;
2393 + u32 tx_q2_byte;
2394 + u32 tx_q3_byte;
2395 + u32 tx_q4_byte;
2396 + u32 tx_q5_byte;
2397 + u32 tx_q6_byte;
2398 + u32 tx_q7_byte;
2399 + u32 tx_q8_byte;
2400 + u32 tx_q9_byte;
2401 + u32 tx_q10_byte;
2402 + u32 tx_q11_byte;
2403 + u32 tx_q12_byte;
2404 + u32 tx_q13_byte;
2405 + u32 tx_q14_byte;
2406 + u32 tx_q15_byte;
2407 + u32 rx_q0_pkt;
2408 + u32 rx_q1_pkt;
2409 + u32 rx_q2_pkt;
2410 + u32 rx_q3_pkt;
2411 + u32 rx_q4_pkt;
2412 + u32 rx_q5_pkt;
2413 + u32 rx_q6_pkt;
2414 + u32 rx_q7_pkt;
2415 + u32 rx_q0_byte;
2416 + u32 rx_q1_byte;
2417 + u32 rx_q2_byte;
2418 + u32 rx_q3_byte;
2419 + u32 rx_q4_byte;
2420 + u32 rx_q5_byte;
2421 + u32 rx_q6_byte;
2422 + u32 rx_q7_byte;
2423 + u32 tx_desc_error;
2424 +};
2425 +
2426 +struct edma_mdio_data {
2427 + struct mii_bus *mii_bus;
2428 + void __iomem *membase;
2429 + int phy_irq[PHY_MAX_ADDR];
2430 +};
2431 +
2432 +/* EDMA LINK state */
2433 +enum edma_link_state {
2434 + __EDMA_LINKUP, /* Indicate link is UP */
2435 + __EDMA_LINKDOWN /* Indicate link is down */
2436 +};
2437 +
2438 +/* EDMA GMAC state */
2439 +enum edma_gmac_state {
2440 + __EDMA_UP /* use to indicate GMAC is up */
2441 +};
2442 +
2443 +/* edma transmit descriptor */
2444 +struct edma_tx_desc {
2445 + __le16 len; /* full packet including CRC */
2446 + __le16 svlan_tag; /* vlan tag */
2447 + __le32 word1; /* byte 4-7 */
2448 + __le32 addr; /* address of buffer */
2449 + __le32 word3; /* byte 12 */
2450 +};
2451 +
2452 +/* edma receive return descriptor */
2453 +struct edma_rx_return_desc {
2454 + u16 rrd0;
2455 + u16 rrd1;
2456 + u16 rrd2;
2457 + u16 rrd3;
2458 + u16 rrd4;
2459 + u16 rrd5;
2460 + u16 rrd6;
2461 + u16 rrd7;
2462 +};
2463 +
2464 +/* RFD descriptor */
2465 +struct edma_rx_free_desc {
2466 + __le32 buffer_addr; /* buffer address */
2467 +};
2468 +
2469 +/* edma hw specific data */
2470 +struct edma_hw {
2471 + u32 __iomem *hw_addr; /* inner register address */
2472 + struct edma_adapter *adapter; /* netdevice adapter */
2473 +	u32 rx_intr_mask; /* rx interrupt mask */
2474 +	u32 tx_intr_mask; /* tx interrupt mask */
2475 + u32 misc_intr_mask; /* misc interrupt mask */
2476 + u32 wol_intr_mask; /* wake on lan interrupt mask */
2477 + bool intr_clear_type; /* interrupt clear */
2478 + bool intr_sw_idx_w; /* interrupt software index */
2479 + u32 rx_head_buff_size; /* Rx buffer size */
2480 + u8 rss_type; /* rss protocol type */
2481 +};
2482 +
2483 +/* edma_sw_desc stores software descriptor
2484 + * SW descriptor has 1:1 map with HW descriptor
2485 + */
2486 +struct edma_sw_desc {
2487 + struct sk_buff *skb;
2488 + dma_addr_t dma; /* dma address */
2489 + u16 length; /* Tx/Rx buffer length */
2490 + u32 flags;
2491 +};
2492 +
2493 +/* per core related information */
2494 +struct edma_per_cpu_queues_info {
2495 + struct napi_struct napi; /* napi associated with the core */
2496 + u32 tx_mask; /* tx interrupt mask */
2497 + u32 rx_mask; /* rx interrupt mask */
2498 + u32 tx_status; /* tx interrupt status */
2499 + u32 rx_status; /* rx interrupt status */
2500 + u32 tx_start; /* tx queue start */
2501 + u32 rx_start; /* rx queue start */
2502 + struct edma_common_info *edma_cinfo; /* edma common info */
2503 +};
2504 +
2505 +/* edma specific common info */
2506 +struct edma_common_info {
2507 + struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
2508 + struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
2509 + struct platform_device *pdev; /* device structure */
2510 + struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
2511 + struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
2512 + struct ctl_table_header *edma_ctl_table_hdr;
2513 + int num_gmac;
2514 + struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
2515 +	int num_rx_queues; /* number of rx queues */
2516 +	u32 num_tx_queues; /* number of tx queues */
2517 +	u32 tx_irq[16]; /* tx irq numbers */
2518 +	u32 rx_irq[8]; /* rx irq numbers */
2519 + u32 from_cpu; /* from CPU TPD field */
2520 + u32 num_rxq_per_core; /* Rx queues per core */
2521 + u32 num_txq_per_core; /* Tx queues per core */
2522 + u16 tx_ring_count; /* Tx ring count */
2523 +	u16 rx_ring_count; /* Rx ring count */
2524 + u16 rx_head_buffer_len; /* rx buffer length */
2525 + u16 rx_page_buffer_len; /* rx buffer length */
2526 + u32 page_mode; /* Jumbo frame supported flag */
2527 + u32 fraglist_mode; /* fraglist supported flag */
2528 + struct edma_hw hw; /* edma hw specific structure */
2529 + struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
2530 +	spinlock_t stats_lock; /* protects edma stats updates */
2531 +};
2532 +
2533 +/* transmit packet descriptor (tpd) ring */
2534 +struct edma_tx_desc_ring {
2535 + struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
2536 + struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
2537 + /* Array of netdevs associated with the tpd ring */
2538 + void *hw_desc; /* descriptor ring virtual address */
2539 + struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2540 + int netdev_bmp; /* Bitmap for per-ring netdevs */
2541 + u32 size; /* descriptor ring length in bytes */
2542 + u16 count; /* number of descriptors in the ring */
2543 + dma_addr_t dma; /* descriptor ring physical address */
2544 + u16 sw_next_to_fill; /* next Tx descriptor to fill */
2545 + u16 sw_next_to_clean; /* next Tx descriptor to clean */
2546 +};
2547 +
2548 +/* receive free descriptor (rfd) ring */
2549 +struct edma_rfd_desc_ring {
2550 + void *hw_desc; /* descriptor ring virtual address */
2551 + struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2552 + u16 size; /* bytes allocated to sw_desc */
2553 + u16 count; /* number of descriptors in the ring */
2554 + dma_addr_t dma; /* descriptor ring physical address */
2555 + u16 sw_next_to_fill; /* next descriptor to fill */
2556 + u16 sw_next_to_clean; /* next descriptor to clean */
2557 +};
2558 +
2559 +/* edma_rfs_filter_node - rfs filter node in hash table */
2560 +struct edma_rfs_filter_node {
2561 + struct flow_keys keys;
2562 + u32 flow_id; /* flow_id of filter provided by kernel */
2563 +	u16 filter_id; /* filter id of filter returned by adapter */
2564 + u16 rq_id; /* desired rq index */
2565 + struct hlist_node node; /* edma rfs list node */
2566 +};
2567 +
2568 +/* edma_rfs_flow_table - rfs flow table */
2569 +struct edma_rfs_flow_table {
2570 + u16 max_num_filter; /* Maximum number of filters edma supports */
2571 + u16 hashtoclean; /* hash table index to clean next */
2572 + int filter_available; /* Number of free filters available */
2573 + struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
2574 + spinlock_t rfs_ftab_lock;
2575 + struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
2576 +};
2577 +
2578 +/* EDMA net device structure */
2579 +struct edma_adapter {
2580 + struct net_device *netdev; /* netdevice */
2581 + struct platform_device *pdev; /* platform device */
2582 + struct edma_common_info *edma_cinfo; /* edma common info */
2583 + struct phy_device *phydev; /* Phy device */
2584 + struct edma_rfs_flow_table rfs; /* edma rfs flow table */
2585 + struct net_device_stats stats; /* netdev statistics */
2586 + set_rfs_filter_callback_t set_rfs_rule;
2587 + u32 flags;/* status flags */
2588 + unsigned long state_flags; /* GMAC up/down flags */
2589 + u32 forced_speed; /* link force speed */
2590 + u32 forced_duplex; /* link force duplex */
2591 + u32 link_state; /* phy link state */
2592 + u32 phy_mdio_addr; /* PHY device address on MII interface */
2593 + u32 poll_required; /* check if link polling is required */
2594 + u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
2595 + u32 default_vlan_tag; /* vlan tag */
2596 + u32 dp_bitmap;
2597 + uint8_t phy_id[MII_BUS_ID_SIZE + 3];
2598 +};
2599 +
2600 +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
2601 +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
2602 +int edma_open(struct net_device *netdev);
2603 +int edma_close(struct net_device *netdev);
2604 +void edma_free_tx_resources(struct edma_common_info *edma_c_info);
2605 +void edma_free_rx_resources(struct edma_common_info *edma_c_info);
2606 +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
2607 +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
2608 +void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
2609 +void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
2610 +void edma_free_queues(struct edma_common_info *edma_cinfo);
2611 +void edma_irq_disable(struct edma_common_info *edma_cinfo);
2612 +int edma_reset(struct edma_common_info *edma_cinfo);
2613 +int edma_poll(struct napi_struct *napi, int budget);
2614 +netdev_tx_t edma_xmit(struct sk_buff *skb,
2615 + struct net_device *netdev);
2616 +int edma_configure(struct edma_common_info *edma_cinfo);
2617 +void edma_irq_enable(struct edma_common_info *edma_cinfo);
2618 +void edma_enable_tx_ctrl(struct edma_hw *hw);
2619 +void edma_enable_rx_ctrl(struct edma_hw *hw);
2620 +void edma_stop_rx_tx(struct edma_hw *hw);
2621 +void edma_free_irqs(struct edma_adapter *adapter);
2622 +irqreturn_t edma_interrupt(int irq, void *dev);
2623 +void edma_write_reg(u16 reg_addr, u32 reg_value);
2624 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
2625 +struct net_device_stats *edma_get_stats(struct net_device *netdev);
2626 +int edma_set_mac_addr(struct net_device *netdev, void *p);
2627 +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2628 + u16 rxq, u32 flow_id);
2629 +int edma_register_rfs_filter(struct net_device *netdev,
2630 + set_rfs_filter_callback_t set_filter);
2631 +void edma_flow_may_expire(unsigned long data);
2632 +void edma_set_ethtool_ops(struct net_device *netdev);
2633 +void edma_set_stp_rstp(bool tag);
2634 +void edma_assign_ath_hdr_type(int tag);
2635 +int edma_get_default_vlan_tag(struct net_device *netdev);
2636 +void edma_adjust_link(struct net_device *netdev);
2637 +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
2638 +void edma_read_append_stats(struct edma_common_info *edma_cinfo);
2639 +void edma_change_tx_coalesce(int usecs);
2640 +void edma_change_rx_coalesce(int usecs);
2641 +void edma_get_tx_rx_coalesce(u32 *reg_val);
2642 +void edma_clear_irq_status(void);
2643 +#endif /* _EDMA_H_ */
2644 --- /dev/null
2645 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
2646 @@ -0,0 +1,1220 @@
2647 +/*
2648 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2649 + *
2650 + * Permission to use, copy, modify, and/or distribute this software for
2651 + * any purpose with or without fee is hereby granted, provided that the
2652 + * above copyright notice and this permission notice appear in all copies.
2653 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2654 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2655 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2656 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2657 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2658 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2659 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2660 + */
2661 +
2662 +#include <linux/cpu_rmap.h>
2663 +#include <linux/of.h>
2664 +#include <linux/of_net.h>
2665 +#include <linux/timer.h>
2666 +#include "edma.h"
2667 +#include "ess_edma.h"
2668 +
2669 +/* Weight round robin and virtual QID mask */
2670 +#define EDMA_WRR_VID_SCTL_MASK 0xffff
2671 +
2672 +/* Weight round robin and virtual QID shift */
2673 +#define EDMA_WRR_VID_SCTL_SHIFT 16
2674 +
2675 +char edma_axi_driver_name[] = "ess_edma";
2676 +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2677 + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
2678 +
2679 +static u32 edma_hw_addr;
2680 +
2681 +struct timer_list edma_stats_timer;
2682 +
2683 +char edma_tx_irq[16][64];
2684 +char edma_rx_irq[8][64];
2685 +struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
2686 +static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
2687 + EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
2688 +static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
2689 + EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
2690 +
2691 +static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
2692 +static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
2693 +static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
2694 +static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
2695 +static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
2696 +static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
2697 +static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
2698 +static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
2699 +static u32 edma_rss_idt_idx;
2700 +
2701 +static int edma_weight_assigned_to_q __read_mostly;
2702 +static int edma_queue_to_virtual_q __read_mostly;
2703 +static bool edma_enable_rstp __read_mostly;
2704 +static int edma_athr_hdr_eth_type __read_mostly;
2705 +
2706 +static int page_mode;
2707 +module_param(page_mode, int, 0);
2708 +MODULE_PARM_DESC(page_mode, "enable page mode");
2709 +
2710 +static int overwrite_mode;
2711 +module_param(overwrite_mode, int, 0);
2712 +MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
2713 +
2714 +static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
2715 +module_param(jumbo_mru, int, 0);
2716 +MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");
2717 +
2718 +static int num_rxq = 4;
2719 +module_param(num_rxq, int, 0);
2720 +MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
2721 +
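+/* MMIO accessors: edma_hw_addr holds the ioremapped register base and is
+ * expected to be set by edma_axi_probe() before these are called.
+ */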
2722 +void edma_write_reg(u16 reg_addr, u32 reg_value)
2723 +{
2724 + writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
2725 +}
2726 +
2727 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
2728 +{
2729 + *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
2730 +}
2731 +
2732 +/* edma_change_tx_coalesce()
2733 + * change tx interrupt moderation timer
2734 + */
2735 +void edma_change_tx_coalesce(int usecs)
2736 +{
2737 + u32 reg_value;
2738 +
2739 +	/* Right shift the user-supplied value by 1 because the IMT
2740 +	 * resolution is 2 usecs: one count of this register
2741 +	 * corresponds to 2 usecs.
2742 + */
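+	/* For example, usecs = 100 stores 50 in the upper 16 bits (the TX
+	 * timer field), i.e. a 100 usec moderation interval.
+	 */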
2743 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2744 + reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
2745 + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
2746 +}
2747 +
2748 +/* edma_change_rx_coalesce()
2749 + * change rx interrupt moderation timer
2750 + */
2751 +void edma_change_rx_coalesce(int usecs)
2752 +{
2753 + u32 reg_value;
2754 +
2755 +	/* Right shift the user-supplied value by 1 because the IMT
2756 +	 * resolution is 2 usecs: one count of this register
2757 +	 * corresponds to 2 usecs.
2758 + */
2759 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2760 + reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
2761 + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
2762 +}
2763 +
2764 +/* edma_get_tx_rx_coalesce()
2765 + * Get tx/rx interrupt moderation value
2766 + */
2767 +void edma_get_tx_rx_coalesce(u32 *reg_val)
2768 +{
2769 + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
2770 +}
2771 +
2772 +void edma_read_append_stats(struct edma_common_info *edma_cinfo)
2773 +{
2774 + uint32_t *p;
2775 + int i;
2776 + u32 stat;
2777 +
2778 + spin_lock(&edma_cinfo->stats_lock);
2779 + p = (uint32_t *)&(edma_cinfo->edma_ethstats);
2780 +
2781 + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2782 + edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
2783 + *p += stat;
2784 + p++;
2785 + }
2786 +
2787 + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2788 + edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
2789 + *p += stat;
2790 + p++;
2791 + }
2792 +
2793 + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2794 + edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
2795 + *p += stat;
2796 + p++;
2797 + }
2798 +
2799 + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2800 + edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
2801 + *p += stat;
2802 + p++;
2803 + }
2804 +
2805 + spin_unlock(&edma_cinfo->stats_lock);
2806 +}
2807 +
2808 +static void edma_statistics_timer(unsigned long data)
2809 +{
2810 + struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;
2811 +
2812 + edma_read_append_stats(edma_cinfo);
2813 +
2814 + mod_timer(&edma_stats_timer, jiffies + 1*HZ);
2815 +}
2816 +
2817 +static int edma_enable_stp_rstp(struct ctl_table *table, int write,
2818 + void __user *buffer, size_t *lenp,
2819 + loff_t *ppos)
2820 +{
2821 + int ret;
2822 +
2823 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2824 + if (write)
2825 + edma_set_stp_rstp(edma_enable_rstp);
2826 +
2827 + return ret;
2828 +}
2829 +
2830 +static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
2831 + void __user *buffer, size_t *lenp,
2832 + loff_t *ppos)
2833 +{
2834 + int ret;
2835 +
2836 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2837 + if (write)
2838 + edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
2839 +
2840 + return ret;
2841 +}
2842 +
2843 +static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
2844 + void __user *buffer, size_t *lenp,
2845 + loff_t *ppos)
2846 +{
2847 + struct edma_adapter *adapter;
2848 + int ret;
2849 +
2850 + if (!edma_netdev[1]) {
2851 + pr_err("Netdevice for default_lan does not exist\n");
2852 + return -1;
2853 + }
2854 +
2855 + adapter = netdev_priv(edma_netdev[1]);
2856 +
2857 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2858 +
2859 + if (write)
2860 + adapter->default_vlan_tag = edma_default_ltag;
2861 +
2862 + return ret;
2863 +}
2864 +
2865 +static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
2866 + void __user *buffer, size_t *lenp,
2867 + loff_t *ppos)
2868 +{
2869 + struct edma_adapter *adapter;
2870 + int ret;
2871 +
2872 + if (!edma_netdev[0]) {
2873 + pr_err("Netdevice for default_wan does not exist\n");
2874 + return -1;
2875 + }
2876 +
2877 + adapter = netdev_priv(edma_netdev[0]);
2878 +
2879 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2880 +
2881 + if (write)
2882 + adapter->default_vlan_tag = edma_default_wtag;
2883 +
2884 + return ret;
2885 +}
2886 +
2887 +static int edma_change_group1_vtag(struct ctl_table *table, int write,
2888 + void __user *buffer, size_t *lenp,
2889 + loff_t *ppos)
2890 +{
2891 + struct edma_adapter *adapter;
2892 + struct edma_common_info *edma_cinfo;
2893 + int ret;
2894 +
2895 + if (!edma_netdev[0]) {
2896 + pr_err("Netdevice for Group 1 does not exist\n");
2897 + return -1;
2898 + }
2899 +
2900 + adapter = netdev_priv(edma_netdev[0]);
2901 + edma_cinfo = adapter->edma_cinfo;
2902 +
2903 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2904 +
2905 + if (write)
2906 + adapter->default_vlan_tag = edma_default_group1_vtag;
2907 +
2908 + return ret;
2909 +}
2910 +
2911 +static int edma_change_group2_vtag(struct ctl_table *table, int write,
2912 + void __user *buffer, size_t *lenp,
2913 + loff_t *ppos)
2914 +{
2915 + struct edma_adapter *adapter;
2916 + struct edma_common_info *edma_cinfo;
2917 + int ret;
2918 +
2919 + if (!edma_netdev[1]) {
2920 + pr_err("Netdevice for Group 2 does not exist\n");
2921 + return -1;
2922 + }
2923 +
2924 + adapter = netdev_priv(edma_netdev[1]);
2925 + edma_cinfo = adapter->edma_cinfo;
2926 +
2927 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2928 +
2929 + if (write)
2930 + adapter->default_vlan_tag = edma_default_group2_vtag;
2931 +
2932 + return ret;
2933 +}
2934 +
2935 +static int edma_change_group3_vtag(struct ctl_table *table, int write,
2936 + void __user *buffer, size_t *lenp,
2937 + loff_t *ppos)
2938 +{
2939 + struct edma_adapter *adapter;
2940 + struct edma_common_info *edma_cinfo;
2941 + int ret;
2942 +
2943 + if (!edma_netdev[2]) {
2944 + pr_err("Netdevice for Group 3 does not exist\n");
2945 + return -1;
2946 + }
2947 +
2948 + adapter = netdev_priv(edma_netdev[2]);
2949 + edma_cinfo = adapter->edma_cinfo;
2950 +
2951 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2952 +
2953 + if (write)
2954 + adapter->default_vlan_tag = edma_default_group3_vtag;
2955 +
2956 + return ret;
2957 +}
2958 +
2959 +static int edma_change_group4_vtag(struct ctl_table *table, int write,
2960 + void __user *buffer, size_t *lenp,
2961 + loff_t *ppos)
2962 +{
2963 + struct edma_adapter *adapter;
2964 + struct edma_common_info *edma_cinfo;
2965 + int ret;
2966 +
2967 + if (!edma_netdev[3]) {
2968 + pr_err("Netdevice for Group 4 does not exist\n");
2969 + return -1;
2970 + }
2971 +
2972 + adapter = netdev_priv(edma_netdev[3]);
2973 + edma_cinfo = adapter->edma_cinfo;
2974 +
2975 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2976 +
2977 + if (write)
2978 + adapter->default_vlan_tag = edma_default_group4_vtag;
2979 +
2980 + return ret;
2981 +}
2982 +
2983 +static int edma_change_group5_vtag(struct ctl_table *table, int write,
2984 + void __user *buffer, size_t *lenp,
2985 + loff_t *ppos)
2986 +{
2987 + struct edma_adapter *adapter;
2988 + struct edma_common_info *edma_cinfo;
2989 + int ret;
2990 +
2991 + if (!edma_netdev[4]) {
2992 + pr_err("Netdevice for Group 5 does not exist\n");
2993 + return -1;
2994 + }
2995 +
2996 + adapter = netdev_priv(edma_netdev[4]);
2997 + edma_cinfo = adapter->edma_cinfo;
2998 +
2999 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3000 +
3001 + if (write)
3002 + adapter->default_vlan_tag = edma_default_group5_vtag;
3003 +
3004 + return ret;
3005 +}
3006 +
3007 +static int edma_set_rss_idt_value(struct ctl_table *table, int write,
3008 + void __user *buffer, size_t *lenp,
3009 + loff_t *ppos)
3010 +{
3011 + int ret;
3012 +
3013 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3014 + if (write && !ret)
3015 + edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
3016 + edma_rss_idt_val);
3017 + return ret;
3018 +}
3019 +
3020 +static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
3021 + void __user *buffer, size_t *lenp,
3022 + loff_t *ppos)
3023 +{
3024 + int ret;
3025 + u32 old_value = edma_rss_idt_idx;
3026 +
3027 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3028 + if (!write || ret)
3029 + return ret;
3030 +
3031 + if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
3032 + pr_err("Invalid RSS indirection table index %d\n",
3033 + edma_rss_idt_idx);
3034 + edma_rss_idt_idx = old_value;
3035 + return -EINVAL;
3036 + }
3037 + return ret;
3038 +}
3039 +
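+/* sysctl handler for "weight_assigned_to_queues": the value written is
+ * packed as (weight << 16) | queue_id, i.e. the queue id in the low 16
+ * bits and its WRR weight (0-15) in the upper 16 bits.
+ */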
3040 +static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
3041 + void __user *buffer, size_t *lenp,
3042 + loff_t *ppos)
3043 +{
3044 + int ret, queue_id, weight;
3045 + u32 reg_data, data, reg_addr;
3046 +
3047 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3048 + if (write) {
3049 + queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
3050 + if (queue_id < 0 || queue_id > 15) {
3051 + pr_err("queue_id not within desired range\n");
3052 + return -EINVAL;
3053 + }
3054 +
3055 + weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
3056 + if (weight < 0 || weight > 0xF) {
3057 +			pr_err("weight not within desired range\n");
3058 + return -EINVAL;
3059 + }
3060 +
3061 + data = weight << EDMA_WRR_SHIFT(queue_id);
3062 +
3063 + reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
3064 + edma_read_reg(reg_addr, &reg_data);
3065 + reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
3066 + edma_write_reg(reg_addr, data | reg_data);
3067 + }
3068 +
3069 + return ret;
3070 +}
3071 +
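+/* sysctl handler for "queue_to_virtual_queue_map": packed the same way,
+ * with the physical queue id in the low 16 bits and the virtual queue id
+ * in the upper 16 bits.
+ */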
3072 +static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
3073 + void __user *buffer, size_t *lenp,
3074 + loff_t *ppos)
3075 +{
3076 + int ret, queue_id, virtual_qid;
3077 + u32 reg_data, data, reg_addr;
3078 +
3079 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3080 + if (write) {
3081 + queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
3082 + if (queue_id < 0 || queue_id > 15) {
3083 + pr_err("queue_id not within desired range\n");
3084 + return -EINVAL;
3085 + }
3086 +
3087 + virtual_qid = edma_queue_to_virtual_q >>
3088 + EDMA_WRR_VID_SCTL_SHIFT;
3089 + if (virtual_qid < 0 || virtual_qid > 8) {
3090 +			pr_err("virtual queue id not within desired range\n");
3091 + return -EINVAL;
3092 + }
3093 +
3094 + data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
3095 +
3096 + reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
3097 + edma_read_reg(reg_addr, &reg_data);
3098 + reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
3099 + edma_write_reg(reg_addr, data | reg_data);
3100 + }
3101 +
3102 + return ret;
3103 +}
3104 +
3105 +static struct ctl_table edma_table[] = {
3106 + {
3107 + .procname = "default_lan_tag",
3108 + .data = &edma_default_ltag,
3109 + .maxlen = sizeof(int),
3110 + .mode = 0644,
3111 + .proc_handler = edma_change_default_lan_vlan
3112 + },
3113 + {
3114 + .procname = "default_wan_tag",
3115 + .data = &edma_default_wtag,
3116 + .maxlen = sizeof(int),
3117 + .mode = 0644,
3118 + .proc_handler = edma_change_default_wan_vlan
3119 + },
3120 + {
3121 + .procname = "weight_assigned_to_queues",
3122 + .data = &edma_weight_assigned_to_q,
3123 + .maxlen = sizeof(int),
3124 + .mode = 0644,
3125 + .proc_handler = edma_weight_assigned_to_queues
3126 + },
3127 + {
3128 + .procname = "queue_to_virtual_queue_map",
3129 + .data = &edma_queue_to_virtual_q,
3130 + .maxlen = sizeof(int),
3131 + .mode = 0644,
3132 + .proc_handler = edma_queue_to_virtual_queue_map
3133 + },
3134 + {
3135 + .procname = "enable_stp_rstp",
3136 + .data = &edma_enable_rstp,
3137 + .maxlen = sizeof(int),
3138 + .mode = 0644,
3139 + .proc_handler = edma_enable_stp_rstp
3140 + },
3141 + {
3142 + .procname = "athr_hdr_eth_type",
3143 + .data = &edma_athr_hdr_eth_type,
3144 + .maxlen = sizeof(int),
3145 + .mode = 0644,
3146 + .proc_handler = edma_ath_hdr_eth_type
3147 + },
3148 + {
3149 + .procname = "default_group1_vlan_tag",
3150 + .data = &edma_default_group1_vtag,
3151 + .maxlen = sizeof(int),
3152 + .mode = 0644,
3153 + .proc_handler = edma_change_group1_vtag
3154 + },
3155 + {
3156 + .procname = "default_group2_vlan_tag",
3157 + .data = &edma_default_group2_vtag,
3158 + .maxlen = sizeof(int),
3159 + .mode = 0644,
3160 + .proc_handler = edma_change_group2_vtag
3161 + },
3162 + {
3163 + .procname = "default_group3_vlan_tag",
3164 + .data = &edma_default_group3_vtag,
3165 + .maxlen = sizeof(int),
3166 + .mode = 0644,
3167 + .proc_handler = edma_change_group3_vtag
3168 + },
3169 + {
3170 + .procname = "default_group4_vlan_tag",
3171 + .data = &edma_default_group4_vtag,
3172 + .maxlen = sizeof(int),
3173 + .mode = 0644,
3174 + .proc_handler = edma_change_group4_vtag
3175 + },
3176 + {
3177 + .procname = "default_group5_vlan_tag",
3178 + .data = &edma_default_group5_vtag,
3179 + .maxlen = sizeof(int),
3180 + .mode = 0644,
3181 + .proc_handler = edma_change_group5_vtag
3182 + },
3183 + {
3184 + .procname = "edma_rss_idt_value",
3185 + .data = &edma_rss_idt_val,
3186 + .maxlen = sizeof(int),
3187 + .mode = 0644,
3188 + .proc_handler = edma_set_rss_idt_value
3189 + },
3190 + {
3191 + .procname = "edma_rss_idt_idx",
3192 + .data = &edma_rss_idt_idx,
3193 + .maxlen = sizeof(int),
3194 + .mode = 0644,
3195 + .proc_handler = edma_set_rss_idt_idx
3196 + },
3197 + {}
3198 +};
3199 +
3200 +/* edma_axi_netdev_ops
3201 + * Describe the operations supported by registered netdevices
3202 + *
3203 + * static const struct net_device_ops edma_axi_netdev_ops = {
3204 + * .ndo_open = edma_open,
3205 + * .ndo_stop = edma_close,
3206 + * .ndo_start_xmit = edma_xmit,
3207 + * .ndo_set_mac_address = edma_set_mac_addr,
3208 + * }
3209 + */
3210 +static const struct net_device_ops edma_axi_netdev_ops = {
3211 + .ndo_open = edma_open,
3212 + .ndo_stop = edma_close,
3213 + .ndo_start_xmit = edma_xmit,
3214 + .ndo_set_mac_address = edma_set_mac_addr,
3215 +#ifdef CONFIG_RFS_ACCEL
3216 + .ndo_rx_flow_steer = edma_rx_flow_steer,
3217 + .ndo_register_rfs_filter = edma_register_rfs_filter,
3218 + .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
3219 +#endif
3220 + .ndo_get_stats = edma_get_stats,
3221 +};
3222 +
3223 +/* edma_axi_probe()
3224 + * Initialise an adapter identified by a platform_device structure.
3225 + *
3226 + * The OS initialization, configuring of the adapter private structure,
3227 + * and a hardware reset occur in the probe.
3228 + */
3229 +static int edma_axi_probe(struct platform_device *pdev)
3230 +{
3231 + struct edma_common_info *edma_cinfo;
3232 + struct edma_hw *hw;
3233 + struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
3234 + struct resource *res;
3235 + struct device_node *np = pdev->dev.of_node;
3236 + struct device_node *pnp;
3237 + struct device_node *mdio_node = NULL;
3238 + struct platform_device *mdio_plat = NULL;
3239 + struct mii_bus *miibus = NULL;
3240 + struct edma_mdio_data *mdio_data = NULL;
3241 + int i, j, k, err = 0;
3242 + int portid_bmp;
3243 + int idx = 0, idx_mac = 0;
3244 +
3245 + if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
3246 + dev_err(&pdev->dev, "Invalid CPU Cores\n");
3247 + return -EINVAL;
3248 + }
3249 +
3250 + if ((num_rxq != 4) && (num_rxq != 8)) {
3251 + dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
3252 + return -EINVAL;
3253 + }
3254 + edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
3255 + if (!edma_cinfo) {
3256 + err = -ENOMEM;
3257 + goto err_alloc;
3258 + }
3259 +
3260 + edma_cinfo->pdev = pdev;
3261 +
3262 + of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
3263 + if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
3264 + pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
3265 + err = -EINVAL;
3266 + goto err_cinfo;
3267 + }
3268 +
3269 + /* Initialize the netdev array before allocation
3270 + * to avoid double free
3271 + */
3272 + for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
3273 + edma_netdev[i] = NULL;
3274 +
3275 + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
3276 + edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
3277 + EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
3278 +
3279 + if (!edma_netdev[i]) {
3280 + dev_err(&pdev->dev,
3281 + "net device alloc fails for index=%d\n", i);
3282 + err = -ENODEV;
3283 + goto err_ioremap;
3284 + }
3285 +
3286 + SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
3287 + platform_set_drvdata(pdev, edma_netdev[i]);
3288 + edma_cinfo->netdev[i] = edma_netdev[i];
3289 + }
3290 +
3291 + /* Fill ring details */
3292 + edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
3293 + edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
3294 + edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
3295 +
3296 + /* Update num rx queues based on module parameter */
3297 + edma_cinfo->num_rx_queues = num_rxq;
3298 + edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
3299 +
3300 + edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
3301 +
3302 + hw = &edma_cinfo->hw;
3303 +
3304 + /* Fill HW defaults */
3305 + hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
3306 + hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
3307 +
3308 + of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
3309 + of_property_read_u32(np, "qcom,rx_head_buf_size",
3310 + &hw->rx_head_buff_size);
3311 +
3312 + if (overwrite_mode) {
3313 +		dev_info(&pdev->dev, "page mode overwritten\n");
3314 + edma_cinfo->page_mode = page_mode;
3315 + }
3316 +
3317 + if (jumbo_mru)
3318 + edma_cinfo->fraglist_mode = 1;
3319 +
3320 + if (edma_cinfo->page_mode)
3321 + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
3322 + else if (edma_cinfo->fraglist_mode)
3323 + hw->rx_head_buff_size = jumbo_mru;
3324 + else if (!hw->rx_head_buff_size)
3325 + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
3326 +
3327 + hw->misc_intr_mask = 0;
3328 + hw->wol_intr_mask = 0;
3329 +
3330 + hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
3331 + hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
3332 +
3333 + /* configure RSS type to the different protocol that can be
3334 + * supported
3335 + */
3336 + hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
3337 + EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
3338 + EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
3339 +
3340 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3341 +
3342 + edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
3343 + if (IS_ERR(edma_cinfo->hw.hw_addr)) {
3344 + err = PTR_ERR(edma_cinfo->hw.hw_addr);
3345 + goto err_ioremap;
3346 + }
3347 +
3348 + edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
3349 +
3350 + /* Parse tx queue interrupt number from device tree */
3351 + for (i = 0; i < edma_cinfo->num_tx_queues; i++)
3352 + edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
3353 +
3354 + /* Parse rx queue interrupt number from device tree
3355 +	 * Here we set j to the point where we left off after
3356 +	 * tx interrupt parsing (i.e. 16) and run the loop
3357 +	 * from 0 to 7 to parse the rx interrupt numbers.
3358 + */
3359 + for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
3360 + i < edma_cinfo->num_rx_queues; i++) {
3361 + edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
3362 + k += ((num_rxq == 4) ? 2 : 1);
3363 + j += ((num_rxq == 4) ? 2 : 1);
3364 + }
3365 +
3366 + edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
3367 + edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
3368 +
3369 + err = edma_alloc_queues_tx(edma_cinfo);
3370 + if (err) {
3371 + dev_err(&pdev->dev, "Allocation of TX queue failed\n");
3372 + goto err_tx_qinit;
3373 + }
3374 +
3375 + err = edma_alloc_queues_rx(edma_cinfo);
3376 + if (err) {
3377 + dev_err(&pdev->dev, "Allocation of RX queue failed\n");
3378 + goto err_rx_qinit;
3379 + }
3380 +
3381 + err = edma_alloc_tx_rings(edma_cinfo);
3382 + if (err) {
3383 + dev_err(&pdev->dev, "Allocation of TX resources failed\n");
3384 + goto err_tx_rinit;
3385 + }
3386 +
3387 + err = edma_alloc_rx_rings(edma_cinfo);
3388 + if (err) {
3389 + dev_err(&pdev->dev, "Allocation of RX resources failed\n");
3390 + goto err_rx_rinit;
3391 + }
3392 +
3393 + /* Initialize netdev and netdev bitmap for transmit descriptor rings */
3394 + for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
3395 + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
3396 + int j;
3397 +
3398 + etdr->netdev_bmp = 0;
3399 + for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
3400 + etdr->netdev[j] = NULL;
3401 + etdr->nq[j] = NULL;
3402 + }
3403 + }
3404 +
3405 + if (of_property_read_bool(np, "qcom,mdio_supported")) {
3406 + mdio_node = of_find_compatible_node(NULL, NULL,
3407 + "qcom,ipq4019-mdio");
3408 + if (!mdio_node) {
3409 + dev_err(&pdev->dev, "cannot find mdio node by phandle");
3410 + err = -EIO;
3411 + goto err_mdiobus_init_fail;
3412 + }
3413 +
3414 + mdio_plat = of_find_device_by_node(mdio_node);
3415 + if (!mdio_plat) {
3416 + dev_err(&pdev->dev,
3417 + "cannot find platform device from mdio node");
3418 + of_node_put(mdio_node);
3419 + err = -EIO;
3420 + goto err_mdiobus_init_fail;
3421 + }
3422 +
3423 + mdio_data = dev_get_drvdata(&mdio_plat->dev);
3424 + if (!mdio_data) {
3425 + dev_err(&pdev->dev,
3426 + "cannot get mii bus reference from device data");
3427 + of_node_put(mdio_node);
3428 + err = -EIO;
3429 + goto err_mdiobus_init_fail;
3430 + }
3431 +
3432 + miibus = mdio_data->mii_bus;
3433 + }
3434 +
3435 + for_each_available_child_of_node(np, pnp) {
3436 + const char *mac_addr;
3437 +
3438 + /* this check is needed when the parent and daughter dts have
3439 + * a different number of gmac nodes
3440 + */
3441 + if (idx_mac == edma_cinfo->num_gmac) {
3442 + of_node_put(np);
3443 + break;
3444 + }
3445 +
3446 + mac_addr = of_get_mac_address(pnp);
3447 + if (mac_addr)
3448 + memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
3449 +
3450 + idx_mac++;
3451 + }
3452 +
3453 + /* Populate the adapter structure and register the netdevice */
3454 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3455 + int k, m;
3456 +
3457 + adapter[i] = netdev_priv(edma_netdev[i]);
3458 + adapter[i]->netdev = edma_netdev[i];
3459 + adapter[i]->pdev = pdev;
3460 + for (j = 0; j < CONFIG_NR_CPUS; j++) {
3461 + m = i % 2;
3462 + adapter[i]->tx_start_offset[j] =
3463 + ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
3464 + /* Share the queues among the available net-devices.
3465 + * For instance, with 5 net-devices,
3466 + * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
3467 + * and eth1/eth3 will get the remaining queues.
3468 + */
3469 + for (k = adapter[i]->tx_start_offset[j]; k <
3470 + (adapter[i]->tx_start_offset[j] + 2); k++) {
3471 + if (edma_fill_netdev(edma_cinfo, k, i, j)) {
3472 + pr_err("Netdev overflow Error\n");
3473 + goto err_register;
3474 + }
3475 + }
3476 + }
3477 +
3478 + adapter[i]->edma_cinfo = edma_cinfo;
3479 + edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
3480 + edma_netdev[i]->max_mtu = 9000;
3481 + edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
3482 + | NETIF_F_HW_VLAN_CTAG_TX
3483 + | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
3484 + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
3485 + edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
3486 + NETIF_F_HW_VLAN_CTAG_RX
3487 + | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3488 + NETIF_F_GRO;
3489 + edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3490 + NETIF_F_TSO | NETIF_F_TSO6 |
3491 + NETIF_F_GRO;
3492 + edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3493 + NETIF_F_TSO | NETIF_F_TSO6 |
3494 + NETIF_F_GRO;
3495 +
3496 +#ifdef CONFIG_RFS_ACCEL
3497 + edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3498 + edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3499 + edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3500 + edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3501 +#endif
3502 + edma_set_ethtool_ops(edma_netdev[i]);
3503 +
3504 + /* Fill in a random MAC address if a valid one was not provided
3505 + */
3506 + if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
3507 + random_ether_addr(edma_netdev[i]->dev_addr);
3508 + pr_info("EDMA using MAC@ - using");
3509 + pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
3510 + *(edma_netdev[i]->dev_addr),
3511 + *(edma_netdev[i]->dev_addr + 1),
3512 + *(edma_netdev[i]->dev_addr + 2),
3513 + *(edma_netdev[i]->dev_addr + 3),
3514 + *(edma_netdev[i]->dev_addr + 4),
3515 + *(edma_netdev[i]->dev_addr + 5));
3516 + }
3517 +
3518 + err = register_netdev(edma_netdev[i]);
3519 + if (err)
3520 + goto err_register;
3521 +
3522 + /* carrier off reporting is important to
3523 + * ethtool even BEFORE open
3524 + */
3525 + netif_carrier_off(edma_netdev[i]);
3526 +
3527 + /* Allocate reverse irq cpu mapping structure for
3528 + * receive queues
3529 + */
3530 +#ifdef CONFIG_RFS_ACCEL
3531 + edma_netdev[i]->rx_cpu_rmap =
3532 + alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
3533 + if (!edma_netdev[i]->rx_cpu_rmap) {
3534 + err = -ENOMEM;
3535 + goto err_rmap_alloc_fail;
3536 + }
3537 +#endif
3538 + }
3539 +
3540 + for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
3541 + edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
3542 +
3543 + for_each_available_child_of_node(np, pnp) {
3544 + const uint32_t *vlan_tag = NULL;
3545 + int len;
3546 +
3547 + /* this check is needed when the parent and daughter dts have
3548 + * a different number of gmac nodes
3549 + */
3550 + if (idx == edma_cinfo->num_gmac)
3551 + break;
3552 +
3553 + /* Populate port-id to netdev lookup table */
3554 + vlan_tag = of_get_property(pnp, "vlan_tag", &len);
3555 + if (!vlan_tag) {
3556 + pr_err("Vlan tag parsing Failed.\n");
3557 + goto err_rmap_alloc_fail;
3558 + }
3559 +
3560 + adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
3561 + vlan_tag++;
3562 + portid_bmp = of_read_number(vlan_tag, 1);
3563 + adapter[idx]->dp_bitmap = portid_bmp;
3564 +
3565 + portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
3566 + while (portid_bmp) {
3567 + int port_bit = ffs(portid_bmp);
3568 +
3569 + if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
3570 + goto err_rmap_alloc_fail;
3571 + edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
3572 + edma_netdev[idx];
3573 + portid_bmp &= ~(1 << (port_bit - 1));
3574 + }
3575 +
3576 + if (!of_property_read_u32(pnp, "qcom,poll_required",
3577 + &adapter[idx]->poll_required)) {
3578 + if (adapter[idx]->poll_required) {
3579 + of_property_read_u32(pnp, "qcom,phy_mdio_addr",
3580 + &adapter[idx]->phy_mdio_addr);
3581 + of_property_read_u32(pnp, "qcom,forced_speed",
3582 + &adapter[idx]->forced_speed);
3583 + of_property_read_u32(pnp, "qcom,forced_duplex",
3584 + &adapter[idx]->forced_duplex);
3585 +
3586 + /* create a phyid using MDIO bus id
3587 + * and MDIO bus address
3588 + */
3589 + snprintf(adapter[idx]->phy_id,
3590 + MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
3591 + miibus->id,
3592 + adapter[idx]->phy_mdio_addr);
3593 + }
3594 + } else {
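+ /* No qcom,poll_required property: assume a fixed 1 Gbps full-duplex
+ * link, e.g. a GMAC wired to the internal switch (see the note in
+ * edma_get_settings()).
+ */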
3595 + adapter[idx]->poll_required = 0;
3596 + adapter[idx]->forced_speed = SPEED_1000;
3597 + adapter[idx]->forced_duplex = DUPLEX_FULL;
3598 + }
3599 +
3600 + idx++;
3601 + }
3602 +
3603 + edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
3604 + "net/edma",
3605 + edma_table);
3606 + if (!edma_cinfo->edma_ctl_table_hdr) {
3607 + dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
3608 + goto err_unregister_sysctl_tbl;
3609 + }
3610 +
3611 + /* Disable all 16 Tx and 8 rx irqs */
3612 + edma_irq_disable(edma_cinfo);
3613 +
3614 + err = edma_reset(edma_cinfo);
3615 + if (err) {
3616 + err = -EIO;
3617 + goto err_reset;
3618 + }
3619 +
3620 + /* populate per_core_info, add the NAPI contexts, request 16 TX irqs
3621 + * and 8 RX irqs, then enable NAPI
3622 + */
3623 + for (i = 0; i < CONFIG_NR_CPUS; i++) {
3624 + u8 rx_start;
3625 +
3626 + edma_cinfo->edma_percpu_info[i].napi.state = 0;
3627 +
3628 + netif_napi_add(edma_netdev[0],
3629 + &edma_cinfo->edma_percpu_info[i].napi,
3630 + edma_poll, 64);
3631 + napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
3632 + edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
3633 + edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
3634 + << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
3635 + edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
3636 + edma_cinfo->edma_percpu_info[i].rx_start =
3637 + i << EDMA_RX_CPU_START_SHIFT;
3638 + rx_start = i << EDMA_RX_CPU_START_SHIFT;
3639 + edma_cinfo->edma_percpu_info[i].tx_status = 0;
3640 + edma_cinfo->edma_percpu_info[i].rx_status = 0;
3641 + edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
3642 +
3643 + /* Request irq per core */
3644 + for (j = edma_cinfo->edma_percpu_info[i].tx_start;
3645 + j < tx_start[i] + 4; j++) {
3646 + sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
3647 + err = request_irq(edma_cinfo->tx_irq[j],
3648 + edma_interrupt,
3649 + 0,
3650 + &edma_tx_irq[j][0],
3651 + &edma_cinfo->edma_percpu_info[i]);
3652 + if (err)
3653 + goto err_reset;
3654 + }
3655 +
3656 + for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3657 + j < (rx_start +
3658 + ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
3659 + j++) {
3660 + sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
3661 + err = request_irq(edma_cinfo->rx_irq[j],
3662 + edma_interrupt,
3663 + 0,
3664 + &edma_rx_irq[j][0],
3665 + &edma_cinfo->edma_percpu_info[i]);
3666 + if (err)
3667 + goto err_reset;
3668 + }
3669 +
3670 +#ifdef CONFIG_RFS_ACCEL
3671 + for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3672 + j < rx_start + 2; j += 2) {
3673 + err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
3674 + edma_cinfo->rx_irq[j]);
3675 + if (err)
3676 + goto err_rmap_add_fail;
3677 + }
3678 +#endif
3679 + }
3680 +
3681 + /* Used to clear the interrupt status, allocate rx buffers and
3682 + * configure the edma descriptor registers
3683 + */
3684 + err = edma_configure(edma_cinfo);
3685 + if (err) {
3686 + err = -EIO;
3687 + goto err_configure;
3688 + }
3689 +
3690 + /* Configure the RSS indirection table.
3691 + * 128 hash entries will be configured in the following
3692 + * pattern: hash{0,1,2,3} map to {Q0,Q2,Q4,Q6} respectively,
3693 + * and so on
3694 + */
3695 + for (i = 0; i < EDMA_NUM_IDT; i++)
3696 + edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
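+ /* Note: EDMA_RSS_IDT_VALUE (0x64206420) packs eight 4-bit entries,
+ * i.e. queues 0,2,4,6 repeated, matching the pattern described above.
+ */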
3697 +
3698 + /* Configure the load balance mapping table.
3699 + * 4 table entries will be configured according to the
3700 + * following pattern: load_balance{0,1,2,3} = {Q0,Q2,Q4,Q6}
3701 + * respectively (matching EDMA_LB_REG_VALUE).
3702 + */
3703 + edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
3704 +
3705 + /* Configure the virtual queues for the Tx rings.
3706 + * The user can also change this value at runtime
3707 + * through a sysctl
3708 + */
3709 + edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
3710 + edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
3711 +
3712 + /* Configure max AXI burst write size to 128 bytes */
3713 + edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
3714 + EDMA_AXIW_MAXWRSIZE_VALUE);
3715 +
3716 + /* Enable all 16 Tx and 8 Rx IRQ masks */
3717 + edma_irq_enable(edma_cinfo);
3718 + edma_enable_tx_ctrl(&edma_cinfo->hw);
3719 + edma_enable_rx_ctrl(&edma_cinfo->hw);
3720 +
3721 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3722 + if (adapter[i]->poll_required) {
3723 + adapter[i]->phydev =
3724 + phy_connect(edma_netdev[i],
3725 + (const char *)adapter[i]->phy_id,
3726 + &edma_adjust_link,
3727 + PHY_INTERFACE_MODE_SGMII);
3728 + if (IS_ERR(adapter[i]->phydev)) {
3729 + dev_dbg(&pdev->dev, "PHY attach FAIL");
3730 + err = -EIO;
3731 + goto edma_phy_attach_fail;
3732 + } else {
3733 + adapter[i]->phydev->advertising |=
3734 + ADVERTISED_Pause |
3735 + ADVERTISED_Asym_Pause;
3736 + adapter[i]->phydev->supported |=
3737 + SUPPORTED_Pause |
3738 + SUPPORTED_Asym_Pause;
3739 + }
3740 + } else {
3741 + adapter[i]->phydev = NULL;
3742 + }
3743 + }
3744 +
3745 + spin_lock_init(&edma_cinfo->stats_lock);
3746 +
3747 + init_timer(&edma_stats_timer);
3748 + edma_stats_timer.expires = jiffies + 1*HZ;
3749 + edma_stats_timer.data = (unsigned long)edma_cinfo;
3750 + edma_stats_timer.function = edma_statistics_timer; /* timer handler */
3751 + add_timer(&edma_stats_timer);
3752 +
3753 + return 0;
3754 +
3755 +edma_phy_attach_fail:
3756 + miibus = NULL;
3757 +err_configure:
3758 +#ifdef CONFIG_RFS_ACCEL
3759 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3760 + free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
3761 + adapter[i]->netdev->rx_cpu_rmap = NULL;
3762 + }
3763 +#endif
3764 +err_rmap_add_fail:
3765 + edma_free_irqs(adapter[0]);
3766 + for (i = 0; i < CONFIG_NR_CPUS; i++)
3767 + napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3768 +err_reset:
3769 +err_unregister_sysctl_tbl:
3770 +err_rmap_alloc_fail:
3771 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3772 + unregister_netdev(edma_netdev[i]);
3773 +err_register:
3774 +err_mdiobus_init_fail:
3775 + edma_free_rx_rings(edma_cinfo);
3776 +err_rx_rinit:
3777 + edma_free_tx_rings(edma_cinfo);
3778 +err_tx_rinit:
3779 + edma_free_queues(edma_cinfo);
3780 +err_rx_qinit:
3781 +err_tx_qinit:
3782 + iounmap(edma_cinfo->hw.hw_addr);
3783 +err_ioremap:
3784 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3785 + if (edma_netdev[i])
3786 + free_netdev(edma_netdev[i]);
3787 + }
3788 +err_cinfo:
3789 + kfree(edma_cinfo);
3790 +err_alloc:
3791 + return err;
3792 +}
3793 +
3794 +/* edma_axi_remove()
3795 + * Device Removal Routine
3796 + *
3797 + * edma_axi_remove is called by the platform subsystem to alert the driver
3798 + * that it should release a platform device.
3799 + */
3800 +static int edma_axi_remove(struct platform_device *pdev)
3801 +{
3802 + struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
3803 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
3804 + struct edma_hw *hw = &edma_cinfo->hw;
3805 + int i;
3806 +
3807 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3808 + unregister_netdev(edma_netdev[i]);
3809 +
3810 + edma_stop_rx_tx(hw);
3811 + for (i = 0; i < CONFIG_NR_CPUS; i++)
3812 + napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3813 +
3814 + edma_irq_disable(edma_cinfo);
3815 + edma_write_reg(EDMA_REG_RX_ISR, 0xff);
3816 + edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
3817 +#ifdef CONFIG_RFS_ACCEL
3818 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3819 + free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
3820 + edma_netdev[i]->rx_cpu_rmap = NULL;
3821 + }
3822 +#endif
3823 +
3824 + for (i = 0; i < edma_cinfo->num_gmac; i++) {
3825 + struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
3826 +
3827 + if (adapter->phydev)
3828 + phy_disconnect(adapter->phydev);
3829 + }
3830 +
3831 + del_timer_sync(&edma_stats_timer);
3832 + edma_free_irqs(adapter);
3833 + unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
3834 + edma_free_tx_resources(edma_cinfo);
3835 + edma_free_rx_resources(edma_cinfo);
3836 + edma_free_tx_rings(edma_cinfo);
3837 + edma_free_rx_rings(edma_cinfo);
3838 + edma_free_queues(edma_cinfo);
3839 + for (i = 0; i < edma_cinfo->num_gmac; i++)
3840 + free_netdev(edma_netdev[i]);
3841 +
3842 + kfree(edma_cinfo);
3843 +
3844 + return 0;
3845 +}
3846 +
3847 +static const struct of_device_id edma_of_mtable[] = {
3848 + {.compatible = "qcom,ess-edma" },
3849 + {}
3850 +};
3851 +MODULE_DEVICE_TABLE(of, edma_of_mtable);
3852 +
3853 +static struct platform_driver edma_axi_driver = {
3854 + .driver = {
3855 + .name = edma_axi_driver_name,
3856 + .of_match_table = edma_of_mtable,
3857 + },
3858 + .probe = edma_axi_probe,
3859 + .remove = edma_axi_remove,
3860 +};
3861 +
3862 +module_platform_driver(edma_axi_driver);
3863 +
3864 +MODULE_AUTHOR("Qualcomm Atheros Inc");
3865 +MODULE_DESCRIPTION("QCA ESS EDMA driver");
3866 +MODULE_LICENSE("GPL");
3867 --- /dev/null
3868 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
3869 @@ -0,0 +1,374 @@
3870 +/*
3871 + * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
3872 + *
3873 + * Permission to use, copy, modify, and/or distribute this software for
3874 + * any purpose with or without fee is hereby granted, provided that the
3875 + * above copyright notice and this permission notice appear in all copies.
3876 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
3877 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
3878 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
3879 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
3880 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
3881 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
3882 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
3883 + */
3884 +
3885 +#include <linux/ethtool.h>
3886 +#include <linux/netdevice.h>
3887 +#include <linux/string.h>
3888 +#include "edma.h"
3889 +
3890 +struct edma_ethtool_stats {
3891 + uint8_t stat_string[ETH_GSTRING_LEN];
3892 + uint32_t stat_offset;
3893 +};
3894 +
3895 +#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m)
3896 +#define DRVINFO_LEN 32
3897 +
3898 +/* Array of strings describing statistics
3899 + */
3900 +static const struct edma_ethtool_stats edma_gstrings_stats[] = {
3901 + {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
3902 + {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
3903 + {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
3904 + {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
3905 + {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
3906 + {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
3907 + {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
3908 + {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
3909 + {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
3910 + {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
3911 + {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
3912 + {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
3913 + {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
3914 + {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
3915 + {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
3916 + {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
3917 + {"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
3918 + {"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
3919 + {"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
3920 + {"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
3921 + {"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
3922 + {"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
3923 + {"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
3924 + {"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
3925 + {"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
3926 + {"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
3927 + {"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
3928 + {"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
3929 + {"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
3930 + {"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
3931 + {"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
3932 + {"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
3933 + {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
3934 + {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
3935 + {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
3936 + {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
3937 + {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
3938 + {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
3939 + {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
3940 + {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
3941 + {"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
3942 + {"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
3943 + {"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
3944 + {"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
3945 + {"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
3946 + {"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
3947 + {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
3948 + {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
3949 + {"tx_desc_error", EDMA_STAT(tx_desc_error)},
3950 +};
3951 +
3952 +#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
3953 +
3954 +/* edma_get_strset_count()
3955 + * Get strset count
3956 + */
3957 +static int edma_get_strset_count(struct net_device *netdev,
3958 + int sset)
3959 +{
3960 + switch (sset) {
3961 + case ETH_SS_STATS:
3962 + return EDMA_STATS_LEN;
3963 + default:
3964 + netdev_dbg(netdev, "%s: Invalid string set", __func__);
3965 + return -EOPNOTSUPP;
3966 + }
3967 +}
3968 +
3969 +
3970 +/* edma_get_strings()
3971 + * get stats string
3972 + */
3973 +static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
3974 + uint8_t *data)
3975 +{
3976 + uint8_t *p = data;
3977 + uint32_t i;
3978 +
3979 + switch (stringset) {
3980 + case ETH_SS_STATS:
3981 + for (i = 0; i < EDMA_STATS_LEN; i++) {
3982 + memcpy(p, edma_gstrings_stats[i].stat_string,
3983 + min((size_t)ETH_GSTRING_LEN,
3984 + strlen(edma_gstrings_stats[i].stat_string)
3985 + + 1));
3986 + p += ETH_GSTRING_LEN;
3987 + }
3988 + break;
3989 + }
3990 +}
3991 +
3992 +/* edma_get_ethtool_stats()
3993 + * Get ethtool statistics
3994 + */
3995 +static void edma_get_ethtool_stats(struct net_device *netdev,
3996 + struct ethtool_stats *stats, uint64_t *data)
3997 +{
3998 + struct edma_adapter *adapter = netdev_priv(netdev);
3999 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
4000 + int i;
4001 + uint8_t *p = NULL;
4002 +
4003 + edma_read_append_stats(edma_cinfo);
4004 +
4005 + for (i = 0; i < EDMA_STATS_LEN; i++) {
4006 + p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
4007 + edma_gstrings_stats[i].stat_offset;
4008 + data[i] = *(uint32_t *)p;
4009 + }
4010 +}
4011 +
4012 +/* edma_get_drvinfo()
4013 + * get edma driver info
4014 + */
4015 +static void edma_get_drvinfo(struct net_device *dev,
4016 + struct ethtool_drvinfo *info)
4017 +{
4018 + strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
4019 + strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
4020 +}
4021 +
4022 +/* edma_nway_reset()
4023 + * PHY restart is not supported, so this always returns -EINVAL.
4024 + */
4025 +static int edma_nway_reset(struct net_device *netdev)
4026 +{
4027 + return -EINVAL;
4028 +}
4029 +
4030 +/* edma_get_wol()
4031 + * get wake on lan info
4032 + */
4033 +static void edma_get_wol(struct net_device *netdev,
4034 + struct ethtool_wolinfo *wol)
4035 +{
4036 + wol->supported = 0;
4037 + wol->wolopts = 0;
4038 +}
4039 +
4040 +/* edma_get_msglevel()
4041 + * get message level.
4042 + */
4043 +static uint32_t edma_get_msglevel(struct net_device *netdev)
4044 +{
4045 + return 0;
4046 +}
4047 +
4048 +/* edma_get_settings()
4049 + * Get edma settings
4050 + */
4051 +static int edma_get_settings(struct net_device *netdev,
4052 + struct ethtool_cmd *ecmd)
4053 +{
4054 + struct edma_adapter *adapter = netdev_priv(netdev);
4055 +
4056 + if (adapter->poll_required) {
4057 + struct phy_device *phydev = NULL;
4058 + uint16_t phyreg;
4059 +
4060 + if ((adapter->forced_speed != SPEED_UNKNOWN)
4061 + && !(adapter->poll_required))
4062 + return -EPERM;
4063 +
4064 + phydev = adapter->phydev;
4065 +
4066 + ecmd->advertising = phydev->advertising;
4067 + ecmd->autoneg = phydev->autoneg;
4068 +
4069 + if (adapter->link_state == __EDMA_LINKDOWN) {
4070 + ecmd->speed = SPEED_UNKNOWN;
4071 + ecmd->duplex = DUPLEX_UNKNOWN;
4072 + } else {
4073 + ecmd->speed = phydev->speed;
4074 + ecmd->duplex = phydev->duplex;
4075 + }
4076 +
4077 + ecmd->phy_address = adapter->phy_mdio_addr;
4078 +
4079 + phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
4080 + if (phyreg & LPA_10HALF)
4081 + ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
4082 +
4083 + if (phyreg & LPA_10FULL)
4084 + ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
4085 +
4086 + if (phyreg & LPA_100HALF)
4087 + ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
4088 +
4089 + if (phyreg & LPA_100FULL)
4090 + ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
4091 +
4092 + phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
4093 + if (phyreg & LPA_1000HALF)
4094 + ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
4095 +
4096 + if (phyreg & LPA_1000FULL)
4097 + ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
4098 + } else {
4099 + /* If the speed/duplex for this GMAC is forced and we
4100 + * are not polling for link state changes, return the
4101 + * values as specified by platform. This will be true
4102 + * for GMACs connected to switch, and interfaces that
4103 + * do not use a PHY.
4104 + */
4105 + if (!(adapter->poll_required)) {
4106 + if (adapter->forced_speed != SPEED_UNKNOWN) {
4107 + /* set speed and duplex */
4108 + ethtool_cmd_speed_set(ecmd, SPEED_1000);
4109 + ecmd->duplex = DUPLEX_FULL;
4110 +
4111 + /* Populate capabilities advertised by self */
4112 + ecmd->advertising = 0;
4113 + ecmd->autoneg = 0;
4114 + ecmd->port = PORT_TP;
4115 + ecmd->transceiver = XCVR_EXTERNAL;
4116 + } else {
4117 + /* the interface is neither link-polled nor
4118 + * forced to a speed/duplex
4119 + */
4120 + return -EIO;
4121 + }
4122 + }
4123 + }
4124 +
4125 + return 0;
4126 +}
4127 +
4128 +/* edma_set_settings()
4129 + * Set EDMA settings
4130 + */
4131 +static int edma_set_settings(struct net_device *netdev,
4132 + struct ethtool_cmd *ecmd)
4133 +{
4134 + struct edma_adapter *adapter = netdev_priv(netdev);
4135 + struct phy_device *phydev = NULL;
4136 +
4137 + if ((adapter->forced_speed != SPEED_UNKNOWN) &&
4138 + !adapter->poll_required)
4139 + return -EPERM;
4140 +
4141 + phydev = adapter->phydev;
4142 + phydev->advertising = ecmd->advertising;
4143 + phydev->autoneg = ecmd->autoneg;
4144 + phydev->speed = ethtool_cmd_speed(ecmd);
4145 + phydev->duplex = ecmd->duplex;
4146 +
4147 + genphy_config_aneg(phydev);
4148 +
4149 + return 0;
4150 +}
4151 +
4152 +/* edma_get_coalesce
4153 + * get interrupt mitigation
4154 + */
4155 +static int edma_get_coalesce(struct net_device *netdev,
4156 + struct ethtool_coalesce *ec)
4157 +{
4158 + u32 reg_val;
4159 +
4160 + edma_get_tx_rx_coalesce(&reg_val);
4161 +
4162 + /* We read the Interrupt Moderation Timer (IMT) register value and
4163 + * use the lower 16 bits for Rx and the upper 16 bits for Tx. We do a
4164 + * left shift by 1 because the IMT timer resolution is 2 usecs, so
4165 + * the value given by the register is multiplied by 2 to
4166 + * get the actual time in usecs.
4167 + */
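+ /* For example, a raw register value of 0x0008000a would report a Tx
+ * coalesce time of 16 usecs and an Rx coalesce time of 20 usecs.
+ */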
4168 + ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
4169 + ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
4170 +
4171 + return 0;
4172 +}
4173 +
4174 +/* edma_set_coalesce
4175 + * set interrupt mitigation
4176 + */
4177 +static int edma_set_coalesce(struct net_device *netdev,
4178 + struct ethtool_coalesce *ec)
4179 +{
4180 + if (ec->tx_coalesce_usecs)
4181 + edma_change_tx_coalesce(ec->tx_coalesce_usecs);
4182 + if (ec->rx_coalesce_usecs)
4183 + edma_change_rx_coalesce(ec->rx_coalesce_usecs);
4184 +
4185 + return 0;
4186 +}
4187 +
4188 +/* edma_set_priv_flags()
4189 + * Set EDMA private flags
4190 + */
4191 +static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
4192 +{
4193 + return 0;
4194 +}
4195 +
4196 +/* edma_get_priv_flags()
4197 + * get edma driver flags
4198 + */
4199 +static u32 edma_get_priv_flags(struct net_device *netdev)
4200 +{
4201 + return 0;
4202 +}
4203 +
4204 +/* edma_get_ringparam()
4205 + * get ring size
4206 + */
4207 +static void edma_get_ringparam(struct net_device *netdev,
4208 + struct ethtool_ringparam *ring)
4209 +{
4210 + struct edma_adapter *adapter = netdev_priv(netdev);
4211 + struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
4212 +
4213 + ring->tx_max_pending = edma_cinfo->tx_ring_count;
4214 + ring->rx_max_pending = edma_cinfo->rx_ring_count;
4215 +}
4216 +
4217 +/* Ethtool operations
4218 + */
4219 +static const struct ethtool_ops edma_ethtool_ops = {
4220 + .get_drvinfo = &edma_get_drvinfo,
4221 + .get_link = &ethtool_op_get_link,
4222 + .get_msglevel = &edma_get_msglevel,
4223 + .nway_reset = &edma_nway_reset,
4224 + .get_wol = &edma_get_wol,
4225 + .get_settings = &edma_get_settings,
4226 + .set_settings = &edma_set_settings,
4227 + .get_strings = &edma_get_strings,
4228 + .get_sset_count = &edma_get_strset_count,
4229 + .get_ethtool_stats = &edma_get_ethtool_stats,
4230 + .get_coalesce = &edma_get_coalesce,
4231 + .set_coalesce = &edma_set_coalesce,
4232 + .get_priv_flags = edma_get_priv_flags,
4233 + .set_priv_flags = edma_set_priv_flags,
4234 + .get_ringparam = edma_get_ringparam,
4235 +};
4236 +
4237 +/* edma_set_ethtool_ops
4238 + * Set ethtool operations
4239 + */
4240 +void edma_set_ethtool_ops(struct net_device *netdev)
4241 +{
4242 + netdev->ethtool_ops = &edma_ethtool_ops;
4243 +}
4244 --- /dev/null
4245 +++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
4246 @@ -0,0 +1,332 @@
4247 +/*
4248 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
4249 + *
4250 + * Permission to use, copy, modify, and/or distribute this software for
4251 + * any purpose with or without fee is hereby granted, provided that the
4252 + * above copyright notice and this permission notice appear in all copies.
4253 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
4254 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
4255 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
4256 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
4257 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
4258 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
4259 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
4260 + */
4261 +
4262 +#ifndef _ESS_EDMA_H_
4263 +#define _ESS_EDMA_H_
4264 +
4265 +#include <linux/types.h>
4266 +
4267 +struct edma_adapter;
4268 +struct edma_hw;
4269 +
4270 +/* register definition */
4271 +#define EDMA_REG_MAS_CTRL 0x0
4272 +#define EDMA_REG_TIMEOUT_CTRL 0x004
4273 +#define EDMA_REG_DBG0 0x008
4274 +#define EDMA_REG_DBG1 0x00C
4275 +#define EDMA_REG_SW_CTRL0 0x100
4276 +#define EDMA_REG_SW_CTRL1 0x104
4277 +
4278 +/* Interrupt Status Register */
4279 +#define EDMA_REG_RX_ISR 0x200
4280 +#define EDMA_REG_TX_ISR 0x208
4281 +#define EDMA_REG_MISC_ISR 0x210
4282 +#define EDMA_REG_WOL_ISR 0x218
4283 +
4284 +#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x)
4285 +
4286 +#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
4287 +#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
4288 +#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
4289 +#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
4290 +#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
4291 +
4292 +#define EDMA_WOL_ISR 0x00000001
4293 +
4294 +/* Interrupt Mask Register */
4295 +#define EDMA_REG_MISC_IMR 0x214
4296 +#define EDMA_REG_WOL_IMR 0x218
4297 +
4298 +#define EDMA_RX_IMR_NORMAL_MASK 0x1
4299 +#define EDMA_TX_IMR_NORMAL_MASK 0x1
4300 +#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
4301 +#define EDMA_WOL_IMR_NORMAL_MASK 0x1
4302 +
4303 +/* Edma receive consumer index */
4304 +#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
4305 +/* Edma transmit consumer index */
4306 +#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
4307 +
4308 +/* IRQ Moderator Initial Timer Register */
4309 +#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
4310 +#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
4311 +#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
4312 +#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
4313 +
4314 +/* Interrupt Control Register */
4315 +#define EDMA_REG_INTR_CTRL 0x284
4316 +#define EDMA_INTR_CLR_TYP_SHIFT 0
4317 +#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
4318 +#define EDMA_INTR_CLEAR_TYPE_W1 0
4319 +#define EDMA_INTR_CLEAR_TYPE_R 1
4320 +
4321 +/* RX Interrupt Mask Register */
4322 +#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
4323 +
4324 +/* TX Interrupt mask register */
4325 +#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
4326 +
4327 +/* Load Ptr Register
4328 + * Software sets this bit after the initialization of the head and tail
4329 + */
4330 +#define EDMA_REG_TX_SRAM_PART 0x400
4331 +#define EDMA_LOAD_PTR_SHIFT 16
4332 +
4333 +/* TXQ Control Register */
4334 +#define EDMA_REG_TXQ_CTRL 0x404
4335 +#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
4336 +#define EDMA_TXQ_CTRL_TXQ_EN 0x20
4337 +#define EDMA_TXQ_CTRL_ENH_MODE 0x40
4338 +#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
4339 +#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
4340 +#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
4341 +#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
4342 +#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
4343 +#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
4344 +#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
4345 +
4346 +#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
4347 +#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
4348 +#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
4349 +#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
4350 +#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
4351 +
4352 +/* WRR Control Register */
4353 +#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
4354 +#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
4355 +#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
4356 +#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
4357 +
4358 +/* Weighted round robin (WRR): takes the queue as input and computes
4359 + * the starting bit at which the weight for that particular
4360 + * queue is written
4361 + */
4362 +#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
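+/* For example, EDMA_WRR_SHIFT(5) = 5, i.e. queue 5's weight field starts
+ * at bit 5 of EDMA_REG_WRR_CTRL_Q4_Q7 (four 5-bit fields per register).
+ */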
4363 +
4364 +/* Tx Descriptor Control Register */
4365 +#define EDMA_REG_TPD_RING_SIZE 0x41C
4366 +#define EDMA_TPD_RING_SIZE_SHIFT 0
4367 +#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
4368 +
4369 +/* Transmit descriptor base address */
4370 +#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
4371 +
4372 +/* TPD Index Register */
4373 +#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
4374 +
4375 +#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
4376 +#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
4377 +#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
4378 +#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
4379 +#define EDMA_TPD_PROD_IDX_SHIFT 0
4380 +#define EDMA_TPD_CONS_IDX_SHIFT 16
4381 +
4382 +/* TX Virtual Queue Mapping Control Register */
4383 +#define EDMA_REG_VQ_CTRL0 0x4A0
4384 +#define EDMA_REG_VQ_CTRL1 0x4A4
4385 +
4386 +/* Virtual QID shift: takes the queue as input and computes the
4387 + * virtual QID position in the virtual QID control register
4388 + */
4389 +#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
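+/* For example, EDMA_VQ_ID_SHIFT(10) = 6, i.e. queue 10's 3-bit virtual QID
+ * occupies bits 6-8 of EDMA_REG_VQ_CTRL1 (eight queues per register).
+ */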
4390 +
4391 +/* Virtual Queue Default Value */
4392 +#define EDMA_VQ_REG_VALUE 0x240240
4393 +
4394 +/* Tx side Port Interface Control Register */
4395 +#define EDMA_REG_PORT_CTRL 0x4A8
4396 +#define EDMA_PAD_EN_SHIFT 15
4397 +
4398 +/* Tx side VLAN Configuration Register */
4399 +#define EDMA_REG_VLAN_CFG 0x4AC
4400 +
4401 +#define EDMA_TX_CVLAN 16
4402 +#define EDMA_TX_INS_CVLAN 17
4403 +#define EDMA_TX_CVLAN_TAG_SHIFT 0
4404 +
4405 +#define EDMA_TX_SVLAN 14
4406 +#define EDMA_TX_INS_SVLAN 15
4407 +#define EDMA_TX_SVLAN_TAG_SHIFT 16
4408 +
4409 +/* Tx Queue Packet Statistic Register */
4410 +#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
4411 +
4412 +#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
4413 +
4414 +/* Tx Queue Byte Statistic Register */
4415 +#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
4416 +
4417 +/* Load Balance Based Ring Offset Register */
4418 +#define EDMA_REG_LB_RING 0x800
4419 +#define EDMA_LB_RING_ENTRY_MASK 0xff
4420 +#define EDMA_LB_RING_ID_MASK 0x7
4421 +#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
4422 +#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
4423 +#define EDMA_LB_RING_ID_OFFSET 0
4424 +#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
4425 +#define EDMA_LB_REG_VALUE 0x6040200
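+/* 0x6040200: four 8-bit entries selecting rings 0, 2, 4 and 6 */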
4426 +
4427 +/* Load Balance Priority Mapping Register */
4428 +#define EDMA_REG_LB_PRI_START 0x804
4429 +#define EDMA_REG_LB_PRI_END 0x810
4430 +#define EDMA_LB_PRI_REG_INC 4
4431 +#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
4432 +#define EDMA_LB_PRI_ENTRY_MASK 0xf
4433 +
4434 +/* RSS Priority Mapping Register */
4435 +#define EDMA_REG_RSS_PRI 0x820
4436 +#define EDMA_RSS_PRI_ENTRY_MASK 0xf
4437 +#define EDMA_RSS_RING_ID_MASK 0x7
4438 +#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
4439 +
4440 +/* RSS Indirection Register */
4441 +#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
4442 +#define EDMA_NUM_IDT 16
4443 +#define EDMA_RSS_IDT_VALUE 0x64206420
4444 +
4445 +/* Default RSS Ring Register */
4446 +#define EDMA_REG_DEF_RSS 0x890
4447 +#define EDMA_DEF_RSS_MASK 0x7
4448 +
4449 +/* RSS Hash Function Type Register */
4450 +#define EDMA_REG_RSS_TYPE 0x894
4451 +#define EDMA_RSS_TYPE_NONE 0x01
4452 +#define EDMA_RSS_TYPE_IPV4TCP 0x02
4453 +#define EDMA_RSS_TYPE_IPV6_TCP 0x04
4454 +#define EDMA_RSS_TYPE_IPV4_UDP 0x08
4455 +#define EDMA_RSS_TYPE_IPV6UDP 0x10
4456 +#define EDMA_RSS_TYPE_IPV4 0x20
4457 +#define EDMA_RSS_TYPE_IPV6 0x40
4458 +#define EDMA_RSS_HASH_MODE_MASK 0x7f
4459 +
4460 +#define EDMA_REG_RSS_HASH_VALUE 0x8C0
4461 +
4462 +#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
4463 +
4464 +#define EDMA_HASH_TYPE_START 0
4465 +#define EDMA_HASH_TYPE_END 5
4466 +#define EDMA_HASH_TYPE_SHIFT 12
4467 +
4468 +#define EDMA_RFS_FLOW_ENTRIES 1024
4469 +#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
4470 +#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
4471 +
4472 +/* RFD Base Address Register */
4473 +#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
4474 +
4475 +/* RFD Index Register */
4476 +#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
4477 +
4478 +#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
4479 +#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
4480 +#define EDMA_RFD_PROD_IDX_MASK 0xFFF
4481 +#define EDMA_RFD_CONS_IDX_MASK 0xFFF
4482 +#define EDMA_RFD_PROD_IDX_SHIFT 0
4483 +#define EDMA_RFD_CONS_IDX_SHIFT 16
4484 +
4485 +/* Rx Descriptor Control Register */
4486 +#define EDMA_REG_RX_DESC0 0xA10
4487 +#define EDMA_RFD_RING_SIZE_MASK 0xFFF
4488 +#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
4489 +#define EDMA_RFD_RING_SIZE_SHIFT 0
4490 +#define EDMA_RX_BUF_SIZE_SHIFT 16
4491 +
4492 +#define EDMA_REG_RX_DESC1 0xA14
4493 +#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
4494 +#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
4495 +#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
4496 +#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
4497 +#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
4498 +#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
4499 +
4500 +/* RXQ Control Register */
4501 +#define EDMA_REG_RXQ_CTRL 0xA18
4502 +#define EDMA_FIFO_THRESH_TYPE_SHIF 0
4503 +#define EDMA_FIFO_THRESH_128_BYTE 0x0
4504 +#define EDMA_FIFO_THRESH_64_BYTE 0x1
4505 +#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
4506 +#define EDMA_RXQ_CTRL_EN 0x0000FF00
4507 +
4508 +/* AXI Burst Size Config */
4509 +#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
4510 +#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
4511 +
4512 +/* Rx Statistics Register */
4513 +#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
4514 +#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
4515 +
4516 +/* WoL Pattern Length Register */
4517 +#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
4518 +#define EDMA_WOL_PT_LEN_MASK 0xFF
4519 +#define EDMA_WOL_PT0_LEN_SHIFT 0
4520 +#define EDMA_WOL_PT1_LEN_SHIFT 8
4521 +#define EDMA_WOL_PT2_LEN_SHIFT 16
4522 +#define EDMA_WOL_PT3_LEN_SHIFT 24
4523 +
4524 +#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
4525 +#define EDMA_WOL_PT4_LEN_SHIFT 0
4526 +#define EDMA_WOL_PT5_LEN_SHIFT 8
4527 +#define EDMA_WOL_PT6_LEN_SHIFT 16
4528 +
4529 +/* WoL Control Register */
4530 +#define EDMA_REG_WOL_CTRL 0xC08
4531 +#define EDMA_WOL_WK_EN 0x00000001
4532 +#define EDMA_WOL_MG_EN 0x00000002
4533 +#define EDMA_WOL_PT0_EN 0x00000004
4534 +#define EDMA_WOL_PT1_EN 0x00000008
4535 +#define EDMA_WOL_PT2_EN 0x00000010
4536 +#define EDMA_WOL_PT3_EN 0x00000020
4537 +#define EDMA_WOL_PT4_EN 0x00000040
4538 +#define EDMA_WOL_PT5_EN 0x00000080
4539 +#define EDMA_WOL_PT6_EN 0x00000100
4540 +
4541 +/* MAC Control Register */
4542 +#define EDMA_REG_MAC_CTRL0 0xC20
4543 +#define EDMA_REG_MAC_CTRL1 0xC24
4544 +
4545 +/* WoL Pattern Register */
4546 +#define EDMA_REG_WOL_PATTERN_START 0x5000
4547 +#define EDMA_PATTERN_PART_REG_OFFSET 0x40
4548 +
4549 +
4550 +/* TX descriptor fields */
4551 +#define EDMA_TPD_HDR_SHIFT 0
4552 +#define EDMA_TPD_PPPOE_EN 0x00000100
4553 +#define EDMA_TPD_IP_CSUM_EN 0x00000200
4554 +#define EDMA_TPD_TCP_CSUM_EN 0x00000400
4555 +#define EDMA_TPD_UDP_CSUM_EN 0x00000800
4556 +#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
4557 +#define EDMA_TPD_LSO_EN 0x00001000
4558 +#define EDMA_TPD_LSO_V2_EN 0x00002000
4559 +#define EDMA_TPD_IPV4_EN 0x00010000
4560 +#define EDMA_TPD_MSS_MASK 0x1FFF
4561 +#define EDMA_TPD_MSS_SHIFT 18
4562 +#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
4563 +
4564 +/* RRD descriptor fields */
4565 +#define EDMA_RRD_NUM_RFD_MASK 0x000F
4566 +#define EDMA_RRD_SVLAN 0x8000
4567 +#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
4568 +
4569 +#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
4570 +#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
4571 +#define EDMA_RRD_CVLAN 0x0001
4572 +#define EDMA_RRD_DESC_VALID 0x8000
4573 +
4574 +#define EDMA_RRD_PRIORITY_SHIFT 4
4575 +#define EDMA_RRD_PRIORITY_MASK 0x7
4576 +#define EDMA_RRD_PORT_TYPE_SHIFT 7
4577 +#define EDMA_RRD_PORT_TYPE_MASK 0x1F
4578 +#endif /* _ESS_EDMA_H_ */