/*
 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include "edma.h"

extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];

bool edma_stp_rstp;
u16 edma_ath_eth_type;
/* edma_skb_priority_offset()
 *	get edma skb priority
 */
static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
{
    return (skb->priority >> 2) & 1;
}
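/* Example: skb->priority values 0-3 map to offset 0 and values 4-7 map to
 * offset 1 (the pattern repeats every 8), so each Tx queue group exposes
 * two priority levels.
 */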
/* edma_alloc_tx_ring()
 *	Allocate Tx descriptors ring
 */
static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
                              struct edma_tx_desc_ring *etdr)
{
    struct platform_device *pdev = edma_cinfo->pdev;

    /* Initialize ring */
    etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
    etdr->sw_next_to_fill = 0;
    etdr->sw_next_to_clean = 0;

    /* Allocate SW descriptors */
    etdr->sw_desc = vzalloc(etdr->size);
    if (!etdr->sw_desc) {
        dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
        return -ENOMEM;
    }

    /* Allocate HW descriptors */
    etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
                                       GFP_KERNEL);
    if (!etdr->hw_desc) {
        dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
        vfree(etdr->sw_desc);
        return -ENOMEM;
    }

    return 0;
}
/* edma_free_tx_ring()
 *	Free tx rings allocated by edma_alloc_tx_rings
 */
static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
                              struct edma_tx_desc_ring *etdr)
{
    struct platform_device *pdev = edma_cinfo->pdev;

    if (likely(etdr->dma))
        dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
                          etdr->dma);
}
/* edma_alloc_rx_ring()
 *	allocate rx descriptor ring
 */
static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
                              struct edma_rfd_desc_ring *erxd)
{
    struct platform_device *pdev = edma_cinfo->pdev;

    erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
    erxd->sw_next_to_fill = 0;
    erxd->sw_next_to_clean = 0;

    /* Allocate SW descriptors */
    erxd->sw_desc = vzalloc(erxd->size);
    if (!erxd->sw_desc)
        return -ENOMEM;

    /* Alloc HW descriptors */
    erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
                                       GFP_KERNEL);
    if (!erxd->hw_desc) {
        vfree(erxd->sw_desc);
        return -ENOMEM;
    }

    /* Initialize pending_fill */
    erxd->pending_fill = 0;

    return 0;
}
/* edma_free_rx_ring()
 *	Free rx ring allocated by alloc_rx_ring
 */
static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
                              struct edma_rfd_desc_ring *rxdr)
{
    struct platform_device *pdev = edma_cinfo->pdev;

    if (likely(rxdr->dma))
        dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
                          rxdr->dma);

    vfree(rxdr->sw_desc);
    rxdr->sw_desc = NULL;
}
/* edma_configure_tx()
 *	Configure transmission control data
 */
static void edma_configure_tx(struct edma_common_info *edma_cinfo)
{
    u32 txq_ctrl_data;

    txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
    txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
    txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
    edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
}
/* edma_configure_rx()
 *	configure reception control data
 */
static void edma_configure_rx(struct edma_common_info *edma_cinfo)
{
    struct edma_hw *hw = &edma_cinfo->hw;
    u32 rss_type, rx_desc1, rxq_ctrl_data;

    rss_type = hw->rss_type;
    edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);

    /* Set RFD burst number */
    rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);

    /* Set RFD prefetch threshold */
    rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);

    /* Set RFD in host ring low threshold to generate interrupt */
    rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
    edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);

    /* Set Rx FIFO threshold to start to DMA data to host */
    rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;

    /* Set RX remove vlan bit */
    rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;

    edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
}
/* edma_alloc_rx_buf()
 *	does skb allocation for the received packets.
 */
static int edma_alloc_rx_buf(struct edma_common_info *edma_cinfo,
                             struct edma_rfd_desc_ring *erdr,
                             int cleaned_count, int queue_id)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    struct edma_rx_free_desc *rx_desc;
    struct edma_sw_desc *sw_desc;
    struct sk_buff *skb;
    unsigned int i;
    u16 prod_idx, length;
    u32 reg_data;

    if (cleaned_count > erdr->count)
        cleaned_count = erdr->count - 1;

    i = erdr->sw_next_to_fill;

    while (cleaned_count) {
        sw_desc = &erdr->sw_desc[i];
        length = edma_cinfo->rx_head_buffer_len;

        if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
            skb = sw_desc->skb;

            /* Clear REUSE Flag */
            sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
        } else {
            skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
            if (!skb) {
                /* Better luck next round */
                break;
            }

            if (edma_cinfo->page_mode) {
                struct page *pg = alloc_page(GFP_ATOMIC);

                if (!pg) {
                    dev_kfree_skb_any(skb);
                    break;
                }

                sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
                                            edma_cinfo->rx_page_buffer_len,
                                            DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev,
                                      sw_desc->dma)) {
                    __free_page(pg);
                    dev_kfree_skb_any(skb);
                    break;
                }

                skb_fill_page_desc(skb, 0, pg, 0,
                                   edma_cinfo->rx_page_buffer_len);
                sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
                sw_desc->length = edma_cinfo->rx_page_buffer_len;
            } else {
                sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
                                              length, DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev,
                                      sw_desc->dma)) {
                    dev_kfree_skb_any(skb);
                    break;
                }

                sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
                sw_desc->length = length;
            }
        }

        /* Update the buffer info */
        sw_desc->skb = skb;
        rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
        rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
        if (++i == erdr->count)
            i = 0;
        cleaned_count--;
    }

    erdr->sw_next_to_fill = i;

    if (!i)
        prod_idx = erdr->count - 1;
    else
        prod_idx = i - 1;

    /* Update the producer index */
    edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
    reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
    reg_data |= prod_idx;
    edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);

    /* If we couldn't allocate all the buffers
     * we increment the alloc failure counters
     */
    if (cleaned_count)
        edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;

    return cleaned_count;
}
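/* Note on the return value above: edma_alloc_rx_buf() hands back the number
 * of buffers it could NOT replenish. Callers keep this in erdr->pending_fill
 * so the deficit can be retried on a later refill attempt.
 */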
/* edma_init_desc()
 *	update descriptor ring size, buffer and producer/consumer index
 */
static void edma_init_desc(struct edma_common_info *edma_cinfo)
{
    struct edma_rfd_desc_ring *rfd_ring;
    struct edma_tx_desc_ring *etdr;
    int i, j;
    u32 data = 0;
    u16 hw_cons_idx = 0;

    /* Set the base address of every TPD ring. */
    for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
        etdr = edma_cinfo->tpd_ring[i];

        /* Update descriptor ring base address */
        edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
        edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);

        /* Calculate hardware consumer index */
        hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
        etdr->sw_next_to_fill = hw_cons_idx;
        etdr->sw_next_to_clean = hw_cons_idx;
        data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
        data |= hw_cons_idx;

        /* update producer index */
        edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);

        /* update SW consumer index register */
        edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);

        /* Set TPD ring size */
        edma_write_reg(EDMA_REG_TPD_RING_SIZE,
                       edma_cinfo->tx_ring_count &
                       EDMA_TPD_RING_SIZE_MASK);
    }

    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        rfd_ring = edma_cinfo->rfd_ring[j];
        /* Update Receive Free descriptor ring base address */
        edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
                       (u32)(rfd_ring->dma));
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }

    data = edma_cinfo->rx_head_buffer_len;
    if (edma_cinfo->page_mode)
        data = edma_cinfo->rx_page_buffer_len;

    data &= EDMA_RX_BUF_SIZE_MASK;
    data <<= EDMA_RX_BUF_SIZE_SHIFT;

    /* Update RFD ring size and RX buffer size */
    data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
            << EDMA_RFD_RING_SIZE_SHIFT;

    edma_write_reg(EDMA_REG_RX_DESC0, data);

    /* Disable TX FIFO low watermark and high watermark */
    edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);

    /* Load all of base address above */
    edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
    data |= 1 << EDMA_LOAD_PTR_SHIFT;
    edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
}
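/* Note: when four Rx queues are used, the RFD rings sit at every other
 * index (0, 2, 4, 6), which is why the ring walkers above advance j by 2
 * in that case and by 1 otherwise.
 */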
/* edma_receive_checksum
 *	Api to check checksum on receive packets
 */
static void edma_receive_checksum(struct edma_rx_return_desc *rd,
                                  struct sk_buff *skb)
{
    skb_checksum_none_assert(skb);

    /* check the RRD IP/L4 checksum bit to see if
     * it's set, which in turn indicates checksum
     * failure.
     */
    if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
        return;

    skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* edma_clean_rfd()
 *	clean up rx resources on error
 */
static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
{
    struct edma_rx_free_desc *rx_desc;
    struct edma_sw_desc *sw_desc;

    rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
    sw_desc = &erdr->sw_desc[index];

    dev_kfree_skb_any(sw_desc->skb);

    memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
}
/* edma_rx_complete_stp_rstp()
 *	Complete Rx processing for STP/RSTP packets
 */
static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
{
    int i;
    u16 port_type;
    u8 priority;
    u8 mac_addr[EDMA_ETH_HDR_LEN];

    port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
                & EDMA_RRD_PORT_TYPE_MASK;
    /* if port type is 0x4, then only proceed with
     * other stp/rstp calculation
     */
    if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
        u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};

        /* calculate the frame priority */
        priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
                   & EDMA_RRD_PRIORITY_MASK;

        for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
            mac_addr[i] = skb->data[i];

        /* Check if destination mac addr is bpdu addr */
        if (!memcmp(mac_addr, bpdu_mac, 6)) {
            /* destination mac address is BPDU
             * destination mac address, then add
             * atheros header to the packet.
             */
            u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
                           (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
                           (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
            skb_push(skb, 4);
            memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
            *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
            *(uint16_t *)&skb->data[14] = htons(athr_hdr);
        }
    }
}
/* edma_rx_complete_fraglist()
 *	Complete Rx processing for fraglist skbs
 */
static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
                                     u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    struct edma_hw *hw = &edma_cinfo->hw;
    struct sk_buff *skb_temp;
    struct edma_sw_desc *sw_desc;
    int i;
    u16 size_remaining;

    skb->tail += (hw->rx_head_buff_size - 16);
    skb->len = skb->truesize = length;
    size_remaining = length - (hw->rx_head_buff_size - 16);

    /* clean-up all related sw_descs */
    for (i = 1; i < num_rfds; i++) {
        struct sk_buff *skb_prev;

        sw_desc = &erdr->sw_desc[sw_next_to_clean];
        skb_temp = sw_desc->skb;

        dma_unmap_single(&pdev->dev, sw_desc->dma,
                         sw_desc->length, DMA_FROM_DEVICE);

        if (size_remaining < hw->rx_head_buff_size)
            skb_put(skb_temp, size_remaining);
        else
            skb_put(skb_temp, hw->rx_head_buff_size);

        /* If we are processing the first rfd, we link
         * skb->frag_list to the skb corresponding to the
         * first RFD.
         */
        if (i == 1)
            skb_shinfo(skb)->frag_list = skb_temp;
        else
            skb_prev->next = skb_temp;
        skb_prev = skb_temp;
        skb_temp->next = NULL;

        skb->data_len += skb_temp->len;
        size_remaining -= skb_temp->len;

        /* Increment SW index */
        sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
        (*cleaned_count)++;
    }

    return sw_next_to_clean;
}
/* edma_rx_complete_paged()
 *	Complete Rx processing for paged skbs
 */
static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
                                  u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    struct sk_buff *skb_temp;
    struct edma_sw_desc *sw_desc;
    int i;
    u16 size_remaining;

    skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

    /* Setup skbuff fields */
    skb->len = length;

    if (likely(num_rfds <= 1)) {
        skb->data_len = length;
        skb->truesize += edma_cinfo->rx_page_buffer_len;
        skb_fill_page_desc(skb, 0, skb_frag_page(frag),
                           16, length);
    } else {
        skb_frag_size_sub(frag, 16);
        skb->data_len = skb_frag_size(frag);
        skb->truesize += edma_cinfo->rx_page_buffer_len;
        size_remaining = length - skb_frag_size(frag);

        skb_fill_page_desc(skb, 0, skb_frag_page(frag),
                           16, skb_frag_size(frag));

        /* clean-up all related sw_descs */
        for (i = 1; i < num_rfds; i++) {
            sw_desc = &erdr->sw_desc[sw_next_to_clean];
            skb_temp = sw_desc->skb;
            frag = &skb_shinfo(skb_temp)->frags[0];
            dma_unmap_page(&pdev->dev, sw_desc->dma,
                           sw_desc->length, DMA_FROM_DEVICE);

            if (size_remaining < edma_cinfo->rx_page_buffer_len)
                skb_frag_size_set(frag, size_remaining);

            skb_fill_page_desc(skb, i, skb_frag_page(frag),
                               0, skb_frag_size(frag));

            skb_shinfo(skb_temp)->nr_frags = 0;
            dev_kfree_skb_any(skb_temp);

            skb->data_len += skb_frag_size(frag);
            skb->truesize += edma_cinfo->rx_page_buffer_len;
            size_remaining -= skb_frag_size(frag);

            /* Increment SW index */
            sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
            (*cleaned_count)++;
        }
    }

    return sw_next_to_clean;
}
/* edma_rx_complete()
 *	Main api called from the poll function to process rx packets.
 */
static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
                            int *work_done, int work_to_do, int queue_id,
                            struct napi_struct *napi)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
    struct net_device *netdev;
    struct edma_adapter *adapter;
    struct edma_sw_desc *sw_desc;
    struct sk_buff *skb;
    struct edma_rx_return_desc *rd;
    u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
        sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
    u32 data = 0;
    u8 *vaddr;
    u8 priority;
    int port_id, i, drop_count = 0;
    u16 count = erdr->count, rfd_avail;
    u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};

    cleaned_count = erdr->pending_fill;
    sw_next_to_clean = erdr->sw_next_to_clean;

    edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
    hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
                       EDMA_RFD_CONS_IDX_MASK;

    do {
        while (sw_next_to_clean != hw_next_to_clean) {
            if (!work_to_do)
                break;

            sw_desc = &erdr->sw_desc[sw_next_to_clean];
            skb = sw_desc->skb;

            /* Unmap the allocated buffer */
            if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
                dma_unmap_single(&pdev->dev, sw_desc->dma,
                                 sw_desc->length, DMA_FROM_DEVICE);
            else
                dma_unmap_page(&pdev->dev, sw_desc->dma,
                               sw_desc->length, DMA_FROM_DEVICE);

            if (edma_cinfo->page_mode) {
                vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
                memcpy((uint8_t *)&rrd[0], vaddr, 16);
                rd = (struct edma_rx_return_desc *)rrd;
                kunmap_atomic(vaddr);
            } else {
                rd = (struct edma_rx_return_desc *)skb->data;
            }

            /* Check if RRD is valid */
            if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
                edma_clean_rfd(erdr, sw_next_to_clean);
                sw_next_to_clean = (sw_next_to_clean + 1) &
                                   (erdr->count - 1);
                cleaned_count++;
                continue;
            }

            /* Get the number of RFDs from RRD */
            num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;

            /* Get Rx port ID from switch */
            port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
            if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
                dev_err(&pdev->dev, "Invalid RRD source port bit set");
                for (i = 0; i < num_rfds; i++) {
                    edma_clean_rfd(erdr, sw_next_to_clean);
                    sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
                    cleaned_count++;
                }
                continue;
            }

            /* check if we have a sink for the data we receive.
             * If the interface isn't setup, we have to drop the
             * incoming data for now.
             */
            netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
            if (!netdev) {
                edma_clean_rfd(erdr, sw_next_to_clean);
                sw_next_to_clean = (sw_next_to_clean + 1) &
                                   (erdr->count - 1);
                cleaned_count++;
                continue;
            }
            adapter = netdev_priv(netdev);

            /* This code is added to handle a usecase where a high
             * priority stream and a low priority stream are
             * received simultaneously on DUT. The problem occurs
             * if one of the Rx rings is full and the corresponding
             * core is busy with other stuff. This causes the ESS CPU
             * port to backpressure all incoming traffic including
             * the high priority one. We monitor the free descriptor
             * count on each CPU and whenever it reaches the threshold
             * (< 80), we drop all low priority traffic and let only
             * high priority traffic pass through. We can hence avoid
             * the ESS CPU port sending backpressure on high priority
             * streams.
             */
            priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
                       & EDMA_RRD_PRIORITY_MASK;
            if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
                rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
                if (rfd_avail < EDMA_RFD_AVAIL_THR) {
                    sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
                    sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
                    adapter->stats.rx_dropped++;
                    cleaned_count++;
                    drop_count++;
                    if (drop_count == 3) {
                        work_to_do--;
                        (*work_done)++;
                        drop_count = 0;
                    }
                    if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
                        /* If buffer clean count reaches 16, we replenish HW buffers. */
                        ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
                        edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
                                       sw_next_to_clean);
                        cleaned_count = ret_count;
                        erdr->pending_fill = ret_count;
                    }
                    continue;
                }
            }

            work_to_do--;
            (*work_done)++;

            /* Increment SW index */
            sw_next_to_clean = (sw_next_to_clean + 1) &
                               (erdr->count - 1);
            cleaned_count++;

            /* Get the packet size and allocate buffer */
            length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;

            if (edma_cinfo->page_mode) {
                /* paged skb */
                sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
                if (!pskb_may_pull(skb, ETH_HLEN)) {
                    dev_kfree_skb_any(skb);
                    continue;
                }
            } else {
                /* single or fraglist skb */

                /* Addition of 16 bytes is required, as in the packet
                 * first 16 bytes are rrd descriptors, so actual data
                 * starts from an offset of 16.
                 */
                skb_reserve(skb, 16);
                if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode))
                    skb_put(skb, length);
                else
                    sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
            }

            if (edma_stp_rstp)
                edma_rx_complete_stp_rstp(skb, port_id, rd);

            skb->protocol = eth_type_trans(skb, netdev);

            /* Record Rx queue for RFS/RPS and fill flow hash from HW */
            skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
            if (netdev->features & NETIF_F_RXHASH) {
                hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
                if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
                    skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
            }

#ifdef CONFIG_NF_FLOW_COOKIE
            skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
#endif
            edma_receive_checksum(rd, skb);

            /* Process VLAN HW acceleration indication provided by HW */
            if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
                vlan = rd->rrd4;
                if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
                else if (rd->rrd1 & EDMA_RRD_SVLAN)
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
            }

            /* Update rx statistics */
            adapter->stats.rx_packets++;
            adapter->stats.rx_bytes += length;

            /* Check if we reached refill threshold */
            if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
                /* If buffer clean count reaches 16, we replenish HW buffers. */
                ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
                edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
                               sw_next_to_clean);
                cleaned_count = ret_count;
                erdr->pending_fill = ret_count;
            }

            /* At this point skb should go to stack */
            napi_gro_receive(napi, skb);
        }

        /* Check if we still have NAPI budget */
        if (!work_to_do)
            break;

        /* Read index once again since we still have NAPI budget */
        edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
        hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
                           EDMA_RFD_CONS_IDX_MASK;
    } while (hw_next_to_clean != sw_next_to_clean);

    erdr->sw_next_to_clean = sw_next_to_clean;

    /* Refill here in case refill threshold wasn't reached */
    if (likely(cleaned_count)) {
        ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
        erdr->pending_fill = ret_count;
        if (ret_count)
            dev_dbg(&pdev->dev, "Not all buffers were reallocated");

        edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
                       erdr->sw_next_to_clean);
    }

    return erdr->pending_fill;
}
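/* The value returned above (erdr->pending_fill) is the outstanding refill
 * deficit for this ring; edma_poll() treats a non-zero value as a reason to
 * keep NAPI scheduled so the missing RFDs can be replenished on a later pass.
 */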
/* edma_delete_rfs_filter()
 *	Remove RFS filter from switch
 */
static int edma_delete_rfs_filter(struct edma_adapter *adapter,
                                  struct edma_rfs_filter_node *filter_node)
{
    int res = -1;
    struct flow_keys *keys = &filter_node->keys;

    if (likely(adapter->set_rfs_rule))
        res = (*adapter->set_rfs_rule)(adapter->netdev,
                flow_get_u32_src(keys), flow_get_u32_dst(keys),
                keys->ports.src, keys->ports.dst,
                keys->basic.ip_proto, filter_node->rq_id, 0);

    return res;
}
/* edma_add_rfs_filter()
 *	Add RFS filter to switch
 */
static int edma_add_rfs_filter(struct edma_adapter *adapter,
                               struct flow_keys *keys, u16 rq,
                               struct edma_rfs_filter_node *filter_node)
{
    int res = -1;
    struct flow_keys *dest_keys = &filter_node->keys;

    dest_keys->control = keys->control;
    dest_keys->basic = keys->basic;
    dest_keys->addrs = keys->addrs;
    dest_keys->ports = keys->ports;
    dest_keys->basic.ip_proto = keys->basic.ip_proto;

    /* Call callback registered by ESS driver */
    if (likely(adapter->set_rfs_rule))
        res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
                flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
                keys->basic.ip_proto, rq, 1);

    return res;
}
/* edma_rfs_key_search()
 *	Look for existing RFS entry
 */
static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
                                                        struct flow_keys *key)
{
    struct edma_rfs_filter_node *p;

    hlist_for_each_entry(p, h, node)
        if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
            flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
            p->keys.ports.src == key->ports.src &&
            p->keys.ports.dst == key->ports.dst &&
            p->keys.basic.ip_proto == key->basic.ip_proto)
            return p;

    return NULL;
}
/* edma_initialise_rfs_flow_table()
 *	Initialise EDMA RFS flow table
 */
static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
{
    int i;

    spin_lock_init(&adapter->rfs.rfs_ftab_lock);

    /* Initialize EDMA flow hash table */
    for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
        INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);

    adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
    adapter->rfs.filter_available = adapter->rfs.max_num_filter;
    adapter->rfs.hashtoclean = 0;

    /* Add timer to get periodic RFS updates from OS */
    timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0);
    mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
}
/* edma_free_rfs_flow_table()
 *	Free EDMA RFS flow table
 */
static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
{
    int i, res;

    /* Remove sync timer */
    del_timer_sync(&adapter->rfs.expire_rfs);
    spin_lock_bh(&adapter->rfs.rfs_ftab_lock);

    /* Free EDMA RFS table entries */
    adapter->rfs.filter_available = 0;

    /* Clean-up EDMA flow hash table */
    for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
        struct hlist_head *hhead;
        struct hlist_node *tmp;
        struct edma_rfs_filter_node *filter_node;

        hhead = &adapter->rfs.hlist_head[i];
        hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
            res = edma_delete_rfs_filter(adapter, filter_node);
            if (res < 0)
                dev_warn(&adapter->netdev->dev,
                         "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
                         filter_node->flow_id);
            hlist_del(&filter_node->node);
            kfree(filter_node);
        }
    }

    spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
}
/* edma_tx_unmap_and_free()
 *	clean TX buffer
 */
static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
                                          struct edma_sw_desc *sw_desc)
{
    struct sk_buff *skb = sw_desc->skb;

    if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
               (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
        /* unmap_single for skb head area */
        dma_unmap_single(&pdev->dev, sw_desc->dma,
                         sw_desc->length, DMA_TO_DEVICE);
    else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
        /* unmap page for paged fragments */
        dma_unmap_page(&pdev->dev, sw_desc->dma,
                       sw_desc->length, DMA_TO_DEVICE);

    if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
        dev_kfree_skb_any(skb);
}
/* edma_tx_complete()
 *	Used to clean tx queues and update hardware and consumer index
 */
static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
{
    struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
    struct edma_sw_desc *sw_desc;
    struct platform_device *pdev = edma_cinfo->pdev;
    int i;

    u16 sw_next_to_clean = etdr->sw_next_to_clean;
    u16 hw_next_to_clean;
    u32 data = 0;

    edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
    hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;

    /* clean the buffer here */
    while (sw_next_to_clean != hw_next_to_clean) {
        sw_desc = &etdr->sw_desc[sw_next_to_clean];
        edma_tx_unmap_and_free(pdev, sw_desc);
        sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
    }

    etdr->sw_next_to_clean = sw_next_to_clean;

    /* update the TPD consumer index register */
    edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);

    /* Wake the queue if queue is stopped and netdev link is up */
    for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i]; i++) {
        if (netif_tx_queue_stopped(etdr->nq[i])) {
            if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
                netif_tx_wake_queue(etdr->nq[i]);
        }
    }
}
/* edma_get_tx_buffer()
 *	Get sw_desc corresponding to the TPD
 */
static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
                                               struct edma_tx_desc *tpd, int queue_id)
{
    struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];

    return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
}
/* edma_get_next_tpd()
 *	Return a TPD descriptor for transfer
 */
static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
                                              int queue_id)
{
    struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
    u16 sw_next_to_fill = etdr->sw_next_to_fill;
    struct edma_tx_desc *tpd_desc =
        (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);

    etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);

    return tpd_desc;
}
/* edma_tpd_available()
 *	Check number of free TPDs
 */
static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
                                     int queue_id)
{
    struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];

    u16 sw_next_to_fill;
    u16 sw_next_to_clean;
    u16 count = 0;

    sw_next_to_clean = etdr->sw_next_to_clean;
    sw_next_to_fill = etdr->sw_next_to_fill;

    if (likely(sw_next_to_clean <= sw_next_to_fill))
        count = etdr->count;

    return count + sw_next_to_clean - sw_next_to_fill - 1;
}
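/* Example: for a 128-entry ring with sw_next_to_clean = 5 and
 * sw_next_to_fill = 10, this evaluates to 128 + 5 - 10 - 1 = 122 free TPDs;
 * one slot is always left unused so a full ring can be told apart from an
 * empty one.
 */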
/* edma_tx_queue_get()
 *	Get the starting number of the queue
 */
static inline int edma_tx_queue_get(struct edma_adapter *adapter,
                                    struct sk_buff *skb, int txq_id)
{
    /* skb->priority is used as an index to skb priority table
     * and based on packet priority, corresponding queue is assigned.
     */
    return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
}
/* edma_tx_update_hw_idx()
 *	update the producer index for the ring transmitted
 */
static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
                                  struct sk_buff *skb, int queue_id)
{
    struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
    u32 tpd_idx_data;

    /* Read and update the producer index */
    edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
    tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
    tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
                    << EDMA_TPD_PROD_IDX_SHIFT;

    edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
}
/* edma_rollback_tx()
 *	Function to retrieve tx resources in case of error
 */
static void edma_rollback_tx(struct edma_adapter *adapter,
                             struct edma_tx_desc *start_tpd, int queue_id)
{
    struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
    struct edma_sw_desc *sw_desc;
    struct edma_tx_desc *tpd = NULL;
    u16 start_index, index;

    start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);

    index = start_index;
    while (index != etdr->sw_next_to_fill) {
        tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
        sw_desc = &etdr->sw_desc[index];
        edma_tx_unmap_and_free(adapter->pdev, sw_desc);
        memset(tpd, 0, sizeof(struct edma_tx_desc));
        if (++index == etdr->count)
            index = 0;
    }
    etdr->sw_next_to_fill = start_index;
}
/* edma_tx_map_and_fill()
 *	gets called from edma_xmit_frame
 *
 * This is where the dma of the buffer to be transmitted
 * gets mapped
 */
static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
                                struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
                                unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
                                bool packet_is_rstp, int nr_frags)
{
    struct edma_sw_desc *sw_desc = NULL;
    struct platform_device *pdev = edma_cinfo->pdev;
    struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
    struct sk_buff *iter_skb;
    int i = 0;
    u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
    u16 buf_len, lso_desc_len = 0;

    /* It should either be a nr_frags skb or fraglist skb but not both */
    BUG_ON(nr_frags && skb_has_frag_list(skb));

    if (skb_is_gso(skb)) {
        /* TODO: What additional checks need to be performed here */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
            lso_word1 |= EDMA_TPD_IPV4_EN;
            ip_hdr(skb)->check = 0;
            tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                     ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
            lso_word1 |= EDMA_TPD_LSO_V2_EN;
            ipv6_hdr(skb)->payload_len = 0;
            tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                   &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }

        lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
                     (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
    } else if (flags_transmit & EDMA_HW_CHECKSUM) {
        u8 css, cso;

        cso = skb_checksum_start_offset(skb);
        css = cso + skb->csum_offset;

        word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
        word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
        word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
    }

    if (skb->protocol == htons(ETH_P_PPP_SES))
        word1 |= EDMA_TPD_PPPOE_EN;

    if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
        switch (skb->vlan_proto) {
        case htons(ETH_P_8021Q):
            word3 |= (1 << EDMA_TX_INS_CVLAN);
            word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
            break;
        case htons(ETH_P_8021AD):
            word1 |= (1 << EDMA_TX_INS_SVLAN);
            svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
            break;
        default:
            dev_err(&pdev->dev, "no ctag or stag present\n");
            goto vlan_tag_error;
        }
    } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
        word3 |= (1 << EDMA_TX_INS_CVLAN);
        word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
    }

    if (packet_is_rstp) {
        word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
        word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
    } else {
        word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
    }

    buf_len = skb_headlen(skb);

    if (lso_word1 & EDMA_TPD_LSO_V2_EN) {

        /* IPv6 LSOv2 descriptor */
        start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
        sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;

        /* LSOv2 descriptor overrides addr field to pass length */
        tpd->addr = cpu_to_le16(skb->len);
        tpd->svlan_tag = svlan_tag;
        tpd->word1 = word1 | lso_word1;
        tpd->word3 = word3;

        tpd = edma_get_next_tpd(edma_cinfo, queue_id);
        sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);

        /* The last buffer info contain the skb address,
         * so skb will be freed after unmap
         */
        sw_desc->length = lso_desc_len;
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;

        sw_desc->dma = dma_map_single(&adapter->pdev->dev,
                                      skb->data, buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, sw_desc->dma))
            goto dma_error;

        tpd->addr = cpu_to_le32(sw_desc->dma);
        tpd->len = cpu_to_le16(buf_len);

        tpd->svlan_tag = svlan_tag;
        tpd->word1 = word1 | lso_word1;
        tpd->word3 = word3;

        /* The last buffer info contain the skb address,
         * so it will be freed after unmap
         */
        sw_desc->length = lso_desc_len;
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
    } else if (likely(buf_len)) {

        /* TODO Do not dequeue descriptor if there is a potential error */
        tpd = edma_get_next_tpd(edma_cinfo, queue_id);

        if (!start_tpd)
            start_tpd = tpd;

        sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);

        /* The last buffer info contain the skb address,
         * so it will be freed after unmap
         */
        sw_desc->length = buf_len;
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
        sw_desc->dma = dma_map_single(&adapter->pdev->dev,
                                      skb->data, buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, sw_desc->dma))
            goto dma_error;

        tpd->addr = cpu_to_le32(sw_desc->dma);
        tpd->len = cpu_to_le16(buf_len);

        tpd->svlan_tag = svlan_tag;
        tpd->word1 = word1 | lso_word1;
        tpd->word3 = word3;
    }

    /* Walk through all paged fragments */
    while (nr_frags--) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        buf_len = skb_frag_size(frag);
        tpd = edma_get_next_tpd(edma_cinfo, queue_id);
        sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
        sw_desc->length = buf_len;
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;

        sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);

        if (dma_mapping_error(NULL, sw_desc->dma))
            goto dma_error;

        tpd->addr = cpu_to_le32(sw_desc->dma);
        tpd->len = cpu_to_le16(buf_len);

        tpd->svlan_tag = svlan_tag;
        tpd->word1 = word1 | lso_word1;
        tpd->word3 = word3;
        i++;
    }

    /* Walk through all fraglist skbs */
    skb_walk_frags(skb, iter_skb) {
        buf_len = iter_skb->len;
        tpd = edma_get_next_tpd(edma_cinfo, queue_id);
        sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
        sw_desc->length = buf_len;
        sw_desc->dma = dma_map_single(&adapter->pdev->dev,
                                      iter_skb->data, buf_len, DMA_TO_DEVICE);

        if (dma_mapping_error(NULL, sw_desc->dma))
            goto dma_error;

        tpd->addr = cpu_to_le32(sw_desc->dma);
        tpd->len = cpu_to_le16(buf_len);
        tpd->svlan_tag = svlan_tag;
        tpd->word1 = word1 | lso_word1;
        tpd->word3 = word3;
        sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
    }

    tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;

    sw_desc->skb = skb;
    sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;

    return 0;

dma_error:
    edma_rollback_tx(adapter, start_tpd, queue_id);
    dev_err(&pdev->dev, "TX DMA map failed\n");
vlan_tag_error:
    return -ENOMEM;
}
/* edma_check_link()
 *	check the link status
 */
static int edma_check_link(struct edma_adapter *adapter)
{
    struct phy_device *phydev = adapter->phydev;

    if (!(adapter->poll_required))
        return __EDMA_LINKUP;

    if (phydev->link)
        return __EDMA_LINKUP;

    return __EDMA_LINKDOWN;
}
/* edma_adjust_link()
 *	check for edma link status
 */
void edma_adjust_link(struct net_device *netdev)
{
    int status;
    struct edma_adapter *adapter = netdev_priv(netdev);
    struct phy_device *phydev = adapter->phydev;

    if (!test_bit(__EDMA_UP, &adapter->state_flags))
        return;

    status = edma_check_link(adapter);

    if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
        phy_print_status(phydev);
        adapter->link_state = __EDMA_LINKUP;
        if (adapter->edma_cinfo->is_single_phy) {
            ess_set_port_status_speed(adapter->edma_cinfo, phydev,
                                      ffs(adapter->dp_bitmap) - 1);
        }
        netif_carrier_on(netdev);
        if (netif_running(netdev))
            netif_tx_wake_all_queues(netdev);
    } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
        phy_print_status(phydev);
        adapter->link_state = __EDMA_LINKDOWN;
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
    }
}
/* edma_get_stats()
 *	Statistics api used to retrieve the tx/rx statistics
 */
struct net_device_stats *edma_get_stats(struct net_device *netdev)
{
    struct edma_adapter *adapter = netdev_priv(netdev);

    return &adapter->stats;
}
/* edma_xmit()
 *	Main api to be called by the core for packet transmission
 */
netdev_tx_t edma_xmit(struct sk_buff *skb,
                      struct net_device *net_dev)
{
    struct edma_adapter *adapter = netdev_priv(net_dev);
    struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
    struct edma_tx_desc_ring *etdr;
    u16 from_cpu, dp_bitmap, txq_id;
    int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
    unsigned int flags_transmit = 0;
    bool packet_is_rstp = false;
    struct netdev_queue *nq = NULL;

    if (skb_shinfo(skb)->nr_frags) {
        nr_frags = skb_shinfo(skb)->nr_frags;
        num_tpds_needed += nr_frags;
    } else if (skb_has_frag_list(skb)) {
        struct sk_buff *iter_skb;

        skb_walk_frags(skb, iter_skb)
            num_tpds_needed++;
    }

    if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
        dev_err(&net_dev->dev,
                "skb received with fragments %d which is more than %lu",
                num_tpds_needed, EDMA_MAX_SKB_FRAGS);
        dev_kfree_skb_any(skb);
        adapter->stats.tx_errors++;
        return NETDEV_TX_OK;
    }

    if (edma_stp_rstp) {
        u16 ath_hdr, ath_eth_type;
        u8 mac_addr[EDMA_ETH_HDR_LEN];

        ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
        if (ath_eth_type == edma_ath_eth_type) {
            packet_is_rstp = true;
            ath_hdr = htons(*(uint16_t *)&skb->data[14]);
            dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
            from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
            memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);

            skb_pull(skb, 4);

            memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
        }
    }

    /* this will be one of the 4 TX queues exposed to linux kernel */
    txq_id = skb_get_queue_mapping(skb);
    queue_id = edma_tx_queue_get(adapter, skb, txq_id);
    etdr = edma_cinfo->tpd_ring[queue_id];
    nq = netdev_get_tx_queue(net_dev, txq_id);

    local_bh_disable();
    /* Tx is not handled in bottom half context. Hence, we need to protect
     * Tx from tasks and bottom half
     */

    if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
        /* not enough descriptor, just stop queue */
        netif_tx_stop_queue(nq);
        local_bh_enable();
        dev_dbg(&net_dev->dev, "Not enough descriptors available");
        edma_cinfo->edma_ethstats.tx_desc_error++;
        return NETDEV_TX_BUSY;
    }

    /* Check and mark VLAN tag offload */
    if (unlikely(skb_vlan_tag_present(skb)))
        flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
    else if (!adapter->edma_cinfo->is_single_phy && adapter->default_vlan_tag)
        flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;

    /* Check and mark checksum offload */
    if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
        flags_transmit |= EDMA_HW_CHECKSUM;

    /* Map and fill descriptor for Tx */
    ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
                               flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
    if (ret) {
        dev_kfree_skb_any(skb);
        adapter->stats.tx_errors++;
    } else {
        /* Update SW producer index */
        edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);

        /* update tx statistics */
        adapter->stats.tx_packets++;
        adapter->stats.tx_bytes += skb->len;
    }

    local_bh_enable();

    return NETDEV_TX_OK;
}
/* edma_flow_may_expire()
 *	Timer function called periodically to delete the node
 */
void edma_flow_may_expire(struct timer_list *t)
{
    struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs);
    struct edma_adapter *adapter =
        container_of(table, typeof(*adapter), rfs);
    int j;

    spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
    for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
        struct hlist_head *hhead;
        struct hlist_node *tmp;
        struct edma_rfs_filter_node *n;
        bool res;

        hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
        hlist_for_each_entry_safe(n, tmp, hhead, node) {
            res = rps_may_expire_flow(adapter->netdev, n->rq_id,
                                      n->flow_id, n->filter_id);
            if (res) {
                int ret;

                ret = edma_delete_rfs_filter(adapter, n);
                if (ret < 0)
                    dev_dbg(&adapter->netdev->dev,
                            "RFS entry %d not allowed to be flushed by Switch",
                            n->flow_id);
                else {
                    hlist_del(&n->node);
                    kfree(n);
                    adapter->rfs.filter_available++;
                }
            }
        }
    }

    adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
    spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
    mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
}
/* edma_rx_flow_steer()
 *	Called by core to steer the flow to CPU
 */
int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq, u32 flow_id)
{
    struct flow_keys keys;
    struct edma_rfs_filter_node *filter_node;
    struct edma_adapter *adapter = netdev_priv(dev);
    u16 hash_tblid;
    int res;

    if (skb->protocol == htons(ETH_P_IPV6)) {
        dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
        res = -EINVAL;
        goto no_protocol_err;
    }

    /* Dissect flow parameters
     * We only support IPv4 + TCP/UDP
     */
    res = skb_flow_dissect_flow_keys(skb, &keys, 0);
    if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
        res = -EPROTONOSUPPORT;
        goto no_protocol_err;
    }

    /* Check if table entry exists */
    hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;

    spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
    filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);

    if (filter_node) {
        if (rxq == filter_node->rq_id) {
            res = -EEXIST;
        } else {
            res = edma_delete_rfs_filter(adapter, filter_node);
            if (res < 0)
                dev_warn(&adapter->netdev->dev,
                         "Cannot steer flow %d to different queue",
                         filter_node->flow_id);
            else {
                adapter->rfs.filter_available++;
                res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
                if (res < 0) {
                    dev_warn(&adapter->netdev->dev,
                             "Cannot steer flow %d to different queue",
                             filter_node->flow_id);
                } else {
                    adapter->rfs.filter_available--;
                    filter_node->rq_id = rxq;
                    filter_node->filter_id = res;
                }
            }
        }
    } else {
        if (adapter->rfs.filter_available == 0) {
            res = -EBUSY;
        } else {
            filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
            if (!filter_node) {
                res = -ENOMEM;
            } else {
                res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
                if (res < 0) {
                    kfree(filter_node);
                } else {
                    adapter->rfs.filter_available--;
                    filter_node->rq_id = rxq;
                    filter_node->filter_id = res;
                    filter_node->flow_id = flow_id;
                    filter_node->keys = keys;
                    INIT_HLIST_NODE(&filter_node->node);
                    hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
                }
            }
        }
    }

    spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);

no_protocol_err:
    return res;
}
/* edma_register_rfs_filter()
 *	Add RFS filter callback
 */
int edma_register_rfs_filter(struct net_device *netdev,
                             set_rfs_filter_callback_t set_filter)
{
    struct edma_adapter *adapter = netdev_priv(netdev);

    spin_lock_bh(&adapter->rfs.rfs_ftab_lock);

    if (adapter->set_rfs_rule) {
        spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
        return -1;
    }

    adapter->set_rfs_rule = set_filter;
    spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);

    return 0;
}
/* edma_alloc_tx_rings()
 *	Allocate Tx rings
 */
int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    int i, err = 0;

    for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
        err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
        if (err) {
            dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
            return err;
        }
    }

    return 0;
}
/* edma_free_tx_rings()
 *	Free Tx rings
 */
void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
{
    int i;

    for (i = 0; i < edma_cinfo->num_tx_queues; i++)
        edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
}
/* edma_free_tx_resources()
 *	Free buffers associated with tx rings
 */
void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
{
    struct edma_tx_desc_ring *etdr;
    struct edma_sw_desc *sw_desc;
    struct platform_device *pdev = edma_cinfo->pdev;
    int i, j;

    for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
        etdr = edma_cinfo->tpd_ring[i];
        for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
            sw_desc = &etdr->sw_desc[j];
            if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
                                  EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
                edma_tx_unmap_and_free(pdev, sw_desc);
        }
    }
}
/* edma_alloc_rx_rings()
 *	Allocate rx rings
 */
int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
{
    struct platform_device *pdev = edma_cinfo->pdev;
    int i, j, err = 0;

    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
        if (err) {
            dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
            return err;
        }
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }

    return 0;
}
/* edma_free_rx_rings()
 *	Free rx rings
 */
void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
{
    int i, j;

    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }
}
/* edma_free_queues()
 *	Free the queues allocated
 */
void edma_free_queues(struct edma_common_info *edma_cinfo)
{
    int i, j;

    for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
        if (edma_cinfo->tpd_ring[i])
            kfree(edma_cinfo->tpd_ring[i]);
        edma_cinfo->tpd_ring[i] = NULL;
    }

    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        if (edma_cinfo->rfd_ring[j])
            kfree(edma_cinfo->rfd_ring[j]);
        edma_cinfo->rfd_ring[j] = NULL;
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }

    edma_cinfo->num_rx_queues = 0;
    edma_cinfo->num_tx_queues = 0;
}
/* edma_free_rx_resources()
 *	Free buffers associated with rx rings
 */
void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
{
    struct edma_rfd_desc_ring *erdr;
    struct edma_sw_desc *sw_desc;
    struct platform_device *pdev = edma_cinfo->pdev;
    int i, j, k;

    for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
        erdr = edma_cinfo->rfd_ring[k];
        for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
            sw_desc = &erdr->sw_desc[j];
            if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
                dma_unmap_single(&pdev->dev, sw_desc->dma,
                                 sw_desc->length, DMA_FROM_DEVICE);
                edma_clean_rfd(erdr, j);
            } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
                dma_unmap_page(&pdev->dev, sw_desc->dma,
                               sw_desc->length, DMA_FROM_DEVICE);
                edma_clean_rfd(erdr, j);
            }
        }
        k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }
}
/* edma_alloc_queues_tx()
 *	Allocate memory for all rings
 */
int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
{
    int i;

    for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
        struct edma_tx_desc_ring *etdr;

        etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
        if (!etdr) {
            edma_free_queues(edma_cinfo);
            return -1;
        }
        etdr->count = edma_cinfo->tx_ring_count;
        edma_cinfo->tpd_ring[i] = etdr;
    }

    return 0;
}
/* edma_alloc_queues_rx()
 *	Allocate memory for all rings
 */
int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
{
    int i, j;

    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        struct edma_rfd_desc_ring *rfd_ring;

        rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
                           GFP_KERNEL);
        if (!rfd_ring) {
            edma_free_queues(edma_cinfo);
            return -1;
        }
        rfd_ring->count = edma_cinfo->rx_ring_count;
        edma_cinfo->rfd_ring[j] = rfd_ring;
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }

    return 0;
}
/* edma_clear_irq_status()
 *	Clear interrupt status
 */
void edma_clear_irq_status(void)
{
    edma_write_reg(EDMA_REG_RX_ISR, 0xff);
    edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
    edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
    edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
}
/* edma_configure()
 *	Configure skb, edma interrupts and control register.
 */
int edma_configure(struct edma_common_info *edma_cinfo)
{
    struct edma_hw *hw = &edma_cinfo->hw;
    u32 intr_modrt_data;
    u32 intr_ctrl_data = 0;
    int i, j, ret_count;

    edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
    intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
    intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
    edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);

    edma_clear_irq_status();

    /* Clear any WOL status */
    edma_write_reg(EDMA_REG_WOL_CTRL, 0);
    intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
    intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
    edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
    edma_configure_tx(edma_cinfo);
    edma_configure_rx(edma_cinfo);

    /* Allocate the RX buffer */
    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];

        ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
        if (ret_count)
            dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }

    /* Configure descriptor Ring */
    edma_init_desc(edma_cinfo);

    return 0;
}
/* edma_irq_enable()
 *	Enable default interrupt generation settings
 */
void edma_irq_enable(struct edma_common_info *edma_cinfo)
{
    struct edma_hw *hw = &edma_cinfo->hw;
    int i, j;

    edma_write_reg(EDMA_REG_RX_ISR, 0xff);
    for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
        edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
        j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
    }
    edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
    for (i = 0; i < edma_cinfo->num_tx_queues; i++)
        edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
}
/* edma_irq_disable()
 *	Disable interrupt generation
 */
void edma_irq_disable(struct edma_common_info *edma_cinfo)
{
    int i;

    for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
        edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);

    for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
        edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
    edma_write_reg(EDMA_REG_MISC_IMR, 0);
    edma_write_reg(EDMA_REG_WOL_IMR, 0);
}
/* edma_free_irqs()
 *	Free all IRQs requested for the per-CPU queues
 */
void edma_free_irqs(struct edma_adapter *adapter)
{
    struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
    int i, j;
    int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);

    for (i = 0; i < CONFIG_NR_CPUS; i++) {
        for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
            free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);

        for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
            free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
    }
}
/* edma_enable_rx_ctrl()
 *	Enable RX queue control
 */
void edma_enable_rx_ctrl(struct edma_hw *hw)
{
    u32 data;

    edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
    data |= EDMA_RXQ_CTRL_EN;
    edma_write_reg(EDMA_REG_RXQ_CTRL, data);
}
/* edma_enable_tx_ctrl()
 *	Enable TX queue control
 */
void edma_enable_tx_ctrl(struct edma_hw *hw)
{
    u32 data;

    edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
    data |= EDMA_TXQ_CTRL_TXQ_EN;
    edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}
/* edma_stop_rx_tx()
 *	Disable RX/TX queue control
 */
void edma_stop_rx_tx(struct edma_hw *hw)
{
    u32 data;

    edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
    data &= ~EDMA_RXQ_CTRL_EN;
    edma_write_reg(EDMA_REG_RXQ_CTRL, data);
    edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
    data &= ~EDMA_TXQ_CTRL_TXQ_EN;
    edma_write_reg(EDMA_REG_TXQ_CTRL, data);
}
/* edma_reset()
 *	Reset the EDMA engine
 */
int edma_reset(struct edma_common_info *edma_cinfo)
{
    struct edma_hw *hw = &edma_cinfo->hw;

    edma_irq_disable(edma_cinfo);

    edma_clear_irq_status();

    edma_stop_rx_tx(hw);

    return 0;
}
/* edma_fill_netdev()
 *	Fill netdev for each etdr
 */
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
                     int dev, int txq_id)
{
    struct edma_tx_desc_ring *etdr;
    int i = 0;

    etdr = edma_cinfo->tpd_ring[queue_id];

    while (etdr->netdev[i])
        i++;

    if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
        return -1;

    /* Populate the netdev associated with the tpd ring */
    etdr->netdev[i] = edma_netdev[dev];
    etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);

    return 0;
}
/* edma_set_mac_addr()
 *	Change the Ethernet Address of the NIC
 */
int edma_set_mac_addr(struct net_device *netdev, void *p)
{
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EINVAL;

    if (netif_running(netdev))
        return -EBUSY;

    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

    return 0;
}
/* edma_set_stp_rstp()
 *	set stp/rstp mode
 */
void edma_set_stp_rstp(bool rstp)
{
    edma_stp_rstp = rstp;
}
/* edma_assign_ath_hdr_type()
 *	assign atheros header eth type
 */
void edma_assign_ath_hdr_type(int eth_type)
{
    edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
}
/* edma_get_default_vlan_tag()
 *	Used by other modules to get the default vlan tag
 */
int edma_get_default_vlan_tag(struct net_device *netdev)
{
    struct edma_adapter *adapter = netdev_priv(netdev);

    if (adapter->default_vlan_tag)
        return adapter->default_vlan_tag;

    return 0;
}
/* edma_open()
 *	gets called when netdevice is up, start the queue.
 */
int edma_open(struct net_device *netdev)
{
    struct edma_adapter *adapter = netdev_priv(netdev);
    struct platform_device *pdev = adapter->edma_cinfo->pdev;

    netif_tx_start_all_queues(netdev);
    edma_initialise_rfs_flow_table(adapter);
    set_bit(__EDMA_UP, &adapter->state_flags);

    /* if Link polling is enabled, in our case enabled for WAN, then
     * do a phy start, else always set link as UP
     */
    if (adapter->poll_required) {
        if (!IS_ERR(adapter->phydev)) {
            /* AR40xx calibration will leave the PHY in unwanted state,
             * so a soft reset is required before phy_start()
             */
            genphy_soft_reset(adapter->phydev);
            phy_start(adapter->phydev);
            phy_start_aneg(adapter->phydev);
            adapter->link_state = __EDMA_LINKDOWN;
        } else {
            dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
        }
    } else {
        adapter->link_state = __EDMA_LINKUP;
        netif_carrier_on(netdev);
    }

    return 0;
}
/* edma_close()
 *	gets called when netdevice is down, stops the queue.
 */
int edma_close(struct net_device *netdev)
{
    struct edma_adapter *adapter = netdev_priv(netdev);

    edma_free_rfs_flow_table(adapter);
    netif_carrier_off(netdev);
    netif_tx_stop_all_queues(netdev);

    if (adapter->poll_required) {
        if (!IS_ERR(adapter->phydev))
            phy_stop(adapter->phydev);
    }

    adapter->link_state = __EDMA_LINKDOWN;

    /* Clear the GMAC UP state before the link state is checked
     * again on the next open.
     */
    clear_bit(__EDMA_UP, &adapter->state_flags);

    return 0;
}
/* edma_poll()
 *	polling function that gets called when the napi gets scheduled.
 *
 * Main sequence of tasks performed in this api
 * is clear irq status -> clear_tx_irq -> clean_rx_irq ->
 * enable interrupts.
 */
int edma_poll(struct napi_struct *napi, int budget)
{
    struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
        struct edma_per_cpu_queues_info, napi);
    struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
    u32 reg_data;
    u32 shadow_rx_status, shadow_tx_status;
    int queue_id;
    int i, work_done = 0;
    u16 rx_pending_fill;

    /* Store the Rx/Tx status by ANDing it with
     * appropriate CPU RX/TX mask
     */
    edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
    edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
    shadow_rx_status = edma_percpu_info->rx_status;
    edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
    edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
    shadow_tx_status = edma_percpu_info->tx_status;

    /* Every core will have a start, which will be computed
     * in probe and stored in edma_percpu_info->tx_start variable.
     * We will shift the status bit by tx_start to obtain
     * status bits for the core on which the current processing
     * is happening. Since there are 4 tx queues per core,
     * we will run the loop till we get the correct queue to clear.
     */
    while (edma_percpu_info->tx_status) {
        queue_id = ffs(edma_percpu_info->tx_status) - 1;
        edma_tx_complete(edma_cinfo, queue_id);
        edma_percpu_info->tx_status &= ~(1 << queue_id);
    }

    /* Every core will have a start, which will be computed
     * in probe and stored in edma_percpu_info->rx_start variable.
     * We will shift the status bit by rx_start to obtain
     * status bits for the core on which the current processing
     * is happening. Since there are 4 rx queues per core, we
     * will run the loop till we get the correct queue to clear.
     */
    while (edma_percpu_info->rx_status) {
        queue_id = ffs(edma_percpu_info->rx_status) - 1;
        rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
                                           budget, queue_id, napi);

        if (likely(work_done < budget)) {
            if (rx_pending_fill) {
                /* reschedule poll() to refill rx buffer deficit */
                work_done = budget;
                break;
            }
            edma_percpu_info->rx_status &= ~(1 << queue_id);
        } else {
            break;
        }
    }

    /* Clear the status register, to avoid the interrupts to
     * reoccur. This clearing of interrupt status register is
     * done here as writing to status register only takes place
     * once the producer/consumer index has been updated to
     * reflect that the packet transmission/reception went fine.
     */
    edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
    edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);

    /* If budget not fully consumed, exit the polling mode */
    if (likely(work_done < budget)) {
        napi_complete(napi);

        /* re-enable the interrupts */
        for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
            edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
        for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
            edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
    }

    return work_done;
}
/* edma_interrupt()
 *	interrupt handler
 */
irqreturn_t edma_interrupt(int irq, void *dev)
{
    struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
    struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
    int i;

    /* Mask the TX/RX interrupt registers; they are re-enabled in edma_poll() */
    for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
        edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);

    for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
        edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);

    napi_schedule(&edma_percpu_info->napi);

    return IRQ_HANDLED;
}