// SPDX-License-Identifier: (GPL-2.0 OR ISC)
/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
 * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
 * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dsa/ipq4019.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "ipqess.h"
#define IPQESS_RRD_SIZE		16
#define IPQESS_NEXT_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
#define IPQESS_TX_DMA_BUF_LEN	0x3fff
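/* Example (editorial note, not in the original source): IPQESS_NEXT_IDX
 * assumes the ring size Y is a power of two, so increment-and-mask acts as a
 * cheap modulo. With Y == 128, IPQESS_NEXT_IDX(126, 128) == 127 and
 * IPQESS_NEXT_IDX(127, 128) == 0; a non-power-of-two ring size would wrap
 * incorrectly here.
 */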
static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
{
	writel(val, ess->hw_addr + reg);
}

static u32 ipqess_r32(struct ipqess *ess, u16 reg)
{
	return readl(ess->hw_addr + reg);
}

static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
{
	u32 _val = ipqess_r32(ess, reg);

	_val &= ~mask;
	_val |= val;

	ipqess_w32(ess, reg, _val);
}
void ipqess_update_hw_stats(struct ipqess *ess)
{
	uint32_t *p;
	u32 stat;
	int i;

	lockdep_assert_held(&ess->stats_lock);

	p = (uint32_t *)&(ess->ipqessstats);
	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
		*p += stat;
		p++;
	}
}
static int ipqess_tx_ring_alloc(struct ipqess *ess)
{
	struct device *dev = &ess->pdev->dev;
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
		size_t size;
		u32 idx;

		tx_ring->ess = ess;
		tx_ring->ring_id = i;
		tx_ring->idx = i * 4;
		tx_ring->count = IPQESS_TX_RING_SIZE;
		tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);

		size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
		tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
		if (!tx_ring->buf) {
			netdev_err(ess->netdev, "buffer alloc of tx ring failed");
			return -ENOMEM;
		}

		size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
		tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
						       GFP_KERNEL | __GFP_ZERO);
		if (!tx_ring->hw_desc) {
			netdev_err(ess->netdev, "descriptor allocation for tx ring failed");
			return -ENOMEM;
		}

		ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
			   (u32)tx_ring->dma);

		idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
		idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
		idx &= 0xffff;
		tx_ring->head = tx_ring->tail = idx;

		ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
			   idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
		ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
		ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
	}

	return 0;
}
static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
{
	int len = 0;

	if (buf->flags & IPQESS_DESC_SINGLE)
		dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
	else if (buf->flags & IPQESS_DESC_PAGE)
		dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);

	if (buf->flags & IPQESS_DESC_LAST) {
		len = buf->skb->len;
		dev_kfree_skb_any(buf->skb);
	}

	buf->flags = 0;

	return len;
}
static void ipqess_tx_ring_free(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		if (ess->tx_ring[i].hw_desc)
			continue;

		for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
			struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];

			ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
		}

		ess->tx_ring[i].buf = NULL;
	}
}
static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
				 struct ipqess_rx_ring *rx_ring)
{
	/* Clean the HW DESC header, otherwise we might end up
	 * with a spurious desc because of random garbage
	 */
	memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));

	buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
				  IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
		dev_err_once(rx_ring->ppdev,
			     "IPQESS DMA mapping failed for linear address %x",
			     buf->dma);
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
		return -ENOMEM;
	}

	buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
	rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
	rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;

	ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
		   (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
		   IPQESS_REG_RFD_IDX_Q(rx_ring->idx));

	return 0;
}
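/* Worked example (editorial note): the ipqess_m32() call above publishes
 * head - 1 modulo the ring size as the RFD producer index, i.e. the last
 * slot that has actually been prepared. Assuming a 256-entry RX ring and
 * head == 0 after a wrap, the value written is (0 + 256 - 1) % 256 = 255.
 */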
/* locking is handled by the caller */
static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
{
	struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];

	buf->skb = napi_alloc_skb(&rx_ring->napi_rx, IPQESS_RX_HEAD_BUFF_SIZE);
	if (!buf->skb)
		return -ENOMEM;

	return ipqess_rx_buf_prepare(buf, rx_ring);
}
static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
{
	struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];

	buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
					     IPQESS_RX_HEAD_BUFF_SIZE);
	if (!buf->skb)
		return -ENOMEM;

	return ipqess_rx_buf_prepare(buf, rx_ring);
}
static void ipqess_refill_work(struct work_struct *work)
{
	struct ipqess_rx_ring_refill *rx_refill = container_of(work,
		struct ipqess_rx_ring_refill, refill_work);
	struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
	int refill = 0;

	/* don't let this loop by accident. */
	while (atomic_dec_and_test(&rx_ring->refill_count)) {
		napi_disable(&rx_ring->napi_rx);
		if (ipqess_rx_buf_alloc(rx_ring)) {
			refill++;
			dev_dbg(rx_ring->ppdev,
				"Not all buffers were reallocated");
		}
		napi_enable(&rx_ring->napi_rx);
	}

	if (atomic_add_return(refill, &rx_ring->refill_count))
		schedule_work(&rx_refill->refill_work);
}
static int ipqess_rx_ring_alloc(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		ess->rx_ring[i].ess = ess;
		ess->rx_ring[i].ppdev = &ess->pdev->dev;
		ess->rx_ring[i].ring_id = i;
		ess->rx_ring[i].idx = i * 2;

		ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
						   sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
						   GFP_KERNEL);
		if (!ess->rx_ring[i].buf)
			return -ENOMEM;

		ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
							      sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
							      &ess->rx_ring[i].dma, GFP_KERNEL);
		if (!ess->rx_ring[i].hw_desc)
			return -ENOMEM;

		for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
			if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
				return -ENOMEM;

		ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
		INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);

		ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
			   (u32)(ess->rx_ring[i].dma));
	}

	ipqess_w32(ess, IPQESS_REG_RX_DESC0,
		   (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
		   (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));

	return 0;
}
static void ipqess_rx_ring_free(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		atomic_set(&ess->rx_ring[i].refill_count, 0);
		cancel_work_sync(&ess->rx_refill[i].refill_work);

		for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
			dma_unmap_single(&ess->pdev->dev,
					 ess->rx_ring[i].buf[j].dma,
					 ess->rx_ring[i].buf[j].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
		}
	}
}
static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);

	spin_lock(&ess->stats_lock);
	ipqess_update_hw_stats(ess);
	spin_unlock(&ess->stats_lock);

	return &ess->stats;
}
static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
{
	u32 length = 0, num_desc, tail, rx_ring_tail;
	int done = 0;

	rx_ring_tail = rx_ring->tail;

	tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
	tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
	tail &= IPQESS_RFD_CONS_IDX_MASK;

	while (done < budget) {
		struct sk_buff *skb;
		struct ipqess_rx_desc *rd;

		if (rx_ring_tail == tail)
			break;

		dma_unmap_single(rx_ring->ppdev,
				 rx_ring->buf[rx_ring_tail].dma,
				 rx_ring->buf[rx_ring_tail].length,
				 DMA_FROM_DEVICE);

		skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
		rd = (struct ipqess_rx_desc *)skb->data;
		rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);

		/* Check if RRD is valid */
		if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
			num_desc = 1;
			dev_kfree_skb_any(skb);
			goto skip;
		}

		num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
		length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;

		skb_reserve(skb, IPQESS_RRD_SIZE);
		if (num_desc > 1) {
			/* can we use build_skb here ? */
			struct sk_buff *skb_prev = NULL;
			int size_remaining;
			int i;

			skb->data_len = 0;
			skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
			skb->len = skb->truesize = length;
			size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);

			for (i = 1; i < num_desc; i++) {
				/* TODO: use build_skb ? */
				struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;

				dma_unmap_single(rx_ring->ppdev,
						 rx_ring->buf[rx_ring_tail].dma,
						 rx_ring->buf[rx_ring_tail].length,
						 DMA_FROM_DEVICE);

				skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
				if (skb_prev)
					skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
				else
					skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
				skb_prev = rx_ring->buf[rx_ring_tail].skb;
				rx_ring->buf[rx_ring_tail].skb->next = NULL;

				skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
				size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;

				rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
			}
		} else {
			skb_put(skb, length);
		}

		skb->dev = rx_ring->ess->netdev;
		skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
		skb_record_rx_queue(skb, rx_ring->ring_id);

		if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (rd->rrd7 & IPQESS_RRD_CVLAN) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
		} else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
		}

		napi_gro_receive(&rx_ring->napi_rx, skb);

		/* TODO: do we need to have these here ? */
		rx_ring->ess->stats.rx_packets++;
		rx_ring->ess->stats.rx_bytes += length;

		done++;
skip:
		num_desc += atomic_xchg(&rx_ring->refill_count, 0);
		while (num_desc) {
			if (ipqess_rx_buf_alloc_napi(rx_ring)) {
				num_desc = atomic_add_return(num_desc,
							     &rx_ring->refill_count);
				if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
					schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
				break;
			}
			num_desc--;
		}
	}

	ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
		   rx_ring_tail);
	rx_ring->tail = rx_ring_tail;

	return done;
}
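/* Illustration (editorial note): the refill threshold
 * (4 * IPQESS_RX_RING_SIZE + 6) / 7 is roughly 4/7 of the RX ring. Assuming
 * a 128-entry ring it evaluates to (512 + 6) / 7 = 74, so the deferred
 * refill worker is only scheduled once more than half the ring is waiting
 * for buffers.
 */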
static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
{
	u32 tail;
	int done = 0;
	int total = 0, ret;

	tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
	tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
	tail &= IPQESS_TPD_CONS_IDX_MASK;

	while ((tx_ring->tail != tail) && (done < budget)) {
		ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
					       &tx_ring->buf[tx_ring->tail]);
		tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);

		if (ret) {
			total += ret;
			done++;
		}
	}

	ipqess_w32(tx_ring->ess,
		   IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
		   tx_ring->tail);

	if (netif_tx_queue_stopped(tx_ring->nq)) {
		netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
			   tx_ring->idx);
		netif_tx_wake_queue(tx_ring->nq);
	}

	netdev_tx_completed_queue(tx_ring->nq, done, total);

	return done;
}
static int ipqess_tx_napi(struct napi_struct *napi, int budget)
{
	struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
						      napi_tx);
	u32 tx_status;
	int work_done = 0;

	tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
	tx_status &= BIT(tx_ring->idx);

	work_done = ipqess_tx_complete(tx_ring, budget);

	ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);

	if (likely(work_done < budget)) {
		if (napi_complete_done(napi, work_done))
			ipqess_w32(tx_ring->ess,
				   IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
	}

	return work_done;
}
static int ipqess_rx_napi(struct napi_struct *napi, int budget)
{
	struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
						      napi_rx);
	struct ipqess *ess = rx_ring->ess;
	int remain_budget = budget;
	int rx_done;
	u32 rx_mask = BIT(rx_ring->idx);
	u32 status;

poll_again:
	ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
	rx_done = ipqess_rx_poll(rx_ring, remain_budget);

	if (rx_done == remain_budget)
		return budget;

	status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
	if (status & rx_mask) {
		remain_budget -= rx_done;
		goto poll_again;
	}

	if (napi_complete_done(napi, rx_done + budget - remain_budget))
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);

	return rx_done + budget - remain_budget;
}
static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
{
	struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;

	if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
		ipqess_w32(tx_ring->ess,
			   IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
			   0x0);
		__napi_schedule(&tx_ring->napi_tx);
	}

	return IRQ_HANDLED;
}
static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
{
	struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;

	if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
		ipqess_w32(rx_ring->ess,
			   IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
			   0x0);
		__napi_schedule(&rx_ring->napi_rx);
	}

	return IRQ_HANDLED;
}
static void ipqess_irq_enable(struct ipqess *ess)
{
	int i;

	ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
	ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
	}
}
static void ipqess_irq_disable(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
	}
}
static int __init ipqess_init(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct device_node *of_node = ess->pdev->dev.of_node;

	return phylink_of_phy_connect(ess->phylink, of_node, 0);
}
static void ipqess_uninit(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);

	phylink_disconnect_phy(ess->phylink);
}
static int ipqess_open(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		napi_enable(&ess->tx_ring[i].napi_tx);
		napi_enable(&ess->rx_ring[i].napi_rx);
	}
	ipqess_irq_enable(ess);
	phylink_start(ess->phylink);
	netif_tx_start_all_queues(netdev);

	return 0;
}
static int ipqess_stop(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	int i;

	netif_tx_stop_all_queues(netdev);
	phylink_stop(ess->phylink);
	ipqess_irq_disable(ess);
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		napi_disable(&ess->tx_ring[i].napi_tx);
		napi_disable(&ess->rx_ring[i].napi_rx);
	}

	return 0;
}
static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ipqess *ess = netdev_priv(netdev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(ess->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
{
	u16 count = 0;

	if (tx_ring->tail <= tx_ring->head)
		count = IPQESS_TX_RING_SIZE;

	count += tx_ring->tail - tx_ring->head - 1;

	return count;
}
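/* Worked example (editorial note, assuming a 128-entry TX ring): when the
 * producer has wrapped past the consumer, e.g. head == 10 and tail == 5,
 * the computation above yields 128 + (5 - 10 - 1) = 122 free descriptors;
 * without a wrap, e.g. head == 5 and tail == 10, it yields 10 - 5 - 1 = 4.
 * One slot is always left unused so a full ring can be told apart from an
 * empty one.
 */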
static inline int ipqess_cal_txd_req(struct sk_buff *skb)
{
	int tpds;

	/* one TPD for the header, and one for each fragment */
	tpds = 1 + skb_shinfo(skb)->nr_frags;
	if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
		/* for LSOv2 one extra TPD is needed */
		tpds++;
	}

	return tpds;
}
static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
					       struct ipqess_tx_desc *desc)
{
	return &tx_ring->buf[desc - tx_ring->hw_desc];
}
static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
{
	struct ipqess_tx_desc *desc;

	desc = &tx_ring->hw_desc[tx_ring->head];
	tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);

	return desc;
}
static void ipqess_rollback_tx(struct ipqess *eth,
			       struct ipqess_tx_desc *first_desc, int ring_id)
{
	struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
	struct ipqess_buf *buf;
	struct ipqess_tx_desc *desc = NULL;
	u16 start_index, index;

	start_index = first_desc - tx_ring->hw_desc;

	index = start_index;
	while (index != tx_ring->head) {
		desc = &tx_ring->hw_desc[index];
		buf = &tx_ring->buf[index];
		ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
		memset(desc, 0, sizeof(struct ipqess_tx_desc));
		if (++index == tx_ring->count)
			index = 0;
	}
	tx_ring->head = start_index;
}
static bool ipqess_process_dsa_tag_sh(struct sk_buff *skb, u32 *word3)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct ipq40xx_dsa_tag_data *tag_data;

	if (shinfo->dsa_tag_proto != DSA_TAG_PROTO_IPQ4019)
		return false;

	tag_data = (struct ipq40xx_dsa_tag_data *)shinfo->dsa_tag_data;

	pr_debug("SH tag @ %08x, dp:%02x from_cpu:%u\n",
		 (u32)tag_data, tag_data->dp, tag_data->from_cpu);

	*word3 |= tag_data->dp << IPQESS_TPD_PORT_BITMAP_SHIFT;
	if (tag_data->from_cpu)
		*word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);

	return true;
}
static void ipqess_get_dp_info(struct ipqess *ess, struct sk_buff *skb,
			       u32 *word3)
{
	if (netdev_uses_dsa(ess->netdev)) {
		if (ipqess_process_dsa_tag_sh(skb, word3))
			return;
	}

	*word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
}
static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct ipqess_buf *buf = NULL;
	struct platform_device *pdev = tx_ring->ess->pdev;
	struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
	u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
	u16 len;
	int i;

	ipqess_get_dp_info(tx_ring->ess, skb, &word3);

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			lso_word1 |= IPQESS_TPD_IPV4_EN;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
								 ip_hdr(skb)->daddr,
								 0, IPPROTO_TCP, 0);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			lso_word1 |= IPQESS_TPD_LSO_V2_EN;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							       &ipv6_hdr(skb)->daddr,
							       0, IPPROTO_TCP, 0);
		}

		lso_word1 |= IPQESS_TPD_LSO_EN |
			     ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
			     (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		u16 css, cso;

		cso = skb_checksum_start_offset(skb);
		css = cso + skb->csum_offset;

		word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
		word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
		word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
	}

	if (skb_vlan_tag_present(skb)) {
		switch (skb->vlan_proto) {
		case htons(ETH_P_8021Q):
			word3 |= BIT(IPQESS_TX_INS_CVLAN);
			word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
			break;
		case htons(ETH_P_8021AD):
			word1 |= BIT(IPQESS_TX_INS_SVLAN);
			svlan_tag = skb_vlan_tag_get(skb);
			break;
		default:
			dev_err(&pdev->dev, "no ctag or stag present\n");
			return -EINVAL;
		}
	}

	if (eth_type_vlan(skb->protocol))
		word1 |= IPQESS_TPD_VLAN_TAGGED;

	if (skb->protocol == htons(ETH_P_PPP_SES))
		word1 |= IPQESS_TPD_PPPOE_EN;

	len = skb_headlen(skb);

	first_desc = desc = ipqess_tx_desc_next(tx_ring);
	if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
		desc->addr = cpu_to_le16(skb->len);
		desc->word1 = word1 | lso_word1;
		desc->svlan_tag = svlan_tag;

		desc = ipqess_tx_desc_next(tx_ring);
	}

	buf = ipqess_get_tx_buffer(tx_ring, desc);
	buf->length = len;
	buf->dma = dma_map_single(&pdev->dev,
				  skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buf->dma))
		goto dma_error;

	desc->addr = cpu_to_le32(buf->dma);
	desc->len = cpu_to_le16(len);

	buf->flags |= IPQESS_DESC_SINGLE;
	desc->word1 = word1 | lso_word1;
	desc->svlan_tag = svlan_tag;
	desc->word3 = word3;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		desc = ipqess_tx_desc_next(tx_ring);
		buf = ipqess_get_tx_buffer(tx_ring, desc);
		buf->length = len;
		buf->flags |= IPQESS_DESC_PAGE;
		buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buf->dma))
			goto dma_error;

		desc->addr = cpu_to_le32(buf->dma);
		desc->len = cpu_to_le16(len);
		desc->svlan_tag = svlan_tag;
		desc->word1 = word1 | lso_word1;
		desc->word3 = word3;
	}
	desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;
	buf->skb = skb;
	buf->flags |= IPQESS_DESC_LAST;

	return 0;

dma_error:
	ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
	dev_err(&pdev->dev, "TX DMA map failed\n");
	return -ENOMEM;
}
static inline void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
{
	/* Ensure that all TPDs have been written completely */
	dma_wmb();

	/* update software producer index */
	ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
		   tx_ring->head);
}
static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
			       struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct ipqess_tx_ring *tx_ring;
	int avail;
	int tx_num;
	int ret;

	tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
	tx_num = ipqess_cal_txd_req(skb);
	avail = ipqess_tx_desc_available(tx_ring);
	if (avail < tx_num) {
		netdev_dbg(netdev,
			   "stopping tx queue %d, avail=%d req=%d im=%x\n",
			   tx_ring->idx, avail, tx_num,
			   ipqess_r32(tx_ring->ess,
				      IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
		netif_tx_stop_queue(tx_ring->nq);
		ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
		ipqess_kick_tx(tx_ring);
		return NETDEV_TX_BUSY;
	}

	ret = ipqess_tx_map_and_fill(tx_ring, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		ess->stats.tx_errors++;
		goto err_out;
	}

	ess->stats.tx_packets++;
	ess->stats.tx_bytes += skb->len;
	netdev_tx_sent_queue(tx_ring->nq, skb->len);

	if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
		ipqess_kick_tx(tx_ring);

err_out:
	return NETDEV_TX_OK;
}
static int ipqess_set_mac_address(struct net_device *netdev, void *p)
{
	int ret = eth_mac_addr(netdev, p);
	struct ipqess *ess = netdev_priv(netdev);
	const char *macaddr = netdev->dev_addr;

	if (ret)
		return ret;

	ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
		   (macaddr[0] << 8) | macaddr[1]);
	ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
		   (macaddr[2] << 24) | (macaddr[3] << 16) |
		   (macaddr[4] << 8) | macaddr[5]);

	return 0;
}
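/* Example (editorial note, illustrative values only): for the MAC address
 * 00:11:22:33:44:55, MAC_CTRL1 is written with 0x0011 (bytes 0-1) and
 * MAC_CTRL0 with 0x22334455 (bytes 2-5), matching the shifts above.
 */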
static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];

	netdev_warn(netdev, "hardware queue %d is stuck\n",
		    tr->idx);

	/* TODO: dump hardware queue */
}
static const struct net_device_ops ipqess_axi_netdev_ops = {
	.ndo_init		= ipqess_init,
	.ndo_uninit		= ipqess_uninit,
	.ndo_open		= ipqess_open,
	.ndo_stop		= ipqess_stop,
	.ndo_do_ioctl		= ipqess_do_ioctl,
	.ndo_start_xmit		= ipqess_xmit,
	.ndo_get_stats		= ipqess_get_stats,
	.ndo_set_mac_address	= ipqess_set_mac_address,
	.ndo_tx_timeout		= ipqess_tx_timeout,
};
static void ipqess_hw_stop(struct ipqess *ess)
{
	int i;

	/* disable all RX queue IRQs */
	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);

	/* disable all TX queue IRQs */
	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);

	/* disable all other IRQs */
	ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
	ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);

	/* clear the IRQ status registers */
	ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
	ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
	ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
	ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
	ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);

	/* disable RX and TX queues */
	ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
	ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
}
static int ipqess_hw_init(struct ipqess *ess)
{
	u32 tmp;
	int i, err;

	ipqess_hw_stop(ess);

	ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
		   IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
		   IPQESS_REG_INTR_CTRL);

	/* enable IRQ delay slot */
	ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
		   (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
		   (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));

	/* Set Customer and Service VLAN TPIDs */
	ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
		   (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
		   (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));

	/* Configure the TX Queue bursting */
	ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
		   (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
		   (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
		   IPQESS_TXQ_CTRL_TPD_BURST_EN);

	/* Set RSS type */
	ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
		   IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
		   IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
		   IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);

	/* Set RFD ring burst and threshold */
	ipqess_w32(ess, IPQESS_REG_RX_DESC1,
		   (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
		   (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
		   (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));

	/* Set Rx FIFO
	 * - threshold to start DMA of data to the host
	 */
	ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
		   IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);

	err = ipqess_rx_ring_alloc(ess);
	if (err)
		return err;

	err = ipqess_tx_ring_alloc(ess);
	if (err)
		return err;

	/* Load all of the ring base addresses above into the dma engine */
	ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
		   IPQESS_REG_TX_SRAM_PART);

	/* Disable TX FIFO low watermark and high watermark */
	ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);

	/* Configure RSS indirection table.
	 * 128 hash values are configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on.
	 */
	for (i = 0; i < IPQESS_NUM_IDT; i++)
		ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entries are configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings */
	ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
	ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes */
	ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
		   IPQESS_AXIW_MAXWRSIZE_VALUE);

	/* Enable TX queues */
	ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);

	/* Enable RX queues */
	tmp = 0;
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
		tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);

	ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);

	return 0;
}
static void ipqess_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct ipqess *ess = container_of(config, struct ipqess, phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_INTERNAL) {
		dev_err(&ess->pdev->dev, "unsupported interface mode: %d\n",
			state->interface);
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}
static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
}

static void ipqess_mac_link_down(struct phylink_config *config,
				 unsigned int mode,
				 phy_interface_t interface)
{
}

static void ipqess_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy, unsigned int mode,
			       phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
}

static struct phylink_mac_ops ipqess_phylink_mac_ops = {
	.validate	= ipqess_validate,
	.mac_config	= ipqess_mac_config,
	.mac_link_up	= ipqess_mac_link_up,
	.mac_link_down	= ipqess_mac_link_down,
};
static void ipqess_cleanup(struct ipqess *ess)
{
	ipqess_hw_stop(ess);
	unregister_netdev(ess->netdev);

	ipqess_tx_ring_free(ess);
	ipqess_rx_ring_free(ess);

	if (!IS_ERR_OR_NULL(ess->phylink))
		phylink_destroy(ess->phylink);
}
static void ess_reset(struct ipqess *ess)
{
	reset_control_assert(ess->ess_rst);

	mdelay(10);

	reset_control_deassert(ess->ess_rst);

	/* Waiting for all inner tables to be flushed and reinitialized.
	 * This takes between 5 and 10 ms.
	 */
	mdelay(10);
}
static int ipqess_axi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ipqess *ess;
	struct net_device *netdev;
	struct resource *res;
	int i, err = 0;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct ipqess),
					 IPQESS_NETDEV_QUEUES,
					 IPQESS_NETDEV_QUEUES);
	if (!netdev)
		return -ENOMEM;

	ess = netdev_priv(netdev);
	ess->netdev = netdev;
	ess->pdev = pdev;
	spin_lock_init(&ess->stats_lock);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	platform_set_drvdata(pdev, netdev);

	err = of_get_mac_address(np, netdev->dev_addr);
	if (err == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (err) {
		random_ether_addr(netdev->dev_addr);
		dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
			 netdev->dev_addr);
		netdev->addr_assign_type = NET_ADDR_RANDOM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ess->hw_addr)) {
		err = PTR_ERR(ess->hw_addr);
		goto err_out;
	}

	ess->ess_clk = of_clk_get_by_name(np, "ess_clk");
	if (IS_ERR(ess->ess_clk)) {
		dev_err(&pdev->dev, "Failed to get ess_clk\n");
		return PTR_ERR(ess->ess_clk);
	}

	ess->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
	if (IS_ERR(ess->ess_rst)) {
		dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
		return PTR_ERR(ess->ess_rst);
	}

	clk_prepare_enable(ess->ess_clk);

	ess_reset(ess);

	ess->phylink_config.dev = &netdev->dev;
	ess->phylink_config.type = PHYLINK_NETDEV;
	ess->phylink_config.pcs_poll = true;

	ess->phylink = phylink_create(&ess->phylink_config,
				      of_fwnode_handle(np),
				      PHY_INTERFACE_MODE_INTERNAL,
				      &ipqess_phylink_mac_ops);
	if (IS_ERR(ess->phylink)) {
		err = PTR_ERR(ess->phylink);
		goto err_out;
	}

	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		ess->tx_irq[i] = platform_get_irq(pdev, i);
		scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
			  "%s:txq%d", pdev->name, i);
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
		scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
			  "%s:rxq%d", pdev->name, i);
	}

#undef NETIF_F_TSO6
#define NETIF_F_TSO6 0

	netdev->netdev_ops = &ipqess_axi_netdev_ops;
	netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GRO | NETIF_F_SG;
	/* feature change is not supported yet */
	netdev->hw_features = 0;
	netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->base_addr = (u32)ess->hw_addr;
	netdev->max_mtu = 9000;
	netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;

	ipqess_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err)
		goto err_out;

	err = ipqess_hw_init(ess);
	if (err)
		goto err_out;

	dev_set_threaded(netdev, true);

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int qid;

		netif_tx_napi_add(netdev, &ess->tx_ring[i].napi_tx,
				  ipqess_tx_napi, 64);
		netif_napi_add(netdev,
			       &ess->rx_ring[i].napi_rx,
			       ipqess_rx_napi, 64);

		qid = ess->tx_ring[i].idx;
		err = devm_request_irq(&ess->netdev->dev, ess->tx_irq[qid],
				       ipqess_interrupt_tx, 0, ess->tx_irq_names[qid],
				       &ess->tx_ring[i]);
		if (err)
			goto err_out;

		qid = ess->rx_ring[i].idx;
		err = devm_request_irq(&ess->netdev->dev, ess->rx_irq[qid],
				       ipqess_interrupt_rx, 0, ess->rx_irq_names[qid],
				       &ess->rx_ring[i]);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	ipqess_cleanup(ess);
	return err;
}
static int ipqess_axi_remove(struct platform_device *pdev)
{
	const struct net_device *netdev = platform_get_drvdata(pdev);
	struct ipqess *ess = netdev_priv(netdev);

	ipqess_cleanup(ess);

	return 0;
}
static const struct of_device_id ipqess_of_mtable[] = {
	{ .compatible = "qcom,ipq4019-ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, ipqess_of_mtable);

static struct platform_driver ipqess_axi_driver = {
	.driver = {
		.name		= "ipqess-edma",
		.of_match_table	= ipqess_of_mtable,
	},
	.probe		= ipqess_axi_probe,
	.remove		= ipqess_axi_remove,
};

module_platform_driver(ipqess_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
MODULE_LICENSE("GPL");