1 From c3e6b2c35b34214c58c1e90d65dab5f5393608e7 Mon Sep 17 00:00:00 2001
2 From: Aleksander Jan Bajkowski <olek2@wp.pl>
3 Date: Mon, 3 Jan 2022 20:43:16 +0100
4 Subject: [PATCH] net: lantiq_xrx200: add ingress SG DMA support
6 This patch adds support for scatter-gather DMA. The DMA in the PMAC
7 splits a packet into several buffers when the MTU on the CPU port is
8 less than the MTU of the switch. The first buffer starts at an
9 offset of NET_IP_ALIGN; in subsequent buffers, the DMA ignores that
10 offset. With this patch, the user can still connect to the
11 device in such a situation. For normal configurations, the patch
12 has no effect on performance.
14 Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
15 Signed-off-by: David S. Miller <davem@davemloft.net>
17 drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
18 1 file changed, 40 insertions(+), 7 deletions(-)
20 --- a/drivers/net/ethernet/lantiq_xrx200.c
21 +++ b/drivers/net/ethernet/lantiq_xrx200.c
23 #define XRX200_DMA_RX 0
24 #define XRX200_DMA_TX 1
26 +#define XRX200_DMA_PACKET_COMPLETE 0
27 +#define XRX200_DMA_PACKET_IN_PROGRESS 1
30 #define PMAC_RX_IPG 0x0024
31 #define PMAC_RX_IPG_MASK 0xf
32 @@ -61,6 +64,9 @@ struct xrx200_chan {
33 struct ltq_dma_channel dma;
34 struct sk_buff *skb[LTQ_DESC_NUM];
36 + struct sk_buff *skb_head;
37 + struct sk_buff *skb_tail;
39 struct xrx200_priv *priv;
42 @@ -204,7 +210,8 @@ static int xrx200_hw_receive(struct xrx2
43 struct xrx200_priv *priv = ch->priv;
44 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
45 struct sk_buff *skb = ch->skb[ch->dma.desc];
46 - int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
47 + u32 ctl = desc->ctl;
48 + int len = (ctl & LTQ_DMA_SIZE_MASK);
49 struct net_device *net_dev = priv->net_dev;
52 @@ -220,12 +227,36 @@ static int xrx200_hw_receive(struct xrx2
56 - skb->protocol = eth_type_trans(skb, net_dev);
57 - netif_receive_skb(skb);
58 - net_dev->stats.rx_packets++;
59 - net_dev->stats.rx_bytes += len;
62 + /* add buffers to skb via skb->frag_list */
63 + if (ctl & LTQ_DMA_SOP) {
66 + } else if (ch->skb_head) {
67 + if (ch->skb_head == ch->skb_tail)
68 + skb_shinfo(ch->skb_tail)->frag_list = skb;
70 + ch->skb_tail->next = skb;
72 + skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
73 + ch->skb_head->len += skb->len;
74 + ch->skb_head->data_len += skb->len;
75 + ch->skb_head->truesize += skb->truesize;
78 + if (ctl & LTQ_DMA_EOP) {
79 + ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
80 + netif_receive_skb(ch->skb_head);
81 + net_dev->stats.rx_packets++;
82 + net_dev->stats.rx_bytes += ch->skb_head->len;
83 + ch->skb_head = NULL;
84 + ch->skb_tail = NULL;
85 + ret = XRX200_DMA_PACKET_COMPLETE;
87 + ret = XRX200_DMA_PACKET_IN_PROGRESS;
93 static int xrx200_poll_rx(struct napi_struct *napi, int budget)
94 @@ -240,7 +271,9 @@ static int xrx200_poll_rx(struct napi_st
96 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
97 ret = xrx200_hw_receive(ch);
99 + if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
101 + if (ret != XRX200_DMA_PACKET_COMPLETE)