f2c36952fca6001fbe942952c869a0c9b26af844
[openwrt/openwrt.git] / target / linux / lantiq / patches-5.10 / 0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
1 From c3e6b2c35b34214c58c1e90d65dab5f5393608e7 Mon Sep 17 00:00:00 2001
2 From: Aleksander Jan Bajkowski <olek2@wp.pl>
3 Date: Mon, 3 Jan 2022 20:43:16 +0100
4 Subject: [PATCH] net: lantiq_xrx200: add ingress SG DMA support
5
6 This patch adds support for scatter-gather DMA. DMA in PMAC splits
7 the packet into several buffers when the MTU on the CPU port is
8 less than the MTU of the switch. The first buffer starts at an
9 offset of NET_IP_ALIGN. In subsequent buffers, DMA ignores the
10 offset. Thanks to this patch, the user can still connect to the
11 device in such a situation. For normal configurations, the patch
12 has no effect on performance.
13
14 Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
15 Signed-off-by: David S. Miller <davem@davemloft.net>
16 ---
17 drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
18 1 file changed, 40 insertions(+), 7 deletions(-)
19
20 --- a/drivers/net/ethernet/lantiq_xrx200.c
21 +++ b/drivers/net/ethernet/lantiq_xrx200.c
22 @@ -26,6 +26,9 @@
23 #define XRX200_DMA_RX 0
24 #define XRX200_DMA_TX 1
25
26 +#define XRX200_DMA_PACKET_COMPLETE 0
27 +#define XRX200_DMA_PACKET_IN_PROGRESS 1
28 +
29 /* cpu port mac */
30 #define PMAC_RX_IPG 0x0024
31 #define PMAC_RX_IPG_MASK 0xf
32 @@ -61,6 +64,9 @@ struct xrx200_chan {
33 struct ltq_dma_channel dma;
34 struct sk_buff *skb[LTQ_DESC_NUM];
35
36 + struct sk_buff *skb_head;
37 + struct sk_buff *skb_tail;
38 +
39 struct xrx200_priv *priv;
40 };
41
42 @@ -204,7 +210,8 @@ static int xrx200_hw_receive(struct xrx2
43 struct xrx200_priv *priv = ch->priv;
44 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
45 struct sk_buff *skb = ch->skb[ch->dma.desc];
46 - int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
47 + u32 ctl = desc->ctl;
48 + int len = (ctl & LTQ_DMA_SIZE_MASK);
49 struct net_device *net_dev = priv->net_dev;
50 int ret;
51
52 @@ -220,12 +227,36 @@ static int xrx200_hw_receive(struct xrx2
53 }
54
55 skb_put(skb, len);
56 - skb->protocol = eth_type_trans(skb, net_dev);
57 - netif_receive_skb(skb);
58 - net_dev->stats.rx_packets++;
59 - net_dev->stats.rx_bytes += len;
60
61 - return 0;
62 + /* add buffers to skb via skb->frag_list */
63 + if (ctl & LTQ_DMA_SOP) {
64 + ch->skb_head = skb;
65 + ch->skb_tail = skb;
66 + } else if (ch->skb_head) {
67 + if (ch->skb_head == ch->skb_tail)
68 + skb_shinfo(ch->skb_tail)->frag_list = skb;
69 + else
70 + ch->skb_tail->next = skb;
71 + ch->skb_tail = skb;
72 + skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
73 + ch->skb_head->len += skb->len;
74 + ch->skb_head->data_len += skb->len;
75 + ch->skb_head->truesize += skb->truesize;
76 + }
77 +
78 + if (ctl & LTQ_DMA_EOP) {
79 + ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
80 + netif_receive_skb(ch->skb_head);
81 + net_dev->stats.rx_packets++;
82 + net_dev->stats.rx_bytes += ch->skb_head->len;
83 + ch->skb_head = NULL;
84 + ch->skb_tail = NULL;
85 + ret = XRX200_DMA_PACKET_COMPLETE;
86 + } else {
87 + ret = XRX200_DMA_PACKET_IN_PROGRESS;
88 + }
89 +
90 + return ret;
91 }
92
93 static int xrx200_poll_rx(struct napi_struct *napi, int budget)
94 @@ -240,7 +271,9 @@ static int xrx200_poll_rx(struct napi_st
95
96 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
97 ret = xrx200_hw_receive(ch);
98 - if (ret)
99 + if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
100 + continue;
101 + if (ret != XRX200_DMA_PACKET_COMPLETE)
102 return ret;
103 rx++;
104 } else {