target/linux/generic/backport-5.15/702-v5.19-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 5 Feb 2022 17:59:07 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent
 DMA

It improves performance by eliminating the need for a cache flush on rx and
tx. In preparation for supporting WED (Wireless Ethernet Dispatch), also add
a function for disabling coherent DMA at runtime.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
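Note (illustration only, not part of the patch): the coherent path is driven
by the "dma-coherent" DT property. In the mtk_hw_init() hunk below,
of_dma_is_coherent() returns a bool, so multiplying it by dma_mask either
sets or clears all three agent bits in ETHSYS_DMA_AG_MAP at once; a more
explicit equivalent of that update would be:

        u32 val = of_dma_is_coherent(eth->dma_dev->of_node) ? dma_mask : 0;

        /* mark the PDMA/QDMA/PPE DMA agents coherent, or clear them all */
        regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, val);
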
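Note (illustration only; "wed_pdev" is a hypothetical caller): the new
mtk_eth_set_dma_device() is the runtime hook mentioned above. A WED driver
is expected to re-point DMA mappings at its own, possibly non-coherent,
struct device on attach and to restore the original on detach; the helper
closes and reopens any running netdevs around the switch under rtnl_lock:

        /* attach: route all DMA mappings through the WED device */
        mtk_eth_set_dma_device(eth, &wed_pdev->dev);

        /* detach: fall back to the ethernet device itself */
        mtk_eth_set_dma_device(eth, eth->dev);
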
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -828,7 +829,7 @@ static int mtk_init_fq_dma(struct mtk_et
         dma_addr_t dma_addr;
         int i;

-        eth->scratch_ring = dma_alloc_coherent(eth->dev,
+        eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
                                                cnt * sizeof(struct mtk_tx_dma),
                                                &eth->phy_scratch_ring,
                                                GFP_ATOMIC);
@@ -840,10 +841,10 @@ static int mtk_init_fq_dma(struct mtk_et
         if (unlikely(!eth->scratch_head))
                 return -ENOMEM;

-        dma_addr = dma_map_single(eth->dev,
+        dma_addr = dma_map_single(eth->dma_dev,
                                   eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                   DMA_FROM_DEVICE);
-        if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+        if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
                 return -ENOMEM;

         phy_ring_tail = eth->phy_scratch_ring +
@@ -897,26 +898,26 @@ static void mtk_tx_unmap(struct mtk_eth
 {
         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
                 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-                        dma_unmap_single(eth->dev,
+                        dma_unmap_single(eth->dma_dev,
                                          dma_unmap_addr(tx_buf, dma_addr0),
                                          dma_unmap_len(tx_buf, dma_len0),
                                          DMA_TO_DEVICE);
                 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-                        dma_unmap_page(eth->dev,
+                        dma_unmap_page(eth->dma_dev,
                                        dma_unmap_addr(tx_buf, dma_addr0),
                                        dma_unmap_len(tx_buf, dma_len0),
                                        DMA_TO_DEVICE);
                 }
         } else {
                 if (dma_unmap_len(tx_buf, dma_len0)) {
-                        dma_unmap_page(eth->dev,
+                        dma_unmap_page(eth->dma_dev,
                                        dma_unmap_addr(tx_buf, dma_addr0),
                                        dma_unmap_len(tx_buf, dma_len0),
                                        DMA_TO_DEVICE);
                 }

                 if (dma_unmap_len(tx_buf, dma_len1)) {
-                        dma_unmap_page(eth->dev,
+                        dma_unmap_page(eth->dma_dev,
                                        dma_unmap_addr(tx_buf, dma_addr1),
                                        dma_unmap_len(tx_buf, dma_len1),
                                        DMA_TO_DEVICE);
@@ -994,9 +995,9 @@ static int mtk_tx_map(struct sk_buff *sk
         if (skb_vlan_tag_present(skb))
                 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

-        mapped_addr = dma_map_single(eth->dev, skb->data,
+        mapped_addr = dma_map_single(eth->dma_dev, skb->data,
                                      skb_headlen(skb), DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+        if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
                 return -ENOMEM;

         WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -1035,10 +1036,10 @@ static int mtk_tx_map(struct sk_buff *sk


                 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-                mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+                mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
                                                frag_map_size,
                                                DMA_TO_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+                if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
                         goto err_dma;

                 if (i == nr_frags - 1 &&
@@ -1316,18 +1317,18 @@ static int mtk_poll_rx(struct napi_struc
                         netdev->stats.rx_dropped++;
                         goto release_desc;
                 }
-                dma_addr = dma_map_single(eth->dev,
+                dma_addr = dma_map_single(eth->dma_dev,
                                           new_data + NET_SKB_PAD +
                                           eth->ip_align,
                                           ring->buf_size,
                                           DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+                if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
                         skb_free_frag(new_data);
                         netdev->stats.rx_dropped++;
                         goto release_desc;
                 }

-                dma_unmap_single(eth->dev, trxd.rxd1,
+                dma_unmap_single(eth->dma_dev, trxd.rxd1,
                                  ring->buf_size, DMA_FROM_DEVICE);

                 /* receive data */
@@ -1600,7 +1601,7 @@ static int mtk_tx_alloc(struct mtk_eth *
         if (!ring->buf)
                 goto no_tx_mem;

-        ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+        ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
                                        &ring->phys, GFP_ATOMIC);
         if (!ring->dma)
                 goto no_tx_mem;
@@ -1618,7 +1619,7 @@ static int mtk_tx_alloc(struct mtk_eth *
          * descriptors in ring->dma_pdma.
          */
         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-                ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+                ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
                                                     &ring->phys_pdma,
                                                     GFP_ATOMIC);
                 if (!ring->dma_pdma)
@@ -1677,7 +1678,7 @@ static void mtk_tx_clean(struct mtk_eth
         }

         if (ring->dma) {
-                dma_free_coherent(eth->dev,
+                dma_free_coherent(eth->dma_dev,
                                   MTK_DMA_SIZE * sizeof(*ring->dma),
                                   ring->dma,
                                   ring->phys);
@@ -1685,7 +1686,7 @@ static void mtk_tx_clean(struct mtk_eth
         }

         if (ring->dma_pdma) {
-                dma_free_coherent(eth->dev,
+                dma_free_coherent(eth->dma_dev,
                                   MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
                                   ring->dma_pdma,
                                   ring->phys_pdma);
@@ -1730,18 +1731,18 @@ static int mtk_rx_alloc(struct mtk_eth *
                         return -ENOMEM;
         }

-        ring->dma = dma_alloc_coherent(eth->dev,
+        ring->dma = dma_alloc_coherent(eth->dma_dev,
                                        rx_dma_size * sizeof(*ring->dma),
                                        &ring->phys, GFP_ATOMIC);
         if (!ring->dma)
                 return -ENOMEM;

         for (i = 0; i < rx_dma_size; i++) {
-                dma_addr_t dma_addr = dma_map_single(eth->dev,
+                dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
                                 ring->data[i] + NET_SKB_PAD + eth->ip_align,
                                 ring->buf_size,
                                 DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+                if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
                         return -ENOMEM;
                 ring->dma[i].rxd1 = (unsigned int)dma_addr;

@@ -1777,7 +1778,7 @@ static void mtk_rx_clean(struct mtk_eth
                         continue;
                 if (!ring->dma[i].rxd1)
                         continue;
-                dma_unmap_single(eth->dev,
+                dma_unmap_single(eth->dma_dev,
                                  ring->dma[i].rxd1,
                                  ring->buf_size,
                                  DMA_FROM_DEVICE);
@@ -1788,7 +1789,7 @@ static void mtk_rx_clean(struct mtk_eth
         }

         if (ring->dma) {
-                dma_free_coherent(eth->dev,
+                dma_free_coherent(eth->dma_dev,
                                   ring->dma_size * sizeof(*ring->dma),
                                   ring->dma,
                                   ring->phys);
@@ -2144,7 +2145,7 @@ static void mtk_dma_free(struct mtk_eth
                 if (eth->netdev[i])
                         netdev_reset_queue(eth->netdev[i]);
         if (eth->scratch_ring) {
-                dma_free_coherent(eth->dev,
+                dma_free_coherent(eth->dma_dev,
                                   MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
                                   eth->scratch_ring,
                                   eth->phy_scratch_ring);
@@ -2494,6 +2495,8 @@ static void mtk_dim_tx(struct work_struc

 static int mtk_hw_init(struct mtk_eth *eth)
 {
+        u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+                       ETHSYS_DMA_AG_MAP_PPE;
         int i, val, ret;

         if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2506,6 +2509,10 @@ static int mtk_hw_init(struct mtk_eth *e
         if (ret)
                 goto err_disable_pm;

+        if (eth->ethsys)
+                regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+                                   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
                 ret = device_reset(eth->dev);
                 if (ret) {
@@ -3059,6 +3066,35 @@ free_netdev:
         return err;
 }

+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+        struct net_device *dev, *tmp;
+        LIST_HEAD(dev_list);
+        int i;
+
+        rtnl_lock();
+
+        for (i = 0; i < MTK_MAC_COUNT; i++) {
+                dev = eth->netdev[i];
+
+                if (!dev || !(dev->flags & IFF_UP))
+                        continue;
+
+                list_add_tail(&dev->close_list, &dev_list);
+        }
+
+        dev_close_many(&dev_list, false);
+
+        eth->dma_dev = dma_dev;
+
+        list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+                list_del_init(&dev->close_list);
+                dev_open(dev, NULL);
+        }
+
+        rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
         struct device_node *mac_np;
@@ -3072,6 +3108,7 @@ static int mtk_probe(struct platform_dev
         eth->soc = of_device_get_match_data(&pdev->dev);

         eth->dev = &pdev->dev;
+        eth->dma_dev = &pdev->dev;
         eth->base = devm_platform_ioremap_resource(pdev, 0);
         if (IS_ERR(eth->base))
                 return PTR_ERR(eth->base);
@@ -3120,6 +3157,16 @@ static int mtk_probe(struct platform_dev
                 }
         }

+        if (of_dma_is_coherent(pdev->dev.of_node)) {
+                struct regmap *cci;
+
+                cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                      "mediatek,cci-control");
+                /* enable CPU/bus coherency */
+                if (!IS_ERR(cci))
+                        regmap_write(cci, 0, 3);
+        }
+
         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
                 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
                                           GFP_KERNEL);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -462,6 +462,12 @@
 #define RSTCTRL_FE              BIT(6)
 #define RSTCTRL_PPE             BIT(31)

+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP       0x408
+#define ETHSYS_DMA_AG_MAP_PDMA  BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA  BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE   BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1    0x0
@@ -879,6 +885,7 @@ struct mtk_sgmii {
 /* struct mtk_eth -     This is the main datasructure for holding the state
  *                      of the driver
  * @dev:                The device pointer
+ * @dma_dev:            The device pointer used for dma mapping/alloc
  * @base:               The mapped register i/o base
  * @page_lock:          Make sure that register operations are atomic
  * @tx_irq__lock:       Make sure that IRQ register operations are atomic
@@ -922,6 +929,7 @@ struct mtk_sgmii {

 struct mtk_eth {
         struct device                   *dev;
+        struct device                   *dma_dev;
         void __iomem                    *base;
         spinlock_t                      page_lock;
         spinlock_t                      tx_irq_lock;
@@ -1020,6 +1028,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
                      void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);


 #endif /* MTK_ETH_H */