From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 2 Nov 2023 16:47:07 +0100
Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
 in mtk_soc_data struct

Split tx and rx fields in mtk_soc_data struct. This is a preliminary
patch to roll back to QDMA for MT7986 SoC in order to fix a hw hang
if the device receives a corrupted packet.
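
To make the new layout easier to review: after the split, the
per-direction DMA parameters are grouped as sketched below. This is an
illustrative reconstruction from the hunks in this patch; the
mtk_eth_soc.h change at the end of the diff is the authoritative layout.

	struct mtk_soc_data {
		/* ... unrelated fields unchanged ... */
		struct {
			u32	desc_size;	/* Tx DMA descriptor size */
			u32	dma_max_len;	/* max Tx DMA buffer length */
			u32	dma_len_offset;	/* Tx DMA length field offset */
		} tx;
		struct {
			u32	desc_size;	/* Rx DMA descriptor size */
			u32	irq_done_mask;	/* Rx irq done register mask */
			u32	dma_l4_valid;	/* Rx DMA valid register mask */
			u32	dma_max_len;	/* max Rx DMA buffer length */
			u32	dma_len_offset;	/* Rx DMA length field offset */
		} rx;
	};
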
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
 2 files changed, 139 insertions(+), 100 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1239,7 +1239,7 @@ static int mtk_init_fq_dma(struct mtk_et
 		eth->scratch_ring = eth->sram_base;
 	else
 		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
-						       cnt * soc->txrx.txd_size,
+						       cnt * soc->tx.desc_size,
 						       &eth->phy_scratch_ring,
 						       GFP_KERNEL);
 	if (unlikely(!eth->scratch_ring))
@@ -1255,16 +1255,16 @@ static int mtk_init_fq_dma(struct mtk_et
 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;

-	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);

 	for (i = 0; i < cnt; i++) {
 		struct mtk_tx_dma_v2 *txd;

-		txd = eth->scratch_ring + i * soc->txrx.txd_size;
+		txd = eth->scratch_ring + i * soc->tx.desc_size;
 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
 		if (i < cnt - 1)
 			txd->txd2 = eth->phy_scratch_ring +
-				    (i + 1) * soc->txrx.txd_size;
+				    (i + 1) * soc->tx.desc_size;

 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
@@ -1511,7 +1511,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (itxd == ring->last_free)
 		return -ENOMEM;

-	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));

 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1552,7 +1552,7 @@ static int mtk_tx_map(struct sk_buff *sk

 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
 			txd_info.size = min_t(unsigned int, frag_size,
-					      soc->txrx.dma_max_len);
+					      soc->tx.dma_max_len);
 			txd_info.qid = queue;
 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 					!(frag_size - txd_info.size);
@@ -1565,7 +1565,7 @@ static int mtk_tx_map(struct sk_buff *sk
 			mtk_tx_set_dma_desc(dev, txd, &txd_info);

 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
-						    soc->txrx.txd_size);
+						    soc->tx.desc_size);
 			if (new_desc)
 				memset(tx_buf, 0, sizeof(*tx_buf));
 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1608,7 +1608,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	} else {
 		int next_idx;

-		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
					 ring->dma_size);
 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
@@ -1617,7 +1617,7 @@ static int mtk_tx_map(struct sk_buff *sk

 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);

 		/* unmap dma */
 		mtk_tx_unmap(eth, tx_buf, false);
@@ -1642,7 +1642,7 @@ static int mtk_cal_txd_req(struct mtk_et
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			frag = &skb_shinfo(skb)->frags[i];
 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
-					       eth->soc->txrx.dma_max_len);
+					       eth->soc->tx.dma_max_len);
 		}
 	} else {
 		nfrags += skb_shinfo(skb)->nr_frags;
@@ -1783,7 +1783,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri

 		ring = &eth->rx_ring[i];
 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+		rxd = ring->dma + idx * eth->soc->rx.desc_size;
 		if (rxd->rxd2 & RX_DMA_DONE) {
 			ring->calc_idx_update = true;
 			return ring;
@@ -1951,7 +1951,7 @@ static int mtk_xdp_submit_frame(struct m
 	htxd = txd;

-	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
 	memset(tx_buf, 0, sizeof(*tx_buf));
 	htx_buf = tx_buf;

@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
 			txd_pdma = qdma_to_pdma(ring, txd);

 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
-						    soc->txrx.txd_size);
+						    soc->tx.desc_size);
 			memset(tx_buf, 0, sizeof(*tx_buf));
 			n_desc++;
 		}
@@ -2008,7 +2008,7 @@ static int mtk_xdp_submit_frame(struct m
 	} else {
 		int idx;

-		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
 			MT7628_TX_CTX_IDX0);
 	}
@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
 unmap:
 	while (htxd != txd) {
 		txd_pdma = qdma_to_pdma(ring, htxd);
-		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
 		mtk_tx_unmap(eth, tx_buf, false);

 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2148,7 +2148,7 @@ static int mtk_poll_rx(struct napi_struc
 			goto rx_done;

 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+		rxd = ring->dma + idx * eth->soc->rx.desc_size;
 		data = ring->data[idx];

 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
@@ -2283,7 +2283,7 @@ static int mtk_poll_rx(struct napi_struc
 			rxdcsum = &trxd.rxd4;
 		}

-		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb_checksum_none_assert(skb);
@@ -2405,7 +2405,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
 			break;

 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
-					    eth->soc->txrx.txd_size);
+					    eth->soc->tx.desc_size);
@@ -2453,7 +2453,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
 		}
 		mtk_tx_unmap(eth, tx_buf, true);

-		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+		desc = ring->dma + cpu * eth->soc->tx.desc_size;
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);

@@ -2542,7 +2542,7 @@ static int mtk_napi_rx(struct napi_struc
 	do {
 		int rx_done;

-		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+		mtk_w32(eth, eth->soc->rx.irq_done_mask,
 			reg_map->pdma.irq_status);
 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
 		rx_done_total += rx_done;
@@ -2558,10 +2558,10 @@ static int mtk_napi_rx(struct napi_struc
 			return budget;

 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
-		 eth->soc->txrx.rx_irq_done_mask);
+		 eth->soc->rx.irq_done_mask);

 	if (napi_complete_done(napi, rx_done_total))
-		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);

 	return rx_done_total;
 }
@@ -2570,7 +2570,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
-	int i, sz = soc->txrx.txd_size;
+	int i, sz = soc->tx.desc_size;
 	struct mtk_tx_dma_v2 *txd;
 	int ring_size;
 	u32 ofs, val;
@@ -2693,14 +2693,14 @@ static void mtk_tx_clean(struct mtk_eth

 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
 		dma_free_coherent(eth->dma_dev,
-				  ring->dma_size * soc->txrx.txd_size,
+				  ring->dma_size * soc->tx.desc_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}

 	if (ring->dma_pdma) {
 		dma_free_coherent(eth->dma_dev,
-				  ring->dma_size * soc->txrx.txd_size,
+				  ring->dma_size * soc->tx.desc_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
 	}
@@ -2755,15 +2755,15 @@ static int mtk_rx_alloc(struct mtk_eth *
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
 	    rx_flag != MTK_RX_FLAGS_NORMAL) {
 		ring->dma = dma_alloc_coherent(eth->dma_dev,
-					       rx_dma_size * eth->soc->txrx.rxd_size,
-					       &ring->phys, GFP_KERNEL);
+					       rx_dma_size * eth->soc->rx.desc_size,
+					       &ring->phys, GFP_KERNEL);
 	} else {
 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;

 		ring->dma = tx_ring->dma + tx_ring_size *
-			    eth->soc->txrx.txd_size * (ring_no + 1);
+			    eth->soc->tx.desc_size * (ring_no + 1);
 		ring->phys = tx_ring->phys + tx_ring_size *
-			      eth->soc->txrx.txd_size * (ring_no + 1);
+			      eth->soc->tx.desc_size * (ring_no + 1);
 	}

 	if (!ring->dma)
@@ -2774,7 +2774,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 		dma_addr_t dma_addr;
 		void *data;

-		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+		rxd = ring->dma + i * eth->soc->rx.desc_size;
 		if (ring->page_pool) {
 			data = mtk_page_pool_get_buff(ring->page_pool,
 						      &dma_addr, GFP_KERNEL);
@@ -2863,7 +2863,7 @@ static void mtk_rx_clean(struct mtk_eth
 			if (!ring->data[i])
 				continue;

-			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+			rxd = ring->dma + i * eth->soc->rx.desc_size;
 			if (!rxd->rxd1)
 				continue;

@@ -2880,7 +2880,7 @@ static void mtk_rx_clean(struct mtk_eth

 	if (!in_sram && ring->dma) {
 		dma_free_coherent(eth->dma_dev,
-				  ring->dma_size * eth->soc->txrx.rxd_size,
+				  ring->dma_size * eth->soc->rx.desc_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
@@ -3243,7 +3243,7 @@ static void mtk_dma_free(struct mtk_eth
 			netdev_reset_queue(eth->netdev[i]);
 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+				  MTK_QDMA_RING_SIZE * soc->tx.desc_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
 		eth->phy_scratch_ring = 0;
@@ -3293,7 +3293,7 @@ static irqreturn_t mtk_handle_irq_rx(int

 	eth->rx_events++;
 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
-		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 		__napi_schedule(&eth->rx_napi);
 	}

@@ -3319,9 +3319,9 @@ static irqreturn_t mtk_handle_irq(int ir
 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;

 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
-	    eth->soc->txrx.rx_irq_done_mask) {
+	    eth->soc->rx.irq_done_mask) {
 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
-		    eth->soc->txrx.rx_irq_done_mask)
+		    eth->soc->rx.irq_done_mask)
 			mtk_handle_irq_rx(irq, _eth);
 	}
 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3339,10 +3339,10 @@ static void mtk_poll_controller(struct n
 	struct mtk_eth *eth = mac->hw;

 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 	mtk_handle_irq_rx(eth->irq[2], dev);
 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
 }
 #endif
@@ -3507,7 +3507,7 @@ static int mtk_open(struct net_device *d
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
 		refcount_set(&eth->dma_refcnt, 1);
 	} else {
 		refcount_inc(&eth->dma_refcnt);
@@ -3590,7 +3590,7 @@ static int mtk_stop(struct net_device *d
 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);

@@ -4066,9 +4066,9 @@ static int mtk_hw_init(struct mtk_eth *e

 	/* FE int grouping */
 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

 	if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5168,11 +5168,15 @@ static const struct mtk_soc_data mt2701_
 	.required_clks = MT7623_CLKS_BITMAP,
 	.required_pctl = true,
 	.version = 1,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5188,11 +5192,15 @@ static const struct mtk_soc_data mt7621_
 	.offload_version = 1,
 	.hash_offset = 2,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5210,11 +5218,15 @@ static const struct mtk_soc_data mt7622_
 	.hash_offset = 2,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5231,11 +5243,15 @@ static const struct mtk_soc_data mt7623_
 	.hash_offset = 2,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 	.disable_pll_modes = true,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5250,11 +5266,15 @@ static const struct mtk_soc_data mt7629_
 	.required_pctl = false,
 	.has_accounting = true,
 	.version = 1,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5272,11 +5292,15 @@ static const struct mtk_soc_data mt7981_
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma_v2),
-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma_v2),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+		.dma_len_offset = 8,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma_v2),
+		.irq_done_mask = MTK_RX_DONE_INT_V2,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
 	},
@@ -5294,11 +5318,15 @@ static const struct mtk_soc_data mt7986_
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma_v2),
-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma_v2),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+		.dma_len_offset = 8,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma_v2),
+		.irq_done_mask = MTK_RX_DONE_INT_V2,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
 	},
@@ -5316,11 +5344,15 @@ static const struct mtk_soc_data mt7988_
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma_v2),
-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma_v2),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+		.dma_len_offset = 8,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma_v2),
+		.irq_done_mask = MTK_RX_DONE_INT_V2,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
 	},
@@ -5333,11 +5365,15 @@ static const struct mtk_soc_data rt5350_
 	.required_clks = MT7628_CLKS_BITMAP,
 	.required_pctl = false,
 	.version = 1,
-	.txrx = {
-		.txd_size = sizeof(struct mtk_tx_dma),
-		.rxd_size = sizeof(struct mtk_rx_dma),
-		.rx_irq_done_mask = MTK_RX_DONE_INT,
-		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+	.tx = {
+		.desc_size = sizeof(struct mtk_tx_dma),
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h

 /* QDMA descriptor txd3 */
 #define TX_DMA_OWNER_CPU	BIT(31)
 #define TX_DMA_LS0		BIT(30)
-#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x)		(((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x)		((x) & eth->soc->tx.dma_max_len)
 #define TX_DMA_SWC		BIT(14)
 #define TX_DMA_PQID		GENMASK(3, 0)
 #define TX_DMA_ADDR64_MASK	GENMASK(3, 0)

 /* QDMA descriptor rxd2 */
 #define RX_DMA_DONE		BIT(31)
 #define RX_DMA_LSO		BIT(30)
-#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
 #define RX_DMA_VTAG		BIT(15)
 #define RX_DMA_ADDR64_MASK	GENMASK(3, 0)
 #if IS_ENABLED(CONFIG_64BIT)
@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
  * @foe_entry_size		Foe table entry size.
  * @has_accounting		Bool indicating support for accounting of
  *				qdma counters.
- * @txd_size			Tx DMA descriptor size.
- * @rxd_size			Rx DMA descriptor size.
- * @rx_irq_done_mask		Rx irq done register mask.
- * @rx_dma_l4_valid		Rx DMA valid register mask.
+ * @desc_size			Tx/Rx DMA descriptor size.
+ * @irq_done_mask		Rx irq done register mask.
+ * @dma_l4_valid		Rx DMA valid register mask.
  * @dma_max_len			Max DMA tx/rx buffer length.
  * @dma_len_offset		Tx/Rx DMA length field offset.
  */
@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
 	bool		has_accounting;
 	bool		disable_pll_modes;
 	struct {
-		u32	txd_size;
-		u32	rxd_size;
-		u32	rx_irq_done_mask;
-		u32	rx_dma_l4_valid;
+		u32	desc_size;
 		u32	dma_max_len;
 		u32	dma_len_offset;
-	} txrx;
+	} tx;
+	struct {
+		u32	desc_size;
+		u32	irq_done_mask;
+		u32	dma_l4_valid;
+		u32	dma_max_len;
+		u32	dma_len_offset;
+	} rx;
 };

 #define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)