kernel: bump 5.15 to 5.15.47
target/linux/generic/backport-5.15/702-v5.19-26-net-ethernet-mtk_eth_soc-introduce-device-register-m.patch
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 20 May 2022 20:11:35 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce device register map

Introduce a reg_map structure to add the capability to support
different register definitions. Move the register definitions into the
mtk_reg_map structure. This is a preliminary patch to introduce mt7986
ethernet support.

Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

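Note (not part of the upstream patch): the mechanic being introduced is easy
to lose in the churn below. Each mtk_soc_data entry gains a pointer to a
const struct mtk_reg_map, and shared code reads register offsets through that
pointer instead of through fixed MTK_* macros, so one driver binary can serve
SoCs with different register layouts. A minimal compilable sketch of the
pattern, with hypothetical demo_* names and a trimmed field set:

#include <stdint.h>

typedef uint32_t u32;

struct demo_reg_map {
	u32 tx_irq_mask;
	u32 tx_irq_status;
	struct {
		u32 glo_cfg;	/* global configuration */
		u32 irq_status;	/* interrupt status */
		u32 irq_mask;	/* interrupt mask */
	} pdma;
};

/* One table per SoC generation, mirroring the offsets added below. */
static const struct demo_reg_map demo_v1_reg_map = {
	.tx_irq_mask	= 0x1a1c,
	.tx_irq_status	= 0x1a18,
	.pdma = {
		.glo_cfg	= 0x0a04,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
	},
};

struct demo_soc_data {
	const struct demo_reg_map *reg_map;
};

struct demo_eth {
	volatile u32 *base;		/* ioremapped MMIO window */
	const struct demo_soc_data *soc;
};

/* Shared code resolves the offset through the per-SoC map at run time. */
static inline u32 demo_rx_irq_status(const struct demo_eth *eth)
{
	return eth->base[eth->soc->reg_map->pdma.irq_status / sizeof(u32)];
}
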
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message lev
 #define MTK_ETHTOOL_STAT(x) { #x, \
 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
 /* strings used by ethtool */
 static const struct mtk_ethtool_stats {
 char str[ETH_GSTRING_LEN];
@@ -618,8 +671,8 @@ static inline void mtk_tx_irq_disable(st
 u32 val;

 spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }

@@ -629,8 +682,8 @@ static inline void mtk_tx_irq_enable(str
 u32 val;

 spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }

@@ -640,8 +693,8 @@ static inline void mtk_rx_irq_disable(st
 u32 val;

 spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }

@@ -651,8 +704,8 @@ static inline void mtk_rx_irq_enable(str
 u32 val;

 spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }

@@ -703,39 +756,39 @@ void mtk_stats_update_mac(struct mtk_mac
 hw_stats->rx_checksum_errors +=
 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
 } else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 unsigned int offs = hw_stats->reg_offset;
 u64 stats;

- hw_stats->rx_bytes += mtk_r32(mac->hw,
- MTK_GDM1_RX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
 if (stats)
 hw_stats->rx_bytes += (stats << 32);
 hw_stats->rx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
 hw_stats->rx_overflow +=
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
 hw_stats->rx_fcs_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
 hw_stats->rx_short_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
 hw_stats->rx_long_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
 hw_stats->rx_checksum_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
 hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
 hw_stats->tx_skip +=
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
 hw_stats->tx_collisions +=
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
 hw_stats->tx_bytes +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
 if (stats)
 hw_stats->tx_bytes += (stats << 32);
 hw_stats->tx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
 }

 u64_stats_update_end(&hw_stats->syncp);
@@ -864,10 +917,10 @@ static int mtk_init_fq_dma(struct mtk_et
 txd->txd4 = 0;
 }

- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);

 return 0;
 }
@@ -1111,7 +1164,7 @@ static int mtk_tx_map(struct sk_buff *sk
 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
 } else {
 int next_idx;

@@ -1425,6 +1478,7 @@ rx_done:
 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
 unsigned int *done, unsigned int *bytes)
 {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 struct mtk_tx_ring *ring = &eth->tx_ring;
 struct mtk_tx_dma *desc;
 struct sk_buff *skb;
@@ -1432,7 +1486,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
 u32 cpu, dma;

 cpu = ring->last_free_ptr;
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);

 desc = mtk_qdma_phys_to_virt(ring, cpu);

@@ -1467,7 +1521,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
 }

 ring->last_free_ptr = cpu;
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);

 return budget;
 }
@@ -1560,24 +1614,25 @@ static void mtk_handle_status_irq(struct
 static int mtk_napi_tx(struct napi_struct *napi, int budget)
 {
 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 int tx_done = 0;

 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
 tx_done = mtk_poll_tx(eth, budget);

 if (unlikely(netif_msg_intr(eth))) {
 dev_info(eth->dev,
 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
- mtk_r32(eth, eth->tx_int_status_reg),
- mtk_r32(eth, eth->tx_int_mask_reg));
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
 }

 if (tx_done == budget)
 return budget;

- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
 return budget;

 if (napi_complete_done(napi, tx_done))
@@ -1589,6 +1644,7 @@ static int mtk_napi_tx(struct napi_struc
 static int mtk_napi_rx(struct napi_struct *napi, int budget)
 {
 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 int rx_done_total = 0;

 mtk_handle_status_irq(eth);
@@ -1596,21 +1652,21 @@ static int mtk_napi_rx(struct napi_struc
 do {
 int rx_done;

- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
 rx_done_total += rx_done;

 if (unlikely(netif_msg_intr(eth))) {
 dev_info(eth->dev,
 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
- mtk_r32(eth, MTK_PDMA_INT_MASK));
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
 }

 if (rx_done_total == budget)
 return budget;

- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);

 if (napi_complete_done(napi, rx_done_total))
 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
@@ -1673,20 +1729,20 @@ static int mtk_tx_alloc(struct mtk_eth *
 */
 wmb();

- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
 mtk_w32(eth,
 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- MTK_QTX_CFG(0));
+ soc->reg_map->qdma.qtx_cfg);
 } else {
 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
 }

 return 0;
@@ -1725,6 +1781,7 @@ static void mtk_tx_clean(struct mtk_eth

 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 struct mtk_rx_ring *ring;
 int rx_data_len, rx_dma_size;
 int i;
@@ -1790,16 +1847,18 @@ static int mtk_rx_alloc(struct mtk_eth *
 ring->dma_size = rx_dma_size;
 ring->calc_idx_update = false;
 ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
 /* make sure that all changes to the dma ring are flushed before we
 * continue
 */
 wmb();

- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);

 return 0;
 }
@@ -2108,9 +2167,9 @@ static int mtk_dma_busy_wait(struct mtk_
 u32 val;

 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- reg = MTK_QDMA_GLO_CFG;
+ reg = eth->soc->reg_map->qdma.glo_cfg;
 else
- reg = MTK_PDMA_GLO_CFG;
+ reg = eth->soc->reg_map->pdma.glo_cfg;

 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2168,8 +2227,8 @@ static int mtk_dma_init(struct mtk_eth *
 * automatically
 */
 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
 }

 return 0;
@@ -2243,13 +2302,14 @@ static irqreturn_t mtk_handle_irq_tx(int
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
 {
 struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;

- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
 mtk_handle_irq_rx(irq, _eth);
 }
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
 mtk_handle_irq_tx(irq, _eth);
 }

@@ -2273,6 +2333,7 @@ static void mtk_poll_controller(struct n
 static int mtk_start_dma(struct mtk_eth *eth)
 {
 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 int err;

 err = mtk_dma_init(eth);
@@ -2287,16 +2348,15 @@ static int mtk_start_dma(struct mtk_eth
 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
 MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
-
+ reg_map->qdma.glo_cfg);
 mtk_w32(eth,
 MTK_RX_DMA_EN | rx_2b_offset |
 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
 } else {
 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
 }

 return 0;
@@ -2420,8 +2480,8 @@ static int mtk_stop(struct net_device *d
 cancel_work_sync(&eth->tx_dim.work);

 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);

 mtk_dma_free(eth);

@@ -2475,6 +2535,7 @@ static void mtk_dim_rx(struct work_struc
 {
 struct dim *dim = container_of(work, struct dim, work);
 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 struct dim_cq_moder cur_profile;
 u32 val, cur;

@@ -2482,7 +2543,7 @@ static void mtk_dim_rx(struct work_struc
 dim->profile_ix);
 spin_lock_bh(&eth->dim_lock);

- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
 val &= MTK_PDMA_DELAY_TX_MASK;
 val |= MTK_PDMA_DELAY_RX_EN;

@@ -2492,9 +2553,9 @@ static void mtk_dim_rx(struct work_struc
 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;

- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);

 spin_unlock_bh(&eth->dim_lock);

@@ -2505,6 +2566,7 @@ static void mtk_dim_tx(struct work_struc
 {
 struct dim *dim = container_of(work, struct dim, work);
 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 struct dim_cq_moder cur_profile;
 u32 val, cur;

@@ -2512,7 +2574,7 @@ static void mtk_dim_tx(struct work_struc
 dim->profile_ix);
 spin_lock_bh(&eth->dim_lock);

- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
 val &= MTK_PDMA_DELAY_RX_MASK;
 val |= MTK_PDMA_DELAY_TX_EN;

@@ -2522,9 +2584,9 @@ static void mtk_dim_tx(struct work_struc
 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;

- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);

 spin_unlock_bh(&eth->dim_lock);

@@ -2535,6 +2597,7 @@ static int mtk_hw_init(struct mtk_eth *e
 {
 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
 ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 int i, val, ret;

 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2609,10 +2672,10 @@ static int mtk_hw_init(struct mtk_eth *e
 mtk_rx_irq_disable(eth, ~0);

 /* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

 return 0;
@@ -3151,14 +3214,6 @@ static int mtk_probe(struct platform_dev
 if (IS_ERR(eth->base))
 return PTR_ERR(eth->base);

- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
- } else {
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
- }
-
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
 eth->ip_align = NET_IP_ALIGN;
@@ -3392,6 +3447,7 @@ static int mtk_remove(struct platform_de
 }

 static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
 .caps = MT7623_CAPS | MTK_HWLRO,
 .hw_features = MTK_HW_FEATURES,
 .required_clks = MT7623_CLKS_BITMAP,
@@ -3403,6 +3459,7 @@ static const struct mtk_soc_data mt2701_
 };

 static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
 .caps = MT7621_CAPS,
 .hw_features = MTK_HW_FEATURES,
 .required_clks = MT7621_CLKS_BITMAP,
@@ -3415,6 +3472,7 @@ static const struct mtk_soc_data mt7621_
 };

 static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
 .ana_rgc3 = 0x2028,
 .caps = MT7622_CAPS | MTK_HWLRO,
 .hw_features = MTK_HW_FEATURES,
@@ -3428,6 +3486,7 @@ static const struct mtk_soc_data mt7622_
 };

 static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
 .caps = MT7623_CAPS | MTK_HWLRO,
 .hw_features = MTK_HW_FEATURES,
 .required_clks = MT7623_CLKS_BITMAP,
@@ -3440,6 +3499,7 @@ static const struct mtk_soc_data mt7623_
 };

 static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
 .ana_rgc3 = 0x128,
 .caps = MT7629_CAPS | MTK_HWLRO,
 .hw_features = MTK_HW_FEATURES,
@@ -3452,6 +3512,7 @@ static const struct mtk_soc_data mt7629_
 };

 static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
 .caps = MT7628_CAPS,
 .hw_features = MTK_HW_FEATURES_MT7628,
 .required_clks = MT7628_CLKS_BITMAP,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -48,6 +48,8 @@
 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))

+#define MTK_QRX_OFFSET 0x10
+
 #define MTK_MAX_RX_RING_NUM 4
 #define MTK_HW_LRO_DMA_SIZE 8

@@ -100,18 +102,6 @@
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))

-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
-
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
-
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
-
 /* PDMA HW LRO Control Registers */
 #define MTK_PDMA_LRO_CTRL_DW0 0x980
 #define MTK_LRO_EN BIT(0)
@@ -126,18 +116,19 @@
 #define MTK_ADMA_MODE BIT(15)
 #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)

-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
 #define MTK_MULTI_EN BIT(10)
 #define MTK_PDMA_SIZE_8DWORDS (1 << 4)

+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
 /* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
 #define MTK_PST_DRX_IDX0 BIT(16)
 #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))

 /* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
 #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
 #define MTK_PDMA_DELAY_RX_EN BIT(15)
 #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -151,19 +142,9 @@
 #define MTK_PDMA_DELAY_PINT_MASK 0x7f
 #define MTK_PDMA_DELAY_PTIME_MASK 0xff

-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
-
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
-
 /* PDMA HW LRO Alter Flow Delta Register */
 #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c

-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
-
 /* PDMA HW LRO IP Setting Registers */
 #define MTK_LRO_RX_RING0_DIP_DW0 0xb04
 #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -185,26 +166,9 @@
 #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

 /* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
 #define QDMA_RES_THRES 4

-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
-
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
-
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
-
 /* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
 #define MTK_RX_2B_OFFSET BIT(31)
 #define MTK_RX_BT_32DWORDS (3 << 11)
 #define MTK_NDP_CO_PRO BIT(10)
@@ -216,20 +180,12 @@
 #define MTK_TX_DMA_EN BIT(0)
 #define MTK_DMA_BUSY_TIMEOUT_US 1000000

-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
-
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
-
 /* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
 #define FC_THRES_DROP_MODE BIT(20)
 #define FC_THRES_DROP_EN (7 << 16)
 #define FC_THRES_MIN 0x4444

 /* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
 #define MTK_RX_DONE_DLY BIT(30)
 #define MTK_TX_DONE_DLY BIT(28)
 #define MTK_RX_DONE_INT3 BIT(19)
@@ -244,55 +200,8 @@
 #define MTK_TX_DONE_INT MTK_TX_DONE_DLY

 /* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
 #define MTK_RLS_DONE_INT BIT(0)

-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
-
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
-
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
-
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
-
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
-
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-/* GMA1 counter / statics register */
-#define MTK_GDM1_RX_GBCNT_L 0x2400
-#define MTK_GDM1_RX_GBCNT_H 0x2404
-#define MTK_GDM1_RX_GPCNT 0x2408
-#define MTK_GDM1_RX_OERCNT 0x2410
-#define MTK_GDM1_RX_FERCNT 0x2414
-#define MTK_GDM1_RX_SERCNT 0x2418
-#define MTK_GDM1_RX_LENCNT 0x241c
-#define MTK_GDM1_RX_CERCNT 0x2420
-#define MTK_GDM1_RX_FCCNT 0x2424
-#define MTK_GDM1_TX_SKIPCNT 0x2428
-#define MTK_GDM1_TX_COLCNT 0x242c
-#define MTK_GDM1_TX_GBCNT_L 0x2430
-#define MTK_GDM1_TX_GBCNT_H 0x2434
-#define MTK_GDM1_TX_GPCNT 0x2438
 #define MTK_STAT_OFFSET 0x40

 #define MTK_WDMA0_BASE 0x2800
@@ -853,8 +762,46 @@ struct mtk_tx_dma_desc_info {
 u8 last:1;
 };

+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* interrupt mask */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ } qdma;
+ u32 gdm1_cnt;
+};
+
 /* struct mtk_eth_data - This is the structure holding all differences
 * among various plaforms
+ * @reg_map Soc register map.
 * @ana_rgc3: The offset for register ANA_RGC3 related to
 * sgmiisys syscon
 * @caps Flags shown the extra capability for the SoC
@@ -867,6 +814,7 @@ struct mtk_tx_dma_desc_info {
 * @rxd_size Rx DMA descriptor size.
 */
 struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
 u32 ana_rgc3;
 u32 caps;
 u32 required_clks;
@@ -994,8 +942,6 @@ struct mtk_eth {
 u32 tx_bytes;
 struct dim tx_dim;

- u32 tx_int_mask_reg;
- u32 tx_int_status_reg;
 u32 rx_dma_l4_valid;
 int ip_align;
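
Note (outside the patch): the SoC entries above all end up pointing at one of
just two tables, mtk_reg_map or mt7628_reg_map, so this change is intended to
be behaviour-neutral; the register values themselves do not move. The payoff
is the extension point it creates: a later SoC such as mt7986 only needs its
own table plus one .reg_map assignment. Assuming the structures from the
patched mtk_eth_soc.h are in scope, that would look roughly like the
hypothetical sketch below (all offsets are placeholders, not taken from
mt7986 documentation):

/* Hypothetical wiring for a future SoC; offsets are placeholders. */
static const struct mtk_reg_map demo_new_reg_map = {
	.tx_irq_mask	= 0x461c,	/* placeholder offset */
	.tx_irq_status	= 0x4618,	/* placeholder offset */
	/* ... .pdma and .qdma blocks filled in the same way ... */
};

static const struct mtk_soc_data demo_new_data = {
	.reg_map	= &demo_new_reg_map,
	/* .caps, .hw_features, .required_clks, ... as in the entries above */
};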