realtek: drop ethtool log noise
[openwrt/staging/zorun.git] / target / linux / realtek / files-5.4 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
/*
 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX.
 * The ring is assigned by the switch based on packet/port priority.
 * Maximum number of TX rings is 2, ring 2 being the high-priority
 * ring on the RTL93xx SoCs. MAX_RING_SIZE * RING_BUFFER gives
 * the memory used for the ring buffer.
 */
#define MAX_RXRINGS	32
#define MAX_RXLEN	100
#define MAX_ENTRIES	(200 * 8)
#define TXRINGS		2
/* NOTE(review): the hardware supports TX rings of up to 160 entries;
 * 16 is a deliberately small working size -- raise if TX starves. */
#define TXRINGLEN	16
#define NOTIFY_EVENTS	10	/* L2 events per notification block (RTL839x) */
#define NOTIFY_BLOCKS	10	/* blocks in the notification ring (RTL839x) */
#define TX_EN		0x8	/* DMA_IF_CTRL TX enable, RTL83xx */
#define RX_EN		0x4	/* DMA_IF_CTRL RX enable, RTL83xx */
#define TX_EN_93XX	0x20	/* DMA_IF_CTRL TX enable, RTL93xx */
#define RX_EN_93XX	0x10	/* DMA_IF_CTRL RX enable, RTL93xx */
#define TX_DO		0x2
#define WRAP		0x2	/* last-descriptor marker in ring words */

#define RING_BUFFER	1600	/* per-packet buffer size in bytes */

/* RTL838x storm-control / attack-prevention status registers */
#define RTL838X_STORM_CTRL_PORT_BC_EXCEED	(0x470C)
#define RTL838X_STORM_CTRL_PORT_MC_EXCEED	(0x4710)
#define RTL838X_STORM_CTRL_PORT_UC_EXCEED	(0x4714)
#define RTL838X_ATK_PRVNT_STS			(0x5B1C)
57
/* Packet header shared with the ASIC DMA engine: buffer pointer, sizes
 * and the CPU-tag words carrying RX reason / TX destination information.
 * The layout must match the hardware exactly, hence packed/aligned(1). */
struct p_hdr {
	uint8_t *buf;		/* packet data buffer (KSEG1 address) */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	uint16_t cpu_tag[10];	/* tag layout differs per SoC family */
} __packed __aligned(1);
66
/* One L2 table notification event in the RTL839x notification ring.
 * Hardware layout -- field widths must not change. */
struct n_event {
	uint32_t type:2;	/* non-zero is treated as an "add" by the handler */
	uint32_t fidVid:12;	/* filtering database / VLAN id */
	uint64_t mac:48;	/* MAC address the event refers to */
	uint32_t slp:6;		/* presumably source logical port -- TODO confirm */
	uint32_t valid:1;	/* entry contains a real event */
	uint32_t reserved:27;
} __packed __aligned(1);
75
/* All DMA ring state, placed at the start of priv->membase.
 * rx_r/tx_r hold the descriptor words the ASIC walks; c_rx/c_tx are the
 * driver's current positions; tx_space is the static TX packet buffer. */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor rings */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor rings */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];	/* per-RX-slot headers */
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];	/* per-TX-slot headers */
	uint32_t c_rx[MAX_RXRINGS];			/* next RX index per ring */
	uint32_t c_tx[TXRINGS];				/* next TX index per ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];
	uint8_t *rx_space;				/* RX packet buffer area */
};
86
/* One block of L2 notification events handed over by the RTL839x */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
90
/* RTL839x L2 notification ring, placed directly after struct ring_b in
 * priv->membase. ring[] holds descriptor words (address | own bit). */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
97
98 void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
99 {
100 prio &= 0x7;
101
102 if (dest_port > 0) {
103 // cpu_tag[0] is reserved on the RTL83XX SoCs
104 h->cpu_tag[1] = 0x0400;
105 h->cpu_tag[2] = 0x0200;
106 h->cpu_tag[3] = 0x0000;
107 h->cpu_tag[4] = BIT(dest_port) >> 16;
108 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
109 // Set internal priority and AS_PRIO
110 if (prio >= 0)
111 h->cpu_tag[2] |= (prio | 0x8) << 12;
112 }
113 }
114
115 void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
116 {
117 prio &= 0x7;
118
119 if (dest_port > 0) {
120 // cpu_tag[0] is reserved on the RTL83XX SoCs
121 h->cpu_tag[1] = 0x0100;
122 h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
123 if (dest_port >= 32) {
124 dest_port -= 32;
125 h->cpu_tag[2] = BIT(dest_port) >> 16;
126 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
127 } else {
128 h->cpu_tag[4] = BIT(dest_port) >> 16;
129 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
130 }
131 h->cpu_tag[6] |= BIT(21); // Enable destination port mask use
132 // Set internal priority and AS_PRIO
133 if (prio >= 0)
134 h->cpu_tag[1] |= prio | BIT(3);
135 }
136 }
137
138 void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
139 {
140 h->cpu_tag[0] = 0x8000;
141 h->cpu_tag[1] = 0; // TODO: Fill port and prio
142 h->cpu_tag[2] = 0;
143 h->cpu_tag[3] = 0;
144 h->cpu_tag[4] = 0;
145 h->cpu_tag[5] = 0;
146 h->cpu_tag[6] = 0;
147 h->cpu_tag[7] = 0xffff;
148 }
149
150 void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
151 {
152 h->cpu_tag[0] = 0x8000;
153 h->cpu_tag[1] = 0; // TODO: Fill port and prio
154 h->cpu_tag[2] = 0;
155 h->cpu_tag[3] = 0;
156 h->cpu_tag[4] = 0;
157 h->cpu_tag[5] = 0;
158 h->cpu_tag[6] = 0;
159 h->cpu_tag[7] = 0xffff;
160 }
161
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* ring index */
	struct rtl838x_eth_priv *priv;	/* back-pointer to the device */
	struct napi_struct napi;
};
167
/* Driver-private state attached to the net_device */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA-visible area holding ring_b (+ notify_b on RTL839x) */
	spinlock_t lock;		/* serializes HW setup/teardown and the ISR */
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* RTL8380/8390/9300/9310_FAMILY_ID */
	const struct rtl838x_reg *r;	/* per-family register/accessor table */
	u8 cpu_port;
	u32 lastEvent;			/* next L2 notification ring index (RTL839x) */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* descriptors per RX ring */
};
185
186 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
187 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
188 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
189 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
190 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
191 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
192 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
193 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
194
/*
 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR registers track the fill
 * level of the rings. Writing x into these registers subtracts x from their
 * content. When the content reaches the ring size, the ASIC no longer adds
 * packets to this receive queue.
 */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
205
/* No-op: ring fill-level counters do not exist on this family */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
210
/* Return 'released' processed descriptors of ring r to the hardware.
 * Three rings share each 32-bit counter register, 10 bits per ring. */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* old fill level, for the debug print */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this second write pushes the *old* fill level back
	 * into the register; since writes subtract, it looks like debug
	 * leftover (the RTL931x variant has no such write) -- verify against
	 * the datasheet before removing. */
	sw_w32(v, reg);
}
222
223 void rtl931x_update_cntr(int r, int released)
224 {
225 int pos = (r % 3) * 10;
226 u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
227
228 sw_w32_mask(0x3ff << pos, released << pos, reg);
229 }
230
/* Decoded, family-independent view of a CPU RX tag */
struct dsa_tag {
	u8 reason;		/* trap/forward reason code */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* ingress switch port */
	u8 l2_offloaded;	/* 1 if hardware already forwarded the frame */
	u8 prio;
};
238
239 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
240 {
241 t->reason = h->cpu_tag[3] & 0xf;
242 if (t->reason != 15)
243 pr_debug("Reason: %d\n", t->reason);
244 t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
245 if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
246 t->l2_offloaded = 1;
247 else
248 t->l2_offloaded = 0;
249 t->port = h->cpu_tag[1] & 0x1f;
250
251 return t->l2_offloaded;
252 }
253
254 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
255 {
256 t->reason = h->cpu_tag[4] & 0x1f;
257 if (t->reason != 31)
258 pr_debug("Reason: %d\n", t->reason);
259 t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
260 if ((t->reason != 7) && (t->reason != 8)) // NIC_RX_REASON_RMA_USR
261 t->l2_offloaded = 1;
262 else
263 t->l2_offloaded = 0;
264
265 t->port = h->cpu_tag[1] & 0x3f;
266
267 return t->l2_offloaded;
268 }
269
270 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
271 {
272 t->reason = h->cpu_tag[7] & 0x3f;
273 pr_debug("Reason %d\n", t->reason);
274 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
275 if (t->reason >= 19 && t->reason <= 27)
276 t->l2_offloaded = 0;
277 else
278 t->l2_offloaded = 1;
279 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
280
281 return t->l2_offloaded;
282 }
283
284 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
285 {
286 rtl931x_decode_tag(h, t);
287 t->port &= 0x1f;
288 return t->l2_offloaded;
289 }
290
291 /*
292 * Discard the RX ring-buffers, called as part of the net-ISR
293 * when the buffer runs over
294 * Caller needs to hold priv->lock
295 */
296 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
297 {
298 int r;
299 u32 *last;
300 struct p_hdr *h;
301 struct ring_b *ring = priv->membase;
302
303 for (r = 0; r < priv->rxrings; r++) {
304 pr_debug("In %s working on r: %d\n", __func__, r);
305 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
306 do {
307 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
308 break;
309 pr_debug("Got something: %d\n", ring->c_rx[r]);
310 h = &ring->rx_header[r][ring->c_rx[r]];
311 memset(h, 0, sizeof(struct p_hdr));
312 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
313 + r * priv->rxringlen * RING_BUFFER
314 + ring->c_rx[r] * RING_BUFFER);
315 h->size = RING_BUFFER;
316 /* make sure the header is visible to the ASIC */
317 mb();
318
319 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
320 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
321 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
322 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
323 }
324 }
325
/* Deferred FDB sync job: macs[] is a zero-terminated list of MAC
 * addresses with bit 63 encoding add (1) vs delete (0). The extra +1
 * slot guarantees room for the terminating zero. */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];
};
331
332 void rtl838x_fdb_sync(struct work_struct *work)
333 {
334 const struct fdb_update_work *uw =
335 container_of(work, struct fdb_update_work, work);
336 struct switchdev_notifier_fdb_info info;
337 u8 addr[ETH_ALEN];
338 int i = 0;
339 int action;
340
341 while (uw->macs[i]) {
342 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
343 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
344 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
345 info.addr = &addr[0];
346 info.vid = 0;
347 info.offloaded = 1;
348 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
349 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
350 i++;
351 }
352 kfree(work);
353 }
354
/*
 * Drain the RTL839x L2 notification ring and queue each block of events
 * as deferred work for switchdev notification.
 * Called from the ISR with priv->lock held, hence GFP_ATOMIC.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Ring entries with bit 0 cleared are owned by the CPU */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			/* bit 63 flags an "add" for rtl838x_fdb_sync */
			if (event->type)
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			/* NOTE(review): skipped invalid events leave zero gaps
			 * in w->macs[] (kzalloc'ed), and the consumer stops at
			 * the first zero -- assumes valid events are
			 * contiguous; confirm against hardware behavior. */
			w->macs[i] = mac;
		}

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		/* Zero-terminate the list (macs[] has NOTIFY_EVENTS + 1 slots) */
		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
392
/*
 * Interrupt handler for the RTL838x/RTL839x families.
 * Handles (in order): storm-control and attack-prevention status,
 * TX-done (acknowledged and ignored), RX-done (deferred to NAPI),
 * RX ring overrun, and L2 notification events (RTL839x only).
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	bool triggered = false;	/* set on storm events; currently unused afterwards */
	u32 atk = sw_r32(RTL838X_ATK_PRVNT_STS);
	int i;
	u32 storm_uc = sw_r32(RTL838X_STORM_CTRL_PORT_UC_EXCEED);
	u32 storm_mc = sw_r32(RTL838X_STORM_CTRL_PORT_MC_EXCEED);
	u32 storm_bc = sw_r32(RTL838X_STORM_CTRL_PORT_BC_EXCEED);

	pr_debug("IRQ: %08x\n", status);
	if (storm_uc || storm_mc || storm_bc) {
		pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
			storm_uc, storm_mc, storm_bc);

		/* write back to clear the exceed status */
		sw_w32(storm_uc, RTL838X_STORM_CTRL_PORT_UC_EXCEED);
		sw_w32(storm_mc, RTL838X_STORM_CTRL_PORT_MC_EXCEED);
		sw_w32(storm_bc, RTL838X_STORM_CTRL_PORT_BC_EXCEED);

		triggered = true;
	}

	if (atk) {
		pr_debug("Attack prevention triggered: %08x\n", atk);
		sw_w32(atk, RTL838X_ATK_PRVNT_STS);
	}

	spin_lock(&priv->lock);
	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt: bits 8..15 map to RX rings */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; NAPI re-enables */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: bits 0..7 map to RX rings */
	if (status & 0x000ff) {
		pr_info("RX buffer overrun: status %x, mask: %x\n",
			status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification sources (bits 20..22) all drain the same
	 * ring; exact per-bit meaning not documented here -- see datasheet */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
468
/*
 * Interrupt handler for the RTL9300/RTL9310 families, which split the
 * DMA interrupt status over three registers (runout / RX done / TX done).
 * TX-done is acknowledged and ignored; RX work is deferred to NAPI.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);
	spin_lock(&priv->lock);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt: one bit per ring */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; NAPI re-enables */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
514
/* Per-family register offsets and accessor callbacks: RTL838x */
static const struct rtl838x_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
539
/* Per-family register offsets and accessor callbacks: RTL839x */
static const struct rtl838x_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
564
/* Per-family register offsets and accessor callbacks: RTL930x
 * (split interrupt registers, shared rtl93xx IRQ handler) */
static const struct rtl838x_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
595
/* Per-family register offsets and accessor callbacks: RTL931x
 * (split interrupt registers, shared rtl93xx IRQ handler) */
static const struct rtl838x_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
626
627 static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
628 {
629 u32 int_saved, nbuf;
630 int i, pos;
631
632 pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
633 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
634 mdelay(100);
635
636 /* Disable and clear interrupts */
637 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
638 sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
639 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
640 sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
641 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
642 sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
643 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
644 } else {
645 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
646 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
647 }
648
649 if (priv->family_id == RTL8390_FAMILY_ID) {
650 /* Preserve L2 notification and NBUF settings */
651 int_saved = sw_r32(priv->r->dma_if_intr_msk);
652 nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
653
654 /* Disable link change interrupt on RTL839x */
655 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
656 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
657
658 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
659 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
660 }
661
662 /* Reset NIC */
663 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
664 sw_w32(0x4, priv->r->rst_glb_ctrl);
665 else
666 sw_w32(0x8, priv->r->rst_glb_ctrl);
667
668 do { /* Wait for reset of NIC and Queues done */
669 udelay(20);
670 } while (sw_r32(priv->r->rst_glb_ctrl) & 0xc);
671 mdelay(100);
672
673 /* Setup Head of Line */
674 if (priv->family_id == RTL8380_FAMILY_ID)
675 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
676 if (priv->family_id == RTL8390_FAMILY_ID)
677 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
678 if (priv->family_id == RTL9300_FAMILY_ID) {
679 for (i = 0; i < priv->rxrings; i++) {
680 pos = (i % 3) * 10;
681 sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
682 sw_w32_mask(0x3ff << pos, priv->rxringlen,
683 priv->r->dma_if_rx_ring_cntr(i));
684 }
685 }
686
687 /* Re-enable link change interrupt */
688 if (priv->family_id == RTL8390_FAMILY_ID) {
689 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
690 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
691 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
692 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
693
694 /* Restore notification settings: on RTL838x these bits are null */
695 sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
696 sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
697 }
698 }
699
700 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
701 {
702 int i;
703 struct ring_b *ring = priv->membase;
704
705 for (i = 0; i < priv->rxrings; i++)
706 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
707
708 for (i = 0; i < TXRINGS; i++)
709 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
710 }
711
/* Enable RX/TX DMA and CPU-port traffic on RTL838x */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	/* allow CRC errors on CPU-port */
	sw_w32_mask(0, 0x8, priv->r->mac_port_ctrl(priv->cpu_port));
}
737
/* Enable RX/TX DMA and CPU-port traffic on RTL839x, and add the CPU port
 * to the lookup-miss flooding portmask */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask via indirect
	 * table access: read (0x28000), modify bit 31, write (0x38000) */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
761
/* Enable RX/TX DMA and CPU-port traffic on RTL9300/9310 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (i = 0; i < priv->rxrings; i++) {
		/* three rings per 32-bit register, 10 bits each */
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port floods unknown unicast; force CPU-port MAC settings */
	sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
793
794 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
795 {
796 int i, j;
797
798 struct p_hdr *h;
799
800 for (i = 0; i < priv->rxrings; i++) {
801 for (j = 0; j < priv->rxringlen; j++) {
802 h = &ring->rx_header[i][j];
803 memset(h, 0, sizeof(struct p_hdr));
804 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
805 + i * priv->rxringlen * RING_BUFFER
806 + j * RING_BUFFER);
807 h->size = RING_BUFFER;
808 /* All rings owned by switch, last one wraps */
809 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
810 | (j == (priv->rxringlen - 1) ? WRAP : 0);
811 }
812 ring->c_rx[i] = 0;
813 }
814
815 for (i = 0; i < TXRINGS; i++) {
816 for (j = 0; j < TXRINGLEN; j++) {
817 h = &ring->tx_header[i][j];
818 memset(h, 0, sizeof(struct p_hdr));
819 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
820 + i * TXRINGLEN * RING_BUFFER
821 + j * RING_BUFFER);
822 h->size = RING_BUFFER;
823 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
824 }
825 /* Last header is wrapping around */
826 ring->tx_r[i][j-1] |= WRAP;
827 ring->c_tx[i] = 0;
828 }
829 }
830
/* Initialize the RTL839x L2 notification ring (all blocks handed to the
 * switch) and enable flush/suspend notification events. */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* bit 0 set = switch-owned; last entry wraps */
	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* notification timeout/threshold field -- 100, units per datasheet */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
850
851 static int rtl838x_eth_open(struct net_device *ndev)
852 {
853 unsigned long flags;
854 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
855 struct ring_b *ring = priv->membase;
856 int i, err;
857
858 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
859 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
860
861 spin_lock_irqsave(&priv->lock, flags);
862 rtl838x_hw_reset(priv);
863 rtl838x_setup_ring_buffer(priv, ring);
864 if (priv->family_id == RTL8390_FAMILY_ID) {
865 rtl839x_setup_notify_ring_buffer(priv);
866 /* Make sure the ring structure is visible to the ASIC */
867 mb();
868 flush_cache_all();
869 }
870
871 rtl838x_hw_ring_setup(priv);
872 err = request_irq(ndev->irq, priv->r->net_irq, IRQF_SHARED, ndev->name, ndev);
873 if (err) {
874 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
875 __func__, err);
876 return err;
877 }
878 phylink_start(priv->phylink);
879
880 for (i = 0; i < priv->rxrings; i++)
881 napi_enable(&priv->rx_qs[i].napi);
882
883 switch (priv->family_id) {
884 case RTL8380_FAMILY_ID:
885 rtl838x_hw_en_rxtx(priv);
886 /* Trap IGMP traffic to CPU-Port */
887 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
888 /* Flush learned FDB entries on link down of a port */
889 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
890 break;
891 case RTL8390_FAMILY_ID:
892 rtl839x_hw_en_rxtx(priv);
893 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
894 /* Flush learned FDB entries on link down of a port */
895 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
896 break;
897 case RTL9300_FAMILY_ID:
898 rtl93xx_hw_en_rxtx(priv);
899 /* Flush learned FDB entries on link down of a port */
900 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
901 sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_SABLK_CTRL);
902 sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_DABLK_CTRL);
903 break;
904
905 case RTL9310_FAMILY_ID:
906 rtl93xx_hw_en_rxtx(priv);
907 // TODO: Add trapping of IGMP frames to CPU-port
908 break;
909 }
910
911 netif_tx_start_all_queues(ndev);
912
913 spin_unlock_irqrestore(&priv->lock, flags);
914
915 return 0;
916 }
917
/* Stop all CPU-port traffic and DMA, flush the L2 caches and mask every
 * NIC interrupt. Leaves the hardware quiesced for rtl838x_eth_stop(). */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* family-specific "link down" value for the CPU port force-mode reg */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports (indirect table write, RTL838x only) */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache, one port at a time.
	 * NOTE(review): these polls have no timeout -- a stuck ASIC would
	 * hang the CPU here. */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
979
980 static int rtl838x_eth_stop(struct net_device *ndev)
981 {
982 unsigned long flags;
983 int i;
984 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
985
986 pr_info("in %s\n", __func__);
987
988 spin_lock_irqsave(&priv->lock, flags);
989 phylink_stop(priv->phylink);
990 rtl838x_hw_stop(priv);
991 free_irq(ndev->irq, ndev);
992
993 for (i = 0; i < priv->rxrings; i++)
994 napi_disable(&priv->rx_qs[i].napi);
995
996 netif_tx_stop_all_queues(ndev);
997
998 spin_unlock_irqrestore(&priv->lock, flags);
999
1000 return 0;
1001 }
1002
1003 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1004 {
1005 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1006 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1007 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1008 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1009 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1010 }
1011 if (ndev->flags & IFF_ALLMULTI) {
1012 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1013 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1014 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1015 }
1016 if (ndev->flags & IFF_PROMISC) {
1017 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1018 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1019 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1020 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1021 }
1022 }
1023
1024 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1025 {
1026 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1027
1028 if (priv->family_id == RTL8390_FAMILY_ID)
1029 return rtl839x_eth_set_multicast_list(ndev);
1030
1031 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1032 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1033 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1034 }
1035 if (ndev->flags & IFF_ALLMULTI)
1036 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1037 if (ndev->flags & IFF_PROMISC) {
1038 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1039 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1040 }
1041 }
1042
1043 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1044 {
1045 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1046 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1047 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1048 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1049 }
1050 if (ndev->flags & IFF_ALLMULTI) {
1051 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1052 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1053 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1054 }
1055 if (ndev->flags & IFF_PROMISC) {
1056 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1057 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1058 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1059 }
1060 }
1061
1062 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1063 {
1064 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1065 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1066 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1067 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1068 }
1069 if (ndev->flags & IFF_ALLMULTI) {
1070 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1071 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1072 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1073 }
1074 if (ndev->flags & IFF_PROMISC) {
1075 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1076 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1077 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1078 }
1079 }
1080
/*
 * .ndo_tx_timeout callback: the stack detected a stuck TX queue.
 * Fully restart the DMA interface (stop, re-setup rings, re-enable)
 * and wake the queue again.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	/* Reset the watchdog timestamp so the timeout does not refire */
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1095
/*
 * .ndo_start_xmit callback: copy the skb into the DMA ring buffer and
 * hand the descriptor over to the switch.
 *
 * A trailing 4-byte in-band DSA tag (0x80, port, 0x10, 0x00) appended
 * by the DSA layer is detected at the end of the frame, stripped, and
 * converted into a directed send via the CPU tag in the packet header.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;	/* -1: let the switch do the forwarding decision */
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	/* NOTE(review): assumes len >= 4; frames shorter than the tag
	 * would index before skb->data — verify DSA always pads. */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
			&& skb->data[len-3] < 28 &&  skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC */
		dest_port = skb->data[len-3];
		len -= 4;
	}
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	/* ASIC expects that packet includes CRC, so we extend by 4 bytes */
	len += 4;

	if (skb_padto(skb, len)) {
		/* skb was already freed by skb_padto on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;

		priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer (uncached KSEG1 mapping) */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch: set the OWN bit of the descriptor */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			/* Poll (bounded) until both RX_EN and TX_EN read back set */
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		/* Data was copied out above, the skb can be released now */
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		/* Ring full: ask the stack to retry later */
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
1185
1186 /*
1187 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1188 * so we do round-robin
1189 */
1190 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1191 struct net_device *sb_dev)
1192 {
1193 static u8 last = 0;
1194
1195 last++;
1196 return last % TXRINGS;
1197 }
1198
1199 /*
1200 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1201 */
1202 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1203 struct net_device *sb_dev)
1204 {
1205 if (skb->priority >= TC_PRIO_CONTROL)
1206 return 1;
1207 return 0;
1208 }
1209
1210 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1211 {
1212 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1213 struct ring_b *ring = priv->membase;
1214 struct sk_buff *skb;
1215 unsigned long flags;
1216 int i, len, work_done = 0;
1217 u8 *data, *skb_data;
1218 unsigned int val;
1219 u32 *last;
1220 struct p_hdr *h;
1221 bool dsa = netdev_uses_dsa(dev);
1222 struct dsa_tag tag;
1223
1224 spin_lock_irqsave(&priv->lock, flags);
1225 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1226 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1227
1228 do {
1229 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1230 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1231 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1232 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1233 }
1234 break;
1235 }
1236
1237 h = &ring->rx_header[r][ring->c_rx[r]];
1238 data = (u8 *)KSEG1ADDR(h->buf);
1239 len = h->len;
1240 if (!len)
1241 break;
1242 work_done++;
1243
1244 len -= 4; /* strip the CRC */
1245 /* Add 4 bytes for cpu_tag */
1246 if (dsa)
1247 len += 4;
1248
1249 skb = alloc_skb(len + 4, GFP_KERNEL);
1250 skb_reserve(skb, NET_IP_ALIGN);
1251
1252 if (likely(skb)) {
1253 /* BUG: Prevent bug on RTL838x SoCs*/
1254 if (priv->family_id == RTL8380_FAMILY_ID) {
1255 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1256 for (i = 0; i < priv->rxrings; i++) {
1257 /* Update each ring cnt */
1258 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1259 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1260 }
1261 }
1262
1263 skb_data = skb_put(skb, len);
1264 /* Make sure data is visible */
1265 mb();
1266 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1267 /* Overwrite CRC with cpu_tag */
1268 if (dsa) {
1269 priv->r->decode_tag(h, &tag);
1270 skb->data[len-4] = 0x80;
1271 skb->data[len-3] = tag.port;
1272 skb->data[len-2] = 0x10;
1273 skb->data[len-1] = 0x00;
1274 if (tag.l2_offloaded)
1275 skb->data[len-3] |= 0x40;
1276 }
1277
1278 if (tag.queue >= 0)
1279 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1280 tag.queue, len, tag.reason, tag.port);
1281
1282 skb->protocol = eth_type_trans(skb, dev);
1283 dev->stats.rx_packets++;
1284 dev->stats.rx_bytes += len;
1285
1286 netif_receive_skb(skb);
1287 } else {
1288 if (net_ratelimit())
1289 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1290 dev->stats.rx_dropped++;
1291 }
1292
1293 /* Reset header structure */
1294 memset(h, 0, sizeof(struct p_hdr));
1295 h->buf = data;
1296 h->size = RING_BUFFER;
1297
1298 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1299 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1300 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1301 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1302 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1303
1304 // Update counters
1305 priv->r->update_cntr(r, 0);
1306
1307 spin_unlock_irqrestore(&priv->lock, flags);
1308 return work_done;
1309 }
1310
/*
 * NAPI poll handler for one RX ring: drain packets until the budget is
 * exhausted or the ring is empty, then complete NAPI and re-enable the
 * RX interrupt for this ring.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* Unmask runout bits plus the done bit of this ring */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}
	return work_done;
}
1337
1338
1339 static void rtl838x_validate(struct phylink_config *config,
1340 unsigned long *supported,
1341 struct phylink_link_state *state)
1342 {
1343 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1344
1345 pr_debug("In %s\n", __func__);
1346
1347 if (!phy_interface_mode_is_rgmii(state->interface) &&
1348 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1349 state->interface != PHY_INTERFACE_MODE_MII &&
1350 state->interface != PHY_INTERFACE_MODE_REVMII &&
1351 state->interface != PHY_INTERFACE_MODE_GMII &&
1352 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1353 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1354 state->interface != PHY_INTERFACE_MODE_SGMII) {
1355 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1356 pr_err("Unsupported interface: %d\n", state->interface);
1357 return;
1358 }
1359
1360 /* Allow all the expected bits */
1361 phylink_set(mask, Autoneg);
1362 phylink_set_port_modes(mask);
1363 phylink_set(mask, Pause);
1364 phylink_set(mask, Asym_Pause);
1365
1366 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1367 * including Half duplex
1368 */
1369 if (state->interface != PHY_INTERFACE_MODE_MII &&
1370 state->interface != PHY_INTERFACE_MODE_REVMII) {
1371 phylink_set(mask, 1000baseT_Full);
1372 phylink_set(mask, 1000baseT_Half);
1373 }
1374
1375 phylink_set(mask, 10baseT_Half);
1376 phylink_set(mask, 10baseT_Full);
1377 phylink_set(mask, 100baseT_Half);
1378 phylink_set(mask, 100baseT_Full);
1379
1380 bitmap_and(supported, supported, mask,
1381 __ETHTOOL_LINK_MODE_MASK_NBITS);
1382 bitmap_and(state->advertising, state->advertising, mask,
1383 __ETHTOOL_LINK_MODE_MASK_NBITS);
1384 }
1385
1386
/*
 * phylink mac_config callback: intentionally a no-op.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
1397
/*
 * phylink mac_an_restart callback: restart autonegotiation on the CPU
 * port by bouncing the forced link state.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	/* 0x6192D/0x6192F: force-mode values differing only in the link
	 * bit(s); exact bit semantics are undocumented magic here. */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1413
1414 static int rtl838x_mac_pcs_get_state(struct phylink_config *config,
1415 struct phylink_link_state *state)
1416 {
1417 u32 speed;
1418 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1419 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1420 int port = priv->cpu_port;
1421
1422 pr_debug("In %s\n", __func__);
1423
1424 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1425 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1426
1427 speed = priv->r->get_mac_link_spd_sts(port);
1428 switch (speed) {
1429 case 0:
1430 state->speed = SPEED_10;
1431 break;
1432 case 1:
1433 state->speed = SPEED_100;
1434 break;
1435 case 2:
1436 state->speed = SPEED_1000;
1437 break;
1438 default:
1439 state->speed = SPEED_UNKNOWN;
1440 break;
1441 }
1442
1443 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1444 if (priv->r->get_mac_rx_pause_sts(port))
1445 state->pause |= MLO_PAUSE_RX;
1446 if (priv->r->get_mac_tx_pause_sts(port))
1447 state->pause |= MLO_PAUSE_TX;
1448
1449 return 1;
1450 }
1451
/*
 * phylink mac_link_down callback: disable TX/RX on the CPU port.
 */
static void rtl838x_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Stop TX/RX to port */
	sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
}
1463
/*
 * phylink mac_link_up callback: re-enable TX/RX on the CPU port.
 */
static void rtl838x_mac_link_up(struct phylink_config *config, unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phy)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Restart TX/RX to port */
	sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
}
1475
/*
 * Write the MAC address into the switch registers. The address is
 * split across two 32-bit registers: bytes 0-1 in the first, 2-5 in
 * the second. On RTL838x the same value must also be mirrored into
 * the ALE and a second MAC register block.
 */
static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
{
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	pr_debug("In %s\n", __func__);
	sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
	sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);

	if (priv->family_id == RTL8380_FAMILY_ID) {
		/* 2 more registers, ALE/MAC block */
		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       (RTL838X_MAC_ALE + 4));

		sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
		sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		       RTL838X_MAC2 + 4);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
1498
1499 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1500 {
1501 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1502 const struct sockaddr *addr = p;
1503 u8 *mac = (u8 *) (addr->sa_data);
1504
1505 if (!is_valid_ether_addr(addr->sa_data))
1506 return -EADDRNOTAVAIL;
1507
1508 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1509 rtl838x_set_mac_hw(dev, mac);
1510
1511 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1512 return 0;
1513 }
1514
/*
 * MAC init for RTL839x: nothing done yet, see TODO below.
 */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	// We will need to set-up EEE and the egress-rate limitation
	return 0;
}
1520
1521 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1522 {
1523 int i;
1524
1525 if (priv->family_id == 0x8390)
1526 return rtl8390_init_mac(priv);
1527
1528 pr_info("%s\n", __func__);
1529 /* fix timer for EEE */
1530 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1531 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1532
1533 /* Init VLAN */
1534 if (priv->id == 0x8382) {
1535 for (i = 0; i <= 28; i++)
1536 sw_w32(0, 0xd57c + i * 0x80);
1537 }
1538 if (priv->id == 0x8380) {
1539 for (i = 8; i <= 28; i++)
1540 sw_w32(0, 0xd57c + i * 0x80);
1541 }
1542 return 0;
1543 }
1544
1545 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1546 struct ethtool_link_ksettings *cmd)
1547 {
1548 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1549
1550 pr_debug("%s called\n", __func__);
1551 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1552 }
1553
1554 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1555 const struct ethtool_link_ksettings *cmd)
1556 {
1557 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1558
1559 pr_debug("%s called\n", __func__);
1560 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1561 }
1562
1563 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1564 {
1565 u32 val;
1566 int err;
1567 struct rtl838x_eth_priv *priv = bus->priv;
1568
1569 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1570 return rtl838x_read_sds_phy(mii_id, regnum);
1571 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1572 if (err)
1573 return err;
1574 return val;
1575 }
1576
1577 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1578 {
1579 u32 val;
1580 int err;
1581 struct rtl838x_eth_priv *priv = bus->priv;
1582
1583 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1584 return rtl839x_read_sds_phy(mii_id, regnum);
1585
1586 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1587 if (err)
1588 return err;
1589 return val;
1590 }
1591
/*
 * MDIO read for RTL930x. Supports both Clause-22 and Clause-45
 * accesses (the MDIO core flags C45 in regnum). Returns the register
 * value or a negative error code.
 */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	u32 val;
	int err;

	// TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
	if (mii_id >= 26 && mii_id <= 27)
		return rtl930x_read_sds_phy(mii_id - 18, 0, regnum);

	if (regnum & MII_ADDR_C45) {
		regnum &= ~MII_ADDR_C45;
		/* C45: device in bits 16+, register in the low 16 bits */
		err = rtl930x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
	} else {
		err = rtl930x_read_phy(mii_id, 0, regnum, &val);
	}
	if (err)
		return err;
	return val;
}
1611
1612 static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1613 {
1614 u32 val;
1615 int err;
1616 // struct rtl838x_eth_priv *priv = bus->priv;
1617
1618 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1619 // return rtl839x_read_sds_phy(mii_id, regnum);
1620
1621 err = rtl931x_read_phy(mii_id, 0, regnum, &val);
1622 if (err)
1623 return err;
1624 return val;
1625 }
1626
1627 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1628 int regnum, u16 value)
1629 {
1630 u32 offset = 0;
1631 struct rtl838x_eth_priv *priv = bus->priv;
1632
1633 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1634 if (mii_id == 26)
1635 offset = 0x100;
1636 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1637 return 0;
1638 }
1639 return rtl838x_write_phy(mii_id, 0, regnum, value);
1640 }
1641
1642 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1643 int regnum, u16 value)
1644 {
1645 struct rtl838x_eth_priv *priv = bus->priv;
1646
1647 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1648 return rtl839x_write_sds_phy(mii_id, regnum, value);
1649
1650 return rtl839x_write_phy(mii_id, 0, regnum, value);
1651 }
1652
1653 static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
1654 int regnum, u16 value)
1655 {
1656 // struct rtl838x_eth_priv *priv = bus->priv;
1657
1658 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1659 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1660 if (regnum & MII_ADDR_C45) {
1661 regnum &= ~MII_ADDR_C45;
1662 return rtl930x_write_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, value);
1663 }
1664
1665 return rtl930x_write_phy(mii_id, 0, regnum, value);
1666 }
1667
1668 static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
1669 int regnum, u16 value)
1670 {
1671 // struct rtl838x_eth_priv *priv = bus->priv;
1672
1673 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1674 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1675
1676 return rtl931x_write_phy(mii_id, 0, regnum, value);
1677 }
1678
/*
 * mii_bus reset callback for RTL838x: take PHY register access away
 * from the hardware poller so the SoC can drive the SMI bus.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1691
/*
 * mii_bus reset callback for RTL839x: currently a no-op.
 *
 * Fix: the original kept the non-working polling-disable sequence as
 * unreachable code after "return 0" (compilers warn about this). It is
 * now compiled out with #if 0 so the intent stays documented without
 * producing dead code.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);

#if 0
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
#endif
	return 0;
}
1707
1708 static int rtl931x_mdio_reset(struct mii_bus *bus)
1709 {
1710 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL);
1711 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL + 4);
1712
1713 pr_debug("%s called\n", __func__);
1714
1715 return 0;
1716 }
1717
/*
 * mii_bus reset callback for RTL930x: configure SMI bus selection,
 * power on the SerDes ports and set up the port-to-PHY address map.
 * NOTE(review): several mappings below are board-specific (XGS1210)
 * and should eventually come from the device tree.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;

	pr_info("RTL930X_SMI_PORT0_15_POLLING_SEL %08x 16-27: %08x\n",
		sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL),
		sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));

	pr_info("%s: Enable SMI polling on SMI bus 0, SMI1, SMI2, disable on SMI3\n", __func__);
	sw_w32_mask(BIT(20) | BIT(21) | BIT(22), BIT(23), RTL930X_SMI_GLB_CTRL);

	pr_info("RTL9300 Powering on SerDes ports\n");
	rtl9300_sds_power(24, 1);
	rtl9300_sds_power(25, 1);
	rtl9300_sds_power(26, 1);
	rtl9300_sds_power(27, 1);
	mdelay(200);

	// RTL930X_SMI_PORT0_15_POLLING_SEL 55550000 16-27: 00f9aaaa
	// i.e SMI=0 for all ports
	for (i = 0; i < 5; i++)
		pr_info("port phy: %08x\n", sw_r32(RTL930X_SMI_PORT0_5_ADDR + i *4));

	// 1-to-1 mapping of port to phy-address
	/* Each register packs six 5-bit PHY addresses */
	for (i = 0; i < 24; i++) {
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, i << pos, RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
	}

	// ports 24 and 25 have PHY addresses 8 and 9, ports 26/27 PHY 26/27
	sw_w32(8 | 9 << 5 | 26 << 10 | 27 << 15, RTL930X_SMI_PORT0_5_ADDR + 4 * 4);

	// Ports 24 and 25 live on SMI bus 1 and 2
	sw_w32_mask(0x3 << 16, 0x1 << 16, RTL930X_SMI_PORT16_27_POLLING_SEL);
	sw_w32_mask(0x3 << 18, 0x2 << 18, RTL930X_SMI_PORT16_27_POLLING_SEL);

	// SMI bus 1 and 2 speak Clause 45 TODO: Configure from .dts
	sw_w32_mask(0, BIT(17) | BIT(18), RTL930X_SMI_GLB_CTRL);

	// Ports 24 and 25 are 2.5 Gig, set this type (1)
	sw_w32_mask(0x7 << 12, 1 << 12, RTL930X_SMI_MAC_TYPE_CTRL);
	sw_w32_mask(0x7 << 15, 1 << 15, RTL930X_SMI_MAC_TYPE_CTRL);

	return 0;
}
1764
/*
 * Allocate and register the MDIO bus described by the "mdio-bus" child
 * node of the platform device, wiring up the family-specific
 * read/write/reset callbacks.
 * Returns 0 on success or a negative error code.
 */
static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
{
	struct device_node *mii_np;
	int ret;

	pr_debug("%s called\n", __func__);
	mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");

	if (!mii_np) {
		dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	/* devm allocation: the bus structure is freed on device detach */
	priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
	if (!priv->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	switch(priv->family_id) {
	case RTL8380_FAMILY_ID:
		priv->mii_bus->name = "rtl838x-eth-mdio";
		priv->mii_bus->read = rtl838x_mdio_read;
		priv->mii_bus->write = rtl838x_mdio_write;
		priv->mii_bus->reset = rtl838x_mdio_reset;
		break;
	case RTL8390_FAMILY_ID:
		priv->mii_bus->name = "rtl839x-eth-mdio";
		priv->mii_bus->read = rtl839x_mdio_read;
		priv->mii_bus->write = rtl839x_mdio_write;
		priv->mii_bus->reset = rtl839x_mdio_reset;
		break;
	case RTL9300_FAMILY_ID:
		priv->mii_bus->name = "rtl930x-eth-mdio";
		priv->mii_bus->read = rtl930x_mdio_read;
		priv->mii_bus->write = rtl930x_mdio_write;
		priv->mii_bus->reset = rtl930x_mdio_reset;
		// priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
		break;
	case RTL9310_FAMILY_ID:
		priv->mii_bus->name = "rtl931x-eth-mdio";
		priv->mii_bus->read = rtl931x_mdio_read;
		priv->mii_bus->write = rtl931x_mdio_write;
		priv->mii_bus->reset = rtl931x_mdio_reset;
		// priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
		break;
	}
	priv->mii_bus->priv = priv;
	priv->mii_bus->parent = &priv->pdev->dev;

	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(priv->mii_bus, mii_np);

	/* The node reference is dropped on both success and error paths */
err_put_node:
	of_node_put(mii_np);
	return ret;
}
1827
1828 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
1829 {
1830 pr_debug("%s called\n", __func__);
1831 if (!priv->mii_bus)
1832 return 0;
1833
1834 mdiobus_unregister(priv->mii_bus);
1835 mdiobus_free(priv->mii_bus);
1836
1837 return 0;
1838 }
1839
/* netdev callbacks for RTL838x: round-robin TX queue selection */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
};
1850
/* netdev callbacks for RTL839x: only the RX-mode handler differs */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
};
1861
/* netdev callbacks for RTL930x: priority-based TX queue selection */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
};
1872
/* netdev callbacks for RTL931x: priority-based TX queue selection */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
};
1883
/* phylink MAC operations for the CPU port */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_link_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
1892
/* ethtool operations: link settings are handled entirely by phylink */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
1897
1898 static int __init rtl838x_eth_probe(struct platform_device *pdev)
1899 {
1900 struct net_device *dev;
1901 struct device_node *dn = pdev->dev.of_node;
1902 struct rtl838x_eth_priv *priv;
1903 struct resource *res, *mem;
1904 const void *mac;
1905 phy_interface_t phy_mode;
1906 struct phylink *phylink;
1907 int err = 0, i, rxrings, rxringlen;
1908 struct ring_b *ring;
1909
1910 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
1911 (u32)pdev, (u32)(&(pdev->dev)));
1912
1913 if (!dn) {
1914 dev_err(&pdev->dev, "No DT found\n");
1915 return -EINVAL;
1916 }
1917
1918 rxrings = (soc_info.family == RTL8380_FAMILY_ID
1919 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
1920 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
1921 rxringlen = MAX_ENTRIES / rxrings;
1922 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
1923
1924 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
1925 if (!dev) {
1926 err = -ENOMEM;
1927 goto err_free;
1928 }
1929 SET_NETDEV_DEV(dev, &pdev->dev);
1930 priv = netdev_priv(dev);
1931
1932 /* obtain buffer memory space */
1933 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1934 if (res) {
1935 mem = devm_request_mem_region(&pdev->dev, res->start,
1936 resource_size(res), res->name);
1937 if (!mem) {
1938 dev_err(&pdev->dev, "cannot request memory space\n");
1939 err = -ENXIO;
1940 goto err_free;
1941 }
1942
1943 dev->mem_start = mem->start;
1944 dev->mem_end = mem->end;
1945 } else {
1946 dev_err(&pdev->dev, "cannot request IO resource\n");
1947 err = -ENXIO;
1948 goto err_free;
1949 }
1950
1951 /* Allocate buffer memory */
1952 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
1953 + sizeof(struct ring_b) + sizeof(struct notify_b),
1954 (void *)&dev->mem_start, GFP_KERNEL);
1955 if (!priv->membase) {
1956 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
1957 err = -ENOMEM;
1958 goto err_free;
1959 }
1960
1961 // Allocate ring-buffer space at the end of the allocated memory
1962 ring = priv->membase;
1963 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
1964
1965 spin_lock_init(&priv->lock);
1966
1967 /* obtain device IRQ number */
1968 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1969 if (!res) {
1970 dev_err(&pdev->dev, "cannot obtain IRQ, using default 24\n");
1971 dev->irq = 24;
1972 } else {
1973 dev->irq = res->start;
1974 }
1975 dev->ethtool_ops = &rtl838x_ethtool_ops;
1976 dev->min_mtu = ETH_ZLEN;
1977 dev->max_mtu = 1536;
1978
1979 priv->id = soc_info.id;
1980 priv->family_id = soc_info.family;
1981 if (priv->id) {
1982 pr_info("Found SoC ID: %4x: %s, family %x\n",
1983 priv->id, soc_info.name, priv->family_id);
1984 } else {
1985 pr_err("Unknown chip id (%04x)\n", priv->id);
1986 return -ENODEV;
1987 }
1988
1989 switch (priv->family_id) {
1990 case RTL8380_FAMILY_ID:
1991 priv->cpu_port = RTL838X_CPU_PORT;
1992 priv->r = &rtl838x_reg;
1993 dev->netdev_ops = &rtl838x_eth_netdev_ops;
1994 break;
1995 case RTL8390_FAMILY_ID:
1996 priv->cpu_port = RTL839X_CPU_PORT;
1997 priv->r = &rtl839x_reg;
1998 dev->netdev_ops = &rtl839x_eth_netdev_ops;
1999 break;
2000 case RTL9300_FAMILY_ID:
2001 priv->cpu_port = RTL930X_CPU_PORT;
2002 priv->r = &rtl930x_reg;
2003 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2004 break;
2005 case RTL9310_FAMILY_ID:
2006 priv->cpu_port = RTL931X_CPU_PORT;
2007 priv->r = &rtl931x_reg;
2008 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2009 break;
2010 default:
2011 pr_err("Unknown SoC family\n");
2012 return -ENODEV;
2013 }
2014 priv->rxringlen = rxringlen;
2015 priv->rxrings = rxrings;
2016
2017 rtl8380_init_mac(priv);
2018
2019 /* try to get mac address in the following order:
2020 * 1) from device tree data
2021 * 2) from internal registers set by bootloader
2022 */
2023 mac = of_get_mac_address(pdev->dev.of_node);
2024 if (!IS_ERR(mac)) {
2025 memcpy(dev->dev_addr, mac, ETH_ALEN);
2026 rtl838x_set_mac_hw(dev, (u8 *)mac);
2027 } else {
2028 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2029 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2030 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2031 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2032 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2033 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2034 }
2035 /* if the address is invalid, use a random value */
2036 if (!is_valid_ether_addr(dev->dev_addr)) {
2037 struct sockaddr sa = { AF_UNSPEC };
2038
2039 netdev_warn(dev, "Invalid MAC address, using random\n");
2040 eth_hw_addr_random(dev);
2041 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2042 if (rtl838x_set_mac_address(dev, &sa))
2043 netdev_warn(dev, "Failed to set MAC address.\n");
2044 }
2045 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2046 sw_r32(priv->r->mac + 4));
2047 strcpy(dev->name, "eth%d");
2048 priv->pdev = pdev;
2049 priv->netdev = dev;
2050
2051 err = rtl838x_mdio_init(priv);
2052 if (err)
2053 goto err_free;
2054
2055 err = register_netdev(dev);
2056 if (err)
2057 goto err_free;
2058
2059 for (i = 0; i < priv->rxrings; i++) {
2060 priv->rx_qs[i].id = i;
2061 priv->rx_qs[i].priv = priv;
2062 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2063 }
2064
2065 platform_set_drvdata(pdev, dev);
2066
2067 phy_mode = of_get_phy_mode(dn);
2068 if (phy_mode < 0) {
2069 dev_err(&pdev->dev, "incorrect phy-mode\n");
2070 err = -EINVAL;
2071 goto err_free;
2072 }
2073 priv->phylink_config.dev = &dev->dev;
2074 priv->phylink_config.type = PHYLINK_NETDEV;
2075
2076 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2077 phy_mode, &rtl838x_phylink_ops);
2078 if (IS_ERR(phylink)) {
2079 err = PTR_ERR(phylink);
2080 goto err_free;
2081 }
2082 priv->phylink = phylink;
2083
2084 return 0;
2085
2086 err_free:
2087 pr_err("Error setting up netdev, freeing it again.\n");
2088 free_netdev(dev);
2089 return err;
2090 }
2091
2092 static int rtl838x_eth_remove(struct platform_device *pdev)
2093 {
2094 struct net_device *dev = platform_get_drvdata(pdev);
2095 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2096 int i;
2097
2098 if (dev) {
2099 pr_info("Removing platform driver for rtl838x-eth\n");
2100 rtl838x_mdio_remove(priv);
2101 rtl838x_hw_stop(priv);
2102
2103 netif_tx_stop_all_queues(dev);
2104
2105 for (i = 0; i < priv->rxrings; i++)
2106 netif_napi_del(&priv->rx_qs[i].napi);
2107
2108 unregister_netdev(dev);
2109 free_netdev(dev);
2110 }
2111 return 0;
2112 }
2113
/*
 * Device-tree compatible strings this driver binds to; the empty
 * entry terminates the table (sentinel). Exported via
 * MODULE_DEVICE_TABLE so module autoloading works from DT matches.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2119
2120 static struct platform_driver rtl838x_eth_driver = {
2121 .probe = rtl838x_eth_probe,
2122 .remove = rtl838x_eth_remove,
2123 .driver = {
2124 .name = "rtl838x-eth",
2125 .pm = NULL,
2126 .of_match_table = rtl838x_eth_of_ids,
2127 },
2128 };
2129
2130 module_platform_driver(rtl838x_eth_driver);
2131
2132 MODULE_AUTHOR("B. Koblitz");
2133 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
2134 MODULE_LICENSE("GPL");