realtek: fix syntax error introduced by previous commit
[openwrt/openwrt.git] target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
29 /*
30 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
31 * The ring is assigned by the switch based on packet/port priority.
32 * Maximum number of TX rings is 2, ring 1 being the high priority
33 * ring on the RTL93xx SoCs. MAX_RING_SIZE * RING_BUFFER gives
34 * the memory used for the ring buffer.
35 */
36 #define MAX_RXRINGS 32
37 #define MAX_RXLEN 100
38 #define MAX_ENTRIES (200 * 8)
39 #define TXRINGS 2
40 // BUG: TXRINGLEN can be 160
41 #define TXRINGLEN 16
42 #define NOTIFY_EVENTS 10
43 #define NOTIFY_BLOCKS 10
44 #define TX_EN 0x8
45 #define RX_EN 0x4
46 #define TX_EN_93XX 0x20
47 #define RX_EN_93XX 0x10
48 #define TX_DO 0x2
49 #define WRAP 0x2
50
51 #define RING_BUFFER 1600
52
53 #define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
54 #define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
55 #define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
56 #define RTL838X_ATK_PRVNT_STS (0x5B1C)
57
58 struct p_hdr {
59 uint8_t *buf;
60 uint16_t reserved;
61 uint16_t size; /* buffer size */
62 uint16_t offset;
63 uint16_t len; /* pkt len */
64 uint16_t cpu_tag[10];
65 } __packed __aligned(1);
66
67 struct n_event {
68 uint32_t type:2;
69 uint32_t fidVid:12;
70 uint64_t mac:48;
71 uint32_t slp:6;
72 uint32_t valid:1;
73 uint32_t reserved:27;
74 } __packed __aligned(1);
75
76 struct ring_b {
77 uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];
78 uint32_t tx_r[TXRINGS][TXRINGLEN];
79 struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
80 struct p_hdr tx_header[TXRINGS][TXRINGLEN];
81 uint32_t c_rx[MAX_RXRINGS];
82 uint32_t c_tx[TXRINGS];
83 uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];
84 uint8_t *rx_space;
85 };
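
/*
 * Illustrative sketch, not part of the driver: total memory needed for the
 * ring layout above. struct ring_b holds the descriptor words, packet headers
 * and the TX packet buffers; the RX packet buffers (rx_space) are sized from
 * the number of RX rings and the ring length chosen at probe time. The helper
 * name is hypothetical and only makes the arithmetic explicit.
 */
static inline size_t rtl83xx_ring_mem_size(int rxrings, int rxringlen)
{
	/* one RING_BUFFER-sized packet buffer per RX descriptor */
	size_t rx_space = (size_t)rxrings * rxringlen * RING_BUFFER;

	return sizeof(struct ring_b) + rx_space;
}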
86
87 struct notify_block {
88 struct n_event events[NOTIFY_EVENTS];
89 };
90
91 struct notify_b {
92 struct notify_block blocks[NOTIFY_BLOCKS];
93 u32 reserved1[8];
94 u32 ring[NOTIFY_BLOCKS];
95 u32 reserved2[8];
96 };
97
98 void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
99 {
100 prio &= 0x7;
101
102 if (dest_port > 0) {
103 // cpu_tag[0] is reserved on the RTL83XX SoCs
104 h->cpu_tag[1] = 0x0400;
105 h->cpu_tag[2] = 0x0200;
106 h->cpu_tag[3] = 0x0000;
107 h->cpu_tag[4] = BIT(dest_port) >> 16;
108 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
109 // Set internal priority and AS_PRIO
110 if (prio >= 0)
111 h->cpu_tag[2] |= (prio | 0x8) << 12;
112 }
113 }
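
/*
 * Usage sketch, for illustration only (this helper is not part of the
 * driver): forcing a frame out of switch port 5 with priority 3 on RTL838x.
 * Per the code above this yields
 *   cpu_tag[1] = 0x0400
 *   cpu_tag[2] = 0x0200 | ((3 | 0x8) << 12) = 0xb200   (AS_PRIO, prio 3)
 *   cpu_tag[4] = BIT(5) >> 16    = 0x0000              (upper port-mask bits)
 *   cpu_tag[5] = BIT(5) & 0xffff = 0x0020              (lower port-mask bits)
 */
static inline void rtl838x_tx_header_example(struct p_hdr *h)
{
	rtl838x_create_tx_header(h, 5, 3);
}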
114
115 void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
116 {
117 prio &= 0x7;
118
119 if (dest_port > 0) {
120 // cpu_tag[0] is reserved on the RTL83XX SoCs
121 h->cpu_tag[1] = 0x0100;
122 h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
123 if (dest_port >= 32) {
124 dest_port -= 32;
125 h->cpu_tag[2] = BIT(dest_port) >> 16;
126 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
127 } else {
128 h->cpu_tag[4] = BIT(dest_port) >> 16;
129 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
130 }
131 h->cpu_tag[6] |= BIT(21); // Enable destination port mask use
132 // Set internal priority and AS_PRIO
133 if (prio >= 0)
134 h->cpu_tag[1] |= prio | BIT(3);
135 }
136 }
137
138 void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
139 {
140 h->cpu_tag[0] = 0x8000;
141 h->cpu_tag[1] = 0; // TODO: Fill port and prio
142 h->cpu_tag[2] = 0;
143 h->cpu_tag[3] = 0;
144 h->cpu_tag[4] = 0;
145 h->cpu_tag[5] = 0;
146 h->cpu_tag[6] = 0;
147 h->cpu_tag[7] = 0xffff;
148 }
149
150 void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
151 {
152 h->cpu_tag[0] = 0x8000;
153 h->cpu_tag[1] = 0; // TODO: Fill port and prio
154 h->cpu_tag[2] = 0;
155 h->cpu_tag[3] = 0;
156 h->cpu_tag[4] = 0;
157 h->cpu_tag[5] = 0;
158 h->cpu_tag[6] = 0;
159 h->cpu_tag[7] = 0xffff;
160 }
161
162 struct rtl838x_rx_q {
163 int id;
164 struct rtl838x_eth_priv *priv;
165 struct napi_struct napi;
166 };
167
168 struct rtl838x_eth_priv {
169 struct net_device *netdev;
170 struct platform_device *pdev;
171 void *membase;
172 spinlock_t lock;
173 struct mii_bus *mii_bus;
174 struct rtl838x_rx_q rx_qs[MAX_RXRINGS];
175 struct phylink *phylink;
176 struct phylink_config phylink_config;
177 u16 id;
178 u16 family_id;
179 const struct rtl838x_reg *r;
180 u8 cpu_port;
181 u32 lastEvent;
182 u16 rxrings;
183 u16 rxringlen;
184 };
185
186 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
187 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
188 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
189 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
190 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
191 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
192 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
193 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
194
195 /*
196 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR registers track the fill level of
197 * the rings. Writing x into these registers subtracts x from their content.
198 * When the content reaches the ring size, the ASIC no longer adds
199 * packets to this receive queue.
200 */
201 void rtl838x_update_cntr(int r, int released)
202 {
203 // This feature is not available on RTL838x SoCs
204 }
205
206 void rtl839x_update_cntr(int r, int released)
207 {
208 // This feature is not available on RTL839x SoCs
209 }
210
211 void rtl930x_update_cntr(int r, int released)
212 {
213 int pos = (r % 3) * 10;
214 u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
215 u32 v = sw_r32(reg);
216
217 v = (v >> pos) & 0x3ff;
218 pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
219 sw_w32_mask(0x3ff << pos, released << pos, reg);
220 sw_w32(v, reg);
221 }
222
223 void rtl931x_update_cntr(int r, int released)
224 {
225 int pos = (r % 3) * 10;
226 u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
227
228 sw_w32_mask(0x3ff << pos, released << pos, reg);
229 }
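
/*
 * Illustrative sketch only: where the fill counter of RX ring r lives in the
 * RTL93xx CNTR registers used above. Three 10-bit counters are packed per
 * 32-bit register, so e.g. ring 7 sits in bits 19:10 of the register at byte
 * offset 8. The helper names are hypothetical.
 */
static inline u32 rtl93xx_cntr_reg_offset(int r)
{
	return (r / 3) << 2;	/* which 32-bit register holds ring r */
}

static inline int rtl93xx_cntr_bit_pos(int r)
{
	return (r % 3) * 10;	/* bit position of the 10-bit counter */
}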
230
231 struct dsa_tag {
232 u8 reason;
233 u8 queue;
234 u16 port;
235 u8 l2_offloaded;
236 u8 prio;
237 bool crc_error;
238 };
239
240 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
241 {
242 t->reason = h->cpu_tag[3] & 0xf;
243 t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
244 t->port = h->cpu_tag[1] & 0x1f;
245 t->crc_error = t->reason == 13;
246
247 pr_debug("Reason: %d\n", t->reason);
248 if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
249 t->l2_offloaded = 1;
250 else
251 t->l2_offloaded = 0;
252
253 return t->l2_offloaded;
254 }
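
/*
 * Worked example, for illustration only (this helper is not part of the
 * driver and the sample values are made up): an RTL838x RX header with
 * cpu_tag[0] = 0x0040, cpu_tag[1] = 0x0005, cpu_tag[3] = 0x000d decodes to
 * queue 2, port 5 and reason 13, i.e. a CRC error; since the reason is not 4
 * (special trap), the packet still counts as l2_offloaded.
 */
static inline bool rtl838x_decode_tag_example(void)
{
	struct p_hdr h = { .cpu_tag = { 0x0040, 0x0005, 0, 0x000d } };
	struct dsa_tag t;

	return rtl838x_decode_tag(&h, &t);
}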
255
256 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
257 {
258 t->reason = h->cpu_tag[4] & 0x1f;
259 t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
260 t->port = h->cpu_tag[1] & 0x3f;
261 t->crc_error = h->cpu_tag[3] & BIT(2);
262
263 pr_debug("Reason: %d\n", t->reason);
264 if ((t->reason != 7) && (t->reason != 8)) // NIC_RX_REASON_RMA_USR
265 t->l2_offloaded = 1;
266 else
267 t->l2_offloaded = 0;
268
269 return t->l2_offloaded;
270 }
271
272 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
273 {
274 t->reason = h->cpu_tag[7] & 0x3f;
275 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
276 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
277 t->crc_error = h->cpu_tag[1] & BIT(6);
278
279 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
280 if (t->reason >= 19 && t->reason <= 27)
281 t->l2_offloaded = 0;
282 else
283 t->l2_offloaded = 1;
284
285 return t->l2_offloaded;
286 }
287
288 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
289 {
290 t->reason = h->cpu_tag[7] & 0x3f;
291 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
292 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
293 t->crc_error = h->cpu_tag[1] & BIT(6);
294
295 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
296 if (t->reason >= 19 && t->reason <= 27)
297 t->l2_offloaded = 0;
298 else
299 t->l2_offloaded = 1;
300
301 return t->l2_offloaded;
302 }
303
304 /*
305 * Discard the RX ring buffers, called as part of the net ISR
306 * when a ring buffer runs over.
307 * Caller needs to hold priv->lock
308 */
309 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
310 {
311 int r;
312 u32 *last;
313 struct p_hdr *h;
314 struct ring_b *ring = priv->membase;
315
316 for (r = 0; r < priv->rxrings; r++) {
317 pr_debug("In %s working on r: %d\n", __func__, r);
318 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
319 do {
320 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
321 break;
322 pr_debug("Got something: %d\n", ring->c_rx[r]);
323 h = &ring->rx_header[r][ring->c_rx[r]];
324 memset(h, 0, sizeof(struct p_hdr));
325 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
326 + r * priv->rxringlen * RING_BUFFER
327 + ring->c_rx[r] * RING_BUFFER);
328 h->size = RING_BUFFER;
329 /* make sure the header is visible to the ASIC */
330 mb();
331
332 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
333 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
334 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
335 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
336 }
337 }
338
339 struct fdb_update_work {
340 struct work_struct work;
341 struct net_device *ndev;
342 u64 macs[NOTIFY_EVENTS + 1];
343 };
344
345 void rtl838x_fdb_sync(struct work_struct *work)
346 {
347 const struct fdb_update_work *uw =
348 container_of(work, struct fdb_update_work, work);
349 struct switchdev_notifier_fdb_info info;
350 u8 addr[ETH_ALEN];
351 int i = 0;
352 int action;
353
354 while (uw->macs[i]) {
355 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
356 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
357 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
358 info.addr = &addr[0];
359 info.vid = 0;
360 info.offloaded = 1;
361 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
362 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
363 i++;
364 }
365 kfree(work);
366 }
367
368 static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
369 {
370 struct notify_b *nb = priv->membase + sizeof(struct ring_b);
371 u32 e = priv->lastEvent;
372 struct n_event *event;
373 int i;
374 u64 mac;
375 struct fdb_update_work *w;
376
377 while (!(nb->ring[e] & 1)) {
378 w = kzalloc(sizeof(*w), GFP_ATOMIC);
379 if (!w) {
380 pr_err("Out of memory: %s\n", __func__);
381 return;
382 }
383 INIT_WORK(&w->work, rtl838x_fdb_sync);
384
385 for (i = 0; i < NOTIFY_EVENTS; i++) {
386 event = &nb->blocks[e].events[i];
387 if (!event->valid)
388 continue;
389 mac = event->mac;
390 if (event->type)
391 mac |= 1ULL << 63;
392 w->ndev = priv->netdev;
393 w->macs[i] = mac;
394 }
395
396 /* Hand the ring entry back to the switch */
397 nb->ring[e] = nb->ring[e] | 1;
398 e = (e + 1) % NOTIFY_BLOCKS;
399
400 w->macs[i] = 0ULL;
401 schedule_work(&w->work);
402 }
403 priv->lastEvent = e;
404 }
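
/*
 * Illustrative sketch only: how the notification handler above packs an L2
 * table event into the u64 values consumed by rtl838x_fdb_sync(). Bit 63
 * distinguishes add (1) from delete (0), the low 48 bits carry the MAC
 * address, and a zero entry terminates the list. The helper name is
 * hypothetical.
 */
static inline u64 rtl839x_pack_fdb_event(u64 mac, bool add)
{
	return (mac & 0xffffffffffffULL) | (add ? 1ULL << 63 : 0);
}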
405
406 static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
407 {
408 struct net_device *dev = dev_id;
409 struct rtl838x_eth_priv *priv = netdev_priv(dev);
410 u32 status = sw_r32(priv->r->dma_if_intr_sts);
411 bool triggered = false;
412 u32 atk = sw_r32(RTL838X_ATK_PRVNT_STS);
413 int i;
414 u32 storm_uc = sw_r32(RTL838X_STORM_CTRL_PORT_UC_EXCEED);
415 u32 storm_mc = sw_r32(RTL838X_STORM_CTRL_PORT_MC_EXCEED);
416 u32 storm_bc = sw_r32(RTL838X_STORM_CTRL_PORT_BC_EXCEED);
417
418 pr_debug("IRQ: %08x\n", status);
419 if (storm_uc || storm_mc || storm_bc) {
420 pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
421 storm_uc, storm_mc, storm_bc);
422
423 sw_w32(storm_uc, RTL838X_STORM_CTRL_PORT_UC_EXCEED);
424 sw_w32(storm_mc, RTL838X_STORM_CTRL_PORT_MC_EXCEED);
425 sw_w32(storm_bc, RTL838X_STORM_CTRL_PORT_BC_EXCEED);
426
427 triggered = true;
428 }
429
430 if (atk) {
431 pr_debug("Attack prevention triggered: %08x\n", atk);
432 sw_w32(atk, RTL838X_ATK_PRVNT_STS);
433 }
434
435 spin_lock(&priv->lock);
436 /* Ignore TX interrupt */
437 if ((status & 0xf0000)) {
438 /* Clear ISR */
439 sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
440 }
441
442 /* RX interrupt */
443 if (status & 0x0ff00) {
444 /* ACK and disable RX interrupt for this ring */
445 sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
446 sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
447 for (i = 0; i < priv->rxrings; i++) {
448 if (status & BIT(i + 8)) {
449 pr_debug("Scheduling queue: %d\n", i);
450 napi_schedule(&priv->rx_qs[i].napi);
451 }
452 }
453 }
454
455 /* RX buffer overrun */
456 if (status & 0x000ff) {
457 pr_info("RX buffer overrun: status %x, mask: %x\n",
458 status, sw_r32(priv->r->dma_if_intr_msk));
459 sw_w32(status, priv->r->dma_if_intr_sts);
460 rtl838x_rb_cleanup(priv, status & 0xff);
461 }
462
463 if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
464 sw_w32(0x00100000, priv->r->dma_if_intr_sts);
465 rtl839x_l2_notification_handler(priv);
466 }
467
468 if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
469 sw_w32(0x00200000, priv->r->dma_if_intr_sts);
470 rtl839x_l2_notification_handler(priv);
471 }
472
473 if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
474 sw_w32(0x00400000, priv->r->dma_if_intr_sts);
475 rtl839x_l2_notification_handler(priv);
476 }
477
478 spin_unlock(&priv->lock);
479 return IRQ_HANDLED;
480 }
481
482 static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
483 {
484 struct net_device *dev = dev_id;
485 struct rtl838x_eth_priv *priv = netdev_priv(dev);
486 u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
487 u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
488 u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
489 int i;
490
491 pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
492 __func__, status_tx, status_rx, status_rx_r);
493 spin_lock(&priv->lock);
494
495 /* Ignore TX interrupt */
496 if (status_tx) {
497 /* Clear ISR */
498 pr_debug("TX done\n");
499 sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
500 }
501
502 /* RX interrupt */
503 if (status_rx) {
504 pr_debug("RX IRQ\n");
505 /* ACK and disable RX interrupt for given rings */
506 sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
507 sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
508 for (i = 0; i < priv->rxrings; i++) {
509 if (status_rx & BIT(i)) {
510 pr_debug("Scheduling queue: %d\n", i);
511 napi_schedule(&priv->rx_qs[i].napi);
512 }
513 }
514 }
515
516 /* RX buffer overrun */
517 if (status_rx_r) {
518 pr_debug("RX buffer overrun: status %x, mask: %x\n",
519 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
520 sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
521 rtl838x_rb_cleanup(priv, status_rx_r);
522 }
523
524 spin_unlock(&priv->lock);
525 return IRQ_HANDLED;
526 }
527
528 static const struct rtl838x_reg rtl838x_reg = {
529 .net_irq = rtl83xx_net_irq,
530 .mac_port_ctrl = rtl838x_mac_port_ctrl,
531 .dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
532 .dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
533 .dma_if_ctrl = RTL838X_DMA_IF_CTRL,
534 .mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
535 .dma_rx_base = RTL838X_DMA_RX_BASE,
536 .dma_tx_base = RTL838X_DMA_TX_BASE,
537 .dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
538 .dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
539 .dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
540 .rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
541 .get_mac_link_sts = rtl838x_get_mac_link_sts,
542 .get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
543 .get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
544 .get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
545 .get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
546 .mac = RTL838X_MAC,
547 .l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
548 .update_cntr = rtl838x_update_cntr,
549 .create_tx_header = rtl838x_create_tx_header,
550 .decode_tag = rtl838x_decode_tag,
551 };
552
553 static const struct rtl838x_reg rtl839x_reg = {
554 .net_irq = rtl83xx_net_irq,
555 .mac_port_ctrl = rtl839x_mac_port_ctrl,
556 .dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
557 .dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
558 .dma_if_ctrl = RTL839X_DMA_IF_CTRL,
559 .mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
560 .dma_rx_base = RTL839X_DMA_RX_BASE,
561 .dma_tx_base = RTL839X_DMA_TX_BASE,
562 .dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
563 .dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
564 .dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
565 .rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
566 .get_mac_link_sts = rtl839x_get_mac_link_sts,
567 .get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
568 .get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
569 .get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
570 .get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
571 .mac = RTL839X_MAC,
572 .l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
573 .update_cntr = rtl839x_update_cntr,
574 .create_tx_header = rtl839x_create_tx_header,
575 .decode_tag = rtl839x_decode_tag,
576 };
577
578 static const struct rtl838x_reg rtl930x_reg = {
579 .net_irq = rtl93xx_net_irq,
580 .mac_port_ctrl = rtl930x_mac_port_ctrl,
581 .dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
582 .dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
583 .dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
584 .dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
585 .dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
586 .dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
587 .l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
588 .l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
589 .dma_if_ctrl = RTL930X_DMA_IF_CTRL,
590 .mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
591 .dma_rx_base = RTL930X_DMA_RX_BASE,
592 .dma_tx_base = RTL930X_DMA_TX_BASE,
593 .dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
594 .dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
595 .dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
596 .rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
597 .get_mac_link_sts = rtl930x_get_mac_link_sts,
598 .get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
599 .get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
600 .get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
601 .get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
602 .mac = RTL930X_MAC_L2_ADDR_CTRL,
603 .l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
604 .update_cntr = rtl930x_update_cntr,
605 .create_tx_header = rtl930x_create_tx_header,
606 .decode_tag = rtl930x_decode_tag,
607 };
608
609 static const struct rtl838x_reg rtl931x_reg = {
610 .net_irq = rtl93xx_net_irq,
611 .mac_port_ctrl = rtl931x_mac_port_ctrl,
612 .dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
613 .dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
614 .dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
615 .dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
616 .dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
617 .dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
618 .l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
619 .l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
620 .dma_if_ctrl = RTL931X_DMA_IF_CTRL,
621 .mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
622 .dma_rx_base = RTL931X_DMA_RX_BASE,
623 .dma_tx_base = RTL931X_DMA_TX_BASE,
624 .dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
625 .dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
626 .dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
627 .rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
628 .get_mac_link_sts = rtl931x_get_mac_link_sts,
629 .get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
630 .get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
631 .get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
632 .get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
633 .mac = RTL931X_MAC_L2_ADDR_CTRL,
634 .l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
635 .update_cntr = rtl931x_update_cntr,
636 .create_tx_header = rtl931x_create_tx_header,
637 .decode_tag = rtl931x_decode_tag,
638 };
639
640 static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
641 {
642 u32 int_saved, nbuf;
643 int i, pos;
644
645 pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
646 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
647 mdelay(100);
648
649 /* Disable and clear interrupts */
650 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
651 sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
652 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
653 sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
654 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
655 sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
656 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
657 } else {
658 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
659 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
660 }
661
662 if (priv->family_id == RTL8390_FAMILY_ID) {
663 /* Preserve L2 notification and NBUF settings */
664 int_saved = sw_r32(priv->r->dma_if_intr_msk);
665 nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
666
667 /* Disable link change interrupt on RTL839x */
668 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
669 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
670
671 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
672 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
673 }
674
675 /* Reset NIC */
676 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
677 sw_w32(0x4, priv->r->rst_glb_ctrl);
678 else
679 sw_w32(0x8, priv->r->rst_glb_ctrl);
680
681 do { /* Wait for reset of NIC and Queues done */
682 udelay(20);
683 } while (sw_r32(priv->r->rst_glb_ctrl) & 0xc);
684 mdelay(100);
685
686 /* Setup Head of Line */
687 if (priv->family_id == RTL8380_FAMILY_ID)
688 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
689 if (priv->family_id == RTL8390_FAMILY_ID)
690 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
691 if (priv->family_id == RTL9300_FAMILY_ID) {
692 for (i = 0; i < priv->rxrings; i++) {
693 pos = (i % 3) * 10;
694 sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
695 sw_w32_mask(0x3ff << pos, priv->rxringlen,
696 priv->r->dma_if_rx_ring_cntr(i));
697 }
698 }
699
700 /* Re-enable link change interrupt */
701 if (priv->family_id == RTL8390_FAMILY_ID) {
702 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
703 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
704 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
705 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
706
707 /* Restore notification settings: on RTL838x these bits are null */
708 sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
709 sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
710 }
711 }
712
713 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
714 {
715 int i;
716 struct ring_b *ring = priv->membase;
717
718 for (i = 0; i < priv->rxrings; i++)
719 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
720
721 for (i = 0; i < TXRINGS; i++)
722 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
723 }
724
725 static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
726 {
727 /* Disable Head of Line features for all RX rings */
728 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
729
730 /* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
731 sw_w32(0x06400020, priv->r->dma_if_ctrl);
732
733 /* Enable RX done, RX overflow and TX done interrupts */
734 sw_w32(0xfffff, priv->r->dma_if_intr_msk);
735
736 /* Enable DMA, engine expects empty FCS field */
737 sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
738
739 /* Restart TX/RX to CPU port */
740 sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
741 /* Set Speed, duplex, flow control
742 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
743 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
744 * | MEDIA_SEL
745 */
746 sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
747
748 /* Enable CRC checks on CPU-port */
749 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
750 }
751
752 static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
753 {
754 /* Setup CPU-Port: RX Buffer */
755 sw_w32(0x0000c808, priv->r->dma_if_ctrl);
756
757 /* Enable Notify, RX done, RX overflow and TX done interrupts */
758 sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!
759
760 /* Enable DMA */
761 sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
762
763 /* Restart TX/RX to CPU port, enable CRC checking */
764 sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
765
766 /* CPU port joins Lookup Miss Flooding Portmask */
767 // TODO: The code below should also work for the RTL838x
768 sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
769 sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
770 sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);
771
772 /* Force CPU port link up */
773 sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
774 }
775
776 static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
777 {
778 int i, pos;
779 u32 v;
780
781 /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
782 sw_w32(0x06400040, priv->r->dma_if_ctrl);
783
784 for (i = 0; i < priv->rxrings; i++) {
785 pos = (i % 3) * 10;
786 sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));
787
788 // Some SoCs have issues with missing underflow protection
789 v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
790 sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
791 }
792
793 /* Enable Notify, RX done, RX overflow and TX done interrupts */
794 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
795 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
796 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);
797
798 /* Enable DMA */
799 sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);
800
801 /* Restart TX/RX to CPU port, enable CRC checking */
802 sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
803
804 sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
805 sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
806 }
807
808 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
809 {
810 int i, j;
811
812 struct p_hdr *h;
813
814 for (i = 0; i < priv->rxrings; i++) {
815 for (j = 0; j < priv->rxringlen; j++) {
816 h = &ring->rx_header[i][j];
817 memset(h, 0, sizeof(struct p_hdr));
818 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
819 + i * priv->rxringlen * RING_BUFFER
820 + j * RING_BUFFER);
821 h->size = RING_BUFFER;
822 /* All rings owned by switch, last one wraps */
823 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
824 | (j == (priv->rxringlen - 1) ? WRAP : 0);
825 }
826 ring->c_rx[i] = 0;
827 }
828
829 for (i = 0; i < TXRINGS; i++) {
830 for (j = 0; j < TXRINGLEN; j++) {
831 h = &ring->tx_header[i][j];
832 memset(h, 0, sizeof(struct p_hdr));
833 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
834 + i * TXRINGLEN * RING_BUFFER
835 + j * RING_BUFFER);
836 h->size = RING_BUFFER;
837 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
838 }
839 /* Last header is wrapping around */
840 ring->tx_r[i][j-1] |= WRAP;
841 ring->c_tx[i] = 0;
842 }
843 }
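
/*
 * Illustrative sketch only: layout of a descriptor word as written by
 * rtl838x_setup_ring_buffer() above. Bit 0 hands the descriptor to the
 * switch, WRAP (bit 1) marks the last descriptor of a ring, and the
 * remaining bits hold the KSEG1 address of the corresponding p_hdr.
 * The helper name is hypothetical.
 */
static inline u32 rtl838x_rx_desc_word(struct p_hdr *h, bool last)
{
	return KSEG1ADDR(h) | 1 | (last ? WRAP : 0);
}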
844
845 static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
846 {
847 int i;
848 struct notify_b *b = priv->membase + sizeof(struct ring_b);
849
850 for (i = 0; i < NOTIFY_BLOCKS; i++)
851 b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);
852
853 sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
854 sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);
855
856 /* Setup notification events */
857 sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
858 sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN
859
860 /* Enable Notification */
861 sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
862 priv->lastEvent = 0;
863 }
864
865 static int rtl838x_eth_open(struct net_device *ndev)
866 {
867 unsigned long flags;
868 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
869 struct ring_b *ring = priv->membase;
870 int i, err;
871
872 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
873 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
874
875 spin_lock_irqsave(&priv->lock, flags);
876 rtl838x_hw_reset(priv);
877 rtl838x_setup_ring_buffer(priv, ring);
878 if (priv->family_id == RTL8390_FAMILY_ID) {
879 rtl839x_setup_notify_ring_buffer(priv);
880 /* Make sure the ring structure is visible to the ASIC */
881 mb();
882 flush_cache_all();
883 }
884
885 rtl838x_hw_ring_setup(priv);
886 err = request_irq(ndev->irq, priv->r->net_irq, IRQF_SHARED, ndev->name, ndev);
887 if (err) {
888 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
889 __func__, err);
890 return err;
891 }
892 phylink_start(priv->phylink);
893
894 for (i = 0; i < priv->rxrings; i++)
895 napi_enable(&priv->rx_qs[i].napi);
896
897 switch (priv->family_id) {
898 case RTL8380_FAMILY_ID:
899 rtl838x_hw_en_rxtx(priv);
900 /* Trap IGMP/MLD traffic to CPU-Port */
901 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
902 /* Flush learned FDB entries on link down of a port */
903 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
904 break;
905
906 case RTL8390_FAMILY_ID:
907 rtl839x_hw_en_rxtx(priv);
908 // Trap MLD and IGMP messages to CPU_PORT
909 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
910 /* Flush learned FDB entries on link down of a port */
911 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
912 break;
913
914 case RTL9300_FAMILY_ID:
915 rtl93xx_hw_en_rxtx(priv);
916 /* Flush learned FDB entries on link down of a port */
917 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
918 // Trap MLD and IGMP messages to CPU_PORT
919 sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
920 break;
921
922 case RTL9310_FAMILY_ID:
923 rtl93xx_hw_en_rxtx(priv);
924 break;
925 }
926
927 netif_tx_start_all_queues(ndev);
928
929 spin_unlock_irqrestore(&priv->lock, flags);
930
931 return 0;
932 }
933
934 static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
935 {
936 u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
937 u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
938 int i;
939
940 // Disable RX/TX from/to CPU-port
941 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
942
943 /* Disable traffic */
944 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
945 sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
946 else
947 sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
948 mdelay(200); // TODO: test whether this delay is needed
949
950 /* Block all ports */
951 if (priv->family_id == RTL8380_FAMILY_ID) {
952 sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
953 sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
954 sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
955 }
956
957 /* Flush L2 address cache */
958 if (priv->family_id == RTL8380_FAMILY_ID) {
959 for (i = 0; i <= priv->cpu_port; i++) {
960 sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
961 do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
962 }
963 } else if (priv->family_id == RTL8390_FAMILY_ID) {
964 for (i = 0; i <= priv->cpu_port; i++) {
965 sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
966 do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
967 }
968 }
969 // TODO: L2 flush register is 64 bit on RTL931X and 930X
970
971 /* CPU-Port: Link down */
972 if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
973 sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
974 else
975 sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
976 mdelay(100);
977
978 /* Disable all TX/RX interrupts */
979 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
980 sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
981 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
982 sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
983 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
984 sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
985 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
986 } else {
987 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
988 sw_w32(clear_irq, priv->r->dma_if_intr_sts);
989 }
990
991 /* Disable TX/RX DMA */
992 sw_w32(0x00000000, priv->r->dma_if_ctrl);
993 mdelay(200);
994 }
995
996 static int rtl838x_eth_stop(struct net_device *ndev)
997 {
998 unsigned long flags;
999 int i;
1000 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1001
1002 pr_info("in %s\n", __func__);
1003
1004 spin_lock_irqsave(&priv->lock, flags);
1005 phylink_stop(priv->phylink);
1006 rtl838x_hw_stop(priv);
1007 free_irq(ndev->irq, ndev);
1008
1009 for (i = 0; i < priv->rxrings; i++)
1010 napi_disable(&priv->rx_qs[i].napi);
1011
1012 netif_tx_stop_all_queues(ndev);
1013
1014 spin_unlock_irqrestore(&priv->lock, flags);
1015
1016 return 0;
1017 }
1018
1019 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1020 {
1021 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1022 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1023 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1024 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1025 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1026 }
1027 if (ndev->flags & IFF_ALLMULTI) {
1028 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1029 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1030 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1031 }
1032 if (ndev->flags & IFF_PROMISC) {
1033 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1034 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1035 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1036 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1037 }
1038 }
1039
1040 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1041 {
1042 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1043
1044 if (priv->family_id == RTL8390_FAMILY_ID)
1045 return rtl839x_eth_set_multicast_list(ndev);
1046
1047 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1048 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1049 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1050 }
1051 if (ndev->flags & IFF_ALLMULTI)
1052 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1053 if (ndev->flags & IFF_PROMISC) {
1054 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1055 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1056 }
1057 }
1058
1059 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1060 {
1061 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1062 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1063 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1064 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1065 }
1066 if (ndev->flags & IFF_ALLMULTI) {
1067 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1068 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1069 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1070 }
1071 if (ndev->flags & IFF_PROMISC) {
1072 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1073 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1074 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1075 }
1076 }
1077
1078 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1079 {
1080 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1081 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1082 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1083 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1084 }
1085 if (ndev->flags & IFF_ALLMULTI) {
1086 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1087 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1088 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1089 }
1090 if (ndev->flags & IFF_PROMISC) {
1091 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1092 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1093 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1094 }
1095 }
1096
1097 static void rtl838x_eth_tx_timeout(struct net_device *ndev)
1098 {
1099 unsigned long flags;
1100 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1101
1102 pr_warn("%s\n", __func__);
1103 spin_lock_irqsave(&priv->lock, flags);
1104 rtl838x_hw_stop(priv);
1105 rtl838x_hw_ring_setup(priv);
1106 rtl838x_hw_en_rxtx(priv);
1107 netif_trans_update(ndev);
1108 netif_start_queue(ndev);
1109 spin_unlock_irqrestore(&priv->lock, flags);
1110 }
1111
1112 static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
1113 {
1114 int len, i;
1115 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1116 struct ring_b *ring = priv->membase;
1117 uint32_t val;
1118 int ret;
1119 unsigned long flags;
1120 struct p_hdr *h;
1121 int dest_port = -1;
1122 int q = skb_get_queue_mapping(skb) % TXRINGS;
1123
1124 if (q) // Check for high prio queue
1125 pr_debug("SKB priority: %d\n", skb->priority);
1126
1127 spin_lock_irqsave(&priv->lock, flags);
1128 len = skb->len;
1129
1130 /* Check for DSA tagging at the end of the buffer */
1131 if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
1132 && skb->data[len-3] < priv->cpu_port && skb->data[len-2] == 0x10
1133 && skb->data[len-1] == 0x00) {
1134 /* Reuse tag space for CRC if possible */
1135 dest_port = skb->data[len-3];
1136 skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
1137 len -= 4;
1138 }
1139
1140 len += 4; // Add space for CRC
1141
1142 if (skb_padto(skb, len)) {
1143 ret = NETDEV_TX_OK;
1144 goto txdone;
1145 }
1146
1147 /* We can send this packet if CPU owns the descriptor */
1148 if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {
1149
1150 /* Set descriptor for tx */
1151 h = &ring->tx_header[q][ring->c_tx[q]];
1152 h->size = len;
1153 h->len = len;
1154 // On RTL8380 SoCs, the length field of small packets needs to be adjusted
1155 if (priv->family_id == RTL8380_FAMILY_ID) {
1156 if (len < ETH_ZLEN - 4)
1157 h->len -= 4;
1158 }
1159
1160 priv->r->create_tx_header(h, dest_port, skb->priority >> 1);
1161
1162 /* Copy packet data to tx buffer */
1163 memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
1164 /* Make sure packet data is visible to ASIC */
1165 wmb();
1166
1167 /* Hand over to switch */
1168 ring->tx_r[q][ring->c_tx[q]] |= 1;
1169
1170 // Before starting TX, work around a Lextra bus bug on RTL8380 SoCs
1171 if (priv->family_id == RTL8380_FAMILY_ID) {
1172 for (i = 0; i < 10; i++) {
1173 val = sw_r32(priv->r->dma_if_ctrl);
1174 if ((val & 0xc) == 0xc)
1175 break;
1176 }
1177 }
1178
1179 /* Tell switch to send data */
1180 if (priv->family_id == RTL9310_FAMILY_ID
1181 || priv->family_id == RTL9300_FAMILY_ID) {
1182 // Ring q == 0: low priority queue, q == 1: high priority queue
1183 if (!q)
1184 sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
1185 else
1186 sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
1187 } else {
1188 sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
1189 }
1190
1191 dev->stats.tx_packets++;
1192 dev->stats.tx_bytes += len;
1193 dev_kfree_skb(skb);
1194 ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
1195 ret = NETDEV_TX_OK;
1196 } else {
1197 dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
1198 ret = NETDEV_TX_BUSY;
1199 }
1200 txdone:
1201 spin_unlock_irqrestore(&priv->lock, flags);
1202 return ret;
1203 }
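
/*
 * Illustrative sketch only: the 4-byte trailer the TX path above strips and
 * the RX path re-creates for the DSA tag driver. Byte 0 is the 0x80 marker,
 * byte 1 the switch port (the RX path additionally ORs in 0x40 when the
 * packet was l2_offloaded), bytes 2 and 3 are fixed at 0x10 and 0x00.
 * The helper name is hypothetical.
 */
static inline void rtl83xx_write_trailer_tag(u8 *trailer, u8 port)
{
	trailer[0] = 0x80;
	trailer[1] = port;
	trailer[2] = 0x10;
	trailer[3] = 0x00;
}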
1204
1205 /*
1206 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1207 * so we do round-robin
1208 */
1209 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1210 struct net_device *sb_dev)
1211 {
1212 static u8 last = 0;
1213
1214 last++;
1215 return last % TXRINGS;
1216 }
1217
1218 /*
1219 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1220 */
1221 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1222 struct net_device *sb_dev)
1223 {
1224 if (skb->priority >= TC_PRIO_CONTROL)
1225 return 1;
1226 return 0;
1227 }
1228
1229 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1230 {
1231 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1232 struct ring_b *ring = priv->membase;
1233 struct sk_buff *skb;
1234 unsigned long flags;
1235 int i, len, work_done = 0;
1236 u8 *data, *skb_data;
1237 unsigned int val;
1238 u32 *last;
1239 struct p_hdr *h;
1240 bool dsa = netdev_uses_dsa(dev);
1241 struct dsa_tag tag;
1242
1243 spin_lock_irqsave(&priv->lock, flags);
1244 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1245 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1246
1247 do {
1248 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1249 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1250 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1251 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1252 }
1253 break;
1254 }
1255
1256 h = &ring->rx_header[r][ring->c_rx[r]];
1257 data = (u8 *)KSEG1ADDR(h->buf);
1258 len = h->len;
1259 if (!len)
1260 break;
1261 work_done++;
1262
1263 len -= 4; /* strip the CRC */
1264 /* Add 4 bytes for cpu_tag */
1265 if (dsa)
1266 len += 4;
1267
1268 /* NAPI/softirq context under priv->lock, so the allocation must not sleep */
1269 skb = alloc_skb(len + 4, GFP_ATOMIC);
1270
1271 if (likely(skb)) {
1272 skb_reserve(skb, NET_IP_ALIGN);
1273 if (priv->family_id == RTL8380_FAMILY_ID) { /* work around an RX issue on RTL838x SoCs */
1274 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1275 for (i = 0; i < priv->rxrings; i++) {
1276 /* Update each ring cnt */
1277 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1278 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1279 }
1280 }
1281
1282 skb_data = skb_put(skb, len);
1283 /* Make sure data is visible */
1284 mb();
1285 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1286 /* Overwrite CRC with cpu_tag */
1287 if (dsa) {
1288 priv->r->decode_tag(h, &tag);
1289 skb->data[len-4] = 0x80;
1290 skb->data[len-3] = tag.port;
1291 skb->data[len-2] = 0x10;
1292 skb->data[len-1] = 0x00;
1293 if (tag.l2_offloaded)
1294 skb->data[len-3] |= 0x40;
1295 }
1296
1297 if (tag.queue >= 0)
1298 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1299 tag.queue, len, tag.reason, tag.port);
1300
1301 skb->protocol = eth_type_trans(skb, dev);
1302 if (dev->features & NETIF_F_RXCSUM) {
1303 if (tag.crc_error)
1304 skb_checksum_none_assert(skb);
1305 else
1306 skb->ip_summed = CHECKSUM_UNNECESSARY;
1307 }
1308 dev->stats.rx_packets++;
1309 dev->stats.rx_bytes += len;
1310
1311 netif_receive_skb(skb);
1312 } else {
1313 if (net_ratelimit())
1314 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1315 dev->stats.rx_dropped++;
1316 }
1317
1318 /* Reset header structure */
1319 memset(h, 0, sizeof(struct p_hdr));
1320 h->buf = data;
1321 h->size = RING_BUFFER;
1322
1323 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1324 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1325 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1326 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1327 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1328
1329 // Update counters
1330 priv->r->update_cntr(r, 0);
1331
1332 spin_unlock_irqrestore(&priv->lock, flags);
1333 return work_done;
1334 }
1335
1336 static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
1337 {
1338 struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
1339 struct rtl838x_eth_priv *priv = rx_q->priv;
1340 int work_done = 0;
1341 int r = rx_q->id;
1342 int work;
1343
1344 while (work_done < budget) {
1345 work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
1346 if (!work)
1347 break;
1348 work_done += work;
1349 }
1350
1351 if (work_done < budget) {
1352 napi_complete_done(napi, work_done);
1353
1354 /* Enable RX interrupt */
1355 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
1356 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
1357 else
1358 sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
1359 }
1360 return work_done;
1361 }
1362
1363
1364 static void rtl838x_validate(struct phylink_config *config,
1365 unsigned long *supported,
1366 struct phylink_link_state *state)
1367 {
1368 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1369
1370 pr_debug("In %s\n", __func__);
1371
1372 if (!phy_interface_mode_is_rgmii(state->interface) &&
1373 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1374 state->interface != PHY_INTERFACE_MODE_MII &&
1375 state->interface != PHY_INTERFACE_MODE_REVMII &&
1376 state->interface != PHY_INTERFACE_MODE_GMII &&
1377 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1378 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1379 state->interface != PHY_INTERFACE_MODE_SGMII) {
1380 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1381 pr_err("Unsupported interface: %d\n", state->interface);
1382 return;
1383 }
1384
1385 /* Allow all the expected bits */
1386 phylink_set(mask, Autoneg);
1387 phylink_set_port_modes(mask);
1388 phylink_set(mask, Pause);
1389 phylink_set(mask, Asym_Pause);
1390
1391 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1392 * including Half duplex
1393 */
1394 if (state->interface != PHY_INTERFACE_MODE_MII &&
1395 state->interface != PHY_INTERFACE_MODE_REVMII) {
1396 phylink_set(mask, 1000baseT_Full);
1397 phylink_set(mask, 1000baseT_Half);
1398 }
1399
1400 phylink_set(mask, 10baseT_Half);
1401 phylink_set(mask, 10baseT_Full);
1402 phylink_set(mask, 100baseT_Half);
1403 phylink_set(mask, 100baseT_Full);
1404
1405 bitmap_and(supported, supported, mask,
1406 __ETHTOOL_LINK_MODE_MASK_NBITS);
1407 bitmap_and(state->advertising, state->advertising, mask,
1408 __ETHTOOL_LINK_MODE_MASK_NBITS);
1409 }
1410
1411
1412 static void rtl838x_mac_config(struct phylink_config *config,
1413 unsigned int mode,
1414 const struct phylink_link_state *state)
1415 {
1416 /* This is only being called for the master device,
1417 * i.e. the CPU-Port. We don't need to do anything.
1418 */
1419
1420 pr_info("In %s, mode %x\n", __func__, mode);
1421 }
1422
1423 static void rtl838x_mac_an_restart(struct phylink_config *config)
1424 {
1425 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1426 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1427
1428 /* This works only on RTL838x chips */
1429 if (priv->family_id != RTL8380_FAMILY_ID)
1430 return;
1431
1432 pr_debug("In %s\n", __func__);
1433 /* Restart by disabling and re-enabling link */
1434 sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
1435 mdelay(20);
1436 sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
1437 }
1438
1439 static int rtl838x_mac_pcs_get_state(struct phylink_config *config,
1440 struct phylink_link_state *state)
1441 {
1442 u32 speed;
1443 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1444 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1445 int port = priv->cpu_port;
1446
1447 pr_debug("In %s\n", __func__);
1448
1449 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1450 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1451
1452 speed = priv->r->get_mac_link_spd_sts(port);
1453 switch (speed) {
1454 case 0:
1455 state->speed = SPEED_10;
1456 break;
1457 case 1:
1458 state->speed = SPEED_100;
1459 break;
1460 case 2:
1461 state->speed = SPEED_1000;
1462 break;
1463 default:
1464 state->speed = SPEED_UNKNOWN;
1465 break;
1466 }
1467
1468 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1469 if (priv->r->get_mac_rx_pause_sts(port))
1470 state->pause |= MLO_PAUSE_RX;
1471 if (priv->r->get_mac_tx_pause_sts(port))
1472 state->pause |= MLO_PAUSE_TX;
1473
1474 return 1;
1475 }
1476
1477 static void rtl838x_mac_link_down(struct phylink_config *config,
1478 unsigned int mode,
1479 phy_interface_t interface)
1480 {
1481 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1482 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1483
1484 pr_debug("In %s\n", __func__);
1485 /* Stop TX/RX to port */
1486 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1487 }
1488
1489 static void rtl838x_mac_link_up(struct phylink_config *config, unsigned int mode,
1490 phy_interface_t interface,
1491 struct phy_device *phy)
1492 {
1493 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1494 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1495
1496 pr_debug("In %s\n", __func__);
1497 /* Restart TX/RX to port */
1498 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1499 }
1500
1501 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1502 {
1503 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1504 unsigned long flags;
1505
1506 spin_lock_irqsave(&priv->lock, flags);
1507 pr_debug("In %s\n", __func__);
1508 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1509 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1510
1511 if (priv->family_id == RTL8380_FAMILY_ID) {
1512 /* 2 more registers, ALE/MAC block */
1513 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1514 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1515 (RTL838X_MAC_ALE + 4));
1516
1517 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1518 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1519 RTL838X_MAC2 + 4);
1520 }
1521 spin_unlock_irqrestore(&priv->lock, flags);
1522 }
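
/*
 * Worked example, for illustration only: rtl838x_set_mac_hw() above splits
 * the MAC address 00:11:22:33:44:55 into two register writes:
 *   priv->r->mac     = (0x00 << 8) | 0x11 = 0x00000011
 *   priv->r->mac + 4 = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55
 *                    = 0x22334455
 * i.e. the first word carries the two high bytes, the second the low four.
 */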
1523
1524 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1525 {
1526 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1527 const struct sockaddr *addr = p;
1528 u8 *mac = (u8 *) (addr->sa_data);
1529
1530 if (!is_valid_ether_addr(addr->sa_data))
1531 return -EADDRNOTAVAIL;
1532
1533 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1534 rtl838x_set_mac_hw(dev, mac);
1535
1536 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1537 return 0;
1538 }
1539
1540 static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
1541 {
1542 // TODO: set up EEE and the egress rate limitation here
1543 return 0;
1544 }
1545
1546 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1547 {
1548 int i;
1549
1550 if (priv->family_id == 0x8390)
1551 return rtl8390_init_mac(priv);
1552
1553 pr_info("%s\n", __func__);
1554 /* fix timer for EEE */
1555 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1556 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1557
1558 /* Init VLAN */
1559 if (priv->id == 0x8382) {
1560 for (i = 0; i <= 28; i++)
1561 sw_w32(0, 0xd57c + i * 0x80);
1562 }
1563 if (priv->id == 0x8380) {
1564 for (i = 8; i <= 28; i++)
1565 sw_w32(0, 0xd57c + i * 0x80);
1566 }
1567 return 0;
1568 }
1569
1570 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1571 struct ethtool_link_ksettings *cmd)
1572 {
1573 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1574
1575 pr_debug("%s called\n", __func__);
1576 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1577 }
1578
1579 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1580 const struct ethtool_link_ksettings *cmd)
1581 {
1582 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1583
1584 pr_debug("%s called\n", __func__);
1585 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1586 }
1587
1588 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1589 {
1590 u32 val;
1591 int err;
1592 struct rtl838x_eth_priv *priv = bus->priv;
1593
1594 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1595 return rtl838x_read_sds_phy(mii_id, regnum);
1596 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1597 if (err)
1598 return err;
1599 return val;
1600 }
1601
1602 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1603 {
1604 u32 val;
1605 int err;
1606 struct rtl838x_eth_priv *priv = bus->priv;
1607
1608 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1609 return rtl839x_read_sds_phy(mii_id, regnum);
1610
1611 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1612 if (err)
1613 return err;
1614 return val;
1615 }
1616
1617 static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1618 {
1619 u32 val;
1620 int err;
1621
1622 // TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
1623 if (mii_id >= 26 && mii_id <= 27)
1624 return rtl930x_read_sds_phy(mii_id - 18, 0, regnum);
1625
1626 if (regnum & MII_ADDR_C45) {
1627 regnum &= ~MII_ADDR_C45;
1628 err = rtl930x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
1629 } else {
1630 err = rtl930x_read_phy(mii_id, 0, regnum, &val);
1631 }
1632 if (err)
1633 return err;
1634 return val;
1635 }
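
/*
 * Note on the Clause-45 branch above (hypothetical example values): with
 * MII_ADDR_C45 set, the kernel packs the MMD device number into bits 20:16
 * of regnum and the register address into bits 15:0, which is why the code
 * splits it as "regnum >> 16" and "regnum & 0xffff" after clearing
 * MII_ADDR_C45. For instance, devad 1 (PMA/PMD), register 0 arrives as
 * regnum = MII_ADDR_C45 | (1 << 16) | 0.
 */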
1636
1637 static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1638 {
1639 u32 val;
1640 int err;
1641 // struct rtl838x_eth_priv *priv = bus->priv;
1642
1643 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1644 // return rtl839x_read_sds_phy(mii_id, regnum);
1645
1646 err = rtl931x_read_phy(mii_id, 0, regnum, &val);
1647 if (err)
1648 return err;
1649 return val;
1650 }
1651
1652 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1653 int regnum, u16 value)
1654 {
1655 u32 offset = 0;
1656 struct rtl838x_eth_priv *priv = bus->priv;
1657
1658 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1659 if (mii_id == 26)
1660 offset = 0x100;
1661 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1662 return 0;
1663 }
1664 return rtl838x_write_phy(mii_id, 0, regnum, value);
1665 }
1666
1667 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1668 int regnum, u16 value)
1669 {
1670 struct rtl838x_eth_priv *priv = bus->priv;
1671
1672 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1673 return rtl839x_write_sds_phy(mii_id, regnum, value);
1674
1675 return rtl839x_write_phy(mii_id, 0, regnum, value);
1676 }
1677
1678 static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
1679 int regnum, u16 value)
1680 {
1681 // struct rtl838x_eth_priv *priv = bus->priv;
1682
1683 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1684 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1685 if (regnum & MII_ADDR_C45) {
1686 regnum &= ~MII_ADDR_C45;
1687 return rtl930x_write_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, value);
1688 }
1689
1690 return rtl930x_write_phy(mii_id, 0, regnum, value);
1691 }
1692
1693 static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
1694 int regnum, u16 value)
1695 {
1696 // struct rtl838x_eth_priv *priv = bus->priv;
1697
1698 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1699 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1700
1701 return rtl931x_write_phy(mii_id, 0, regnum, value);
1702 }
1703
1704 static int rtl838x_mdio_reset(struct mii_bus *bus)
1705 {
1706 pr_debug("%s called\n", __func__);
1707 /* Disable MAC polling the PHY so that we can start configuration */
1708 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
1709
1710 /* Enable PHY control via SoC */
1711 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
1712
1713 // Probably should reset all PHYs here...
1714 return 0;
1715 }
1716
1717 static int rtl839x_mdio_reset(struct mii_bus *bus)
1718 {
1719 return 0;
1720
1721 pr_debug("%s called\n", __func__);
1722 /* BUG: The following does not work, but should! */
1723 /* Disable MAC polling the PHY so that we can start configuration */
1724 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
1725 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
1726 /* Disable PHY polling via SoC */
1727 sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);
1728
1729 // Probably should reset all PHYs here...
1730 return 0;
1731 }
1732
1733 static int rtl931x_mdio_reset(struct mii_bus *bus)
1734 {
1735 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL);
1736 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL + 4);
1737
1738 pr_debug("%s called\n", __func__);
1739
1740 return 0;
1741 }
1742
1743 static int rtl930x_mdio_reset(struct mii_bus *bus)
1744 {
1745 int i;
1746 int pos;
1747
1748 pr_info("RTL930X_SMI_PORT0_15_POLLING_SEL %08x 16-27: %08x\n",
1749 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL),
1750 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
1751
1752 pr_info("%s: Enable SMI polling on SMI bus 0, SMI1, SMI2, disable on SMI3\n", __func__);
1753 sw_w32_mask(BIT(20) | BIT(21) | BIT(22), BIT(23), RTL930X_SMI_GLB_CTRL);
1754
1755 pr_info("RTL9300 Powering on SerDes ports\n");
1756 rtl9300_sds_power(24, 1);
1757 rtl9300_sds_power(25, 1);
1758 rtl9300_sds_power(26, 1);
1759 rtl9300_sds_power(27, 1);
1760 mdelay(200);
1761
1762 // RTL930X_SMI_PORT0_15_POLLING_SEL 55550000 16-27: 00f9aaaa
1763 // i.e. SMI=0 for all ports
1764 for (i = 0; i < 5; i++)
1765 pr_info("port phy: %08x\n", sw_r32(RTL930X_SMI_PORT0_5_ADDR + i *4));
1766
1767 // 1-to-1 mapping of port to phy-address
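// Each RTL930X_SMI_PORT0_5_ADDR register packs six 5-bit PHY addresses,
// so port i goes into bits (i % 6) * 5 of register word i / 6.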
1768 for (i = 0; i < 24; i++) {
1769 pos = (i % 6) * 5;
1770 sw_w32_mask(0x1f << pos, i << pos, RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
1771 }
1772
1773 // ports 24 and 25 have PHY addresses 8 and 9, ports 26/27 PHY 26/27
1774 sw_w32(8 | 9 << 5 | 26 << 10 | 27 << 15, RTL930X_SMI_PORT0_5_ADDR + 4 * 4);
1775
1776 // Ports 24 and 25 live on SMI bus 1 and 2
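// The polling-select register apparently holds a 2-bit SMI bus number per
// port, so ports 24 and 25 sit at bits 16 and 18 of the 16-27 register.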
1777 sw_w32_mask(0x3 << 16, 0x1 << 16, RTL930X_SMI_PORT16_27_POLLING_SEL);
1778 sw_w32_mask(0x3 << 18, 0x2 << 18, RTL930X_SMI_PORT16_27_POLLING_SEL);
1779
1780 // SMI buses 1 and 2 speak Clause 45. TODO: configure this from the .dts
1781 sw_w32_mask(0, BIT(17) | BIT(18), RTL930X_SMI_GLB_CTRL);
1782
1783 // Ports 24 and 25 are 2.5 Gig, set this type (1)
1784 sw_w32_mask(0x7 << 12, 1 << 12, RTL930X_SMI_MAC_TYPE_CTRL);
1785 sw_w32_mask(0x7 << 15, 1 << 15, RTL930X_SMI_MAC_TYPE_CTRL);
1786
1787 return 0;
1788 }
1789
1790 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
1791 {
1792 struct device_node *mii_np;
1793 int ret;
1794
1795 pr_debug("%s called\n", __func__);
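/* The MDIO bus is expected to be described by an "mdio-bus" child node of
 * the Ethernet node in the device tree; without it the probe fails below.
 */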
1796 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
1797
1798 if (!mii_np) {
1799 dev_err(&priv->pdev->dev, "no %s child node found\n", "mdio-bus");
1800 return -ENODEV;
1801 }
1802
1803 if (!of_device_is_available(mii_np)) {
1804 ret = -ENODEV;
1805 goto err_put_node;
1806 }
1807
1808 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
1809 if (!priv->mii_bus) {
1810 ret = -ENOMEM;
1811 goto err_put_node;
1812 }
1813
1814 switch (priv->family_id) {
1815 case RTL8380_FAMILY_ID:
1816 priv->mii_bus->name = "rtl838x-eth-mdio";
1817 priv->mii_bus->read = rtl838x_mdio_read;
1818 priv->mii_bus->write = rtl838x_mdio_write;
1819 priv->mii_bus->reset = rtl838x_mdio_reset;
1820 break;
1821 case RTL8390_FAMILY_ID:
1822 priv->mii_bus->name = "rtl839x-eth-mdio";
1823 priv->mii_bus->read = rtl839x_mdio_read;
1824 priv->mii_bus->write = rtl839x_mdio_write;
1825 priv->mii_bus->reset = rtl839x_mdio_reset;
1826 break;
1827 case RTL9300_FAMILY_ID:
1828 priv->mii_bus->name = "rtl930x-eth-mdio";
1829 priv->mii_bus->read = rtl930x_mdio_read;
1830 priv->mii_bus->write = rtl930x_mdio_write;
1831 priv->mii_bus->reset = rtl930x_mdio_reset;
1832 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1833 break;
1834 case RTL9310_FAMILY_ID:
1835 priv->mii_bus->name = "rtl931x-eth-mdio";
1836 priv->mii_bus->read = rtl931x_mdio_read;
1837 priv->mii_bus->write = rtl931x_mdio_write;
1838 priv->mii_bus->reset = rtl931x_mdio_reset;
1839 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1840 break;
1841 }
1842 priv->mii_bus->priv = priv;
1843 priv->mii_bus->parent = &priv->pdev->dev;
1844
1845 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1846 ret = of_mdiobus_register(priv->mii_bus, mii_np);
1847
1848 err_put_node:
1849 of_node_put(mii_np);
1850 return ret;
1851 }
1852
1853 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
1854 {
1855 pr_debug("%s called\n", __func__);
1856 if (!priv->mii_bus)
1857 return 0;
1858
1859 mdiobus_unregister(priv->mii_bus);
1860 mdiobus_free(priv->mii_bus);
1861
1862 return 0;
1863 }
1864
1865 static netdev_features_t rtl838x_fix_features(struct net_device *dev,
1866 netdev_features_t features)
1867 {
1868 return features;
1869 }
1870
1871 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
1872 {
1873 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1874
1875 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1876 if (!(features & NETIF_F_RXCSUM))
1877 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1878 else
1879 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
1880 }
1881
1882 return 0;
1883 }
1884
1885 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
1886 {
1887 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1888
1889 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1890 if (!(features & NETIF_F_RXCSUM))
1891 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1892 else
1893 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
1894 }
1895
1896 return 0;
1897 }
1898
1899 static const struct net_device_ops rtl838x_eth_netdev_ops = {
1900 .ndo_open = rtl838x_eth_open,
1901 .ndo_stop = rtl838x_eth_stop,
1902 .ndo_start_xmit = rtl838x_eth_tx,
1903 .ndo_select_queue = rtl83xx_pick_tx_queue,
1904 .ndo_set_mac_address = rtl838x_set_mac_address,
1905 .ndo_validate_addr = eth_validate_addr,
1906 .ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
1907 .ndo_tx_timeout = rtl838x_eth_tx_timeout,
1908 .ndo_set_features = rtl83xx_set_features,
1909 .ndo_fix_features = rtl838x_fix_features,
1910 };
1911
1912 static const struct net_device_ops rtl839x_eth_netdev_ops = {
1913 .ndo_open = rtl838x_eth_open,
1914 .ndo_stop = rtl838x_eth_stop,
1915 .ndo_start_xmit = rtl838x_eth_tx,
1916 .ndo_select_queue = rtl83xx_pick_tx_queue,
1917 .ndo_set_mac_address = rtl838x_set_mac_address,
1918 .ndo_validate_addr = eth_validate_addr,
1919 .ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
1920 .ndo_tx_timeout = rtl838x_eth_tx_timeout,
1921 .ndo_set_features = rtl83xx_set_features,
1922 .ndo_fix_features = rtl838x_fix_features,
1923 };
1924
1925 static const struct net_device_ops rtl930x_eth_netdev_ops = {
1926 .ndo_open = rtl838x_eth_open,
1927 .ndo_stop = rtl838x_eth_stop,
1928 .ndo_start_xmit = rtl838x_eth_tx,
1929 .ndo_select_queue = rtl93xx_pick_tx_queue,
1930 .ndo_set_mac_address = rtl838x_set_mac_address,
1931 .ndo_validate_addr = eth_validate_addr,
1932 .ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
1933 .ndo_tx_timeout = rtl838x_eth_tx_timeout,
1934 .ndo_set_features = rtl93xx_set_features,
1935 .ndo_fix_features = rtl838x_fix_features,
1936 };
1937
1938 static const struct net_device_ops rtl931x_eth_netdev_ops = {
1939 .ndo_open = rtl838x_eth_open,
1940 .ndo_stop = rtl838x_eth_stop,
1941 .ndo_start_xmit = rtl838x_eth_tx,
1942 .ndo_select_queue = rtl93xx_pick_tx_queue,
1943 .ndo_set_mac_address = rtl838x_set_mac_address,
1944 .ndo_validate_addr = eth_validate_addr,
1945 .ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
1946 .ndo_tx_timeout = rtl838x_eth_tx_timeout,
1947 .ndo_set_features = rtl93xx_set_features,
1948 .ndo_fix_features = rtl838x_fix_features,
1949 };
1950
1951 static const struct phylink_mac_ops rtl838x_phylink_ops = {
1952 .validate = rtl838x_validate,
1953 .mac_link_state = rtl838x_mac_pcs_get_state,
1954 .mac_an_restart = rtl838x_mac_an_restart,
1955 .mac_config = rtl838x_mac_config,
1956 .mac_link_down = rtl838x_mac_link_down,
1957 .mac_link_up = rtl838x_mac_link_up,
1958 };
1959
1960 static const struct ethtool_ops rtl838x_ethtool_ops = {
1961 .get_link_ksettings = rtl838x_get_link_ksettings,
1962 .set_link_ksettings = rtl838x_set_link_ksettings,
1963 };
1964
1965 static int __init rtl838x_eth_probe(struct platform_device *pdev)
1966 {
1967 struct net_device *dev;
1968 struct device_node *dn = pdev->dev.of_node;
1969 struct rtl838x_eth_priv *priv;
1970 struct resource *res, *mem;
1971 phy_interface_t phy_mode;
1972 struct phylink *phylink;
1973 int err = 0, i, rxrings, rxringlen;
1974 struct ring_b *ring;
1975
1976 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
1977 (u32)pdev, (u32)(&(pdev->dev)));
1978
1979 if (!dn) {
1980 dev_err(&pdev->dev, "No DT found\n");
1981 return -EINVAL;
1982 }
1983
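/* Use 8 RX rings on the RTL838x and RTL839x families, 32 otherwise, and
 * spread the MAX_ENTRIES packet headers evenly across them, capping each
 * ring at MAX_RXLEN descriptors.
 */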
1984 rxrings = (soc_info.family == RTL8380_FAMILY_ID
1985 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
1986 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
1987 rxringlen = MAX_ENTRIES / rxrings;
1988 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
1989
1990 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
1991 if (!dev) {
1992 err = -ENOMEM;
1993 goto err_free;
1994 }
1995 SET_NETDEV_DEV(dev, &pdev->dev);
1996 priv = netdev_priv(dev);
1997
1998 /* obtain buffer memory space */
1999 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2000 if (res) {
2001 mem = devm_request_mem_region(&pdev->dev, res->start,
2002 resource_size(res), res->name);
2003 if (!mem) {
2004 dev_err(&pdev->dev, "cannot request memory space\n");
2005 err = -ENXIO;
2006 goto err_free;
2007 }
2008
2009 dev->mem_start = mem->start;
2010 dev->mem_end = mem->end;
2011 } else {
2012 dev_err(&pdev->dev, "cannot request IO resource\n");
2013 err = -ENXIO;
2014 goto err_free;
2015 }
2016
2017 /* Allocate buffer memory */
2018 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
2019 + sizeof(struct ring_b) + sizeof(struct notify_b),
2020 (void *)&dev->mem_start, GFP_KERNEL);
2021 if (!priv->membase) {
2022 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2023 err = -ENOMEM;
2024 goto err_free;
2025 }
2026
2027 // Allocate ring-buffer space at the end of the allocated memory
2028 ring = priv->membase;
2029 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
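// The coherent DMA area thus holds struct ring_b, then (apparently) the
// struct notify_b, followed by the RX packet buffers in ring->rx_space.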
2030
2031 spin_lock_init(&priv->lock);
2032
2033 /* obtain device IRQ number */
2034 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2035 if (!res) {
2036 dev_err(&pdev->dev, "cannot obtain IRQ, using default 24\n");
2037 dev->irq = 24;
2038 } else {
2039 dev->irq = res->start;
2040 }
2041 dev->ethtool_ops = &rtl838x_ethtool_ops;
2042 dev->min_mtu = ETH_ZLEN;
2043 dev->max_mtu = 1536;
2044 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2045 dev->hw_features = NETIF_F_RXCSUM;
2046
2047 priv->id = soc_info.id;
2048 priv->family_id = soc_info.family;
2049 if (priv->id) {
2050 pr_info("Found SoC ID: %4x: %s, family %x\n",
2051 priv->id, soc_info.name, priv->family_id);
2052 } else {
2053 pr_err("Unknown chip id (%04x)\n", priv->id);
2054 err = -ENODEV;
goto err_free;
2055 }
2056
2057 switch (priv->family_id) {
2058 case RTL8380_FAMILY_ID:
2059 priv->cpu_port = RTL838X_CPU_PORT;
2060 priv->r = &rtl838x_reg;
2061 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2062 break;
2063 case RTL8390_FAMILY_ID:
2064 priv->cpu_port = RTL839X_CPU_PORT;
2065 priv->r = &rtl839x_reg;
2066 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2067 break;
2068 case RTL9300_FAMILY_ID:
2069 priv->cpu_port = RTL930X_CPU_PORT;
2070 priv->r = &rtl930x_reg;
2071 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2072 break;
2073 case RTL9310_FAMILY_ID:
2074 priv->cpu_port = RTL931X_CPU_PORT;
2075 priv->r = &rtl931x_reg;
2076 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2077 break;
2078 default:
2079 pr_err("Unknown SoC family\n");
2080 err = -ENODEV;
goto err_free;
2081 }
2082 priv->rxringlen = rxringlen;
2083 priv->rxrings = rxrings;
2084
2085 rtl8380_init_mac(priv);
2086
2087 /* try to get mac address in the following order:
2088 * 1) from device tree data
2089 * 2) from internal registers set by bootloader
2090 */
2091 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2092 if (is_valid_ether_addr(dev->dev_addr)) {
2093 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2094 } else {
2095 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2096 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2097 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2098 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2099 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2100 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2101 }
2102 /* if the address is invalid, use a random value */
2103 if (!is_valid_ether_addr(dev->dev_addr)) {
2104 struct sockaddr sa = { AF_UNSPEC };
2105
2106 netdev_warn(dev, "Invalid MAC address, using random\n");
2107 eth_hw_addr_random(dev);
2108 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2109 if (rtl838x_set_mac_address(dev, &sa))
2110 netdev_warn(dev, "Failed to set MAC address.\n");
2111 }
2112 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2113 sw_r32(priv->r->mac + 4));
2114 strcpy(dev->name, "eth%d");
2115 priv->pdev = pdev;
2116 priv->netdev = dev;
2117
2118 err = rtl838x_mdio_init(priv);
2119 if (err)
2120 goto err_free;
2121
2122 err = register_netdev(dev);
2123 if (err)
2124 goto err_free;
2125
2126 for (i = 0; i < priv->rxrings; i++) {
2127 priv->rx_qs[i].id = i;
2128 priv->rx_qs[i].priv = priv;
2129 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2130 }
2131
2132 platform_set_drvdata(pdev, dev);
2133
2134 phy_mode = of_get_phy_mode(dn);
2135 if (phy_mode < 0) {
2136 dev_err(&pdev->dev, "incorrect phy-mode\n");
2137 err = -EINVAL;
2138 goto err_free;
2139 }
2140 priv->phylink_config.dev = &dev->dev;
2141 priv->phylink_config.type = PHYLINK_NETDEV;
2142
2143 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2144 phy_mode, &rtl838x_phylink_ops);
2145 if (IS_ERR(phylink)) {
2146 err = PTR_ERR(phylink);
2147 goto err_free;
2148 }
2149 priv->phylink = phylink;
2150
2151 return 0;
2152
2153 err_free:
2154 pr_err("Error setting up netdev, freeing it again.\n");
2155 free_netdev(dev);
2156 return err;
2157 }
2158
2159 static int rtl838x_eth_remove(struct platform_device *pdev)
2160 {
2161 struct net_device *dev = platform_get_drvdata(pdev);
2162 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2163 int i;
2164
2165 if (dev) {
2166 pr_info("Removing platform driver for rtl838x-eth\n");
2167 rtl838x_mdio_remove(priv);
2168 rtl838x_hw_stop(priv);
2169
2170 netif_tx_stop_all_queues(dev);
2171
2172 for (i = 0; i < priv->rxrings; i++)
2173 netif_napi_del(&priv->rx_qs[i].napi);
2174
2175 unregister_netdev(dev);
2176 free_netdev(dev);
2177 }
2178 return 0;
2179 }
2180
2181 static const struct of_device_id rtl838x_eth_of_ids[] = {
2182 { .compatible = "realtek,rtl838x-eth"},
2183 { /* sentinel */ }
2184 };
2185 MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
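/*
 * Hypothetical device tree snippet for illustration only; the node name,
 * addresses, IRQ number and PHY entry are made up, only the compatible
 * string and the "mdio-bus" child expected by rtl838x_mdio_init() come
 * from this file:
 *
 *	ethernet@0 {
 *		compatible = "realtek,rtl838x-eth";
 *		reg = <0x0 0x100>;
 *		interrupts = <24>;
 *
 *		mdio-bus {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			phy8: ethernet-phy@8 {
 *				reg = <8>;
 *			};
 *		};
 *	};
 */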
2186
2187 static struct platform_driver rtl838x_eth_driver = {
2188 .probe = rtl838x_eth_probe,
2189 .remove = rtl838x_eth_remove,
2190 .driver = {
2191 .name = "rtl838x-eth",
2192 .pm = NULL,
2193 .of_match_table = rtl838x_eth_of_ids,
2194 },
2195 };
2196
2197 module_platform_driver(rtl838x_eth_driver);
2198
2199 MODULE_AUTHOR("B. Koblitz");
2200 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
2201 MODULE_LICENSE("GPL");