realtek: Improve TX CPU-Tag usage
[openwrt/staging/dedeckeh.git] / target / linux / realtek / files-5.10 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
29 /*
30 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
31 * The ring is assigned by switch based on packet/port priortity
32 * Maximum number of TX rings is 2, Ring 2 being the high priority
33 * ring on the RTL93xx SoCs. MAX_RING_SIZE * RING_BUFFER gives
34 * the memory used for the ring buffer.
35 */
#define MAX_RXRINGS 32		/* Upper bound on RX rings (RTL93xx); RTL83xx use 8 */
#define MAX_RXLEN 100		/* Maximum descriptors per RX ring */
#define MAX_ENTRIES (200 * 8)
#define TXRINGS 2		/* TX rings; see comment above for priority use */
#define TXRINGLEN 160		/* Descriptors per TX ring */
#define NOTIFY_EVENTS 10	/* L2 FDB events per notification block (RTL839x) */
#define NOTIFY_BLOCKS 10	/* Blocks in the L2 notification ring (RTL839x) */
#define TX_EN 0x8		/* DMA_IF_CTRL: TX DMA enable (RTL83xx) */
#define RX_EN 0x4		/* DMA_IF_CTRL: RX DMA enable (RTL83xx) */
#define TX_EN_93XX 0x20		/* DMA_IF_CTRL: TX DMA enable (RTL93xx) */
#define RX_EN_93XX 0x10		/* DMA_IF_CTRL: RX DMA enable (RTL93xx) */
#define TX_DO 0x2
#define WRAP 0x2		/* Descriptor bit 1: last ring entry, wrap around */

#define RING_BUFFER 1600	/* Bytes reserved per packet buffer */
51
/*
 * Packet header / DMA descriptor payload exchanged with the NIC engine.
 * The descriptor ring entries point at these headers; the cpu_tag words
 * carry per-packet forwarding information to (TX) and from (RX) the switch.
 */
struct p_hdr {
	uint8_t *buf;		/* Pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	uint16_t cpu_tag[10];	/* CPU tag; layout differs per SoC family */
} __packed __aligned(1);
60
/*
 * One L2 FDB notification event as laid out by the RTL839x hardware.
 * Consumed by rtl839x_l2_notification_handler().
 */
struct n_event {
	uint32_t type:2;	/* Non-zero is treated as "learned/add" by the handler */
	uint32_t fidVid:12;	/* NOTE(review): presumably FID/VID of the entry — confirm */
	uint64_t mac:48;	/* MAC address of the FDB entry */
	uint32_t slp:6;		/* NOTE(review): presumably source logical port — confirm */
	uint32_t valid:1;	/* Set when this event slot carries valid data */
	uint32_t reserved:27;
} __packed __aligned(1);
69
/*
 * Complete DMA ring area shared with the NIC: descriptor rings, packet
 * headers, per-ring cursor positions and the TX packet buffers.
 * Lives at priv->membase.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor rings */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor rings */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];	/* RX packet headers */
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];	/* TX packet headers */
	uint32_t c_rx[MAX_RXRINGS];			/* Current RX index per ring */
	uint32_t c_tx[TXRINGS];				/* Current TX index per ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER]; /* TX packet buffers */
	uint8_t *rx_space;				/* RX packet buffer area */
};
80
/* One block of the RTL839x L2 notification ring (NOTIFY_EVENTS events) */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
84
/*
 * RTL839x L2 notification area, located directly after struct ring_b in
 * priv->membase. ring[] holds the descriptor words handed to the switch
 * (bit 0: owned by switch, WRAP on the last entry).
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];	/* Notification ring descriptors */
	u32 reserved2[8];
};
91
92 static void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
93 {
94 prio &= 0x7;
95
96 if (dest_port > 0) {
97 // cpu_tag[0] is reserved on the RTL83XX SoCs
98 h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on
99 h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
100 h->cpu_tag[3] = 0x0000;
101 h->cpu_tag[4] = BIT(dest_port) >> 16;
102 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
103 // Set internal priority and AS_PRIO
104 if (prio >= 0)
105 h->cpu_tag[2] |= (prio | 0x8) << 12;
106 }
107 }
108
109 static void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
110 {
111 prio &= 0x7;
112
113 if (dest_port > 0) {
114 // cpu_tag[0] is reserved on the RTL83XX SoCs
115 h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
116 h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
117 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
118 if (dest_port >= 32) {
119 dest_port -= 32;
120 h->cpu_tag[2] = BIT(dest_port) >> 16;
121 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
122 } else {
123 h->cpu_tag[4] = BIT(dest_port) >> 16;
124 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
125 }
126 h->cpu_tag[2] |= BIT(20); // Enable destination port mask use
127 h->cpu_tag[2] |= BIT(23); // Enable L2 Learning
128 // Set internal priority and AS_PRIO
129 if (prio >= 0)
130 h->cpu_tag[1] |= prio | BIT(3);
131 }
132 }
133
134 static void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
135 {
136 h->cpu_tag[0] = 0x8000; // CPU tag marker
137 h->cpu_tag[1] = h->cpu_tag[2] = 0;
138 if (prio >= 0)
139 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
140 h->cpu_tag[3] = 0;
141 h->cpu_tag[4] = 0;
142 h->cpu_tag[5] = 0;
143 h->cpu_tag[6] = BIT(dest_port) >> 16;
144 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
145 }
146
147 static void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
148 {
149 h->cpu_tag[0] = 0x8000; // CPU tag marker
150 h->cpu_tag[1] = h->cpu_tag[2] = 0;
151 if (prio >= 0)
152 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
153 h->cpu_tag[3] = 0;
154 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
155 if (dest_port >= 32) {
156 dest_port -= 32;
157 h->cpu_tag[4] = BIT(dest_port) >> 16;
158 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
159 } else {
160 h->cpu_tag[6] = BIT(dest_port) >> 16;
161 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
162 }
163 }
164
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* Ring number */
	struct rtl838x_eth_priv *priv;	/* Back-pointer to the owning device */
	struct napi_struct napi;
};
170
/* Driver-private state of the CPU-port ethernet device */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;		/* DMA area: struct ring_b, then struct notify_b */
	spinlock_t lock;	/* Serializes IRQ handling and HW setup/teardown */
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* Per-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;		/* SoC family (RTL8380/8390/9300/9310) */
	const struct rtl838x_reg *r;	/* Per-family register/accessor table */
	u8 cpu_port;
	u32 lastEvent;		/* Next L2 notification ring entry to inspect */
	u16 rxrings;		/* Number of RX rings in use */
	u16 rxringlen;		/* Descriptors per RX ring */
};
188
189 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
190 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
191 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
192 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
193 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
194 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
195 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
196 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
197
198 /*
199 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
200 * the rings. Writing x into these registers substracts x from its content.
201 * When the content reaches the ring size, the ASIC no longer adds
202 * packets to this receive queue.
203 */
/* No-op stub: the RTL838x has no RX ring fill-level counters */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
208
/* No-op stub: the RTL839x has no RX ring fill-level counters */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
213
/*
 * Return @released packets of RX ring @r to the RTL930x by subtracting
 * them from the ring's 10-bit fill-level counter. Three counters share
 * one 32-bit register (see comment above: writing x subtracts x).
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this additionally writes the old (down-shifted) fill
	 * level back to the register; with subtract-on-write semantics this
	 * looks like it affects the first counter field of the register —
	 * confirm against the RTL930x programming guide.
	 */
	sw_w32(v, reg);
}
225
226 void rtl931x_update_cntr(int r, int released)
227 {
228 int pos = (r % 3) * 10;
229 u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
230
231 sw_w32_mask(0x3ff << pos, released << pos, reg);
232 }
233
/* SoC-independent representation of a received packet's CPU tag */
struct dsa_tag {
	u8 reason;		/* Reason code the switch gave for CPU delivery */
	u8 queue;		/* RX queue/ring the packet arrived on */
	u16 port;		/* Ingress switch port */
	u8 l2_offloaded;	/* Set when HW already L2-forwarded the packet */
	u8 prio;
	bool crc_error;		/* Packet arrived with a CRC error */
};
242
243 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
244 {
245 t->reason = h->cpu_tag[3] & 0xf;
246 t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
247 t->port = h->cpu_tag[1] & 0x1f;
248 t->crc_error = t->reason == 13;
249
250 pr_debug("Reason: %d\n", t->reason);
251 if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
252 t->l2_offloaded = 1;
253 else
254 t->l2_offloaded = 0;
255
256 return t->l2_offloaded;
257 }
258
259 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
260 {
261 t->reason = h->cpu_tag[4] & 0x1f;
262 t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
263 t->port = h->cpu_tag[1] & 0x3f;
264 t->crc_error = h->cpu_tag[3] & BIT(2);
265
266 pr_debug("Reason: %d\n", t->reason);
267 if ((t->reason != 7) && (t->reason != 8)) // NIC_RX_REASON_RMA_USR
268 t->l2_offloaded = 1;
269 else
270 t->l2_offloaded = 0;
271
272 return t->l2_offloaded;
273 }
274
275 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
276 {
277 t->reason = h->cpu_tag[7] & 0x3f;
278 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
279 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
280 t->crc_error = h->cpu_tag[1] & BIT(6);
281
282 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
283 if (t->reason >= 19 && t->reason <= 27)
284 t->l2_offloaded = 0;
285 else
286 t->l2_offloaded = 1;
287
288 return t->l2_offloaded;
289 }
290
291 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
292 {
293 t->reason = h->cpu_tag[7] & 0x3f;
294 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
295 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
296 t->crc_error = h->cpu_tag[1] & BIT(6);
297
298 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
299 if (t->reason >= 19 && t->reason <= 27)
300 t->l2_offloaded = 0;
301 else
302 t->l2_offloaded = 1;
303
304 return t->l2_offloaded;
305 }
306
307 /*
308 * Discard the RX ring-buffers, called as part of the net-ISR
309 * when the buffer runs over
310 * Caller needs to hold priv->lock
311 */
312 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
313 {
314 int r;
315 u32 *last;
316 struct p_hdr *h;
317 struct ring_b *ring = priv->membase;
318
319 for (r = 0; r < priv->rxrings; r++) {
320 pr_debug("In %s working on r: %d\n", __func__, r);
321 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
322 do {
323 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
324 break;
325 pr_debug("Got something: %d\n", ring->c_rx[r]);
326 h = &ring->rx_header[r][ring->c_rx[r]];
327 memset(h, 0, sizeof(struct p_hdr));
328 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
329 + r * priv->rxringlen * RING_BUFFER
330 + ring->c_rx[r] * RING_BUFFER);
331 h->size = RING_BUFFER;
332 /* make sure the header is visible to the ASIC */
333 mb();
334
335 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
336 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
337 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
338 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
339 }
340 }
341
/*
 * Deferred FDB update: a 0-terminated list of MAC entries handed from
 * the L2 notification IRQ path to rtl838x_fdb_sync() via the workqueue.
 * Bit 63 of an entry marks an "add" event, the low 48 bits are the MAC.
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 slot for the 0 terminator */
};
347
348 void rtl838x_fdb_sync(struct work_struct *work)
349 {
350 const struct fdb_update_work *uw =
351 container_of(work, struct fdb_update_work, work);
352 struct switchdev_notifier_fdb_info info;
353 u8 addr[ETH_ALEN];
354 int i = 0;
355 int action;
356
357 while (uw->macs[i]) {
358 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
359 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
360 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
361 info.addr = &addr[0];
362 info.vid = 0;
363 info.offloaded = 1;
364 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
365 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
366 i++;
367 }
368 kfree(work);
369 }
370
/*
 * Drain the RTL839x L2 notification ring: collect the MAC learn/age
 * events of every pending block into a fdb_update_work item and hand it
 * to rtl838x_fdb_sync() via the workqueue.
 * Called from the net ISR with priv->lock held.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* Notification area sits directly behind the DMA rings */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Bit 0 clear: block was filled by the switch and is ours */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			if (!event->valid)
				continue;
			mac = event->mac;
			/* Non-zero type: encode "add" in bit 63 for fdb_sync */
			if (event->type)
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}
		/* NOTE(review): an invalid event leaves a zero gap in macs[],
		 * which makes rtl838x_fdb_sync() stop early — confirm events
		 * are always densely packed.
		 */

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		/* Terminate the list (i == NOTIFY_EVENTS here) */
		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
408
/*
 * Interrupt handler for the RTL838x/RTL839x NIC.
 * DMA_IF_INTR_STS layout as used below: bits 16-19 TX done, bits 8-15
 * RX done (one per ring), bits 0-7 RX ring run-out; on the RTL8390,
 * bits 20-22 are three separate L2 notification interrupt sources.
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	spin_lock(&priv->lock);
	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status & 0x000ff) {
		pr_info("RX buffer overrun: status %x, mask: %x\n",
			status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* L2 notification events (RTL8390 only): each source is ACKed
	 * separately, all are serviced by the same ring handler
	 */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
464
/*
 * Interrupt handler for the RTL930x/RTL931x NIC. Unlike the RTL83xx,
 * these SoCs report TX done, RX done and RX run-out in three separate
 * status registers.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		__func__, status_tx, status_rx, status_rx_r);
	spin_lock(&priv->lock);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
510
/* Register addresses and accessor callbacks for the RTL838x family */
static const struct rtl838x_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
535
/* Register addresses and accessor callbacks for the RTL839x family */
static const struct rtl838x_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
560
/* Register addresses and accessor callbacks for the RTL930x family */
static const struct rtl838x_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
591
/* Register addresses and accessor callbacks for the RTL931x family */
static const struct rtl838x_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
622
/*
 * Reset the NIC portion of the SoC: stop CPU-port traffic, mask and
 * clear all NIC interrupts, trigger the NIC/queue reset and restore
 * the family-specific Head-of-Line and notification settings.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;
	int i, pos;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	/* Stop TX/RX on the CPU port before resetting */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC: the reset request bit differs per family */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32(0x4, priv->r->rst_glb_ctrl);
	else
		sw_w32(0x8, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & 0xc);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE);  // Disabled on RTL8380
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID) {
		/* 10-bit ring size/counter fields, 3 rings per register */
		for (i = 0; i < priv->rxrings; i++) {
			pos = (i % 3) * 10;
			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
				    priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
695
696 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
697 {
698 int i;
699 struct ring_b *ring = priv->membase;
700
701 for (i = 0; i < priv->rxrings; i++)
702 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
703
704 for (i = 0; i < TXRINGS; i++)
705 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
706 }
707
/*
 * Enable TX/RX DMA and CPU-port traffic on the RTL838x after the rings
 * have been programmed (rtl838x_hw_ring_setup).
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
734
/*
 * Enable TX/RX DMA and CPU-port traffic on the RTL839x, including the
 * L2 notification interrupt and lookup-miss flooding to the CPU port.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk);  // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
758
/*
 * Enable TX/RX DMA and CPU-port traffic on the RTL930x/931x: program
 * the per-ring sizes, unmask the NIC interrupts and force the CPU-port
 * MAC configuration.
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (i = 0; i < priv->rxrings; i++) {
		/* 10-bit ring size fields, 3 rings per register */
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port receives unknown unicast floods */
	sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
790
791 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
792 {
793 int i, j;
794
795 struct p_hdr *h;
796
797 for (i = 0; i < priv->rxrings; i++) {
798 for (j = 0; j < priv->rxringlen; j++) {
799 h = &ring->rx_header[i][j];
800 memset(h, 0, sizeof(struct p_hdr));
801 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
802 + i * priv->rxringlen * RING_BUFFER
803 + j * RING_BUFFER);
804 h->size = RING_BUFFER;
805 /* All rings owned by switch, last one wraps */
806 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
807 | (j == (priv->rxringlen - 1) ? WRAP : 0);
808 }
809 ring->c_rx[i] = 0;
810 }
811
812 for (i = 0; i < TXRINGS; i++) {
813 for (j = 0; j < TXRINGLEN; j++) {
814 h = &ring->tx_header[i][j];
815 memset(h, 0, sizeof(struct p_hdr));
816 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
817 + i * TXRINGLEN * RING_BUFFER
818 + j * RING_BUFFER);
819 h->size = RING_BUFFER;
820 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
821 }
822 /* Last header is wrapping around */
823 ring->tx_r[i][j-1] |= WRAP;
824 ring->c_tx[i] = 0;
825 }
826 }
827
/*
 * Initialize the RTL839x L2 notification ring (located behind the DMA
 * rings in priv->membase), program its base address and enable the
 * hardware notification events.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* Hand all blocks to the switch (bit 0), last one wraps */
	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0);  // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL);  // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
847
848 static int rtl838x_eth_open(struct net_device *ndev)
849 {
850 unsigned long flags;
851 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
852 struct ring_b *ring = priv->membase;
853 int i, err;
854
855 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
856 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
857
858 spin_lock_irqsave(&priv->lock, flags);
859 rtl838x_hw_reset(priv);
860 rtl838x_setup_ring_buffer(priv, ring);
861 if (priv->family_id == RTL8390_FAMILY_ID) {
862 rtl839x_setup_notify_ring_buffer(priv);
863 /* Make sure the ring structure is visible to the ASIC */
864 mb();
865 flush_cache_all();
866 }
867
868 rtl838x_hw_ring_setup(priv);
869 err = request_irq(ndev->irq, priv->r->net_irq, IRQF_SHARED, ndev->name, ndev);
870 if (err) {
871 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
872 __func__, err);
873 return err;
874 }
875 phylink_start(priv->phylink);
876
877 for (i = 0; i < priv->rxrings; i++)
878 napi_enable(&priv->rx_qs[i].napi);
879
880 switch (priv->family_id) {
881 case RTL8380_FAMILY_ID:
882 rtl838x_hw_en_rxtx(priv);
883 /* Trap IGMP/MLD traffic to CPU-Port */
884 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
885 /* Flush learned FDB entries on link down of a port */
886 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
887 break;
888
889 case RTL8390_FAMILY_ID:
890 rtl839x_hw_en_rxtx(priv);
891 // Trap MLD and IGMP messages to CPU_PORT
892 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
893 /* Flush learned FDB entries on link down of a port */
894 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
895 break;
896
897 case RTL9300_FAMILY_ID:
898 rtl93xx_hw_en_rxtx(priv);
899 /* Flush learned FDB entries on link down of a port */
900 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
901 // Trap MLD and IGMP messages to CPU_PORT
902 sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
903 break;
904
905 case RTL9310_FAMILY_ID:
906 rtl93xx_hw_en_rxtx(priv);
907 break;
908 }
909
910 netif_tx_start_all_queues(ndev);
911
912 spin_unlock_irqrestore(&priv->lock, flags);
913
914 return 0;
915 }
916
/*
 * Quiesce the NIC: stop CPU-port traffic and DMA, flush the L2 address
 * cache, force the CPU-port link down, then mask/clear all interrupts
 * and disable the DMA engine.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Family-specific force-mode value / interrupt clear mask */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200);  // Test, whether this is needed

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			/* Busy-wait on the flush-in-progress bit */
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
978
979 static int rtl838x_eth_stop(struct net_device *ndev)
980 {
981 unsigned long flags;
982 int i;
983 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
984
985 pr_info("in %s\n", __func__);
986
987 spin_lock_irqsave(&priv->lock, flags);
988 phylink_stop(priv->phylink);
989 rtl838x_hw_stop(priv);
990 free_irq(ndev->irq, ndev);
991
992 for (i = 0; i < priv->rxrings; i++)
993 napi_disable(&priv->rx_qs[i].napi);
994
995 netif_tx_stop_all_queues(ndev);
996
997 spin_unlock_irqrestore(&priv->lock, flags);
998
999 return 0;
1000 }
1001
1002 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1003 {
1004 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1005 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1006 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1007 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1008 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1009 }
1010 if (ndev->flags & IFF_ALLMULTI) {
1011 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1012 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1013 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1014 }
1015 if (ndev->flags & IFF_PROMISC) {
1016 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1017 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1018 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1019 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1020 }
1021 }
1022
1023 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1024 {
1025 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1026
1027 if (priv->family_id == RTL8390_FAMILY_ID)
1028 return rtl839x_eth_set_multicast_list(ndev);
1029
1030 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1031 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1032 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1033 }
1034 if (ndev->flags & IFF_ALLMULTI)
1035 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1036 if (ndev->flags & IFF_PROMISC) {
1037 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1038 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1039 }
1040 }
1041
1042 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1043 {
1044 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1045 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1046 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1047 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1048 }
1049 if (ndev->flags & IFF_ALLMULTI) {
1050 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1051 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1052 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1053 }
1054 if (ndev->flags & IFF_PROMISC) {
1055 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1056 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1057 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1058 }
1059 }
1060
1061 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1062 {
1063 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1064 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1065 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1066 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1067 }
1068 if (ndev->flags & IFF_ALLMULTI) {
1069 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1070 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1071 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1072 }
1073 if (ndev->flags & IFF_PROMISC) {
1074 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1075 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1076 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1077 }
1078 }
1079
/*
 * ndo_tx_timeout callback: recover from a stuck TX queue by fully
 * stopping the hardware, re-initializing the DMA rings and re-enabling
 * RX/TX. The whole sequence is order-critical and serialized by
 * priv->lock.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	/* Stop, rebuild rings, restart — then mark the queue live again */
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1094
/*
 * ndo_start_xmit callback: place the frame into the TX ring selected by
 * the skb's queue mapping and kick the switch's TX DMA.
 * A trailing pseudo-DSA tag in the buffer (0x80, port, 0x10, 0x00) is
 * decoded into the destination port and stripped; port and priority are
 * then passed to the ASIC via the CPU-tag built by create_tx_header().
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;	/* -1: no explicit destination port in the tag */
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
			&& skb->data[len-3] < priv->cpu_port && skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len-3];
		skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	/* skb_padto() frees the skb on failure, so the packet is simply
	 * dropped and we report TX_OK */
	if (skb_padto(skb, len)) {
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		/* Fill the CPU-tag with destination port and (halved) priority */
		priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		/* Descriptor still owned by the ASIC: ask the stack to retry */
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
1187
1188 /*
1189 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1190 * so we do round-robin
1191 */
1192 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1193 struct net_device *sb_dev)
1194 {
1195 static u8 last = 0;
1196
1197 last++;
1198 return last % TXRINGS;
1199 }
1200
1201 /*
1202 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1203 */
1204 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1205 struct net_device *sb_dev)
1206 {
1207 if (skb->priority >= TC_PRIO_CONTROL)
1208 return 1;
1209 return 0;
1210 }
1211
1212 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1213 {
1214 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1215 struct ring_b *ring = priv->membase;
1216 struct sk_buff *skb;
1217 unsigned long flags;
1218 int i, len, work_done = 0;
1219 u8 *data, *skb_data;
1220 unsigned int val;
1221 u32 *last;
1222 struct p_hdr *h;
1223 bool dsa = netdev_uses_dsa(dev);
1224 struct dsa_tag tag;
1225
1226 spin_lock_irqsave(&priv->lock, flags);
1227 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1228 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1229
1230 do {
1231 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1232 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1233 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1234 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1235 }
1236 break;
1237 }
1238
1239 h = &ring->rx_header[r][ring->c_rx[r]];
1240 data = (u8 *)KSEG1ADDR(h->buf);
1241 len = h->len;
1242 if (!len)
1243 break;
1244 work_done++;
1245
1246 len -= 4; /* strip the CRC */
1247 /* Add 4 bytes for cpu_tag */
1248 if (dsa)
1249 len += 4;
1250
1251 skb = alloc_skb(len + 4, GFP_KERNEL);
1252 skb_reserve(skb, NET_IP_ALIGN);
1253
1254 if (likely(skb)) {
1255 /* BUG: Prevent bug on RTL838x SoCs*/
1256 if (priv->family_id == RTL8380_FAMILY_ID) {
1257 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1258 for (i = 0; i < priv->rxrings; i++) {
1259 /* Update each ring cnt */
1260 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1261 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1262 }
1263 }
1264
1265 skb_data = skb_put(skb, len);
1266 /* Make sure data is visible */
1267 mb();
1268 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1269 /* Overwrite CRC with cpu_tag */
1270 if (dsa) {
1271 priv->r->decode_tag(h, &tag);
1272 skb->data[len-4] = 0x80;
1273 skb->data[len-3] = tag.port;
1274 skb->data[len-2] = 0x10;
1275 skb->data[len-1] = 0x00;
1276 if (tag.l2_offloaded)
1277 skb->data[len-3] |= 0x40;
1278 }
1279
1280 if (tag.queue >= 0)
1281 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1282 tag.queue, len, tag.reason, tag.port);
1283
1284 skb->protocol = eth_type_trans(skb, dev);
1285 if (dev->features & NETIF_F_RXCSUM) {
1286 if (tag.crc_error)
1287 skb_checksum_none_assert(skb);
1288 else
1289 skb->ip_summed = CHECKSUM_UNNECESSARY;
1290 }
1291 dev->stats.rx_packets++;
1292 dev->stats.rx_bytes += len;
1293
1294 netif_receive_skb(skb);
1295 } else {
1296 if (net_ratelimit())
1297 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1298 dev->stats.rx_dropped++;
1299 }
1300
1301 /* Reset header structure */
1302 memset(h, 0, sizeof(struct p_hdr));
1303 h->buf = data;
1304 h->size = RING_BUFFER;
1305
1306 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1307 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1308 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1309 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1310 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1311
1312 // Update counters
1313 priv->r->update_cntr(r, 0);
1314
1315 spin_unlock_irqrestore(&priv->lock, flags);
1316 return work_done;
1317 }
1318
1319 static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
1320 {
1321 struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
1322 struct rtl838x_eth_priv *priv = rx_q->priv;
1323 int work_done = 0;
1324 int r = rx_q->id;
1325 int work;
1326
1327 while (work_done < budget) {
1328 work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
1329 if (!work)
1330 break;
1331 work_done += work;
1332 }
1333
1334 if (work_done < budget) {
1335 napi_complete_done(napi, work_done);
1336
1337 /* Enable RX interrupt */
1338 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
1339 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
1340 else
1341 sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
1342 }
1343 return work_done;
1344 }
1345
1346
1347 static void rtl838x_validate(struct phylink_config *config,
1348 unsigned long *supported,
1349 struct phylink_link_state *state)
1350 {
1351 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1352
1353 pr_debug("In %s\n", __func__);
1354
1355 if (!phy_interface_mode_is_rgmii(state->interface) &&
1356 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1357 state->interface != PHY_INTERFACE_MODE_MII &&
1358 state->interface != PHY_INTERFACE_MODE_REVMII &&
1359 state->interface != PHY_INTERFACE_MODE_GMII &&
1360 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1361 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1362 state->interface != PHY_INTERFACE_MODE_SGMII) {
1363 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1364 pr_err("Unsupported interface: %d\n", state->interface);
1365 return;
1366 }
1367
1368 /* Allow all the expected bits */
1369 phylink_set(mask, Autoneg);
1370 phylink_set_port_modes(mask);
1371 phylink_set(mask, Pause);
1372 phylink_set(mask, Asym_Pause);
1373
1374 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1375 * including Half duplex
1376 */
1377 if (state->interface != PHY_INTERFACE_MODE_MII &&
1378 state->interface != PHY_INTERFACE_MODE_REVMII) {
1379 phylink_set(mask, 1000baseT_Full);
1380 phylink_set(mask, 1000baseT_Half);
1381 }
1382
1383 phylink_set(mask, 10baseT_Half);
1384 phylink_set(mask, 10baseT_Full);
1385 phylink_set(mask, 100baseT_Half);
1386 phylink_set(mask, 100baseT_Full);
1387
1388 bitmap_and(supported, supported, mask,
1389 __ETHTOOL_LINK_MODE_MASK_NBITS);
1390 bitmap_and(state->advertising, state->advertising, mask,
1391 __ETHTOOL_LINK_MODE_MASK_NBITS);
1392 }
1393
1394
/* phylink mac_config callback — intentionally empty. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
1405
/*
 * phylink mac_an_restart callback: restart auto-negotiation on the CPU
 * port by forcing the link down and back up via the force-mode register.
 * The values 0x6192D/0x6192F are magic; presumably they differ only in
 * the link-enable bits — TODO confirm against the RTL838x datasheet.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1421
1422 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1423 struct phylink_link_state *state)
1424 {
1425 u32 speed;
1426 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1427 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1428 int port = priv->cpu_port;
1429
1430 pr_debug("In %s\n", __func__);
1431
1432 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1433 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1434
1435 speed = priv->r->get_mac_link_spd_sts(port);
1436 switch (speed) {
1437 case 0:
1438 state->speed = SPEED_10;
1439 break;
1440 case 1:
1441 state->speed = SPEED_100;
1442 break;
1443 case 2:
1444 state->speed = SPEED_1000;
1445 break;
1446 default:
1447 state->speed = SPEED_UNKNOWN;
1448 break;
1449 }
1450
1451 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1452 if (priv->r->get_mac_rx_pause_sts(port))
1453 state->pause |= MLO_PAUSE_RX;
1454 if (priv->r->get_mac_tx_pause_sts(port))
1455 state->pause |= MLO_PAUSE_TX;
1456 }
1457
1458 static void rtl838x_mac_link_down(struct phylink_config *config,
1459 unsigned int mode,
1460 phy_interface_t interface)
1461 {
1462 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1463 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1464
1465 pr_debug("In %s\n", __func__);
1466 /* Stop TX/RX to port */
1467 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1468 }
1469
1470 static void rtl838x_mac_link_up(struct phylink_config *config,
1471 struct phy_device *phy, unsigned int mode,
1472 phy_interface_t interface, int speed, int duplex,
1473 bool tx_pause, bool rx_pause)
1474 {
1475 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1476 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1477
1478 pr_debug("In %s\n", __func__);
1479 /* Restart TX/RX to port */
1480 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1481 }
1482
1483 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1484 {
1485 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1486 unsigned long flags;
1487
1488 spin_lock_irqsave(&priv->lock, flags);
1489 pr_debug("In %s\n", __func__);
1490 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1491 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1492
1493 if (priv->family_id == RTL8380_FAMILY_ID) {
1494 /* 2 more registers, ALE/MAC block */
1495 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1496 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1497 (RTL838X_MAC_ALE + 4));
1498
1499 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1500 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1501 RTL838X_MAC2 + 4);
1502 }
1503 spin_unlock_irqrestore(&priv->lock, flags);
1504 }
1505
1506 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1507 {
1508 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1509 const struct sockaddr *addr = p;
1510 u8 *mac = (u8 *) (addr->sa_data);
1511
1512 if (!is_valid_ether_addr(addr->sa_data))
1513 return -EADDRNOTAVAIL;
1514
1515 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1516 rtl838x_set_mac_hw(dev, mac);
1517
1518 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1519 return 0;
1520 }
1521
/* MAC init for RTL839X — currently a no-op. */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	/* We will need to set-up EEE and the egress-rate limitation */
	return 0;
}
1527
1528 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1529 {
1530 int i;
1531
1532 if (priv->family_id == 0x8390)
1533 return rtl8390_init_mac(priv);
1534
1535 pr_info("%s\n", __func__);
1536 /* fix timer for EEE */
1537 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1538 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1539
1540 /* Init VLAN */
1541 if (priv->id == 0x8382) {
1542 for (i = 0; i <= 28; i++)
1543 sw_w32(0, 0xd57c + i * 0x80);
1544 }
1545 if (priv->id == 0x8380) {
1546 for (i = 8; i <= 28; i++)
1547 sw_w32(0, 0xd57c + i * 0x80);
1548 }
1549 return 0;
1550 }
1551
1552 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1553 struct ethtool_link_ksettings *cmd)
1554 {
1555 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1556
1557 pr_debug("%s called\n", __func__);
1558 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1559 }
1560
1561 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1562 const struct ethtool_link_ksettings *cmd)
1563 {
1564 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1565
1566 pr_debug("%s called\n", __func__);
1567 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1568 }
1569
1570 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1571 {
1572 u32 val;
1573 int err;
1574 struct rtl838x_eth_priv *priv = bus->priv;
1575
1576 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1577 return rtl838x_read_sds_phy(mii_id, regnum);
1578 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1579 if (err)
1580 return err;
1581 return val;
1582 }
1583
1584 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1585 {
1586 u32 val;
1587 int err;
1588 struct rtl838x_eth_priv *priv = bus->priv;
1589
1590 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1591 return rtl839x_read_sds_phy(mii_id, regnum);
1592
1593 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1594 if (err)
1595 return err;
1596 return val;
1597 }
1598
1599 static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1600 {
1601 u32 val;
1602 int err;
1603
1604 // TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
1605 if (mii_id >= 26 && mii_id <= 27)
1606 return rtl930x_read_sds_phy(mii_id - 18, 0, regnum);
1607
1608 if (regnum & MII_ADDR_C45) {
1609 regnum &= ~MII_ADDR_C45;
1610 err = rtl930x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
1611 } else {
1612 err = rtl930x_read_phy(mii_id, 0, regnum, &val);
1613 }
1614 if (err)
1615 return err;
1616 return val;
1617 }
1618
1619 static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1620 {
1621 u32 val;
1622 int err;
1623 // struct rtl838x_eth_priv *priv = bus->priv;
1624
1625 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1626 // return rtl839x_read_sds_phy(mii_id, regnum);
1627
1628 err = rtl931x_read_phy(mii_id, 0, regnum, &val);
1629 if (err)
1630 return err;
1631 return val;
1632 }
1633
1634 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1635 int regnum, u16 value)
1636 {
1637 u32 offset = 0;
1638 struct rtl838x_eth_priv *priv = bus->priv;
1639
1640 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1641 if (mii_id == 26)
1642 offset = 0x100;
1643 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1644 return 0;
1645 }
1646 return rtl838x_write_phy(mii_id, 0, regnum, value);
1647 }
1648
1649 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1650 int regnum, u16 value)
1651 {
1652 struct rtl838x_eth_priv *priv = bus->priv;
1653
1654 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1655 return rtl839x_write_sds_phy(mii_id, regnum, value);
1656
1657 return rtl839x_write_phy(mii_id, 0, regnum, value);
1658 }
1659
1660 static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
1661 int regnum, u16 value)
1662 {
1663 // struct rtl838x_eth_priv *priv = bus->priv;
1664
1665 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1666 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1667 if (regnum & MII_ADDR_C45) {
1668 regnum &= ~MII_ADDR_C45;
1669 return rtl930x_write_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, value);
1670 }
1671
1672 return rtl930x_write_phy(mii_id, 0, regnum, value);
1673 }
1674
1675 static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
1676 int regnum, u16 value)
1677 {
1678 // struct rtl838x_eth_priv *priv = bus->priv;
1679
1680 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1681 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1682
1683 return rtl931x_write_phy(mii_id, 0, regnum, value);
1684 }
1685
/*
 * mii_bus reset callback for RTL838X: stop the MAC's automatic PHY
 * polling and switch PHY access over to SoC control so the bus can be
 * used for configuration.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1698
/*
 * mii_bus reset callback for RTL839X — intentionally a no-op. The code
 * after the early return is known not to work (see the BUG note) and is
 * kept, unreachable, as documentation of the intended sequence.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	return 0;

	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1714
/*
 * mii_bus reset callback for RTL931X: disable per-port SMI polling
 * (two 32-bit registers cover all ports) so the bus can be configured.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL + 4);

	pr_debug("%s called\n", __func__);

	return 0;
}
1724
/*
 * mii_bus reset callback for RTL930X: enable SMI polling on buses 0-2,
 * power up the SerDes ports 24-27, and program the port-to-PHY-address
 * mapping. NOTE(review): the mapping is hard-coded for boards like the
 * XGS1210 (see the TODOs) rather than taken from the device tree.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;

	pr_info("RTL930X_SMI_PORT0_15_POLLING_SEL %08x 16-27: %08x\n",
		sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL),
		sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));

	pr_info("%s: Enable SMI polling on SMI bus 0, SMI1, SMI2, disable on SMI3\n", __func__);
	sw_w32_mask(BIT(20) | BIT(21) | BIT(22), BIT(23), RTL930X_SMI_GLB_CTRL);

	pr_info("RTL9300 Powering on SerDes ports\n");
	rtl9300_sds_power(24, 1);
	rtl9300_sds_power(25, 1);
	rtl9300_sds_power(26, 1);
	rtl9300_sds_power(27, 1);
	mdelay(200);

	// RTL930X_SMI_PORT0_15_POLLING_SEL 55550000 16-27: 00f9aaaa
	// i.e SMI=0 for all ports
	for (i = 0; i < 5; i++)
		pr_info("port phy: %08x\n", sw_r32(RTL930X_SMI_PORT0_5_ADDR + i *4));

	// 1-to-1 mapping of port to phy-address
	for (i = 0; i < 24; i++) {
		/* 5 address bits per port, 6 ports per 32-bit register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, i << pos, RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
	}

	// ports 24 and 25 have PHY addresses 8 and 9, ports 26/27 PHY 26/27
	sw_w32(8 | 9 << 5 | 26 << 10 | 27 << 15, RTL930X_SMI_PORT0_5_ADDR + 4 * 4);

	// Ports 24 and 25 live on SMI bus 1 and 2
	sw_w32_mask(0x3 << 16, 0x1 << 16, RTL930X_SMI_PORT16_27_POLLING_SEL);
	sw_w32_mask(0x3 << 18, 0x2 << 18, RTL930X_SMI_PORT16_27_POLLING_SEL);

	// SMI bus 1 and 2 speak Clause 45 TODO: Configure from .dts
	sw_w32_mask(0, BIT(17) | BIT(18), RTL930X_SMI_GLB_CTRL);

	// Ports 24 and 25 are 2.5 Gig, set this type (1)
	sw_w32_mask(0x7 << 12, 1 << 12, RTL930X_SMI_MAC_TYPE_CTRL);
	sw_w32_mask(0x7 << 15, 1 << 15, RTL930X_SMI_MAC_TYPE_CTRL);

	return 0;
}
1771
1772 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
1773 {
1774 struct device_node *mii_np;
1775 int ret;
1776
1777 pr_debug("%s called\n", __func__);
1778 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
1779
1780 if (!mii_np) {
1781 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
1782 return -ENODEV;
1783 }
1784
1785 if (!of_device_is_available(mii_np)) {
1786 ret = -ENODEV;
1787 goto err_put_node;
1788 }
1789
1790 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
1791 if (!priv->mii_bus) {
1792 ret = -ENOMEM;
1793 goto err_put_node;
1794 }
1795
1796 switch(priv->family_id) {
1797 case RTL8380_FAMILY_ID:
1798 priv->mii_bus->name = "rtl838x-eth-mdio";
1799 priv->mii_bus->read = rtl838x_mdio_read;
1800 priv->mii_bus->write = rtl838x_mdio_write;
1801 priv->mii_bus->reset = rtl838x_mdio_reset;
1802 break;
1803 case RTL8390_FAMILY_ID:
1804 priv->mii_bus->name = "rtl839x-eth-mdio";
1805 priv->mii_bus->read = rtl839x_mdio_read;
1806 priv->mii_bus->write = rtl839x_mdio_write;
1807 priv->mii_bus->reset = rtl839x_mdio_reset;
1808 break;
1809 case RTL9300_FAMILY_ID:
1810 priv->mii_bus->name = "rtl930x-eth-mdio";
1811 priv->mii_bus->read = rtl930x_mdio_read;
1812 priv->mii_bus->write = rtl930x_mdio_write;
1813 priv->mii_bus->reset = rtl930x_mdio_reset;
1814 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1815 break;
1816 case RTL9310_FAMILY_ID:
1817 priv->mii_bus->name = "rtl931x-eth-mdio";
1818 priv->mii_bus->read = rtl931x_mdio_read;
1819 priv->mii_bus->write = rtl931x_mdio_write;
1820 priv->mii_bus->reset = rtl931x_mdio_reset;
1821 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1822 break;
1823 }
1824 priv->mii_bus->priv = priv;
1825 priv->mii_bus->parent = &priv->pdev->dev;
1826
1827 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1828 ret = of_mdiobus_register(priv->mii_bus, mii_np);
1829
1830 err_put_node:
1831 of_node_put(mii_np);
1832 return ret;
1833 }
1834
1835 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
1836 {
1837 pr_debug("%s called\n", __func__);
1838 if (!priv->mii_bus)
1839 return 0;
1840
1841 mdiobus_unregister(priv->mii_bus);
1842 mdiobus_free(priv->mii_bus);
1843
1844 return 0;
1845 }
1846
1847 static netdev_features_t rtl838x_fix_features(struct net_device *dev,
1848 netdev_features_t features)
1849 {
1850 return features;
1851 }
1852
1853 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
1854 {
1855 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1856
1857 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1858 if (!(features & NETIF_F_RXCSUM))
1859 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1860 else
1861 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
1862 }
1863
1864 return 0;
1865 }
1866
1867 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
1868 {
1869 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1870
1871 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
1872 if (!(features & NETIF_F_RXCSUM))
1873 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
1874 else
1875 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
1876 }
1877
1878 return 0;
1879 }
1880
/* netdev callbacks for RTL838X: round-robin TX queue selection and
 * RTL83XX feature handling */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
1893
/* netdev callbacks for RTL839X: same as RTL838X except for the
 * family-specific multicast filter setup */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
1906
/* netdev callbacks for RTL930X: priority-based TX queue selection and
 * RTL93XX feature handling */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
1919
/* netdev callbacks for RTL931X: priority-based TX queue selection and
 * RTL93XX feature handling */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
1932
/* phylink MAC operations for the CPU port (shared by all families) */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
1941
/* ethtool operations: link settings are delegated to phylink */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
1946
1947 static int __init rtl838x_eth_probe(struct platform_device *pdev)
1948 {
1949 struct net_device *dev;
1950 struct device_node *dn = pdev->dev.of_node;
1951 struct rtl838x_eth_priv *priv;
1952 struct resource *res, *mem;
1953 phy_interface_t phy_mode;
1954 struct phylink *phylink;
1955 int err = 0, i, rxrings, rxringlen;
1956 struct ring_b *ring;
1957
1958 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
1959 (u32)pdev, (u32)(&(pdev->dev)));
1960
1961 if (!dn) {
1962 dev_err(&pdev->dev, "No DT found\n");
1963 return -EINVAL;
1964 }
1965
1966 rxrings = (soc_info.family == RTL8380_FAMILY_ID
1967 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
1968 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
1969 rxringlen = MAX_ENTRIES / rxrings;
1970 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
1971
1972 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
1973 if (!dev) {
1974 err = -ENOMEM;
1975 goto err_free;
1976 }
1977 SET_NETDEV_DEV(dev, &pdev->dev);
1978 priv = netdev_priv(dev);
1979
1980 /* obtain buffer memory space */
1981 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1982 if (res) {
1983 mem = devm_request_mem_region(&pdev->dev, res->start,
1984 resource_size(res), res->name);
1985 if (!mem) {
1986 dev_err(&pdev->dev, "cannot request memory space\n");
1987 err = -ENXIO;
1988 goto err_free;
1989 }
1990
1991 dev->mem_start = mem->start;
1992 dev->mem_end = mem->end;
1993 } else {
1994 dev_err(&pdev->dev, "cannot request IO resource\n");
1995 err = -ENXIO;
1996 goto err_free;
1997 }
1998
1999 /* Allocate buffer memory */
2000 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
2001 + sizeof(struct ring_b) + sizeof(struct notify_b),
2002 (void *)&dev->mem_start, GFP_KERNEL);
2003 if (!priv->membase) {
2004 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2005 err = -ENOMEM;
2006 goto err_free;
2007 }
2008
2009 // Allocate ring-buffer space at the end of the allocated memory
2010 ring = priv->membase;
2011 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2012
2013 spin_lock_init(&priv->lock);
2014
2015 /* obtain device IRQ number */
2016 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2017 if (!res) {
2018 dev_err(&pdev->dev, "cannot obtain IRQ, using default 24\n");
2019 dev->irq = 24;
2020 } else {
2021 dev->irq = res->start;
2022 }
2023 dev->ethtool_ops = &rtl838x_ethtool_ops;
2024 dev->min_mtu = ETH_ZLEN;
2025 dev->max_mtu = 1536;
2026 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2027 dev->hw_features = NETIF_F_RXCSUM;
2028
2029 priv->id = soc_info.id;
2030 priv->family_id = soc_info.family;
2031 if (priv->id) {
2032 pr_info("Found SoC ID: %4x: %s, family %x\n",
2033 priv->id, soc_info.name, priv->family_id);
2034 } else {
2035 pr_err("Unknown chip id (%04x)\n", priv->id);
2036 return -ENODEV;
2037 }
2038
2039 switch (priv->family_id) {
2040 case RTL8380_FAMILY_ID:
2041 priv->cpu_port = RTL838X_CPU_PORT;
2042 priv->r = &rtl838x_reg;
2043 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2044 break;
2045 case RTL8390_FAMILY_ID:
2046 priv->cpu_port = RTL839X_CPU_PORT;
2047 priv->r = &rtl839x_reg;
2048 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2049 break;
2050 case RTL9300_FAMILY_ID:
2051 priv->cpu_port = RTL930X_CPU_PORT;
2052 priv->r = &rtl930x_reg;
2053 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2054 break;
2055 case RTL9310_FAMILY_ID:
2056 priv->cpu_port = RTL931X_CPU_PORT;
2057 priv->r = &rtl931x_reg;
2058 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2059 break;
2060 default:
2061 pr_err("Unknown SoC family\n");
2062 return -ENODEV;
2063 }
2064 priv->rxringlen = rxringlen;
2065 priv->rxrings = rxrings;
2066
2067 rtl8380_init_mac(priv);
2068
2069 /* try to get mac address in the following order:
2070 * 1) from device tree data
2071 * 2) from internal registers set by bootloader
2072 */
2073 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2074 if (is_valid_ether_addr(dev->dev_addr)) {
2075 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2076 } else {
2077 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2078 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2079 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2080 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2081 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2082 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2083 }
2084 /* if the address is invalid, use a random value */
2085 if (!is_valid_ether_addr(dev->dev_addr)) {
2086 struct sockaddr sa = { AF_UNSPEC };
2087
2088 netdev_warn(dev, "Invalid MAC address, using random\n");
2089 eth_hw_addr_random(dev);
2090 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2091 if (rtl838x_set_mac_address(dev, &sa))
2092 netdev_warn(dev, "Failed to set MAC address.\n");
2093 }
2094 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2095 sw_r32(priv->r->mac + 4));
2096 strcpy(dev->name, "eth%d");
2097 priv->pdev = pdev;
2098 priv->netdev = dev;
2099
2100 err = rtl838x_mdio_init(priv);
2101 if (err)
2102 goto err_free;
2103
2104 err = register_netdev(dev);
2105 if (err)
2106 goto err_free;
2107
2108 for (i = 0; i < priv->rxrings; i++) {
2109 priv->rx_qs[i].id = i;
2110 priv->rx_qs[i].priv = priv;
2111 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2112 }
2113
2114 platform_set_drvdata(pdev, dev);
2115
2116 phy_mode = PHY_INTERFACE_MODE_NA;
2117 err = of_get_phy_mode(dn, &phy_mode);
2118 if (err < 0) {
2119 dev_err(&pdev->dev, "incorrect phy-mode\n");
2120 err = -EINVAL;
2121 goto err_free;
2122 }
2123 priv->phylink_config.dev = &dev->dev;
2124 priv->phylink_config.type = PHYLINK_NETDEV;
2125
2126 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2127 phy_mode, &rtl838x_phylink_ops);
2128 if (IS_ERR(phylink)) {
2129 err = PTR_ERR(phylink);
2130 goto err_free;
2131 }
2132 priv->phylink = phylink;
2133
2134 return 0;
2135
2136 err_free:
2137 pr_err("Error setting up netdev, freeing it again.\n");
2138 free_netdev(dev);
2139 return err;
2140 }
2141
2142 static int rtl838x_eth_remove(struct platform_device *pdev)
2143 {
2144 struct net_device *dev = platform_get_drvdata(pdev);
2145 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2146 int i;
2147
2148 if (dev) {
2149 pr_info("Removing platform driver for rtl838x-eth\n");
2150 rtl838x_mdio_remove(priv);
2151 rtl838x_hw_stop(priv);
2152
2153 netif_tx_stop_all_queues(dev);
2154
2155 for (i = 0; i < priv->rxrings; i++)
2156 netif_napi_del(&priv->rx_qs[i].napi);
2157
2158 unregister_netdev(dev);
2159 free_netdev(dev);
2160 }
2161 return 0;
2162 }
2163
/* Devicetree match table: binds this driver to "realtek,rtl838x-eth"
 * nodes; also exported for module autoloading via MODULE_DEVICE_TABLE.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2169
2170 static struct platform_driver rtl838x_eth_driver = {
2171 .probe = rtl838x_eth_probe,
2172 .remove = rtl838x_eth_remove,
2173 .driver = {
2174 .name = "rtl838x-eth",
2175 .pm = NULL,
2176 .of_match_table = rtl838x_eth_of_ids,
2177 },
2178 };
2179
2180 module_platform_driver(rtl838x_eth_driver);
2181
2182 MODULE_AUTHOR("B. Koblitz");
2183 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
2184 MODULE_LICENSE("GPL");