realtek: Fix RTL931X Ethernet driver
[openwrt/staging/ldir.git] / target / linux / realtek / files-5.10 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
29 /*
30 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
 * The ring is assigned by switch based on packet/port priority
32 * Maximum number of TX rings is 2, Ring 2 being the high priority
33 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
34 * for an RX ring, MAX_ENTRIES the maximum number of entries
35 * available in total for all queues.
36 */
37 #define MAX_RXRINGS 32
38 #define MAX_RXLEN 300
39 #define MAX_ENTRIES (300 * 8)
40 #define TXRINGS 2
41 #define TXRINGLEN 160
42 #define NOTIFY_EVENTS 10
43 #define NOTIFY_BLOCKS 10
44 #define TX_EN 0x8
45 #define RX_EN 0x4
46 #define TX_EN_93XX 0x20
47 #define RX_EN_93XX 0x10
48 #define TX_DO 0x2
49 #define WRAP 0x2
50 #define MAX_PORTS 57
51 #define MAX_SMI_BUSSES 4
52
53 #define RING_BUFFER 1600
54
/* DMA packet header shared with the ASIC (packed, byte-aligned to match
 * the hardware layout). cpu_tag[] carries the SoC-specific CPU tag written
 * by the create_tx_header() and parsed by the decode_tag() callbacks. */
struct p_hdr {
	uint8_t *buf;		/* pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	uint16_t cpu_tag[10];	/* CPU tag words; usage differs per SoC family */
} __packed __aligned(1);
63
/* One L2 learning-notification event as delivered by the RTL839x hardware;
 * consumed by rtl839x_l2_notification_handler(). */
struct n_event {
	uint32_t type:2;	/* non-zero is treated as an "add" event */
	uint32_t fidVid:12;	/* FID/VID of the L2 entry */
	uint64_t mac:48;	/* MAC address of the L2 entry */
	uint32_t slp:6;
	uint32_t valid:1;	/* entry holds a valid event */
	uint32_t reserved:27;
} __packed __aligned(1);
72
/* All DMA ring state shared with the ASIC. Lives at the start of
 * priv->membase; on RTL839x a struct notify_b follows directly after. */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor words */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];	/* per-RX-slot packet headers */
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];	/* per-TX-slot packet headers */
	uint32_t c_rx[MAX_RXRINGS];	/* CPU's current index into each RX ring */
	uint32_t c_tx[TXRINGS];		/* CPU's current index into each TX ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;	/* RX packet buffers (allocation not visible in this file) */
};
83
/* One block of L2 notification events (RTL839x) */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};
87
/* Layout of the RTL839x L2 notification area, placed in DMA memory
 * directly behind struct ring_b. */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];	/* event storage */
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];	/* block descriptors; bit 0 set = owned by switch */
	u32 reserved2[8];
};
94
95 static void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
96 {
97 prio &= 0x7;
98
99 if (dest_port > 0) {
100 // cpu_tag[0] is reserved on the RTL83XX SoCs
101 h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on
102 h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
103 h->cpu_tag[3] = 0x0000;
104 h->cpu_tag[4] = BIT(dest_port) >> 16;
105 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
106 // Set internal priority and AS_PRIO
107 if (prio >= 0)
108 h->cpu_tag[2] |= (prio | 0x8) << 12;
109 }
110 }
111
112 static void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
113 {
114 prio &= 0x7;
115
116 if (dest_port > 0) {
117 // cpu_tag[0] is reserved on the RTL83XX SoCs
118 h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
119 h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
120 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
121 if (dest_port >= 32) {
122 dest_port -= 32;
123 h->cpu_tag[2] = BIT(dest_port) >> 16;
124 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
125 } else {
126 h->cpu_tag[4] = BIT(dest_port) >> 16;
127 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
128 }
129 h->cpu_tag[2] |= BIT(5); // Enable destination port mask use
130 h->cpu_tag[2] |= BIT(8); // Enable L2 Learning
131 // Set internal priority and AS_PRIO
132 if (prio >= 0)
133 h->cpu_tag[1] |= prio | BIT(3);
134 }
135 }
136
137 static void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
138 {
139 h->cpu_tag[0] = 0x8000; // CPU tag marker
140 h->cpu_tag[1] = h->cpu_tag[2] = 0;
141 if (prio >= 0)
142 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
143 h->cpu_tag[3] = 0;
144 h->cpu_tag[4] = 0;
145 h->cpu_tag[5] = 0;
146 h->cpu_tag[6] = BIT(dest_port) >> 16;
147 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
148 }
149
150 static void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
151 {
152 h->cpu_tag[0] = 0x8000; // CPU tag marker
153 h->cpu_tag[1] = h->cpu_tag[2] = 0;
154 if (prio >= 0)
155 h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
156 h->cpu_tag[3] = 0;
157 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
158 if (dest_port >= 32) {
159 dest_port -= 32;
160 h->cpu_tag[4] = BIT(dest_port) >> 16;
161 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
162 } else {
163 h->cpu_tag[6] = BIT(dest_port) >> 16;
164 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
165 }
166 }
167
168 static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
169 {
170 h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
171 h->cpu_tag[2] |= (vlan >> 8) & 0xf;
172 h->cpu_tag[3] |= (vlan & 0xff) << 8;
173 }
174
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* ring index */
	struct rtl838x_eth_priv *priv;	/* back-pointer to device private data */
	struct napi_struct napi;
};
180
/* Driver private state for the RTL838x/839x/930x/931x Ethernet device */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;		/* DMA memory holding struct ring_b (+ notify_b on RTL839x) */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* per-RX-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;		/* SoC family (RTL8380/8390/9300/9310_FAMILY_ID) */
	const struct rtl838x_eth_reg *r;	/* per-family register/accessor table */
	u8 cpu_port;		/* port number of the CPU port */
	u32 lastEvent;		/* next RTL839x notification block to process */
	u16 rxrings;		/* number of RX rings in use */
	u16 rxringlen;		/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];
	u8 smi_addr[MAX_PORTS];
	u32 sds_id[MAX_PORTS];
	bool smi_bus_isc45[MAX_SMI_BUSSES];
	bool phy_is_internal[MAX_PORTS];
};
203
204 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
205 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
206 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
207 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
208 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
209 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
210 extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
211 extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
212 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
213 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
214 extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
215 extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
216
217 /*
218 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
 * the rings. Writing x into these registers subtracts x from its content.
220 * When the content reaches the ring size, the ASIC no longer adds
221 * packets to this receive queue.
222 */
/* Return released RX buffers to the fill-level counter of ring r;
 * no-op here because the counter hardware does not exist on RTL838x. */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
227
/* Return released RX buffers to the fill-level counter of ring r;
 * no-op here because the counter hardware does not exist on RTL839x. */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
232
/* Update the fill-level counter of RX ring r after 'released' buffers were
 * processed. Three 10-bit counters are packed per 32-bit register. */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* old fill level of this ring */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this writes the *unshifted* old fill level to the whole
	 * register; given the subtract-on-write semantics described above this
	 * looks suspicious — confirm against the RTL930x datasheet. */
	sw_w32(v, reg);
}
244
/* Update the fill-level counter of RX ring r after 'released' buffers were
 * processed. Three 10-bit counters are packed per 32-bit register. */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* old fill level of this ring */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): as in rtl930x_update_cntr(), writing the unshifted old
	 * fill level back looks suspicious given the subtract-on-write register
	 * semantics — confirm against the RTL931x datasheet. */
	sw_w32(v, reg);
}
255
/* Decoded CPU tag of a received packet, filled in by the per-SoC
 * decode_tag() callbacks. */
struct dsa_tag {
	u8 reason;		/* trap reason reported by the switch */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* source port */
	u8 l2_offloaded;	/* packet was already handled by the L2 engine */
	u8 prio;
	bool crc_error;		/* packet arrived with a CRC error */
};
264
265 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
266 {
267 t->reason = h->cpu_tag[3] & 0xf;
268 t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
269 t->port = h->cpu_tag[1] & 0x1f;
270 t->crc_error = t->reason == 13;
271
272 pr_debug("Reason: %d\n", t->reason);
273 if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
274 t->l2_offloaded = 1;
275 else
276 t->l2_offloaded = 0;
277
278 return t->l2_offloaded;
279 }
280
281 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
282 {
283 t->reason = h->cpu_tag[5] & 0x1f;
284 t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
285 t->port = h->cpu_tag[1] & 0x3f;
286 t->crc_error = h->cpu_tag[3] & BIT(2);
287
288 pr_debug("Reason: %d\n", t->reason);
289 if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
290 (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
291 t->l2_offloaded = 0;
292 else
293 t->l2_offloaded = 1;
294
295 return t->l2_offloaded;
296 }
297
298 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
299 {
300 t->reason = h->cpu_tag[7] & 0x3f;
301 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
302 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
303 t->crc_error = h->cpu_tag[1] & BIT(6);
304
305 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
306 if (t->reason >= 19 && t->reason <= 27)
307 t->l2_offloaded = 0;
308 else
309 t->l2_offloaded = 1;
310
311 return t->l2_offloaded;
312 }
313
314 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
315 {
316 t->reason = h->cpu_tag[7] & 0x3f;
317 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
318 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
319 t->crc_error = h->cpu_tag[1] & BIT(6);
320
321 if (t->reason != 63)
322 pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
323 if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA
324 t->l2_offloaded = 0;
325 else
326 t->l2_offloaded = 1;
327
328 return t->l2_offloaded;
329 }
330
/*
 * Discard the RX ring-buffers, called as part of the net-ISR
 * when the buffer runs over
 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	int r;
	u32 *last;
	struct p_hdr *h;
	struct ring_b *ring = priv->membase;

	for (r = 0; r < priv->rxrings; r++) {
		pr_debug("In %s working on r: %d\n", __func__, r);
		/* Hardware's current position in this RX ring */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Bit 0 set: descriptor is still owned by the switch */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			/* Re-attach the uncached (KSEG1) buffer of this slot */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space
					+ r * priv->rxringlen * RING_BUFFER
					+ ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();

			/* Hand the descriptor back to the switch; the last slot
			 * also carries the WRAP bit.
			 * NOTE(review): the else-value 0x1 is redundant (bit 0 is
			 * already OR'd in above); the parallel code in
			 * rtl838x_setup_ring_buffer() uses 0 here. */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
				| (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
364
/* Deferred-work container carrying FDB events to rtl838x_fdb_sync().
 * macs[] is zero-terminated; bit 63 of an entry marks an "add" event. */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];
};
370
371 void rtl838x_fdb_sync(struct work_struct *work)
372 {
373 const struct fdb_update_work *uw =
374 container_of(work, struct fdb_update_work, work);
375 struct switchdev_notifier_fdb_info info;
376 u8 addr[ETH_ALEN];
377 int i = 0;
378 int action;
379
380 while (uw->macs[i]) {
381 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
382 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
383 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
384 info.addr = &addr[0];
385 info.vid = 0;
386 info.offloaded = 1;
387 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
388 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
389 i++;
390 }
391 kfree(work);
392 }
393
/* Process L2 learning notifications from the RTL839x: walk the notification
 * ring, convert each valid event into a 64-bit MAC entry (bit 63 = "add")
 * and schedule rtl838x_fdb_sync() to forward them to switchdev. */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Bit 0 clear: block is owned by the CPU and ready to be read */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			/* NOTE(review): skipped (invalid) events leave
			 * w->macs[i] == 0 from kzalloc, and rtl838x_fdb_sync()
			 * stops at the first zero entry — events after a gap
			 * would be dropped; confirm the hardware never leaves
			 * holes in a block. */
			if (!event->valid)
				continue;
			mac = event->mac;
			if (event->type)
				mac |= 1ULL << 63;	/* mark as "add" */
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		w->macs[i] = 0ULL;	/* terminator for rtl838x_fdb_sync() */
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
431
/* Interrupt handler for RTL838x/RTL839x: acknowledges TX-done, schedules
 * NAPI polling for RX rings (masking their interrupts until polled),
 * cleans up on RX ring overrun and dispatches RTL839x L2 notifications. */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification interrupts, bits 20-22; each is ACKed
	 * individually and funneled into the same notification handler */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
485
/* Interrupt handler for RTL930x/RTL931x, which split status into separate
 * TX-done / RX-done / RX-runout registers: acknowledges TX-done, schedules
 * NAPI for RX rings (masking their interrupts) and cleans up on overrun. */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
529
/* Register addresses and accessor callbacks for the RTL838x family */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
554
/* Register addresses and accessor callbacks for the RTL839x family */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
579
/* Register addresses and accessor callbacks for the RTL930x family */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
610
/* Register addresses and accessor callbacks for the RTL931x family */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
641
642 static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
643 {
644 u32 int_saved, nbuf;
645 u32 reset_mask;
646 int i, pos;
647
648 pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
649 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
650 mdelay(100);
651
652 /* Disable and clear interrupts */
653 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
654 sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
655 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
656 sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
657 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
658 sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
659 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
660 } else {
661 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
662 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
663 }
664
665 if (priv->family_id == RTL8390_FAMILY_ID) {
666 /* Preserve L2 notification and NBUF settings */
667 int_saved = sw_r32(priv->r->dma_if_intr_msk);
668 nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
669
670 /* Disable link change interrupt on RTL839x */
671 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
672 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
673
674 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
675 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
676 }
677
678 /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
679 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
680 reset_mask = 0x6;
681 else
682 reset_mask = 0xc;
683
684 sw_w32(reset_mask, priv->r->rst_glb_ctrl);
685
686 do { /* Wait for reset of NIC and Queues done */
687 udelay(20);
688 } while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
689 mdelay(100);
690
691 /* Setup Head of Line */
692 if (priv->family_id == RTL8380_FAMILY_ID)
693 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
694 if (priv->family_id == RTL8390_FAMILY_ID)
695 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
696 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
697 for (i = 0; i < priv->rxrings; i++) {
698 pos = (i % 3) * 10;
699 sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
700 sw_w32_mask(0x3ff << pos, priv->rxringlen,
701 priv->r->dma_if_rx_ring_cntr(i));
702 }
703 }
704
705 /* Re-enable link change interrupt */
706 if (priv->family_id == RTL8390_FAMILY_ID) {
707 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
708 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
709 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
710 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
711
712 /* Restore notification settings: on RTL838x these bits are null */
713 sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
714 sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
715 }
716 }
717
718 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
719 {
720 int i;
721 struct ring_b *ring = priv->membase;
722
723 for (i = 0; i < priv->rxrings; i++)
724 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
725
726 for (i = 0; i < TXRINGS; i++)
727 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
728 }
729
/* Enable RX/TX DMA and the CPU port on RTL838x: configures buffer
 * truncation, unmasks interrupts, and forces the CPU port MAC up. */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
756
/* Enable RX/TX DMA and the CPU port on RTL839x, and add the CPU port to
 * the lookup-miss flooding portmask so unknown traffic reaches the CPU. */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
780
/* Enable RX/TX DMA and the CPU port on RTL930x/931x: program per-ring
 * sizes and fill counters, unmask interrupts, and force the CPU port up. */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (i = 0; i < priv->rxrings; i++) {
		/* Three 10-bit fields are packed per 32-bit register */
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins the unknown-unicast flooding portmask */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force the CPU port MAC up (per-family force-mode value) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
819
820 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
821 {
822 int i, j;
823
824 struct p_hdr *h;
825
826 for (i = 0; i < priv->rxrings; i++) {
827 for (j = 0; j < priv->rxringlen; j++) {
828 h = &ring->rx_header[i][j];
829 memset(h, 0, sizeof(struct p_hdr));
830 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
831 + i * priv->rxringlen * RING_BUFFER
832 + j * RING_BUFFER);
833 h->size = RING_BUFFER;
834 /* All rings owned by switch, last one wraps */
835 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
836 | (j == (priv->rxringlen - 1) ? WRAP : 0);
837 }
838 ring->c_rx[i] = 0;
839 }
840
841 for (i = 0; i < TXRINGS; i++) {
842 for (j = 0; j < TXRINGLEN; j++) {
843 h = &ring->tx_header[i][j];
844 memset(h, 0, sizeof(struct p_hdr));
845 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
846 + i * TXRINGLEN * RING_BUFFER
847 + j * RING_BUFFER);
848 h->size = RING_BUFFER;
849 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
850 }
851 /* Last header is wrapping around */
852 ring->tx_r[i][j-1] |= WRAP;
853 ring->c_tx[i] = 0;
854 }
855 }
856
/* Set up the RTL839x L2 notification ring (placed behind struct ring_b in
 * DMA memory), point the hardware at it and enable notification events. */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* All blocks owned by the switch (bit 0), last one wraps */
	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	/* NOTE(review): the ring base is written as a raw virtual address,
	 * unlike the RX/TX ring bases which are passed through KSEG1ADDR()
	 * in rtl838x_hw_ring_setup() — confirm the ASIC expects this. */
	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
876
877 static int rtl838x_eth_open(struct net_device *ndev)
878 {
879 unsigned long flags;
880 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
881 struct ring_b *ring = priv->membase;
882 int i, err;
883
884 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
885 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
886
887 spin_lock_irqsave(&priv->lock, flags);
888 rtl838x_hw_reset(priv);
889 rtl838x_setup_ring_buffer(priv, ring);
890 if (priv->family_id == RTL8390_FAMILY_ID) {
891 rtl839x_setup_notify_ring_buffer(priv);
892 /* Make sure the ring structure is visible to the ASIC */
893 mb();
894 flush_cache_all();
895 }
896
897 rtl838x_hw_ring_setup(priv);
898 err = request_irq(ndev->irq, priv->r->net_irq, IRQF_SHARED, ndev->name, ndev);
899 if (err) {
900 netdev_err(ndev, "%s: could not acquire interrupt: %d\n",
901 __func__, err);
902 return err;
903 }
904 phylink_start(priv->phylink);
905
906 for (i = 0; i < priv->rxrings; i++)
907 napi_enable(&priv->rx_qs[i].napi);
908
909 switch (priv->family_id) {
910 case RTL8380_FAMILY_ID:
911 rtl838x_hw_en_rxtx(priv);
912 /* Trap IGMP/MLD traffic to CPU-Port */
913 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
914 /* Flush learned FDB entries on link down of a port */
915 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
916 break;
917
918 case RTL8390_FAMILY_ID:
919 rtl839x_hw_en_rxtx(priv);
920 // Trap MLD and IGMP messages to CPU_PORT
921 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
922 /* Flush learned FDB entries on link down of a port */
923 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
924 break;
925
926 case RTL9300_FAMILY_ID:
927 rtl93xx_hw_en_rxtx(priv);
928 /* Flush learned FDB entries on link down of a port */
929 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
930 // Trap MLD and IGMP messages to CPU_PORT
931 sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
932 break;
933
934
935 case RTL9310_FAMILY_ID:
936 rtl93xx_hw_en_rxtx(priv);
937
938 // Trap MLD and IGMP messages to CPU_PORT
939 sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);
940
941 // Disable External CPU access to switch, clear EXT_CPU_EN
942 sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);
943
944 // Set PCIE_PWR_DOWN
945 sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
946 break;
947 }
948
949 netif_tx_start_all_queues(ndev);
950
951 spin_unlock_irqrestore(&priv->lock, flags);
952
953 return 0;
954 }
955
/*
 * Quiesce all traffic between the switch core and the CPU port and shut
 * down the packet DMA engine. Several of the register values written here
 * are undocumented SoC-specific constants; comments note only what the
 * code visibly does.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Per-family value for the CPU port's force-mode register to take
	 * the link down (0x6192C / 0x75 are undocumented magic values) */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	/* Per-family mask covering all interrupt status bits to acknowledge */
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports (RTL838x only) via an indirect table write;
	 * exact table semantics are undocumented */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache port by port, busy-waiting (no timeout) on
	 * the flush-pending bit: bit 26 on RTL838x, bit 28 on RTL839x */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts; status registers are cleared by
	 * writing 1s to them */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
1019
1020 static int rtl838x_eth_stop(struct net_device *ndev)
1021 {
1022 unsigned long flags;
1023 int i;
1024 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1025
1026 pr_info("in %s\n", __func__);
1027
1028 spin_lock_irqsave(&priv->lock, flags);
1029 phylink_stop(priv->phylink);
1030 rtl838x_hw_stop(priv);
1031 free_irq(ndev->irq, ndev);
1032
1033 for (i = 0; i < priv->rxrings; i++)
1034 napi_disable(&priv->rx_qs[i].napi);
1035
1036 netif_tx_stop_all_queues(ndev);
1037
1038 spin_unlock_irqrestore(&priv->lock, flags);
1039
1040 return 0;
1041 }
1042
1043 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1044 {
1045 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1046 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1047 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1048 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1049 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1050 }
1051 if (ndev->flags & IFF_ALLMULTI) {
1052 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1053 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1054 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1055 }
1056 if (ndev->flags & IFF_PROMISC) {
1057 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0);
1058 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1);
1059 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2);
1060 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1061 }
1062 }
1063
1064 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1065 {
1066 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1067
1068 if (priv->family_id == RTL8390_FAMILY_ID)
1069 return rtl839x_eth_set_multicast_list(ndev);
1070
1071 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1072 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1073 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1074 }
1075 if (ndev->flags & IFF_ALLMULTI)
1076 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1077 if (ndev->flags & IFF_PROMISC) {
1078 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0);
1079 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1080 }
1081 }
1082
1083 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1084 {
1085 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1086 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1087 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1088 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1089 }
1090 if (ndev->flags & IFF_ALLMULTI) {
1091 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1092 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1093 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1094 }
1095 if (ndev->flags & IFF_PROMISC) {
1096 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0);
1097 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1);
1098 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2);
1099 }
1100 }
1101
1102 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1103 {
1104 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1105 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1106 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1107 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1108 }
1109 if (ndev->flags & IFF_ALLMULTI) {
1110 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1111 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1112 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1113 }
1114 if (ndev->flags & IFF_PROMISC) {
1115 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0);
1116 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1);
1117 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2);
1118 }
1119 }
1120
/*
 * ndo_tx_timeout callback: recover from a stuck TX path by fully
 * stopping the hardware, re-initializing the DMA rings and re-enabling
 * RX/TX, then restarting the queue.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	/* NOTE(review): the 838x-specific enable is called for all families
	 * here, while open() selects rtl839x/rtl93xx variants per family —
	 * confirm this is correct on RTL839x/93xx */
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1135
/*
 * ndo_start_xmit callback: copy the skb into the TX ring buffer of queue
 * q and kick the DMA engine. If a trailing DSA tag is found it is
 * stripped and its destination port passed on via the CPU tag in the
 * packet header. Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the
 * switch still owns the next descriptor.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	/* NOTE(review): assumes len >= 4 when DSA is in use — confirm the
	 * DSA core never hands over shorter frames */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
			&& skb->data[len-3] < priv->cpu_port && skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len-3];
		skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	if (skb_padto(skb, len)) {
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor
	 * (ownership bit 0 cleared) */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		/* Fill the CPU tag (destination port, priority) */
		priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch (set ownership bit) */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		/* Descriptor still owned by the switch: back-pressure */
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
1228
1229 /*
1230 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1231 * so we do round-robin
1232 */
1233 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1234 struct net_device *sb_dev)
1235 {
1236 static u8 last = 0;
1237
1238 last++;
1239 return last % TXRINGS;
1240 }
1241
1242 /*
1243 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1244 */
1245 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1246 struct net_device *sb_dev)
1247 {
1248 if (skb->priority >= TC_PRIO_CONTROL)
1249 return 1;
1250 return 0;
1251 }
1252
1253 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1254 {
1255 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1256 struct ring_b *ring = priv->membase;
1257 struct sk_buff *skb;
1258 unsigned long flags;
1259 int i, len, work_done = 0;
1260 u8 *data, *skb_data;
1261 unsigned int val;
1262 u32 *last;
1263 struct p_hdr *h;
1264 bool dsa = netdev_uses_dsa(dev);
1265 struct dsa_tag tag;
1266
1267 spin_lock_irqsave(&priv->lock, flags);
1268 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1269 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1270
1271 do {
1272 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1273 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1274 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1275 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1276 }
1277 break;
1278 }
1279
1280 h = &ring->rx_header[r][ring->c_rx[r]];
1281 data = (u8 *)KSEG1ADDR(h->buf);
1282 len = h->len;
1283 if (!len)
1284 break;
1285 work_done++;
1286
1287 len -= 4; /* strip the CRC */
1288 /* Add 4 bytes for cpu_tag */
1289 if (dsa)
1290 len += 4;
1291
1292 skb = alloc_skb(len + 4, GFP_KERNEL);
1293 skb_reserve(skb, NET_IP_ALIGN);
1294
1295 if (likely(skb)) {
1296 /* BUG: Prevent bug on RTL838x SoCs*/
1297 if (priv->family_id == RTL8380_FAMILY_ID) {
1298 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1299 for (i = 0; i < priv->rxrings; i++) {
1300 /* Update each ring cnt */
1301 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1302 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1303 }
1304 }
1305
1306 skb_data = skb_put(skb, len);
1307 /* Make sure data is visible */
1308 mb();
1309 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1310 /* Overwrite CRC with cpu_tag */
1311 if (dsa) {
1312 priv->r->decode_tag(h, &tag);
1313 skb->data[len-4] = 0x80;
1314 skb->data[len-3] = tag.port;
1315 skb->data[len-2] = 0x10;
1316 skb->data[len-1] = 0x00;
1317 if (tag.l2_offloaded)
1318 skb->data[len-3] |= 0x40;
1319 }
1320
1321 if (tag.queue >= 0)
1322 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1323 tag.queue, len, tag.reason, tag.port);
1324
1325 skb->protocol = eth_type_trans(skb, dev);
1326 if (dev->features & NETIF_F_RXCSUM) {
1327 if (tag.crc_error)
1328 skb_checksum_none_assert(skb);
1329 else
1330 skb->ip_summed = CHECKSUM_UNNECESSARY;
1331 }
1332 dev->stats.rx_packets++;
1333 dev->stats.rx_bytes += len;
1334
1335 netif_receive_skb(skb);
1336 } else {
1337 if (net_ratelimit())
1338 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1339 dev->stats.rx_dropped++;
1340 }
1341
1342 /* Reset header structure */
1343 memset(h, 0, sizeof(struct p_hdr));
1344 h->buf = data;
1345 h->size = RING_BUFFER;
1346
1347 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1348 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1349 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1350 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1351 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1352
1353 // Update counters
1354 priv->r->update_cntr(r, 0);
1355
1356 spin_unlock_irqrestore(&priv->lock, flags);
1357 return work_done;
1358 }
1359
/*
 * NAPI poll handler for one RX ring: receive packets until the budget is
 * exhausted or the ring is empty, then complete NAPI and re-enable the
 * RX interrupt for this ring.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* NOTE(review): 0xf00ff unmasks more bits than just
			 * ring r's BIT(r + 8) — confirm this is intentional */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}
	return work_done;
}
1386
1387
1388 static void rtl838x_validate(struct phylink_config *config,
1389 unsigned long *supported,
1390 struct phylink_link_state *state)
1391 {
1392 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1393
1394 pr_debug("In %s\n", __func__);
1395
1396 if (!phy_interface_mode_is_rgmii(state->interface) &&
1397 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1398 state->interface != PHY_INTERFACE_MODE_MII &&
1399 state->interface != PHY_INTERFACE_MODE_REVMII &&
1400 state->interface != PHY_INTERFACE_MODE_GMII &&
1401 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1402 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1403 state->interface != PHY_INTERFACE_MODE_SGMII) {
1404 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1405 pr_err("Unsupported interface: %d\n", state->interface);
1406 return;
1407 }
1408
1409 /* Allow all the expected bits */
1410 phylink_set(mask, Autoneg);
1411 phylink_set_port_modes(mask);
1412 phylink_set(mask, Pause);
1413 phylink_set(mask, Asym_Pause);
1414
1415 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1416 * including Half duplex
1417 */
1418 if (state->interface != PHY_INTERFACE_MODE_MII &&
1419 state->interface != PHY_INTERFACE_MODE_REVMII) {
1420 phylink_set(mask, 1000baseT_Full);
1421 phylink_set(mask, 1000baseT_Half);
1422 }
1423
1424 phylink_set(mask, 10baseT_Half);
1425 phylink_set(mask, 10baseT_Full);
1426 phylink_set(mask, 100baseT_Half);
1427 phylink_set(mask, 100baseT_Full);
1428
1429 bitmap_and(supported, supported, mask,
1430 __ETHTOOL_LINK_MODE_MASK_NBITS);
1431 bitmap_and(state->advertising, state->advertising, mask,
1432 __ETHTOOL_LINK_MODE_MASK_NBITS);
1433 }
1434
1435
/*
 * phylink mac_config callback. Only called for the master device, i.e.
 * the CPU port, which needs no per-mode MAC configuration — intentionally
 * a no-op apart from the log message.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	pr_info("In %s, mode %x\n", __func__, mode);
}
1446
/*
 * phylink mac_an_restart callback: restart autonegotiation on the CPU
 * port by flapping the forced link state. Only implemented for RTL838x.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link.
	 * 0x6192D/0x6192F are undocumented force-mode values differing only
	 * in the low bits — presumably the link-enable bit(s); TODO confirm */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1462
1463 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1464 struct phylink_link_state *state)
1465 {
1466 u32 speed;
1467 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1468 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1469 int port = priv->cpu_port;
1470
1471 pr_debug("In %s\n", __func__);
1472
1473 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1474 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1475
1476 speed = priv->r->get_mac_link_spd_sts(port);
1477 switch (speed) {
1478 case 0:
1479 state->speed = SPEED_10;
1480 break;
1481 case 1:
1482 state->speed = SPEED_100;
1483 break;
1484 case 2:
1485 state->speed = SPEED_1000;
1486 break;
1487 case 5:
1488 state->speed = SPEED_2500;
1489 break;
1490 case 6:
1491 state->speed = SPEED_5000;
1492 break;
1493 case 4:
1494 state->speed = SPEED_10000;
1495 break;
1496 default:
1497 state->speed = SPEED_UNKNOWN;
1498 break;
1499 }
1500
1501 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1502 if (priv->r->get_mac_rx_pause_sts(port))
1503 state->pause |= MLO_PAUSE_RX;
1504 if (priv->r->get_mac_tx_pause_sts(port))
1505 state->pause |= MLO_PAUSE_TX;
1506 }
1507
1508 static void rtl838x_mac_link_down(struct phylink_config *config,
1509 unsigned int mode,
1510 phy_interface_t interface)
1511 {
1512 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1513 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1514
1515 pr_debug("In %s\n", __func__);
1516 /* Stop TX/RX to port */
1517 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1518 }
1519
1520 static void rtl838x_mac_link_up(struct phylink_config *config,
1521 struct phy_device *phy, unsigned int mode,
1522 phy_interface_t interface, int speed, int duplex,
1523 bool tx_pause, bool rx_pause)
1524 {
1525 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1526 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1527
1528 pr_debug("In %s\n", __func__);
1529 /* Restart TX/RX to port */
1530 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1531 }
1532
1533 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1534 {
1535 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1536 unsigned long flags;
1537
1538 spin_lock_irqsave(&priv->lock, flags);
1539 pr_debug("In %s\n", __func__);
1540 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1541 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1542
1543 if (priv->family_id == RTL8380_FAMILY_ID) {
1544 /* 2 more registers, ALE/MAC block */
1545 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1546 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1547 (RTL838X_MAC_ALE + 4));
1548
1549 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1550 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1551 RTL838X_MAC2 + 4);
1552 }
1553 spin_unlock_irqrestore(&priv->lock, flags);
1554 }
1555
1556 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1557 {
1558 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1559 const struct sockaddr *addr = p;
1560 u8 *mac = (u8 *) (addr->sa_data);
1561
1562 if (!is_valid_ether_addr(addr->sa_data))
1563 return -EADDRNOTAVAIL;
1564
1565 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1566 rtl838x_set_mac_hw(dev, mac);
1567
1568 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1569 return 0;
1570 }
1571
/* RTL839x MAC init: currently a stub, always returns 0. */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	// We will need to set-up EEE and the egress-rate limitation
	return 0;
}
1577
1578 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1579 {
1580 int i;
1581
1582 if (priv->family_id == 0x8390)
1583 return rtl8390_init_mac(priv);
1584
1585 // At present we do not know how to set up EEE on any other SoC than RTL8380
1586 if (priv->family_id != 0x8380)
1587 return 0;
1588
1589 pr_info("%s\n", __func__);
1590 /* fix timer for EEE */
1591 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1592 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1593
1594 /* Init VLAN. TODO: Understand what is being done, here */
1595 if (priv->id == 0x8382) {
1596 for (i = 0; i <= 28; i++)
1597 sw_w32(0, 0xd57c + i * 0x80);
1598 }
1599 if (priv->id == 0x8380) {
1600 for (i = 8; i <= 28; i++)
1601 sw_w32(0, 0xd57c + i * 0x80);
1602 }
1603 return 0;
1604 }
1605
1606 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1607 struct ethtool_link_ksettings *cmd)
1608 {
1609 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1610
1611 pr_debug("%s called\n", __func__);
1612 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1613 }
1614
1615 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1616 const struct ethtool_link_ksettings *cmd)
1617 {
1618 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1619
1620 pr_debug("%s called\n", __func__);
1621 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1622 }
1623
1624 static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1625 {
1626 u32 val;
1627 int err;
1628 struct rtl838x_eth_priv *priv = bus->priv;
1629
1630 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1631 return rtl838x_read_sds_phy(mii_id, regnum);
1632 err = rtl838x_read_phy(mii_id, 0, regnum, &val);
1633 if (err)
1634 return err;
1635 return val;
1636 }
1637
1638 static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1639 {
1640 u32 val;
1641 int err;
1642 struct rtl838x_eth_priv *priv = bus->priv;
1643
1644 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1645 return rtl839x_read_sds_phy(mii_id, regnum);
1646
1647 err = rtl839x_read_phy(mii_id, 0, regnum, &val);
1648 if (err)
1649 return err;
1650 return val;
1651 }
1652
1653 static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1654 {
1655 u32 val;
1656 int err;
1657 struct rtl838x_eth_priv *priv = bus->priv;
1658
1659 if (priv->phy_is_internal[mii_id])
1660 return rtl930x_read_sds_phy(priv->sds_id[mii_id], 0, regnum);
1661
1662 if (regnum & MII_ADDR_C45) {
1663 regnum &= ~MII_ADDR_C45;
1664 err = rtl930x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
1665 pr_debug("MMD: %d register %d read %x, err %d\n", mii_id, regnum & 0xffff, val, err);
1666 } else {
1667 err = rtl930x_read_phy(mii_id, 0, regnum, &val);
1668 pr_debug("PHY: %d register %d read %x, err %d\n", mii_id, regnum, val, err);
1669 }
1670 if (err)
1671 return err;
1672 return val;
1673 }
1674
1675
1676 static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1677 {
1678 u32 val;
1679 int err, v;
1680 struct rtl838x_eth_priv *priv = bus->priv;
1681
1682 pr_debug("%s: In here, port %d\n", __func__, mii_id);
1683 if (priv->sds_id[mii_id] >= 0 && mii_id >= 52) {
1684 v = rtl931x_read_sds_phy(priv->sds_id[mii_id], 0, regnum);
1685 if (v < 0) {
1686 err = v;
1687 } else {
1688 err = 0;
1689 val = v;
1690 }
1691 } else {
1692 if (regnum & MII_ADDR_C45) {
1693 regnum &= ~MII_ADDR_C45;
1694 err = rtl931x_read_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, &val);
1695 } else {
1696 err = rtl931x_read_phy(mii_id, 0, regnum, &val);
1697 }
1698 pr_debug("%s: phy %d, register %d value %x\n", __func__, mii_id, regnum, val);
1699 }
1700
1701 if (err)
1702 return err;
1703 return val;
1704 }
1705
1706 static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
1707 int regnum, u16 value)
1708 {
1709 u32 offset = 0;
1710 struct rtl838x_eth_priv *priv = bus->priv;
1711
1712 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1713 if (mii_id == 26)
1714 offset = 0x100;
1715 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1716 return 0;
1717 }
1718 return rtl838x_write_phy(mii_id, 0, regnum, value);
1719 }
1720
1721 static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
1722 int regnum, u16 value)
1723 {
1724 struct rtl838x_eth_priv *priv = bus->priv;
1725
1726 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1727 return rtl839x_write_sds_phy(mii_id, regnum, value);
1728
1729 return rtl839x_write_phy(mii_id, 0, regnum, value);
1730 }
1731
1732 static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
1733 int regnum, u16 value)
1734 {
1735 struct rtl838x_eth_priv *priv = bus->priv;
1736
1737 if (priv->sds_id[mii_id] >= 0)
1738 return rtl930x_write_sds_phy(priv->sds_id[mii_id], 0, regnum, value);
1739
1740 if (regnum & MII_ADDR_C45) {
1741 regnum &= ~MII_ADDR_C45;
1742 return rtl930x_write_mmd_phy(mii_id, regnum >> 16, regnum & 0xffff, value);
1743 }
1744
1745 return rtl930x_write_phy(mii_id, 0, regnum, value);
1746 }
1747
1748 static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
1749 int regnum, u16 value)
1750 {
1751 struct rtl838x_eth_priv *priv = bus->priv;
1752
1753 if (priv->sds_id[mii_id] >= 0)
1754 return rtl931x_write_sds_phy(priv->sds_id[mii_id], 0, regnum, value);
1755
1756 return rtl931x_write_phy(mii_id, 0, regnum, value);
1757 }
1758
/*
 * mii_bus reset callback for RTL838x: stop the MAC's automatic PHY
 * polling and hand PHY control to the SoC so the bus can be configured.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC (bit 15 of the global SMI control) */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1771
/*
 * mii_bus reset callback for RTL839x: intentionally a no-op. The
 * configuration below is known not to work and was previously dead code
 * after an early return; it is kept under #if 0 to preserve the intent
 * without generating unreachable code.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
#if 0
	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
#endif
	return 0;
}
1787
/*
 * mii_bus reset callback for RTL930x: program the port-to-PHY-address
 * mapping, the port-to-SMI-bus assignment, per-bus polling and clause-45
 * mode, and the MAC type of the high-speed ports 24-27.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[2];
	u32 poll_ctrl = 0;

	// Mapping of port to phy-addresses on an SMI bus
	poll_sel[0] = poll_sel[1] = 0;
	for (i = 0; i < 28; i++) {
		/* 5 address bits per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
			    RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);

		/* 2 bus-select bits per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
	}

	// Configure which SMI bus is behind which port number
	sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
	sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);

	// Enable polling on the respective SMI busses
	sw_w32_mask(0, poll_ctrl, RTL930X_SMI_GLB_CTRL);

	// Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
	for (i = 0; i < 4; i++)
		if (priv->smi_bus_isc45[i])
			c45_mask |= BIT(i + 16);

	pr_info("c45_mask: %08x\n", c45_mask);
	sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);

	// Ports 24 to 27 are 2.5 or 10Gig, set this type (1) or (0) for internal SerDes
	for (i = 24; i < 28; i++) {
		pos = (i - 24) * 3 + 12;
		if (priv->phy_is_internal[i])
			sw_w32_mask(0x7 << pos, 0 << pos, RTL930X_SMI_MAC_TYPE_CTRL);
		else
			sw_w32_mask(0x7 << pos, 1 << pos, RTL930X_SMI_MAC_TYPE_CTRL);
	}

	// TODO: Set up RTL9300_SMI_10GPHY_POLLING_SEL_0 for Aquantia PHYs on e.g. XGS 1250

	return 0;
}
1837
/*
 * mii_bus reset callback for RTL931x: disable port polling, program the
 * port-to-PHY-address mapping and port-to-SMI-bus assignment for all 56
 * ports, enable MDC on the busses in use, and set clause-45 polling per
 * bus.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	// Disable port polling for configuration purposes
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	// Mapping of port to phy-addresses on an SMI bus
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (i = 0; i < 56; i++) {
		/* 5 address bits per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
		/* 2 bus-select bits per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		mdc_on[priv->smi_bus[i]] = true;
	}

	// Configure which SMI bus is behind which port number
	for (i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	// Configure which SMI busses
	/* Note: this first pr_info logs c45_mask before it is computed, i.e.
	 * the "was" value (0) */
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (i = 0; i < 4; i++) {
		// bus is polled in c45
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); // Std. C45, non-standard is 0x3
		// Enable bus access via MDC
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	*/
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
1896
/*
 * One-time RTL931x switch-core initialization: trigger the self-init of
 * the encapsulation, MIB, ACL (PIE) and ALE memories, busy-waiting
 * (without timeout) for each to complete, then enable ESD auto recovery
 * and configure the SPI controller. Always returns 0.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	// Initialize Encapsulation memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	// Initialize Management Information Base memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	// Initialize ACL (PIE) memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	// Initialize ALE memory and wait until finished
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	// Enable ESD auto recovery
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	// Init SPI, is this for thermal control or what?
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
1932
1933 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
1934 {
1935 struct device_node *mii_np, *dn;
1936 u32 pn;
1937 int ret;
1938
1939 pr_debug("%s called\n", __func__);
1940 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
1941
1942 if (!mii_np) {
1943 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
1944 return -ENODEV;
1945 }
1946
1947 if (!of_device_is_available(mii_np)) {
1948 ret = -ENODEV;
1949 goto err_put_node;
1950 }
1951
1952 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
1953 if (!priv->mii_bus) {
1954 ret = -ENOMEM;
1955 goto err_put_node;
1956 }
1957
1958 switch(priv->family_id) {
1959 case RTL8380_FAMILY_ID:
1960 priv->mii_bus->name = "rtl838x-eth-mdio";
1961 priv->mii_bus->read = rtl838x_mdio_read;
1962 priv->mii_bus->write = rtl838x_mdio_write;
1963 priv->mii_bus->reset = rtl838x_mdio_reset;
1964 break;
1965 case RTL8390_FAMILY_ID:
1966 priv->mii_bus->name = "rtl839x-eth-mdio";
1967 priv->mii_bus->read = rtl839x_mdio_read;
1968 priv->mii_bus->write = rtl839x_mdio_write;
1969 priv->mii_bus->reset = rtl839x_mdio_reset;
1970 break;
1971 case RTL9300_FAMILY_ID:
1972 priv->mii_bus->name = "rtl930x-eth-mdio";
1973 priv->mii_bus->read = rtl930x_mdio_read;
1974 priv->mii_bus->write = rtl930x_mdio_write;
1975 priv->mii_bus->reset = rtl930x_mdio_reset;
1976 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1977 break;
1978 case RTL9310_FAMILY_ID:
1979 priv->mii_bus->name = "rtl931x-eth-mdio";
1980 priv->mii_bus->read = rtl931x_mdio_read;
1981 priv->mii_bus->write = rtl931x_mdio_write;
1982 priv->mii_bus->reset = rtl931x_mdio_reset;
1983 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1984 break;
1985 }
1986 priv->mii_bus->priv = priv;
1987 priv->mii_bus->parent = &priv->pdev->dev;
1988
1989 for_each_node_by_name(dn, "ethernet-phy") {
1990 u32 smi_addr[2];
1991
1992 if (of_property_read_u32(dn, "reg", &pn))
1993 continue;
1994
1995 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
1996 smi_addr[0] = 0;
1997 smi_addr[1] = pn;
1998 }
1999
2000 if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
2001 priv->sds_id[pn] = -1;
2002 else {
2003 pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
2004 }
2005
2006 if (pn < MAX_PORTS) {
2007 priv->smi_bus[pn] = smi_addr[0];
2008 priv->smi_addr[pn] = smi_addr[1];
2009 } else {
2010 pr_err("%s: illegal port number %d\n", __func__, pn);
2011 }
2012
2013 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
2014 priv->smi_bus_isc45[smi_addr[0]] = true;
2015
2016 if (of_property_read_bool(dn, "phy-is-integrated")) {
2017 priv->phy_is_internal[pn] = true;
2018 }
2019 }
2020
2021 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
2022 ret = of_mdiobus_register(priv->mii_bus, mii_np);
2023
2024 err_put_node:
2025 of_node_put(mii_np);
2026 return ret;
2027 }
2028
2029 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
2030 {
2031 pr_debug("%s called\n", __func__);
2032 if (!priv->mii_bus)
2033 return 0;
2034
2035 mdiobus_unregister(priv->mii_bus);
2036 mdiobus_free(priv->mii_bus);
2037
2038 return 0;
2039 }
2040
/* ndo_fix_features callback: this driver imposes no constraints on the
 * requested feature set, so the flags are passed through unmodified.
 */
static netdev_features_t rtl838x_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
2046
2047 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
2048 {
2049 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2050
2051 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2052 if (!(features & NETIF_F_RXCSUM))
2053 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2054 else
2055 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2056 }
2057
2058 return 0;
2059 }
2060
2061 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
2062 {
2063 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2064
2065 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2066 if (!(features & NETIF_F_RXCSUM))
2067 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2068 else
2069 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2070 }
2071
2072 return 0;
2073 }
2074
/* net_device_ops for the RTL838x family: shared open/stop/xmit paths,
 * family-specific multicast filtering and the 83xx TX queue selector.
 */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2088
/* net_device_ops for the RTL839x family: identical to the 838x set
 * except for the family-specific multicast list handler.
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2102
/* net_device_ops for the RTL930x family: uses the 93xx queue selector
 * and feature handler (RXCSUM bit differs from the 83xx families).
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2116
/* net_device_ops for the RTL931x family.
 * NOTE(review): unlike the other families this set installs no
 * .ndo_setup_tc — presumably TC offload is not (yet) supported on
 * RTL931x; confirm this omission is intentional.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
2129
/* phylink MAC operations shared by all supported SoC families; the
 * handlers themselves dispatch on priv->family_id where needed.
 */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
2138
/* ethtool operations: only link settings are exposed for now */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
2143
2144 static int __init rtl838x_eth_probe(struct platform_device *pdev)
2145 {
2146 struct net_device *dev;
2147 struct device_node *dn = pdev->dev.of_node;
2148 struct rtl838x_eth_priv *priv;
2149 struct resource *res, *mem;
2150 phy_interface_t phy_mode;
2151 struct phylink *phylink;
2152 int err = 0, i, rxrings, rxringlen;
2153 struct ring_b *ring;
2154
2155 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2156 (u32)pdev, (u32)(&(pdev->dev)));
2157
2158 if (!dn) {
2159 dev_err(&pdev->dev, "No DT found\n");
2160 return -EINVAL;
2161 }
2162
2163 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2164 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2165 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2166 rxringlen = MAX_ENTRIES / rxrings;
2167 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2168
2169 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2170 if (!dev) {
2171 err = -ENOMEM;
2172 goto err_free;
2173 }
2174 SET_NETDEV_DEV(dev, &pdev->dev);
2175 priv = netdev_priv(dev);
2176
2177 /* obtain buffer memory space */
2178 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2179 if (res) {
2180 mem = devm_request_mem_region(&pdev->dev, res->start,
2181 resource_size(res), res->name);
2182 if (!mem) {
2183 dev_err(&pdev->dev, "cannot request memory space\n");
2184 err = -ENXIO;
2185 goto err_free;
2186 }
2187
2188 dev->mem_start = mem->start;
2189 dev->mem_end = mem->end;
2190 } else {
2191 dev_err(&pdev->dev, "cannot request IO resource\n");
2192 err = -ENXIO;
2193 goto err_free;
2194 }
2195
2196 /* Allocate buffer memory */
2197 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
2198 + sizeof(struct ring_b) + sizeof(struct notify_b),
2199 (void *)&dev->mem_start, GFP_KERNEL);
2200 if (!priv->membase) {
2201 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2202 err = -ENOMEM;
2203 goto err_free;
2204 }
2205
2206 // Allocate ring-buffer space at the end of the allocated memory
2207 ring = priv->membase;
2208 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2209
2210 spin_lock_init(&priv->lock);
2211
2212 /* obtain device IRQ number */
2213 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2214 if (!res) {
2215 dev_err(&pdev->dev, "cannot obtain IRQ, using default 24\n");
2216 dev->irq = 24;
2217 } else {
2218 dev->irq = res->start;
2219 }
2220 dev->ethtool_ops = &rtl838x_ethtool_ops;
2221 dev->min_mtu = ETH_ZLEN;
2222 dev->max_mtu = 1536;
2223 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2224 dev->hw_features = NETIF_F_RXCSUM;
2225
2226 priv->id = soc_info.id;
2227 priv->family_id = soc_info.family;
2228 if (priv->id) {
2229 pr_info("Found SoC ID: %4x: %s, family %x\n",
2230 priv->id, soc_info.name, priv->family_id);
2231 } else {
2232 pr_err("Unknown chip id (%04x)\n", priv->id);
2233 return -ENODEV;
2234 }
2235
2236 switch (priv->family_id) {
2237 case RTL8380_FAMILY_ID:
2238 priv->cpu_port = RTL838X_CPU_PORT;
2239 priv->r = &rtl838x_reg;
2240 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2241 break;
2242 case RTL8390_FAMILY_ID:
2243 priv->cpu_port = RTL839X_CPU_PORT;
2244 priv->r = &rtl839x_reg;
2245 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2246 break;
2247 case RTL9300_FAMILY_ID:
2248 priv->cpu_port = RTL930X_CPU_PORT;
2249 priv->r = &rtl930x_reg;
2250 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2251 break;
2252 case RTL9310_FAMILY_ID:
2253 priv->cpu_port = RTL931X_CPU_PORT;
2254 priv->r = &rtl931x_reg;
2255 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2256 rtl931x_chip_init(priv);
2257 break;
2258 default:
2259 pr_err("Unknown SoC family\n");
2260 return -ENODEV;
2261 }
2262 priv->rxringlen = rxringlen;
2263 priv->rxrings = rxrings;
2264
2265 rtl8380_init_mac(priv);
2266
2267 /* try to get mac address in the following order:
2268 * 1) from device tree data
2269 * 2) from internal registers set by bootloader
2270 */
2271 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2272 if (is_valid_ether_addr(dev->dev_addr)) {
2273 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2274 } else {
2275 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2276 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2277 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2278 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2279 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2280 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2281 }
2282 /* if the address is invalid, use a random value */
2283 if (!is_valid_ether_addr(dev->dev_addr)) {
2284 struct sockaddr sa = { AF_UNSPEC };
2285
2286 netdev_warn(dev, "Invalid MAC address, using random\n");
2287 eth_hw_addr_random(dev);
2288 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2289 if (rtl838x_set_mac_address(dev, &sa))
2290 netdev_warn(dev, "Failed to set MAC address.\n");
2291 }
2292 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2293 sw_r32(priv->r->mac + 4));
2294 strcpy(dev->name, "eth%d");
2295 priv->pdev = pdev;
2296 priv->netdev = dev;
2297
2298 err = rtl838x_mdio_init(priv);
2299 if (err)
2300 goto err_free;
2301
2302 err = register_netdev(dev);
2303 if (err)
2304 goto err_free;
2305
2306 for (i = 0; i < priv->rxrings; i++) {
2307 priv->rx_qs[i].id = i;
2308 priv->rx_qs[i].priv = priv;
2309 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2310 }
2311
2312 platform_set_drvdata(pdev, dev);
2313
2314 phy_mode = PHY_INTERFACE_MODE_NA;
2315 err = of_get_phy_mode(dn, &phy_mode);
2316 if (err < 0) {
2317 dev_err(&pdev->dev, "incorrect phy-mode\n");
2318 err = -EINVAL;
2319 goto err_free;
2320 }
2321 priv->phylink_config.dev = &dev->dev;
2322 priv->phylink_config.type = PHYLINK_NETDEV;
2323
2324 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2325 phy_mode, &rtl838x_phylink_ops);
2326 if (IS_ERR(phylink)) {
2327 err = PTR_ERR(phylink);
2328 goto err_free;
2329 }
2330 priv->phylink = phylink;
2331
2332 return 0;
2333
2334 err_free:
2335 pr_err("Error setting up netdev, freeing it again.\n");
2336 free_netdev(dev);
2337 return err;
2338 }
2339
2340 static int rtl838x_eth_remove(struct platform_device *pdev)
2341 {
2342 struct net_device *dev = platform_get_drvdata(pdev);
2343 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2344 int i;
2345
2346 if (dev) {
2347 pr_info("Removing platform driver for rtl838x-eth\n");
2348 rtl838x_mdio_remove(priv);
2349 rtl838x_hw_stop(priv);
2350
2351 netif_tx_stop_all_queues(dev);
2352
2353 for (i = 0; i < priv->rxrings; i++)
2354 netif_napi_del(&priv->rx_qs[i].napi);
2355
2356 unregister_netdev(dev);
2357 free_netdev(dev);
2358 }
2359 return 0;
2360 }
2361
/* Device-tree match table: one compatible string covers all families;
 * the actual SoC is identified at probe time via soc_info.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2367
/* Platform driver glue.
 * NOTE(review): .probe points at a function declared __init above; with
 * module_platform_driver() the probe may run after init memory is freed —
 * the __init annotation on the probe should be dropped.
 */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};

module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");