06de0ada2aa4ee1a0c2b3142c9969e64f4cf913e
[openwrt/staging/jow.git] / target / linux / realtek / files-5.15 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* linux/drivers/net/ethernet/rtl838x_eth.c
3 * Copyright (C) 2020 B. Koblitz
4 */
5
6 #include <linux/dma-mapping.h>
7 #include <linux/etherdevice.h>
8 #include <linux/interrupt.h>
9 #include <linux/io.h>
10 #include <linux/platform_device.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/of.h>
14 #include <linux/of_net.h>
15 #include <linux/of_mdio.h>
16 #include <linux/module.h>
17 #include <linux/phylink.h>
18 #include <linux/pkt_sched.h>
19 #include <net/dsa.h>
20 #include <net/switchdev.h>
21 #include <asm/cacheflush.h>
22
23 #include <asm/mach-rtl838x/mach-rtl83xx.h>
24 #include "rtl838x_eth.h"
25
26 extern struct rtl83xx_soc_info soc_info;
27
28 /* Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
29 * The ring is assigned by switch based on packet/port priortity
30 * Maximum number of TX rings is 2, Ring 2 being the high priority
31 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
32 * for an RX ring, MAX_ENTRIES the maximum number of entries
33 * available in total for all queues.
34 */
#define MAX_RXRINGS 32			/* Max RX rings: 8 on RTL83xx, 32 on RTL93xx */
#define MAX_RXLEN 300			/* Max descriptors per RX ring */
#define MAX_ENTRIES (300 * 8)		/* Max descriptors over all RX queues */
#define TXRINGS 2			/* TX rings; ring 2 is high prio on RTL93xx */
#define TXRINGLEN 160			/* Descriptors per TX ring */
#define NOTIFY_EVENTS 10		/* L2 FDB events per notification block */
#define NOTIFY_BLOCKS 10		/* Blocks in the RTL839x notification ring */
#define TX_EN 0x8			/* DMA_IF_CTRL TX enable (RTL83xx) */
#define RX_EN 0x4			/* DMA_IF_CTRL RX enable (RTL83xx) */
#define TX_EN_93XX 0x20			/* DMA_IF_CTRL TX enable (RTL93xx) */
#define RX_EN_93XX 0x10			/* DMA_IF_CTRL RX enable (RTL93xx) */
#define TX_DO 0x2			/* Kick TX DMA */
#define WRAP 0x2			/* Descriptor bit: last entry, wrap around */
#define MAX_PORTS 57
#define MAX_SMI_BUSSES 4

#define RING_BUFFER 1600		/* Bytes of packet buffer per descriptor */
52
/* Packet descriptor header shared between driver and switch DMA engine.
 * Laid out to match the ASIC's expectations, hence packed/byte-aligned.
 */
struct p_hdr {
	uint8_t *buf;			/* packet data; set to a KSEG1 (uncached) address */
	uint16_t reserved;
	uint16_t size;			/* buffer size */
	uint16_t offset;
	uint16_t len;			/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];		/* CPU tag: dest port mask, prio, RX reason... */
} __packed __aligned(1);
62
/* One L2 FDB notification event as written by the RTL8390 ASIC
 * (consumed in rtl839x_l2_notification_handler()).
 */
struct n_event {
	uint32_t type:2;		/* non-zero is treated as "add" by the handler */
	uint32_t fidVid:12;
	uint64_t mac:48;		/* MAC address the event refers to */
	uint32_t slp:6;
	uint32_t valid:1;		/* event slot contains a valid entry */
	uint32_t reserved:27;
} __packed __aligned(1);
71
/* All DMA ring state, placed at the start of priv->membase:
 * descriptor words, packet headers, per-ring consumer indices and the
 * statically sized TX packet buffer area. RX buffer space is allocated
 * separately and pointed to by rx_space.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor words (bit 0 = ASIC owns) */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor words */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];			/* next RX descriptor to consume */
	uint32_t c_tx[TXRINGS];				/* next TX descriptor to use */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];
	uint8_t *rx_space;				/* RX packet buffers */
};
82
/* One block of L2 FDB notification events (RTL839x) */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};

/* Notification ring, placed directly after struct ring_b in priv->membase.
 * ring[] holds the per-block descriptor words (bit 0 = ASIC owns).
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
93
94 static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
95 {
96 /* cpu_tag[0] is reserved on the RTL83XX SoCs */
97 h->cpu_tag[1] = 0x0400; /* BIT 10: RTL8380_CPU_TAG */
98 h->cpu_tag[2] = 0x0200; /* Set only AS_DPM, to enable DPM settings below */
99 h->cpu_tag[3] = 0x0000;
100 h->cpu_tag[4] = BIT(dest_port) >> 16;
101 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
102
103 /* Set internal priority (PRI) and enable (AS_PRI) */
104 if (prio >= 0)
105 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
106 }
107
108 static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
109 {
110 /* cpu_tag[0] is reserved on the RTL83XX SoCs */
111 h->cpu_tag[1] = 0x0100; /* RTL8390_CPU_TAG marker */
112 h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
113 h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
114 /* h->cpu_tag[1] |= BIT(1) | BIT(0); */ /* Bypass filter 1/2 */
115 if (dest_port >= 32) {
116 dest_port -= 32;
117 h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
118 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
119 } else {
120 h->cpu_tag[4] = BIT(dest_port) >> 16;
121 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
122 }
123
124 /* Set internal priority (PRI) and enable (AS_PRI) */
125 if (prio >= 0)
126 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
127 }
128
129 static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
130 {
131 h->cpu_tag[0] = 0x8000; /* CPU tag marker */
132 h->cpu_tag[1] = h->cpu_tag[2] = 0;
133 h->cpu_tag[3] = 0;
134 h->cpu_tag[4] = 0;
135 h->cpu_tag[5] = 0;
136 h->cpu_tag[6] = BIT(dest_port) >> 16;
137 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
138
139 /* Enable (AS_QID) and set priority queue (QID) */
140 if (prio >= 0)
141 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
142 }
143
144 static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
145 {
146 h->cpu_tag[0] = 0x8000; /* CPU tag marker */
147 h->cpu_tag[1] = h->cpu_tag[2] = 0;
148 h->cpu_tag[3] = 0;
149 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
150 if (dest_port >= 32) {
151 dest_port -= 32;
152 h->cpu_tag[4] = BIT(dest_port) >> 16;
153 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
154 } else {
155 h->cpu_tag[6] = BIT(dest_port) >> 16;
156 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
157 }
158
159 /* Enable (AS_QID) and set priority queue (QID) */
160 if (prio >= 0)
161 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
162 }
163
164 // Currently unused
165 // static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
166 // {
167 // h->cpu_tag[2] |= BIT(4); /* Enable VLAN forwarding offload */
168 // h->cpu_tag[2] |= (vlan >> 8) & 0xf;
169 // h->cpu_tag[3] |= (vlan & 0xff) << 8;
170 // }
171
/* Per-RX-ring NAPI context; one instance per hardware RX ring */
struct rtl838x_rx_q {
	int id;				/* ring index */
	struct rtl838x_eth_priv *priv;
	struct napi_struct napi;
};
177
/* Driver private state, allocated per device via netdev_priv() */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA ring memory (struct ring_b, then notify_b) */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* RTL8380/8390/9300/9310_FAMILY_ID */
	const struct rtl838x_eth_reg *r;	/* SoC-specific register map/callbacks */
	u8 cpu_port;
	u32 lastEvent;			/* next L2 notification ring entry to consume */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* descriptors per RX ring */
	u8 smi_bus[MAX_PORTS];
	u8 smi_addr[MAX_PORTS];
	u32 sds_id[MAX_PORTS];
	bool smi_bus_isc45[MAX_SMI_BUSSES];
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
201
/* PHY / SerDes / MMD access helpers implemented outside this file
 * (presumably in the realtek target's PHY driver — confirm location).
 */
extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
214
215 /* On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
216 * the rings. Writing x into these registers substracts x from its content.
217 * When the content reaches the ring size, the ASIC no longer adds
218 * packets to this receive queue.
219 */
/* No-op: RX ring fill counters do not exist on RTL838x */
void rtl838x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL838x SoCs */
}
224
/* No-op: RX ring fill counters do not exist on RTL839x */
void rtl839x_update_cntr(int r, int released)
{
	/* This feature is not available on RTL839x SoCs */
}
229
/* Tell the RTL9300 that 'released' descriptors of RX ring r were handed
 * back: each ring has a 10-bit fill counter, three rings per 32-bit
 * register; writing x to the field subtracts x from it (see comment above).
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* current fill level of this ring */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this second write stores the old, unshifted fill level
	 * over the whole register right after the masked write above — looks
	 * redundant or misplaced; verify against the RTL9300 register spec.
	 */
	sw_w32(v, reg);
}
241
/* RTL9310 variant of rtl930x_update_cntr(); same counter layout */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;	/* current fill level of this ring */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): same suspicious trailing write as in
	 * rtl930x_update_cntr() — verify against the RTL9310 register spec.
	 */
	sw_w32(v, reg);
}
252
/* Decoded CPU RX tag, filled by the per-SoC decode_tag() callbacks */
struct dsa_tag {
	u8 reason;		/* RX trap reason code */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* ingress switch port */
	u8 l2_offloaded;	/* 1 if forwarding was handled in hardware */
	u8 prio;
	bool crc_error;
};
261
262 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
263 {
264 /* cpu_tag[0] is reserved. Fields are off-by-one */
265 t->reason = h->cpu_tag[4] & 0xf;
266 t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
267 t->port = h->cpu_tag[1] & 0x1f;
268 t->crc_error = t->reason == 13;
269
270 pr_debug("Reason: %d\n", t->reason);
271 if (t->reason != 6) /* NIC_RX_REASON_SPECIAL_TRAP */
272 t->l2_offloaded = 1;
273 else
274 t->l2_offloaded = 0;
275
276 return t->l2_offloaded;
277 }
278
279 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
280 {
281 /* cpu_tag[0] is reserved. Fields are off-by-one */
282 t->reason = h->cpu_tag[5] & 0x1f;
283 t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
284 t->port = h->cpu_tag[1] & 0x3f;
285 t->crc_error = h->cpu_tag[4] & BIT(6);
286
287 pr_debug("Reason: %d\n", t->reason);
288 if ((t->reason >= 7 && t->reason <= 13) || /* NIC_RX_REASON_RMA */
289 (t->reason >= 23 && t->reason <= 25)) /* NIC_RX_REASON_SPECIAL_TRAP */
290 t->l2_offloaded = 0;
291 else
292 t->l2_offloaded = 1;
293
294 return t->l2_offloaded;
295 }
296
297 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
298 {
299 t->reason = h->cpu_tag[7] & 0x3f;
300 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
301 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
302 t->crc_error = h->cpu_tag[1] & BIT(6);
303
304 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
305 if (t->reason >= 19 && t->reason <= 27)
306 t->l2_offloaded = 0;
307 else
308 t->l2_offloaded = 1;
309
310 return t->l2_offloaded;
311 }
312
313 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
314 {
315 t->reason = h->cpu_tag[7] & 0x3f;
316 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
317 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
318 t->crc_error = h->cpu_tag[1] & BIT(6);
319
320 if (t->reason != 63)
321 pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
322 if (t->reason >= 19 && t->reason <= 27) /* NIC_RX_REASON_RMA */
323 t->l2_offloaded = 0;
324 else
325 t->l2_offloaded = 1;
326
327 return t->l2_offloaded;
328 }
329
330 /* Discard the RX ring-buffers, called as part of the net-ISR
331 * when the buffer runs over
332 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	/* Scans every ring regardless of 'status' (the per-ring overrun bits);
	 * each CPU-owned descriptor up to the ASIC's current position is reset
	 * to a fresh RING_BUFFER-sized buffer and handed back to the switch.
	 */
	for (int r = 0; r < priv->rxrings; r++) {
		struct ring_b *ring = priv->membase;
		struct p_hdr *h;
		u32 *last;

		pr_debug("In %s working on r: %d\n", __func__, r);
		/* Uncached view of the descriptor the ASIC will fill next */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Bit 0 set: descriptor is owned by the switch, stop here */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         r * priv->rxringlen * RING_BUFFER +
			                         ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();

			/* Give the descriptor back; last entry wraps.
			 * NOTE(review): the 0x1 in the else-arm is redundant (bit 0 is
			 * already OR'ed in unconditionally) — compare the 0 used in
			 * rtl838x_setup_ring_buffer().
			 */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
			                                                     WRAP :
			                                                     0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
362
/* Deferred FDB sync job; macs[] is a zero-terminated list of events,
 * bit 63 of each entry encodes add (1) vs. delete (0).
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 for the zero terminator */
};
368
369 void rtl838x_fdb_sync(struct work_struct *work)
370 {
371 const struct fdb_update_work *uw = container_of(work, struct fdb_update_work, work);
372
373 for (int i = 0; uw->macs[i]; i++) {
374 struct switchdev_notifier_fdb_info info;
375 u8 addr[ETH_ALEN];
376 int action;
377
378 action = (uw->macs[i] & (1ULL << 63)) ?
379 SWITCHDEV_FDB_ADD_TO_BRIDGE :
380 SWITCHDEV_FDB_DEL_TO_BRIDGE;
381 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
382 info.addr = &addr[0];
383 info.vid = 0;
384 info.offloaded = 1;
385 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
386 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
387 }
388 kfree(work);
389 }
390
391 static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
392 {
393 struct notify_b *nb = priv->membase + sizeof(struct ring_b);
394 u32 e = priv->lastEvent;
395
396 while (!(nb->ring[e] & 1)) {
397 struct fdb_update_work *w;
398 struct n_event *event;
399 u64 mac;
400 int i;
401
402 w = kzalloc(sizeof(*w), GFP_ATOMIC);
403 if (!w) {
404 pr_err("Out of memory: %s", __func__);
405 return;
406 }
407 INIT_WORK(&w->work, rtl838x_fdb_sync);
408
409 for (i = 0; i < NOTIFY_EVENTS; i++) {
410 event = &nb->blocks[e].events[i];
411 if (!event->valid)
412 continue;
413 mac = event->mac;
414 if (event->type)
415 mac |= 1ULL << 63;
416 w->ndev = priv->netdev;
417 w->macs[i] = mac;
418 }
419
420 /* Hand the ring entry back to the switch */
421 nb->ring[e] = nb->ring[e] | 1;
422 e = (e + 1) % NOTIFY_BLOCKS;
423
424 w->macs[i] = 0ULL;
425 schedule_work(&w->work);
426 }
427 priv->lastEvent = e;
428 }
429
/* Interrupt handler for RTL838x/RTL839x: a single status register carries
 * TX done (bits 16-19), RX done (bits 8-15), RX ring overrun (bits 0-7)
 * and, on RTL839x, three L2-notification bits (20-22).
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; NAPI polling
		 * re-enables it once the ring has been drained.
		 */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification bits: each is acked individually; the
	 * handler drains the whole notification ring in any case.
	 */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
482
/* Interrupt handler for RTL930x/RTL931x: separate status registers for
 * TX done, RX done and RX ring run-out, in contrast to the single
 * register used on RTL83xx.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; NAPI polling
		 * re-enables them once the rings have been drained.
		 */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (int i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
525
/* Register addresses and SoC-specific callbacks for the RTL838x family */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
550
/* Register addresses and SoC-specific callbacks for the RTL839x family */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
575
/* Register addresses and SoC-specific callbacks for the RTL930x family;
 * note the split interrupt registers handled by rtl93xx_net_irq().
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
606
/* Register addresses and SoC-specific callbacks for the RTL931x family */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
637
/* Full reset of the NIC part of the switch: stop CPU port traffic,
 * mask/ack all DMA interrupts, pulse the NIC+queue reset bits and
 * re-initialize Head-of-Line / fill counters. On RTL839x the L2
 * notification and NBUF settings are preserved across the reset.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;	/* only valid/used on RTL8390 (guarded below) */
	u32 reset_mask;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		reset_mask = 0x6;
	else
		reset_mask = 0xc;

	sw_w32_mask(0, reset_mask, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); /* Disabled on RTL8380 */
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		for (int i = 0; i < priv->rxrings; i++) {
			int pos = (i % 3) * 10;	/* 10-bit field, 3 rings per register */

			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			/* NOTE(review): the value is not shifted by 'pos' although the
			 * mask is — compare rtl93xx_hw_en_rxtx(), which writes
			 * 'priv->rxringlen << pos'. Verify whether this is intended.
			 */
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
				    priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
713
714 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
715 {
716 struct ring_b *ring = priv->membase;
717
718 for (int i = 0; i < priv->rxrings; i++)
719 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
720
721 for (int i = 0; i < TXRINGS; i++)
722 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
723 }
724
/* Enable RX/TX DMA and CPU port traffic on RTL838x and force the CPU
 * port MAC settings (speed/duplex/flow control).
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
751
/* Enable RX/TX DMA and CPU port traffic on RTL839x, add the CPU port
 * to the lookup-miss flooding portmask and force its link up.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); /* Notify IRQ! */

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	/* TODO: The code below should also work for the RTL838x */
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
775
/* Enable RX/TX DMA and CPU port traffic on RTL930x/RTL931x: program the
 * per-ring sizes and fill counters, unmask the split interrupt registers
 * and add the CPU port to the unknown-unicast flooding portmask.
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (int i = 0; i < priv->rxrings; i++) {
		int pos = (i % 3) * 10;	/* 10-bit field, 3 rings per register */
		u32 v;

		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		/* Some SoCs have issues with missing underflow protection */
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force CPU port MAC settings (SoC-specific magic values) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
813
/* Initialize all RX and TX descriptors and their packet headers.
 * RX descriptors are handed to the switch (bit 0 set); TX descriptors
 * stay CPU-owned. The last descriptor of each ring carries WRAP.
 */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
	for (int i = 0; i < priv->rxrings; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < priv->rxringlen; j++) {
			h = &ring->rx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			/* Buffers are addressed through KSEG1 (uncached) */
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			                         i * priv->rxringlen * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* All rings owned by switch, last one wraps */
			ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (priv->rxringlen - 1) ?
			                                       WRAP :
			                                       0);
		}
		ring->c_rx[i] = 0;
	}

	for (int i = 0; i < TXRINGS; i++) {
		struct p_hdr *h;
		int j;

		for (j = 0; j < TXRINGLEN; j++) {
			h = &ring->tx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->tx_space +
			                         i * TXRINGLEN * RING_BUFFER +
			                         j * RING_BUFFER);
			h->size = RING_BUFFER;
			ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
		}
		/* Last header is wrapping around (j == TXRINGLEN after the loop) */
		ring->tx_r[i][j - 1] |= WRAP;
		ring->c_tx[i] = 0;
	}
}
853
/* Initialize the RTL839x L2 notification ring (located right after the
 * packet rings in priv->membase) and enable FDB notification events.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* All blocks owned by the switch (bit 0), last one wraps */
	for (int i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); /* RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN */
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); /* SUSPEND_NOTIFICATION_EN */

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;	/* start consuming at ring entry 0 */
}
872
/* ndo_open callback: reset the NIC, build the descriptor rings, start
 * phylink and NAPI, then enable RX/TX with family-specific setup.
 * Returns 0 (no failure paths in the current implementation).
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
		 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (int i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	/* Family-specific RX/TX enable and trap/flush configuration */
	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;

	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;

	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;

	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);

		/* Trap MLD and IGMP messages to CPU_PORT */
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);

		/* Disable External CPU access to switch, clear EXT_CPU_EN */
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);

		/* Set PCIE_PWR_DOWN */
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
943
/* Quiesce the packet engine: stop CPU-port RX/TX, disable DMA traffic,
 * block/flush forwarding state, force the CPU-port link down and mask/ack
 * all DMA interrupts. The mdelay()s give in-flight DMA time to drain.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* NOTE(review): force-MAC magic values taken over from vendor code;
	 * exact bit meaning is undocumented here (cf. 0x6192D/0x6192F used
	 * for link toggling in rtl838x_mac_an_restart()).
	 */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;

	/* Disable RX/TX from/to CPU-port */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); /* Test, whether this is needed */

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache: kick off a per-port flush and busy-wait on
	 * the hardware's done bit (bit 26 on 838x, bit 28 on 839x).
	 */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (int i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	/* TODO: L2 flush register is 64 bit on RTL931X and 930X */

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts (mask = 0) and ack any pending status */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
1006
/* net_device stop (ndo_stop) callback: stop phylink, quiesce the hardware,
 * then disable NAPI polling and the TX queues. Always returns 0.
 */
static int rtl838x_eth_stop(struct net_device *ndev)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_info("in %s\n", __func__);

	phylink_stop(priv->phylink);
	rtl838x_hw_stop(priv);

	/* Hardware is quiet now, so NAPI can be disabled safely */
	for (int i = 0; i < priv->rxrings; i++)
		napi_disable(&priv->rx_qs[i].napi);

	netif_tx_stop_all_queues(ndev);

	return 0;
}
1023
1024 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1025 {
1026 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1027 * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
1028 */
1029 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1030 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1031 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1032 }
1033 if (ndev->flags & IFF_ALLMULTI)
1034 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1035 if (ndev->flags & IFF_PROMISC) {
1036 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1037 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1038 }
1039 }
1040
1041 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1042 {
1043 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1044 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1045 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1046 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1047 */
1048 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1049 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1050 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1051 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1052 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1053 }
1054 if (ndev->flags & IFF_ALLMULTI) {
1055 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1056 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1057 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1058 }
1059 if (ndev->flags & IFF_PROMISC) {
1060 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1061 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1062 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1063 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1064 }
1065 }
1066
1067 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1068 {
1069 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1070 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1071 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1072 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1073 */
1074 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1075 sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
1076 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
1077 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
1078 } else {
1079 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1080 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1081 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1082 }
1083 }
1084
1085 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1086 {
1087 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1088 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1089 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
1090 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1091 */
1092 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1093 sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
1094 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
1095 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
1096 } else {
1097 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1098 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1099 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1100 }
1101 }
1102
/* ndo_tx_timeout callback: recover from a stuck TX path by stopping the
 * hardware, re-initializing the DMA rings and re-enabling RX/TX, all under
 * priv->lock with IRQs disabled. txqueue identifies the stalled queue but
 * the recovery is global.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	/* Reset the trans_start watchdog timestamp before restarting */
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1117
/* ndo_start_xmit callback: copy the skb into the next free TX descriptor
 * buffer of ring q and hand ownership to the switch. If the frame carries a
 * trailer DSA tag (0x80, port, 0x10, 0x00 at the end of the data), the tag
 * is stripped here and re-encoded into the descriptor's CPU header instead.
 * Runs under priv->lock with IRQs disabled.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY (descriptor still owned by switch).
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) /* Check for high prio queue */
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) &&
	    skb->data[len - 4] == 0x80 &&
	    skb->data[len - 3] < priv->cpu_port &&
	    skb->data[len - 2] == 0x10 &&
	    skb->data[len - 1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len - 3];
		skb->data[len - 4] = skb->data[len - 3] = skb->data[len - 2] = skb->data[len - 1] = 0x00;
		len -= 4;
	}

	len += 4; /* Add space for CRC */

	if (skb_padto(skb, len)) {
		/* skb was freed by skb_padto() on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		/* On RTL8380 SoCs, small packet lengths being sent need adjustments */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		/* Encode the stripped DSA trailer tag into the CPU header */
		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		/* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (int i = 0; i < 10; i++) {
				u32 val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
			/* Ring ID q == 0: Low priority, Ring ID = 1: High prio queue */
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		/* Descriptor still owned by the switch: do NOT free the skb,
		 * the stack will requeue it after NETDEV_TX_BUSY.
		 */
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}

txdone:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
1213
1214 /* Return queue number for TX. On the RTL83XX, these queues have equal priority
1215 * so we do round-robin
1216 */
1217 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1218 struct net_device *sb_dev)
1219 {
1220 static u8 last = 0;
1221
1222 last++;
1223 return last % TXRINGS;
1224 }
1225
1226 /* Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1227 */
1228 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1229 struct net_device *sb_dev)
1230 {
1231 if (skb->priority >= TC_PRIO_CONTROL)
1232 return 1;
1233
1234 return 0;
1235 }
1236
1237 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1238 {
1239 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1240 struct ring_b *ring = priv->membase;
1241 LIST_HEAD(rx_list);
1242 unsigned long flags;
1243 int work_done = 0;
1244 u32 *last;
1245 bool dsa = netdev_uses_dsa(dev);
1246
1247 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1248 spin_lock_irqsave(&priv->lock, flags);
1249 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1250
1251 do {
1252 struct sk_buff *skb;
1253 struct dsa_tag tag;
1254 struct p_hdr *h;
1255 u8 *skb_data;
1256 u8 *data;
1257 int len;
1258
1259 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1260 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1261 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1262 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1263 }
1264 break;
1265 }
1266
1267 h = &ring->rx_header[r][ring->c_rx[r]];
1268 data = (u8 *)KSEG1ADDR(h->buf);
1269 len = h->len;
1270 if (!len)
1271 break;
1272 work_done++;
1273
1274 len -= 4; /* strip the CRC */
1275 /* Add 4 bytes for cpu_tag */
1276 if (dsa)
1277 len += 4;
1278
1279 skb = netdev_alloc_skb(dev, len + 4);
1280 skb_reserve(skb, NET_IP_ALIGN);
1281
1282 if (likely(skb)) {
1283 /* BUG: Prevent bug on RTL838x SoCs */
1284 if (priv->family_id == RTL8380_FAMILY_ID) {
1285 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1286 for (int i = 0; i < priv->rxrings; i++) {
1287 unsigned int val;
1288
1289 /* Update each ring cnt */
1290 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1291 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1292 }
1293 }
1294
1295 skb_data = skb_put(skb, len);
1296 /* Make sure data is visible */
1297 mb();
1298 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1299 /* Overwrite CRC with cpu_tag */
1300 if (dsa) {
1301 priv->r->decode_tag(h, &tag);
1302 skb->data[len - 4] = 0x80;
1303 skb->data[len - 3] = tag.port;
1304 skb->data[len - 2] = 0x10;
1305 skb->data[len - 1] = 0x00;
1306 if (tag.l2_offloaded)
1307 skb->data[len - 3] |= 0x40;
1308 }
1309
1310 if (tag.queue >= 0)
1311 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1312 tag.queue, len, tag.reason, tag.port);
1313
1314 skb->protocol = eth_type_trans(skb, dev);
1315 if (dev->features & NETIF_F_RXCSUM) {
1316 if (tag.crc_error)
1317 skb_checksum_none_assert(skb);
1318 else
1319 skb->ip_summed = CHECKSUM_UNNECESSARY;
1320 }
1321 dev->stats.rx_packets++;
1322 dev->stats.rx_bytes += len;
1323
1324 list_add_tail(&skb->list, &rx_list);
1325 } else {
1326 if (net_ratelimit())
1327 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1328 dev->stats.rx_dropped++;
1329 }
1330
1331 /* Reset header structure */
1332 memset(h, 0, sizeof(struct p_hdr));
1333 h->buf = data;
1334 h->size = RING_BUFFER;
1335
1336 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
1337 WRAP :
1338 0x1);
1339 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1340 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1341 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1342
1343 netif_receive_skb_list(&rx_list);
1344
1345 /* Update counters */
1346 priv->r->update_cntr(r, 0);
1347
1348 spin_unlock_irqrestore(&priv->lock, flags);
1349
1350 return work_done;
1351 }
1352
1353 static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
1354 {
1355 struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
1356 struct rtl838x_eth_priv *priv = rx_q->priv;
1357 int work_done = 0;
1358 int r = rx_q->id;
1359 int work;
1360
1361 while (work_done < budget) {
1362 work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
1363 if (!work)
1364 break;
1365 work_done += work;
1366 }
1367
1368 if (work_done < budget) {
1369 napi_complete_done(napi, work_done);
1370
1371 /* Enable RX interrupt */
1372 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
1373 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
1374 else
1375 sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
1376 }
1377
1378 return work_done;
1379 }
1380
1381
1382 static void rtl838x_validate(struct phylink_config *config,
1383 unsigned long *supported,
1384 struct phylink_link_state *state)
1385 {
1386 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1387
1388 pr_debug("In %s\n", __func__);
1389
1390 if (!phy_interface_mode_is_rgmii(state->interface) &&
1391 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1392 state->interface != PHY_INTERFACE_MODE_MII &&
1393 state->interface != PHY_INTERFACE_MODE_REVMII &&
1394 state->interface != PHY_INTERFACE_MODE_GMII &&
1395 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1396 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1397 state->interface != PHY_INTERFACE_MODE_SGMII) {
1398 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1399 pr_err("Unsupported interface: %d\n", state->interface);
1400 return;
1401 }
1402
1403 /* Allow all the expected bits */
1404 phylink_set(mask, Autoneg);
1405 phylink_set_port_modes(mask);
1406 phylink_set(mask, Pause);
1407 phylink_set(mask, Asym_Pause);
1408
1409 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1410 * including Half duplex
1411 */
1412 if (state->interface != PHY_INTERFACE_MODE_MII &&
1413 state->interface != PHY_INTERFACE_MODE_REVMII) {
1414 phylink_set(mask, 1000baseT_Full);
1415 phylink_set(mask, 1000baseT_Half);
1416 }
1417
1418 phylink_set(mask, 10baseT_Half);
1419 phylink_set(mask, 10baseT_Full);
1420 phylink_set(mask, 100baseT_Half);
1421 phylink_set(mask, 100baseT_Full);
1422
1423 bitmap_and(supported, supported, mask,
1424 __ETHTOOL_LINK_MODE_MASK_NBITS);
1425 bitmap_and(state->advertising, state->advertising, mask,
1426 __ETHTOOL_LINK_MODE_MASK_NBITS);
1427 }
1428
1429
/* phylink mac_config callback. Intentionally empty (apart from logging):
 * this config belongs to the CPU port only, which needs no reconfiguration.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
1440
/* phylink mac_an_restart callback: restart autonegotiation on the CPU port
 * by toggling the forced-link bit in the force-mode register.
 * NOTE(review): 0x6192D/0x6192F are vendor magic values differing in the
 * low bits (link disable/enable) — not documented beyond the comment below.
 */
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1456
1457 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1458 struct phylink_link_state *state)
1459 {
1460 u32 speed;
1461 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1462 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1463 int port = priv->cpu_port;
1464
1465 pr_info("In %s\n", __func__);
1466
1467 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1468 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1469
1470 pr_info("%s link status is %d\n", __func__, state->link);
1471 speed = priv->r->get_mac_link_spd_sts(port);
1472 switch (speed) {
1473 case 0:
1474 state->speed = SPEED_10;
1475 break;
1476 case 1:
1477 state->speed = SPEED_100;
1478 break;
1479 case 2:
1480 state->speed = SPEED_1000;
1481 break;
1482 case 5:
1483 state->speed = SPEED_2500;
1484 break;
1485 case 6:
1486 state->speed = SPEED_5000;
1487 break;
1488 case 4:
1489 state->speed = SPEED_10000;
1490 break;
1491 default:
1492 state->speed = SPEED_UNKNOWN;
1493 break;
1494 }
1495
1496 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1497 if (priv->r->get_mac_rx_pause_sts(port))
1498 state->pause |= MLO_PAUSE_RX;
1499 if (priv->r->get_mac_tx_pause_sts(port))
1500 state->pause |= MLO_PAUSE_TX;
1501 }
1502
/* phylink mac_link_down callback: clear the RX/TX enable bits (0x3) in the
 * CPU port's MAC port control register to stop traffic.
 */
static void rtl838x_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Stop TX/RX to port */
	sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
}
1514
/* phylink mac_link_up callback: set the RX/TX enable bits (0x3) in the
 * CPU port's MAC port control register to resume traffic. The negotiated
 * speed/duplex/pause parameters are not applied here (forced CPU port).
 */
static void rtl838x_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	pr_debug("In %s\n", __func__);
	/* Restart TX/RX to port */
	sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
}
1527
1528 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1529 {
1530 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1531 unsigned long flags;
1532
1533 spin_lock_irqsave(&priv->lock, flags);
1534 pr_debug("In %s\n", __func__);
1535 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1536 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1537
1538 if (priv->family_id == RTL8380_FAMILY_ID) {
1539 /* 2 more registers, ALE/MAC block */
1540 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1541 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1542 (RTL838X_MAC_ALE + 4));
1543
1544 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1545 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1546 RTL838X_MAC2 + 4);
1547 }
1548 spin_unlock_irqrestore(&priv->lock, flags);
1549 }
1550
1551 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1552 {
1553 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1554 const struct sockaddr *addr = p;
1555 u8 *mac = (u8 *) (addr->sa_data);
1556
1557 if (!is_valid_ether_addr(addr->sa_data))
1558 return -EADDRNOTAVAIL;
1559
1560 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1561 rtl838x_set_mac_hw(dev, mac);
1562
1563 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1564
1565 return 0;
1566 }
1567
/* MAC init for RTL839x: currently a stub (EEE and egress-rate limiting
 * configuration is still missing). Always returns 0.
 */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	/* We will need to set-up EEE and the egress-rate limitation */
	return 0;
}
1573
1574 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1575 {
1576 if (priv->family_id == 0x8390)
1577 return rtl8390_init_mac(priv);
1578
1579 /* At present we do not know how to set up EEE on any other SoC than RTL8380 */
1580 if (priv->family_id != 0x8380)
1581 return 0;
1582
1583 pr_info("%s\n", __func__);
1584 /* fix timer for EEE */
1585 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1586 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1587
1588 /* Init VLAN. TODO: Understand what is being done, here */
1589 if (priv->id == 0x8382) {
1590 for (int i = 0; i <= 28; i++)
1591 sw_w32(0, 0xd57c + i * 0x80);
1592 }
1593 if (priv->id == 0x8380) {
1594 for (int i = 8; i <= 28; i++)
1595 sw_w32(0, 0xd57c + i * 0x80);
1596 }
1597
1598 return 0;
1599 }
1600
/* ethtool get_link_ksettings: delegate to phylink. Returns 0 or a negative
 * error code from phylink.
 */
static int rtl838x_get_link_ksettings(struct net_device *ndev,
				      struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
1610
/* ethtool set_link_ksettings: delegate to phylink. Returns 0 or a negative
 * error code from phylink.
 */
static int rtl838x_set_link_ksettings(struct net_device *ndev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);

	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
1620
1621 static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1622 {
1623 u32 val;
1624 int err;
1625 struct rtl838x_eth_priv *priv = bus->priv;
1626
1627 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1628 return rtl838x_read_sds_phy(mii_id, regnum);
1629
1630 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1631 err = rtl838x_read_mmd_phy(mii_id,
1632 mdiobus_c45_devad(regnum),
1633 regnum, &val);
1634 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1635 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1636 val, err);
1637 } else {
1638 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1639 err = rtl838x_read_phy(mii_id, page, regnum, &val);
1640 }
1641 if (err)
1642 return err;
1643
1644 return val;
1645 }
1646
/* mii_bus read op: non-paged access defaults to page 0 */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1651
1652 static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1653 {
1654 u32 val;
1655 int err;
1656 struct rtl838x_eth_priv *priv = bus->priv;
1657
1658 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1659 return rtl839x_read_sds_phy(mii_id, regnum);
1660
1661 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1662 err = rtl839x_read_mmd_phy(mii_id,
1663 mdiobus_c45_devad(regnum),
1664 regnum, &val);
1665 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1666 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1667 val, err);
1668 } else {
1669 err = rtl839x_read_phy(mii_id, page, regnum, &val);
1670 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1671 }
1672
1673 if (err)
1674 return err;
1675
1676 return val;
1677 }
1678
/* mii_bus read op: non-paged access defaults to page 0 */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1683
1684 static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1685 {
1686 u32 val;
1687 int err;
1688 struct rtl838x_eth_priv *priv = bus->priv;
1689
1690 if (priv->phy_is_internal[mii_id])
1691 return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
1692
1693 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1694 err = rtl930x_read_mmd_phy(mii_id,
1695 mdiobus_c45_devad(regnum),
1696 regnum, &val);
1697 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1698 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1699 val, err);
1700 } else {
1701 err = rtl930x_read_phy(mii_id, page, regnum, &val);
1702 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1703 }
1704
1705 if (err)
1706 return err;
1707
1708 return val;
1709 }
1710
/* mii_bus read op: non-paged access defaults to page 0 */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1715
/* Paged MDIO read on RTL931x. Internal PHYs are read via their SerDes
 * (which returns value-or-negative-errno in a single int); external ones
 * via the SMI bus, with a separate path for C45/MMD accesses.
 * Returns the register value or a negative error code.
 */
static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
{
	u32 val;
	int err, v;
	struct rtl838x_eth_priv *priv = bus->priv;

	pr_debug("%s: In here, port %d\n", __func__, mii_id);
	if (priv->phy_is_internal[mii_id]) {
		/* Split the combined value/errno return into err + val */
		v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
		if (v < 0) {
			err = v;
		} else {
			err = 0;
			val = v;
		}
	} else {
		if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
			err = rtl931x_read_mmd_phy(mii_id,
						   mdiobus_c45_devad(regnum),
						   regnum, &val);
			pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
				 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
				 val, err);
		} else {
			err = rtl931x_read_phy(mii_id, page, regnum, &val);
			pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
		}
	}

	if (err)
		return err;

	return val;
}
1750
/* mii_bus read op: non-paged access defaults to page 0 */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1755
/* Paged MDIO write on RTL838x. Ports 24-27 of the RTL8380 are the internal
 * SerDes and are written through memory-mapped fiber registers; C45/MMD
 * accesses use the MMD register path. Returns 0 or a negative error code.
 */
static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	u32 offset = 0;
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
		/* Port 26 uses the second SerDes register block (+0x100) */
		if (mii_id == 26)
			offset = 0x100;
		sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
		return 0;
	}

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					    regnum, value);
		pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 value, err);

		return err;
	}
	err = rtl838x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
1784
/* mii_bus write op: non-paged access defaults to page 0 */
static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1790
1791 static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1792 int regnum, u16 value)
1793 {
1794 struct rtl838x_eth_priv *priv = bus->priv;
1795 int err;
1796
1797 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1798 return rtl839x_write_sds_phy(mii_id, regnum, value);
1799
1800 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1801 err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1802 regnum, value);
1803 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1804 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1805 value, err);
1806
1807 return err;
1808 }
1809
1810 err = rtl839x_write_phy(mii_id, page, regnum, value);
1811 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1812
1813 return err;
1814 }
1815
/* mii_bus write op: non-paged access defaults to page 0 */
static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1821
1822 static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1823 int regnum, u16 value)
1824 {
1825 struct rtl838x_eth_priv *priv = bus->priv;
1826 int err;
1827
1828 if (priv->phy_is_internal[mii_id])
1829 return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
1830
1831 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
1832 return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1833 regnum, value);
1834
1835 err = rtl930x_write_phy(mii_id, page, regnum, value);
1836 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1837
1838 return err;
1839 }
1840
/* mii_bus write op: non-paged access defaults to page 0 */
static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1846
/* Paged MDIO write on RTL931x. Internal PHYs are written via their SerDes;
 * external ones over the SMI bus, with a separate path for C45/MMD accesses.
 * Returns 0 or a negative error code.
 */
static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
				    int regnum, u16 value)
{
	struct rtl838x_eth_priv *priv = bus->priv;
	int err;

	if (priv->phy_is_internal[mii_id])
		return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);

	if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
		err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
					    regnum, value);
		pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
			 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
			 value, err);

		return err;
	}

	err = rtl931x_write_phy(mii_id, page, regnum, value);
	pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);

	return err;
}
1871
/* mii_bus write op: non-paged access defaults to page 0 */
static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1877
/* mii_bus reset callback for RTL838x: take PHY management away from the
 * MAC's automatic polling so the CPU can drive the SMI bus directly.
 * Always returns 0.
 */
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
	return 0;
}
1890
/* mii_bus reset callback for RTL839x: intentionally a no-op for now.
 * The register sequence below should disable MAC PHY-polling (like the
 * RTL838x variant) but does not work on this family; it is kept under
 * #if 0 so the intent stays visible without generating unreachable code
 * after the early return (which some compilers warn about).
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
#if 0
	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	/* Probably should reset all PHYs here... */
#endif
	return 0;
}
1906
/* Per-port bit offset of the MAC-type field written into
 * RTL930X_SMI_MAC_TYPE_CTRL (see rtl930x_mdio_reset()). Ports 0-23 share
 * one offset per group of four; ports 24-27 have individual offsets.
 * NOTE(review): layout inferred from its use in rtl930x_mdio_reset() —
 * confirm against the vendor register documentation.
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
1909
1910 static int rtl930x_mdio_reset(struct mii_bus *bus)
1911 {
1912 struct rtl838x_eth_priv *priv = bus->priv;
1913 u32 c45_mask = 0;
1914 u32 poll_sel[2];
1915 u32 poll_ctrl = 0;
1916 u32 private_poll_mask = 0;
1917 u32 v;
1918 bool uses_usxgmii = false; /* For the Aquantia PHYs */
1919 bool uses_hisgmii = false; /* For the RTL8221/8226 */
1920
1921 /* Mapping of port to phy-addresses on an SMI bus */
1922 poll_sel[0] = poll_sel[1] = 0;
1923 for (int i = 0; i < RTL930X_CPU_PORT; i++) {
1924 int pos;
1925
1926 if (priv->smi_bus[i] > 3)
1927 continue;
1928 pos = (i % 6) * 5;
1929 sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
1930 RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
1931
1932 pos = (i * 2) % 32;
1933 poll_sel[i / 16] |= priv->smi_bus[i] << pos;
1934 poll_ctrl |= BIT(20 + priv->smi_bus[i]);
1935 }
1936
1937 /* Configure which SMI bus is behind which port number */
1938 sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
1939 sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
1940
1941 /* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
1942 sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
1943
1944 /* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
1945 for (int i = 0; i < 4; i++)
1946 if (priv->smi_bus_isc45[i])
1947 c45_mask |= BIT(i + 16);
1948
1949 pr_info("c45_mask: %08x\n", c45_mask);
1950 sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
1951
1952 /* Set the MAC type of each port according to the PHY-interface */
1953 /* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
1954 v = 0;
1955 for (int i = 0; i < RTL930X_CPU_PORT; i++) {
1956 switch (priv->interfaces[i]) {
1957 case PHY_INTERFACE_MODE_10GBASER:
1958 break; /* Serdes: Value = 0 */
1959 case PHY_INTERFACE_MODE_HSGMII:
1960 private_poll_mask |= BIT(i);
1961 /* fallthrough */
1962 case PHY_INTERFACE_MODE_USXGMII:
1963 v |= BIT(mac_type_bit[i]);
1964 uses_usxgmii = true;
1965 break;
1966 case PHY_INTERFACE_MODE_QSGMII:
1967 private_poll_mask |= BIT(i);
1968 v |= 3 << mac_type_bit[i];
1969 break;
1970 default:
1971 break;
1972 }
1973 }
1974 sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);
1975
1976 /* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
1977 sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);
1978
1979 /* The following magic values are found in the port configuration, they seem to
1980 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
1981 * the XGS1250 and the RTL8226 of the XGS1210
1982 */
1983 if (uses_usxgmii) {
1984 sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
1985 sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
1986 sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
1987 }
1988 if (uses_hisgmii) {
1989 sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
1990 sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
1991 sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
1992 }
1993
1994 pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
1995 sw_r32(RTL930X_SMI_GLB_CTRL));
1996 pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
1997 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
1998 pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
1999 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
2000 pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
2001 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
2002 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
2003 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
2004 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
2005 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
2006 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
2007 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
2008 pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
2009 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));
2010
2011 return 0;
2012 }
2013
2014 static int rtl931x_mdio_reset(struct mii_bus *bus)
2015 {
2016 struct rtl838x_eth_priv *priv = bus->priv;
2017 u32 c45_mask = 0;
2018 u32 poll_sel[4];
2019 u32 poll_ctrl = 0;
2020 bool mdc_on[4];
2021
2022 pr_info("%s called\n", __func__);
2023 /* Disable port polling for configuration purposes */
2024 sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
2025 sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
2026 msleep(100);
2027
2028 mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
2029 /* Mapping of port to phy-addresses on an SMI bus */
2030 poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
2031 for (int i = 0; i < 56; i++) {
2032 u32 pos;
2033
2034 pos = (i % 6) * 5;
2035 sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
2036 pos = (i * 2) % 32;
2037 poll_sel[i / 16] |= priv->smi_bus[i] << pos;
2038 poll_ctrl |= BIT(20 + priv->smi_bus[i]);
2039 mdc_on[priv->smi_bus[i]] = true;
2040 }
2041
2042 /* Configure which SMI bus is behind which port number */
2043 for (int i = 0; i < 4; i++) {
2044 pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
2045 sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
2046 }
2047
2048 /* Configure which SMI busses */
2049 pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
2050 pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
2051 for (int i = 0; i < 4; i++) {
2052 /* bus is polled in c45 */
2053 if (priv->smi_bus_isc45[i])
2054 c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
2055 /* Enable bus access via MDC */
2056 if (mdc_on[i])
2057 sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
2058 }
2059
2060 pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
2061 pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
2062
2063 /* We have a 10G PHY enable polling
2064 * sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
2065 * sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
2066 * sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
2067 */
2068 sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);
2069
2070 return 0;
2071 }
2072
/* One-time initialization of the RTL931x switch-core memories/tables.
 * Each block is kicked off by writing to its *_INIT register and is done
 * when the hardware clears the busy bit(s) again. The busy-wait loops
 * below poll without a timeout, so a hung init stalls boot — presumably
 * acceptable because these inits cannot fail on working silicon, but
 * a bounded wait would be safer (NOTE(review): confirm).
 * Always returns 0.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	/* Initialize Encapsulation memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	/* Initialize Management Information Base memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	/* Initialize ACL (PIE) memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	/* Initialize ALE memory and wait until finished */
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	/* Enable ESD auto recovery */
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	/* Init SPI, is this for thermal control or what? */
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
2108
2109 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
2110 {
2111 struct device_node *mii_np, *dn;
2112 u32 pn;
2113 int ret;
2114
2115 pr_debug("%s called\n", __func__);
2116 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
2117
2118 if (!mii_np) {
2119 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
2120 return -ENODEV;
2121 }
2122
2123 if (!of_device_is_available(mii_np)) {
2124 ret = -ENODEV;
2125 goto err_put_node;
2126 }
2127
2128 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
2129 if (!priv->mii_bus) {
2130 ret = -ENOMEM;
2131 goto err_put_node;
2132 }
2133
2134 switch(priv->family_id) {
2135 case RTL8380_FAMILY_ID:
2136 priv->mii_bus->name = "rtl838x-eth-mdio";
2137 priv->mii_bus->read = rtl838x_mdio_read;
2138 priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
2139 priv->mii_bus->write = rtl838x_mdio_write;
2140 priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
2141 priv->mii_bus->reset = rtl838x_mdio_reset;
2142 break;
2143 case RTL8390_FAMILY_ID:
2144 priv->mii_bus->name = "rtl839x-eth-mdio";
2145 priv->mii_bus->read = rtl839x_mdio_read;
2146 priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
2147 priv->mii_bus->write = rtl839x_mdio_write;
2148 priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
2149 priv->mii_bus->reset = rtl839x_mdio_reset;
2150 break;
2151 case RTL9300_FAMILY_ID:
2152 priv->mii_bus->name = "rtl930x-eth-mdio";
2153 priv->mii_bus->read = rtl930x_mdio_read;
2154 priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
2155 priv->mii_bus->write = rtl930x_mdio_write;
2156 priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
2157 priv->mii_bus->reset = rtl930x_mdio_reset;
2158 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2159 break;
2160 case RTL9310_FAMILY_ID:
2161 priv->mii_bus->name = "rtl931x-eth-mdio";
2162 priv->mii_bus->read = rtl931x_mdio_read;
2163 priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
2164 priv->mii_bus->write = rtl931x_mdio_write;
2165 priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
2166 priv->mii_bus->reset = rtl931x_mdio_reset;
2167 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2168 break;
2169 }
2170 priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
2171 priv->mii_bus->priv = priv;
2172 priv->mii_bus->parent = &priv->pdev->dev;
2173
2174 for_each_node_by_name(dn, "ethernet-phy") {
2175 u32 smi_addr[2];
2176
2177 if (of_property_read_u32(dn, "reg", &pn))
2178 continue;
2179
2180 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
2181 smi_addr[0] = 0;
2182 smi_addr[1] = pn;
2183 }
2184
2185 if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
2186 priv->sds_id[pn] = -1;
2187 else {
2188 pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
2189 }
2190
2191 if (pn < MAX_PORTS) {
2192 priv->smi_bus[pn] = smi_addr[0];
2193 priv->smi_addr[pn] = smi_addr[1];
2194 } else {
2195 pr_err("%s: illegal port number %d\n", __func__, pn);
2196 }
2197
2198 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
2199 priv->smi_bus_isc45[smi_addr[0]] = true;
2200
2201 if (of_property_read_bool(dn, "phy-is-integrated")) {
2202 priv->phy_is_internal[pn] = true;
2203 }
2204 }
2205
2206 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
2207 if (!dn) {
2208 dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
2209 return -ENODEV;
2210 }
2211
2212 for_each_node_by_name(dn, "port") {
2213 if (of_property_read_u32(dn, "reg", &pn))
2214 continue;
2215 pr_debug("%s Looking at port %d\n", __func__, pn);
2216 if (pn > priv->cpu_port)
2217 continue;
2218 if (of_get_phy_mode(dn, &priv->interfaces[pn]))
2219 priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
2220 pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
2221 }
2222
2223 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
2224 ret = of_mdiobus_register(priv->mii_bus, mii_np);
2225
2226 err_put_node:
2227 of_node_put(mii_np);
2228
2229 return ret;
2230 }
2231
2232 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
2233 {
2234 pr_debug("%s called\n", __func__);
2235 if (!priv->mii_bus)
2236 return 0;
2237
2238 mdiobus_unregister(priv->mii_bus);
2239 mdiobus_free(priv->mii_bus);
2240
2241 return 0;
2242 }
2243
2244 static netdev_features_t rtl838x_fix_features(struct net_device *dev,
2245 netdev_features_t features)
2246 {
2247 return features;
2248 }
2249
2250 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
2251 {
2252 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2253
2254 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2255 if (!(features & NETIF_F_RXCSUM))
2256 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2257 else
2258 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
2259 }
2260
2261 return 0;
2262 }
2263
2264 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
2265 {
2266 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2267
2268 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2269 if (!(features & NETIF_F_RXCSUM))
2270 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2271 else
2272 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2273 }
2274
2275 return 0;
2276 }
2277
/* net_device callbacks for the RTL838x family. Queue selection and the
 * RX-mode handler are family-specific; the remaining hooks are shared
 * with the other SoC generations.
 */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2291
/* net_device callbacks for the RTL839x family: identical to the RTL838x
 * set except for the family-specific RX-mode handler.
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2305
/* net_device callbacks for the RTL930x family: 93xx-specific queue
 * selection and feature handling, family-specific RX-mode handler.
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2319
/* net_device callbacks for the RTL931x family.
 * NOTE(review): unlike the other families there is no .ndo_setup_tc here —
 * confirm whether TC offload is unsupported on RTL931x or simply missing.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
2332
/* phylink MAC operations shared by all supported SoC families. */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
2341
/* ethtool operations: only link-settings get/set are implemented. */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
2346
2347 static int __init rtl838x_eth_probe(struct platform_device *pdev)
2348 {
2349 struct net_device *dev;
2350 struct device_node *dn = pdev->dev.of_node;
2351 struct rtl838x_eth_priv *priv;
2352 struct resource *res, *mem;
2353 phy_interface_t phy_mode;
2354 struct phylink *phylink;
2355 int err = 0, rxrings, rxringlen;
2356 struct ring_b *ring;
2357
2358 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2359 (u32)pdev, (u32)(&(pdev->dev)));
2360
2361 if (!dn) {
2362 dev_err(&pdev->dev, "No DT found\n");
2363 return -EINVAL;
2364 }
2365
2366 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2367 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2368 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2369 rxringlen = MAX_ENTRIES / rxrings;
2370 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2371
2372 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2373 if (!dev) {
2374 err = -ENOMEM;
2375 goto err_free;
2376 }
2377 SET_NETDEV_DEV(dev, &pdev->dev);
2378 priv = netdev_priv(dev);
2379
2380 /* obtain buffer memory space */
2381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2382 if (res) {
2383 mem = devm_request_mem_region(&pdev->dev, res->start,
2384 resource_size(res), res->name);
2385 if (!mem) {
2386 dev_err(&pdev->dev, "cannot request memory space\n");
2387 err = -ENXIO;
2388 goto err_free;
2389 }
2390
2391 dev->mem_start = mem->start;
2392 dev->mem_end = mem->end;
2393 } else {
2394 dev_err(&pdev->dev, "cannot request IO resource\n");
2395 err = -ENXIO;
2396 goto err_free;
2397 }
2398
2399 /* Allocate buffer memory */
2400 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER +
2401 sizeof(struct ring_b) + sizeof(struct notify_b),
2402 (void *)&dev->mem_start, GFP_KERNEL);
2403 if (!priv->membase) {
2404 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2405 err = -ENOMEM;
2406 goto err_free;
2407 }
2408
2409 /* Allocate ring-buffer space at the end of the allocated memory */
2410 ring = priv->membase;
2411 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2412
2413 spin_lock_init(&priv->lock);
2414
2415 dev->ethtool_ops = &rtl838x_ethtool_ops;
2416 dev->min_mtu = ETH_ZLEN;
2417 dev->max_mtu = 1536;
2418 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2419 dev->hw_features = NETIF_F_RXCSUM;
2420
2421 priv->id = soc_info.id;
2422 priv->family_id = soc_info.family;
2423 if (priv->id) {
2424 pr_info("Found SoC ID: %4x: %s, family %x\n",
2425 priv->id, soc_info.name, priv->family_id);
2426 } else {
2427 pr_err("Unknown chip id (%04x)\n", priv->id);
2428 return -ENODEV;
2429 }
2430
2431 switch (priv->family_id) {
2432 case RTL8380_FAMILY_ID:
2433 priv->cpu_port = RTL838X_CPU_PORT;
2434 priv->r = &rtl838x_reg;
2435 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2436 break;
2437 case RTL8390_FAMILY_ID:
2438 priv->cpu_port = RTL839X_CPU_PORT;
2439 priv->r = &rtl839x_reg;
2440 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2441 break;
2442 case RTL9300_FAMILY_ID:
2443 priv->cpu_port = RTL930X_CPU_PORT;
2444 priv->r = &rtl930x_reg;
2445 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2446 break;
2447 case RTL9310_FAMILY_ID:
2448 priv->cpu_port = RTL931X_CPU_PORT;
2449 priv->r = &rtl931x_reg;
2450 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2451 rtl931x_chip_init(priv);
2452 break;
2453 default:
2454 pr_err("Unknown SoC family\n");
2455 return -ENODEV;
2456 }
2457 priv->rxringlen = rxringlen;
2458 priv->rxrings = rxrings;
2459
2460 /* Obtain device IRQ number */
2461 dev->irq = platform_get_irq(pdev, 0);
2462 if (dev->irq < 0) {
2463 dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
2464 goto err_free;
2465 }
2466
2467 err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
2468 IRQF_SHARED, dev->name, dev);
2469 if (err) {
2470 dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
2471 __func__, err);
2472 goto err_free;
2473 }
2474
2475 rtl8380_init_mac(priv);
2476
2477 /* Try to get mac address in the following order:
2478 * 1) from device tree data
2479 * 2) from internal registers set by bootloader
2480 */
2481 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2482 if (is_valid_ether_addr(dev->dev_addr)) {
2483 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2484 } else {
2485 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2486 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2487 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2488 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2489 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2490 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2491 }
2492 /* if the address is invalid, use a random value */
2493 if (!is_valid_ether_addr(dev->dev_addr)) {
2494 struct sockaddr sa = { AF_UNSPEC };
2495
2496 netdev_warn(dev, "Invalid MAC address, using random\n");
2497 eth_hw_addr_random(dev);
2498 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2499 if (rtl838x_set_mac_address(dev, &sa))
2500 netdev_warn(dev, "Failed to set MAC address.\n");
2501 }
2502 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2503 sw_r32(priv->r->mac + 4));
2504 strcpy(dev->name, "eth%d");
2505 priv->pdev = pdev;
2506 priv->netdev = dev;
2507
2508 err = rtl838x_mdio_init(priv);
2509 if (err)
2510 goto err_free;
2511
2512 err = register_netdev(dev);
2513 if (err)
2514 goto err_free;
2515
2516 for (int i = 0; i < priv->rxrings; i++) {
2517 priv->rx_qs[i].id = i;
2518 priv->rx_qs[i].priv = priv;
2519 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2520 }
2521
2522 platform_set_drvdata(pdev, dev);
2523
2524 phy_mode = PHY_INTERFACE_MODE_NA;
2525 err = of_get_phy_mode(dn, &phy_mode);
2526 if (err < 0) {
2527 dev_err(&pdev->dev, "incorrect phy-mode\n");
2528 err = -EINVAL;
2529 goto err_free;
2530 }
2531 priv->phylink_config.dev = &dev->dev;
2532 priv->phylink_config.type = PHYLINK_NETDEV;
2533
2534 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2535 phy_mode, &rtl838x_phylink_ops);
2536
2537 if (IS_ERR(phylink)) {
2538 err = PTR_ERR(phylink);
2539 goto err_free;
2540 }
2541 priv->phylink = phylink;
2542
2543 return 0;
2544
2545 err_free:
2546 pr_err("Error setting up netdev, freeing it again.\n");
2547 free_netdev(dev);
2548
2549 return err;
2550 }
2551
2552 static int rtl838x_eth_remove(struct platform_device *pdev)
2553 {
2554 struct net_device *dev = platform_get_drvdata(pdev);
2555 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2556
2557 if (dev) {
2558 pr_info("Removing platform driver for rtl838x-eth\n");
2559 rtl838x_mdio_remove(priv);
2560 rtl838x_hw_stop(priv);
2561
2562 netif_tx_stop_all_queues(dev);
2563
2564 for (int i = 0; i < priv->rxrings; i++)
2565 netif_napi_del(&priv->rx_qs[i].napi);
2566
2567 unregister_netdev(dev);
2568 free_netdev(dev);
2569 }
2570
2571 return 0;
2572 }
2573
/* DT match table: one compatible covers all supported families; the exact
 * SoC is detected at probe time via soc_info.
 */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2579
/* NOTE(review): .probe points at an __init function while the driver is
 * registered with module_platform_driver(); if probe can ever be deferred
 * past the release of init memory this would fault — confirm probe cannot
 * defer on these SoCs, or drop the __init annotation.
 */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};

module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");