realtek: Whitespace and codestyle cleanup
[openwrt/staging/jow.git] / target / linux / realtek / files-5.15 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* linux/drivers/net/ethernet/rtl838x_eth.c
3 * Copyright (C) 2020 B. Koblitz
4 */
5
6 #include <linux/dma-mapping.h>
7 #include <linux/etherdevice.h>
8 #include <linux/interrupt.h>
9 #include <linux/io.h>
10 #include <linux/platform_device.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/of.h>
14 #include <linux/of_net.h>
15 #include <linux/of_mdio.h>
16 #include <linux/module.h>
17 #include <linux/phylink.h>
18 #include <linux/pkt_sched.h>
19 #include <net/dsa.h>
20 #include <net/switchdev.h>
21 #include <asm/cacheflush.h>
22
23 #include <asm/mach-rtl838x/mach-rtl83xx.h>
24 #include "rtl838x_eth.h"
25
26 extern struct rtl83xx_soc_info soc_info;
27
28 /* Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
29 * The ring is assigned by switch based on packet/port priortity
30 * Maximum number of TX rings is 2, Ring 2 being the high priority
31 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
32 * for an RX ring, MAX_ENTRIES the maximum number of entries
33 * available in total for all queues.
34 */
#define MAX_RXRINGS 32		/* Maximum number of RX rings (32 on RTL93xx, 8 on RTL83xx) */
#define MAX_RXLEN 300		/* Maximum number of entries in one RX ring */
#define MAX_ENTRIES (300 * 8)	/* Maximum RX entries in total, over all rings */
#define TXRINGS 2		/* Number of TX rings; ring 2 is high priority on RTL93xx */
#define TXRINGLEN 160		/* Entries per TX ring */
#define NOTIFY_EVENTS 10	/* L2 notification events per notification block (RTL839x) */
#define NOTIFY_BLOCKS 10	/* Number of L2 notification blocks (RTL839x) */
#define TX_EN 0x8		/* DMA_IF_CTRL: TX DMA enable (RTL83xx) */
#define RX_EN 0x4		/* DMA_IF_CTRL: RX DMA enable (RTL83xx) */
#define TX_EN_93XX 0x20		/* DMA_IF_CTRL: TX DMA enable (RTL93xx) */
#define RX_EN_93XX 0x10		/* DMA_IF_CTRL: RX DMA enable (RTL93xx) */
#define TX_DO 0x2		/* DMA_IF_CTRL TX kick bit — not used in this chunk */
#define WRAP 0x2		/* Descriptor flag: last ring entry, wrap to start */
#define MAX_PORTS 57		/* Maximum number of switch ports handled */
#define MAX_SMI_BUSSES 4	/* Maximum number of SMI (MDIO) busses */

#define RING_BUFFER 1600	/* Size of a single packet buffer, in bytes */
52
/* DMA packet descriptor header shared with the switch ASIC.
 * Packed/byte-aligned so the layout matches what the hardware expects.
 */
struct p_hdr {
	uint8_t *buf;		/* KSEG1 (uncached) address of the packet buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];	/* CPU tag: destination port mask, priority, trap reason */
} __packed __aligned(1);
62
/* One L2 FDB notification event as written by the RTL839x hardware
 * into a notification block (see rtl839x_l2_notification_handler).
 */
struct n_event {
	uint32_t type:2;	/* Non-zero is treated as an "add" event by the handler */
	uint32_t fidVid:12;	/* Filtering ID / VLAN ID of the entry */
	uint64_t mac:48;	/* MAC address of the FDB entry */
	uint32_t slp:6;
	uint32_t valid:1;	/* Event slot carries valid data */
	uint32_t reserved:27;
} __packed __aligned(1);
71
/* All DMA ring state, laid out contiguously at priv->membase.
 * rx_r/tx_r hold the descriptor words handed to the ASIC; the
 * headers and tx_space back them with actual buffers.
 */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];		/* RX descriptor rings */
	uint32_t tx_r[TXRINGS][TXRINGLEN];		/* TX descriptor rings */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];	/* Per-RX-descriptor headers */
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];	/* Per-TX-descriptor headers */
	uint32_t c_rx[MAX_RXRINGS];			/* Current (next) index per RX ring */
	uint32_t c_tx[TXRINGS];				/* Current (next) index per TX ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;				/* RX packet buffers (allocated separately) */
};
82
/* A block of L2 notification events (RTL839x) */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};

/* L2 notification ring shared with the RTL839x hardware; placed
 * directly behind struct ring_b in priv->membase.
 */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];	/* Descriptors: block address | ownership bit | WRAP */
	u32 reserved2[8];
};
93
94 static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
95 {
96 // cpu_tag[0] is reserved on the RTL83XX SoCs
97 h->cpu_tag[1] = 0x0400; // BIT 10: RTL8380_CPU_TAG
98 h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
99 h->cpu_tag[3] = 0x0000;
100 h->cpu_tag[4] = BIT(dest_port) >> 16;
101 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
102
103 /* Set internal priority (PRI) and enable (AS_PRI) */
104 if (prio >= 0)
105 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
106 }
107
108 static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
109 {
110 // cpu_tag[0] is reserved on the RTL83XX SoCs
111 h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
112 h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
113 h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
114 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
115 if (dest_port >= 32) {
116 dest_port -= 32;
117 h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
118 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
119 } else {
120 h->cpu_tag[4] = BIT(dest_port) >> 16;
121 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
122 }
123
124 /* Set internal priority (PRI) and enable (AS_PRI) */
125 if (prio >= 0)
126 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
127 }
128
129 static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
130 {
131 h->cpu_tag[0] = 0x8000; // CPU tag marker
132 h->cpu_tag[1] = h->cpu_tag[2] = 0;
133 h->cpu_tag[3] = 0;
134 h->cpu_tag[4] = 0;
135 h->cpu_tag[5] = 0;
136 h->cpu_tag[6] = BIT(dest_port) >> 16;
137 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
138
139 /* Enable (AS_QID) and set priority queue (QID) */
140 if (prio >= 0)
141 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
142 }
143
144 static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
145 {
146 h->cpu_tag[0] = 0x8000; // CPU tag marker
147 h->cpu_tag[1] = h->cpu_tag[2] = 0;
148 h->cpu_tag[3] = 0;
149 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
150 if (dest_port >= 32) {
151 dest_port -= 32;
152 h->cpu_tag[4] = BIT(dest_port) >> 16;
153 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
154 } else {
155 h->cpu_tag[6] = BIT(dest_port) >> 16;
156 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
157 }
158
159 /* Enable (AS_QID) and set priority queue (QID) */
160 if (prio >= 0)
161 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
162 }
163
164 static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
165 {
166 h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
167 h->cpu_tag[2] |= (vlan >> 8) & 0xf;
168 h->cpu_tag[3] |= (vlan & 0xff) << 8;
169 }
170
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* Ring number */
	struct rtl838x_eth_priv *priv;	/* Back-pointer to the owning device */
	struct napi_struct napi;
};
176
/* Driver private state: netdev, DMA memory, phylink and SoC specifics */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA memory: struct ring_b, then notify_b on RTL839x */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* One NAPI context per RX ring */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* SoC family: RTL8380/8390/9300/9310 */
	const struct rtl838x_eth_reg *r;	/* Family-specific registers/accessors */
	u8 cpu_port;
	u32 lastEvent;			/* Next L2 notification block index (RTL839x) */
	u16 rxrings;			/* Number of RX rings in use */
	u16 rxringlen;			/* Entries per RX ring */
	u8 smi_bus[MAX_PORTS];		/* SMI bus number per port */
	u8 smi_addr[MAX_PORTS];		/* SMI address per port */
	u32 sds_id[MAX_PORTS];		/* SerDes ID per port */
	bool smi_bus_isc45[MAX_SMI_BUSSES];	/* Bus speaks clause 45 MDIO */
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
200
201 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
202 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
203 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
204 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
205 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
206 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
207 extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
208 extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
209 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
210 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
211 extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
212 extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
213
214 /* On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
215 * the rings. Writing x into these registers substracts x from its content.
216 * When the content reaches the ring size, the ASIC no longer adds
217 * packets to this receive queue.
218 */
/* No-op: the RTL838x has no RX ring fill-level counter registers */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
223
/* No-op: the RTL839x has no RX ring fill-level counter registers */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
228
/* Return 'released' freed descriptors of RX ring r to the hardware by
 * writing into the ring's fill-level counter (write subtracts, see the
 * comment above). Three 10-bit counters are packed per 32-bit register.
 */
void rtl930x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;		/* Bit position of ring r's counter */
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;		/* Current fill level of ring r */
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): this extra write of the old (unshifted) level looks
	 * intentional given the subtract-on-write semantics, but confirm
	 * against the RTL930x programming guide.
	 */
	sw_w32(v, reg);
}
240
/* Return 'released' freed descriptors of RX ring r to the hardware;
 * same register layout and semantics as rtl930x_update_cntr().
 */
void rtl931x_update_cntr(int r, int released)
{
	int pos = (r % 3) * 10;		/* Bit position of ring r's counter */
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;		/* Current fill level of ring r */
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): writes back the old (unshifted) level, mirroring the
	 * RTL930x variant — confirm against the programming guide.
	 */
	sw_w32(v, reg);
}
251
/* Decoded CPU tag of a received packet (see the *_decode_tag helpers) */
struct dsa_tag {
	u8 reason;		/* Trap reason reported by the switch */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* Source switch port */
	u8 l2_offloaded;	/* Forwarding already handled by the switch */
	u8 prio;
	bool crc_error;
};
260
261 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
262 {
263 /* cpu_tag[0] is reserved. Fields are off-by-one */
264 t->reason = h->cpu_tag[4] & 0xf;
265 t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
266 t->port = h->cpu_tag[1] & 0x1f;
267 t->crc_error = t->reason == 13;
268
269 pr_debug("Reason: %d\n", t->reason);
270 if (t->reason != 6) // NIC_RX_REASON_SPECIAL_TRAP
271 t->l2_offloaded = 1;
272 else
273 t->l2_offloaded = 0;
274
275 return t->l2_offloaded;
276 }
277
278 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
279 {
280 /* cpu_tag[0] is reserved. Fields are off-by-one */
281 t->reason = h->cpu_tag[5] & 0x1f;
282 t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
283 t->port = h->cpu_tag[1] & 0x3f;
284 t->crc_error = h->cpu_tag[4] & BIT(6);
285
286 pr_debug("Reason: %d\n", t->reason);
287 if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
288 (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
289 t->l2_offloaded = 0;
290 else
291 t->l2_offloaded = 1;
292
293 return t->l2_offloaded;
294 }
295
296 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
297 {
298 t->reason = h->cpu_tag[7] & 0x3f;
299 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
300 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
301 t->crc_error = h->cpu_tag[1] & BIT(6);
302
303 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
304 if (t->reason >= 19 && t->reason <= 27)
305 t->l2_offloaded = 0;
306 else
307 t->l2_offloaded = 1;
308
309 return t->l2_offloaded;
310 }
311
312 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
313 {
314 t->reason = h->cpu_tag[7] & 0x3f;
315 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
316 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
317 t->crc_error = h->cpu_tag[1] & BIT(6);
318
319 if (t->reason != 63)
320 pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
321 if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA
322 t->l2_offloaded = 0;
323 else
324 t->l2_offloaded = 1;
325
326 return t->l2_offloaded;
327 }
328
329 /* Discard the RX ring-buffers, called as part of the net-ISR
330 * when the buffer runs over
331 */
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
	int r;
	u32 *last;
	struct p_hdr *h;
	struct ring_b *ring = priv->membase;

	for (r = 0; r < priv->rxrings; r++) {
		pr_debug("In %s working on r: %d\n", __func__, r);
		/* The ASIC's current write position in ring r */
		last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
		do {
			/* Bit 0 set: descriptor still owned by the switch, stop */
			if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
				break;
			pr_debug("Got something: %d\n", ring->c_rx[r]);
			h = &ring->rx_header[r][ring->c_rx[r]];
			/* Re-initialize the header and point it at its buffer */
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			         r * priv->rxringlen * RING_BUFFER +
			         ring->c_rx[r] * RING_BUFFER);
			h->size = RING_BUFFER;
			/* make sure the header is visible to the ASIC */
			mb();

			/* Hand the descriptor back to the switch (ownership bit 0);
			 * the last entry also carries WRAP.
			 * NOTE(review): the 0x1 in the non-wrap arm is redundant,
			 * the ownership bit is already OR'd in unconditionally.
			 */
			ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
			                               WRAP :
			                               0x1);
			ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
		} while (&ring->rx_r[r][ring->c_rx[r]] != last);
	}
}
362
/* Deferred-work container for FDB updates; macs[] is zero-terminated,
 * bit 63 of an entry flags an "add" (vs. delete) event.
 */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 guarantees room for the terminator */
};
368
369 void rtl838x_fdb_sync(struct work_struct *work)
370 {
371 const struct fdb_update_work *uw =
372 container_of(work, struct fdb_update_work, work);
373 struct switchdev_notifier_fdb_info info;
374 u8 addr[ETH_ALEN];
375 int i = 0;
376 int action;
377
378 while (uw->macs[i]) {
379 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
380 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
381 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
382 info.addr = &addr[0];
383 info.vid = 0;
384 info.offloaded = 1;
385 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
386 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
387 i++;
388 }
389 kfree(work);
390 }
391
/* Drain the RTL839x L2 notification ring: gather the valid FDB events of
 * each CPU-owned block into a work item and schedule rtl838x_fdb_sync to
 * report them via switchdev.
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* The notification buffer sits directly behind the DMA rings */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	while (!(nb->ring[e] & 1)) {	/* Bit 0 clear: block owned by the CPU */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			/* NOTE(review): skipped slots stay zero (kzalloc), and a zero
			 * mac terminates the consumer loop in rtl838x_fdb_sync — so
			 * events after a gap are dropped; confirm this is intended.
			 */
			if (!event->valid)
				continue;
			mac = event->mac;
			if (event->type)	/* Non-zero type: "add" — flagged in bit 63 */
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		w->macs[i] = 0ULL;	/* Terminate the list (i == NOTIFY_EVENTS here) */
		schedule_work(&w->work);
	}
	priv->lastEvent = e;	/* Remember where to resume on the next interrupt */
}
429
/* Interrupt handler for the RTL838x/RTL839x DMA interface.
 * Status bits: 19..16 TX done, 15..8 RX done (one per ring),
 * 7..0 RX buffer run-out, 22..20 L2 notification (RTL839x only).
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring; presumably the
		 * NAPI poll routine (not in this chunk) re-enables it.
		 */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: ack and give the buffers back to the ASIC */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		         status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification interrupts: ack each source separately
	 * and drain the notification ring.
	 */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
483
/* Interrupt handler for the RTL930x/RTL931x DMA interface, which splits
 * TX-done, RX-done and RX-run-out into separate status registers.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
	         __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings; presumably the
		 * NAPI poll routine (not in this chunk) re-enables them.
		 */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: ack and give the buffers back to the ASIC */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
		         status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
527
/* Register addresses and accessor callbacks for the RTL838x family */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
552
/* Register addresses and accessor callbacks for the RTL839x family */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
577
/* Register addresses and accessor callbacks for the RTL930x family,
 * which uses split RX/TX/run-out interrupt registers.
 */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
608
/* Register addresses and accessor callbacks for the RTL931x family,
 * which uses split RX/TX/run-out interrupt registers.
 */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
639
/* Reset the NIC/DMA part of the SoC: quiesce the CPU port, mask and clear
 * all DMA interrupts, pulse the NIC/queue reset bits and re-establish the
 * head-of-line settings. On RTL839x the L2-notification and NBUF settings
 * are preserved across the reset.
 */
static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
{
	u32 int_saved, nbuf;	/* Only written/used in the RTL8390 branches */
	u32 reset_mask;
	int i, pos;

	pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
	/* Stop TX/RX on the CPU port before resetting */
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
	mdelay(100);

	/* Disable and clear interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	if (priv->family_id == RTL8390_FAMILY_ID) {
		/* Preserve L2 notification and NBUF settings */
		int_saved = sw_r32(priv->r->dma_if_intr_msk);
		nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);

		/* Disable link change interrupt on RTL839x */
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
	}

	/* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		reset_mask = 0x6;
	else
		reset_mask = 0xc;

	sw_w32(reset_mask, priv->r->rst_glb_ctrl);

	do { /* Wait for reset of NIC and Queues done */
		udelay(20);
	} while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
	mdelay(100);

	/* Setup Head of Line */
	if (priv->family_id == RTL8380_FAMILY_ID)
		sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
	if (priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		/* Three 10-bit ring size/counter fields per 32-bit register */
		for (i = 0; i < priv->rxrings; i++) {
			pos = (i % 3) * 10;
			sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
			/* NOTE(review): rxringlen is written unshifted here, unlike
			 * the shifted writes in rtl93xx_hw_en_rxtx — confirm intent.
			 */
			sw_w32_mask(0x3ff << pos, priv->rxringlen,
			            priv->r->dma_if_rx_ring_cntr(i));
		}
	}

	/* Re-enable link change interrupt */
	if (priv->family_id == RTL8390_FAMILY_ID) {
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
		sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);

		/* Restore notification settings: on RTL838x these bits are null */
		sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
		sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	}
}
715
716 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
717 {
718 int i;
719 struct ring_b *ring = priv->membase;
720
721 for (i = 0; i < priv->rxrings; i++)
722 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
723
724 for (i = 0; i < TXRINGS; i++)
725 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
726 }
727
/* Enable RX/TX DMA and CPU-port traffic on the RTL838x.
 * Register write order follows the hardware bring-up sequence.
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
754
/* Enable RX/TX DMA and CPU-port traffic on the RTL839x, and join the
 * CPU port to the lookup-miss flooding portmask.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask via an indirect
	 * L2 table read (0x28000) / modify / write (0x38000) sequence.
	 */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
778
/* Enable RX/TX DMA and CPU-port traffic on RTL930x/RTL931x, program the
 * per-ring sizes/counters and join the CPU port to the unknown-unicast
 * flooding portmask.
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	for (i = 0; i < priv->rxrings; i++) {
		/* Three 10-bit fields per 32-bit ring size/counter register */
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins the unknown-unicast flooding portmask */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force the CPU port MAC config (family-specific magic values) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
817
/* Initialize all RX and TX descriptor rings: point each header at its
 * packet buffer and build the descriptor words. RX descriptors start out
 * owned by the switch (bit 0 set); TX descriptors stay owned by the CPU.
 */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
	int i, j;
	struct p_hdr *h;

	for (i = 0; i < priv->rxrings; i++) {
		for (j = 0; j < priv->rxringlen; j++) {
			h = &ring->rx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->rx_space +
			         i * priv->rxringlen * RING_BUFFER +
			         j * RING_BUFFER);
			h->size = RING_BUFFER;
			/* All rings owned by switch, last one wraps */
			ring->rx_r[i][j] = KSEG1ADDR(h) | 1 | (j == (priv->rxringlen - 1) ?
			                   WRAP :
			                   0);
		}
		ring->c_rx[i] = 0;
	}

	for (i = 0; i < TXRINGS; i++) {
		for (j = 0; j < TXRINGLEN; j++) {
			h = &ring->tx_header[i][j];
			memset(h, 0, sizeof(struct p_hdr));
			h->buf = (u8 *)KSEG1ADDR(ring->tx_space +
			         i * TXRINGLEN * RING_BUFFER +
			         j * RING_BUFFER);
			h->size = RING_BUFFER;
			ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
		}
		/* Last header is wrapping around (j == TXRINGLEN after the loop) */
		ring->tx_r[i][j - 1] |= WRAP;
		ring->c_tx[i] = 0;
	}
}
854
/* Initialize the RTL839x L2 notification ring (all blocks owned by the
 * switch, last one wraps) and enable FDB-change notifications.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	/* Tell the hardware where the ring lives and set the event threshold */
	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;	/* Consumer starts at ring index 0 */
}
874
/* ndo_open callback: reset the hardware, build the DMA rings, start
 * phylink and NAPI, then enable RX/TX with family-specific setup.
 * Returns 0 (cannot fail).
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;
	int i;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
	         __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	/* Family-specific RX/TX enable and trap setup */
	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;

	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;

	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;

	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);

		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);

		// Disable External CPU access to switch, clear EXT_CPU_EN
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);

		// Set PCIE_PWR_DOWN
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
946
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Quiesce the datapath: stop CPU-port traffic, disable DMA TX/RX,
	 * block/flush the L2 tables, force the CPU-port link down, then mask
	 * and acknowledge all DMA interrupts before shutting off the DMA
	 * engine. Magic per-family constants below come from vendor code.
	 */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			/* Busy-wait until the ASIC clears the flush-busy bit */
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		/* 93xx: separate mask/status registers; writing 1s to the
		 * status registers acknowledges any pending interrupts.
		 */
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
1010
1011 static int rtl838x_eth_stop(struct net_device *ndev)
1012 {
1013 unsigned long flags;
1014 int i;
1015 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1016
1017 pr_info("in %s\n", __func__);
1018
1019 phylink_stop(priv->phylink);
1020 rtl838x_hw_stop(priv);
1021
1022 for (i = 0; i < priv->rxrings; i++)
1023 napi_disable(&priv->rx_qs[i].napi);
1024
1025 netif_tx_stop_all_queues(ndev);
1026
1027 return 0;
1028 }
1029
1030 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1031 {
1032 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1033 * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
1034 */
1035 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1036 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1037 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1038 }
1039 if (ndev->flags & IFF_ALLMULTI)
1040 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1041 if (ndev->flags & IFF_PROMISC) {
1042 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1043 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1044 }
1045 }
1046
1047 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1048 {
1049 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1050 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1051 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1052 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1053 */
1054 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1055 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1056 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1057 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1058 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1059 }
1060 if (ndev->flags & IFF_ALLMULTI) {
1061 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1062 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1063 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1064 }
1065 if (ndev->flags & IFF_PROMISC) {
1066 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1067 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1068 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1069 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1070 }
1071 }
1072
1073 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1074 {
1075 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1076 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1077 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1078 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1079 */
1080 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1081 sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
1082 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
1083 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
1084 } else {
1085 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1086 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1087 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1088 }
1089 }
1090
1091 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1092 {
1093 /* Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1094 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1095 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
1096 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1097 */
1098 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1099 sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
1100 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
1101 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
1102 } else {
1103 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1104 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1105 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1106 }
1107 }
1108
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	/* ndo_tx_timeout callback: recover from a stuck TX path by fully
	 * stopping the hardware and re-initializing the DMA rings, then
	 * re-enabling TX/RX. Runs under the driver lock so it cannot race
	 * with the TX and RX paths.
	 */
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1123
/* ndo_start_xmit callback: place one skb on TX ring q. If the frame carries
 * the trailing 4-byte DSA tag (0x80 <port> 0x10 0x00), the tag is stripped
 * and turned into a CPU-tag header so its space can be reused for the CRC.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ASIC still owns the
 * descriptor.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;	/* < 0: no CPU-tag, let the switch forward by FDB */
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	if (netdev_uses_dsa(dev) &&
	    skb->data[len - 4] == 0x80 &&
	    skb->data[len - 3] < priv->cpu_port &&
	    skb->data[len - 2] == 0x10 &&
	    skb->data[len - 1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len - 3];
		skb->data[len - 4] = skb->data[len - 3] = skb->data[len - 2] = skb->data[len - 1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	if (skb_padto(skb, len)) {
		/* skb was freed by skb_padto() on failure */
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			/* Bounded poll until both TX_EN and RX_EN (0xc) read back */
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}

txdone:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
1220
1221 /* Return queue number for TX. On the RTL83XX, these queues have equal priority
1222 * so we do round-robin
1223 */
1224 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1225 struct net_device *sb_dev)
1226 {
1227 static u8 last = 0;
1228
1229 last++;
1230 return last % TXRINGS;
1231 }
1232
1233 /* Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1234 */
1235 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1236 struct net_device *sb_dev)
1237 {
1238 if (skb->priority >= TC_PRIO_CONTROL)
1239 return 1;
1240
1241 return 0;
1242 }
1243
1244 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1245 {
1246 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1247 struct ring_b *ring = priv->membase;
1248 struct sk_buff *skb;
1249 LIST_HEAD(rx_list);
1250 unsigned long flags;
1251 int i, len, work_done = 0;
1252 u8 *data, *skb_data;
1253 unsigned int val;
1254 u32 *last;
1255 struct p_hdr *h;
1256 bool dsa = netdev_uses_dsa(dev);
1257 struct dsa_tag tag;
1258
1259 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1260 spin_lock_irqsave(&priv->lock, flags);
1261 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1262
1263 do {
1264 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1265 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1266 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1267 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1268 }
1269 break;
1270 }
1271
1272 h = &ring->rx_header[r][ring->c_rx[r]];
1273 data = (u8 *)KSEG1ADDR(h->buf);
1274 len = h->len;
1275 if (!len)
1276 break;
1277 work_done++;
1278
1279 len -= 4; /* strip the CRC */
1280 /* Add 4 bytes for cpu_tag */
1281 if (dsa)
1282 len += 4;
1283
1284 skb = netdev_alloc_skb(dev, len + 4);
1285 skb_reserve(skb, NET_IP_ALIGN);
1286
1287 if (likely(skb)) {
1288 /* BUG: Prevent bug on RTL838x SoCs */
1289 if (priv->family_id == RTL8380_FAMILY_ID) {
1290 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1291 for (i = 0; i < priv->rxrings; i++) {
1292 /* Update each ring cnt */
1293 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1294 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1295 }
1296 }
1297
1298 skb_data = skb_put(skb, len);
1299 /* Make sure data is visible */
1300 mb();
1301 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1302 /* Overwrite CRC with cpu_tag */
1303 if (dsa) {
1304 priv->r->decode_tag(h, &tag);
1305 skb->data[len - 4] = 0x80;
1306 skb->data[len - 3] = tag.port;
1307 skb->data[len - 2] = 0x10;
1308 skb->data[len - 1] = 0x00;
1309 if (tag.l2_offloaded)
1310 skb->data[len - 3] |= 0x40;
1311 }
1312
1313 if (tag.queue >= 0)
1314 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1315 tag.queue, len, tag.reason, tag.port);
1316
1317 skb->protocol = eth_type_trans(skb, dev);
1318 if (dev->features & NETIF_F_RXCSUM) {
1319 if (tag.crc_error)
1320 skb_checksum_none_assert(skb);
1321 else
1322 skb->ip_summed = CHECKSUM_UNNECESSARY;
1323 }
1324 dev->stats.rx_packets++;
1325 dev->stats.rx_bytes += len;
1326
1327 list_add_tail(&skb->list, &rx_list);
1328 } else {
1329 if (net_ratelimit())
1330 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1331 dev->stats.rx_dropped++;
1332 }
1333
1334 /* Reset header structure */
1335 memset(h, 0, sizeof(struct p_hdr));
1336 h->buf = data;
1337 h->size = RING_BUFFER;
1338
1339 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 | (ring->c_rx[r] == (priv->rxringlen - 1) ?
1340 WRAP :
1341 0x1);
1342 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1343 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1344 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1345
1346 netif_receive_skb_list(&rx_list);
1347
1348 // Update counters
1349 priv->r->update_cntr(r, 0);
1350
1351 spin_unlock_irqrestore(&priv->lock, flags);
1352
1353 return work_done;
1354 }
1355
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	/* NAPI poll handler for one RX ring: repeatedly drain the ring until
	 * the budget is used up or the ring is empty, then complete NAPI and
	 * re-enable this ring's RX interrupt.
	 */
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}

	return work_done;
}
1383
1384
1385 static void rtl838x_validate(struct phylink_config *config,
1386 unsigned long *supported,
1387 struct phylink_link_state *state)
1388 {
1389 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1390
1391 pr_debug("In %s\n", __func__);
1392
1393 if (!phy_interface_mode_is_rgmii(state->interface) &&
1394 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1395 state->interface != PHY_INTERFACE_MODE_MII &&
1396 state->interface != PHY_INTERFACE_MODE_REVMII &&
1397 state->interface != PHY_INTERFACE_MODE_GMII &&
1398 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1399 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1400 state->interface != PHY_INTERFACE_MODE_SGMII) {
1401 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1402 pr_err("Unsupported interface: %d\n", state->interface);
1403 return;
1404 }
1405
1406 /* Allow all the expected bits */
1407 phylink_set(mask, Autoneg);
1408 phylink_set_port_modes(mask);
1409 phylink_set(mask, Pause);
1410 phylink_set(mask, Asym_Pause);
1411
1412 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1413 * including Half duplex
1414 */
1415 if (state->interface != PHY_INTERFACE_MODE_MII &&
1416 state->interface != PHY_INTERFACE_MODE_REVMII) {
1417 phylink_set(mask, 1000baseT_Full);
1418 phylink_set(mask, 1000baseT_Half);
1419 }
1420
1421 phylink_set(mask, 10baseT_Half);
1422 phylink_set(mask, 10baseT_Full);
1423 phylink_set(mask, 100baseT_Half);
1424 phylink_set(mask, 100baseT_Full);
1425
1426 bitmap_and(supported, supported, mask,
1427 __ETHTOOL_LINK_MODE_MASK_NBITS);
1428 bitmap_and(state->advertising, state->advertising, mask,
1429 __ETHTOOL_LINK_MODE_MASK_NBITS);
1430 }
1431
1432
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* Only ever invoked for the master device, i.e. the CPU port,
	 * where no MAC reconfiguration is necessary - intentionally empty.
	 */
	pr_info("In %s, mode %x\n", __func__, mode);
}
1443
static void rtl838x_mac_an_restart(struct phylink_config *config)
{
	/* phylink an_restart callback: restart autonegotiation on the
	 * CPU port by bouncing the forced link state.
	 */
	struct net_device *dev = container_of(config->dev, struct net_device, dev);
	struct rtl838x_eth_priv *priv = netdev_priv(dev);

	/* This works only on RTL838x chips */
	if (priv->family_id != RTL8380_FAMILY_ID)
		return;

	pr_debug("In %s\n", __func__);
	/* Restart by disabling and re-enabling link */
	/* NOTE(review): 0x6192D/0x6192F are magic force-mode values (same
	 * base as the 0x6192C used in rtl838x_hw_stop, differing in the low
	 * bits) - exact bit semantics not documented here.
	 */
	sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	mdelay(20);
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
1459
1460 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1461 struct phylink_link_state *state)
1462 {
1463 u32 speed;
1464 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1465 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1466 int port = priv->cpu_port;
1467
1468 pr_info("In %s\n", __func__);
1469
1470 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1471 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1472
1473 pr_info("%s link status is %d\n", __func__, state->link);
1474 speed = priv->r->get_mac_link_spd_sts(port);
1475 switch (speed) {
1476 case 0:
1477 state->speed = SPEED_10;
1478 break;
1479 case 1:
1480 state->speed = SPEED_100;
1481 break;
1482 case 2:
1483 state->speed = SPEED_1000;
1484 break;
1485 case 5:
1486 state->speed = SPEED_2500;
1487 break;
1488 case 6:
1489 state->speed = SPEED_5000;
1490 break;
1491 case 4:
1492 state->speed = SPEED_10000;
1493 break;
1494 default:
1495 state->speed = SPEED_UNKNOWN;
1496 break;
1497 }
1498
1499 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1500 if (priv->r->get_mac_rx_pause_sts(port))
1501 state->pause |= MLO_PAUSE_RX;
1502 if (priv->r->get_mac_tx_pause_sts(port))
1503 state->pause |= MLO_PAUSE_TX;
1504 }
1505
1506 static void rtl838x_mac_link_down(struct phylink_config *config,
1507 unsigned int mode,
1508 phy_interface_t interface)
1509 {
1510 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1511 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1512
1513 pr_debug("In %s\n", __func__);
1514 /* Stop TX/RX to port */
1515 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1516 }
1517
1518 static void rtl838x_mac_link_up(struct phylink_config *config,
1519 struct phy_device *phy, unsigned int mode,
1520 phy_interface_t interface, int speed, int duplex,
1521 bool tx_pause, bool rx_pause)
1522 {
1523 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1524 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1525
1526 pr_debug("In %s\n", __func__);
1527 /* Restart TX/RX to port */
1528 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1529 }
1530
1531 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1532 {
1533 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1534 unsigned long flags;
1535
1536 spin_lock_irqsave(&priv->lock, flags);
1537 pr_debug("In %s\n", __func__);
1538 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1539 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1540
1541 if (priv->family_id == RTL8380_FAMILY_ID) {
1542 /* 2 more registers, ALE/MAC block */
1543 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1544 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1545 (RTL838X_MAC_ALE + 4));
1546
1547 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1548 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1549 RTL838X_MAC2 + 4);
1550 }
1551 spin_unlock_irqrestore(&priv->lock, flags);
1552 }
1553
1554 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1555 {
1556 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1557 const struct sockaddr *addr = p;
1558 u8 *mac = (u8 *) (addr->sa_data);
1559
1560 if (!is_valid_ether_addr(addr->sa_data))
1561 return -EADDRNOTAVAIL;
1562
1563 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1564 rtl838x_set_mac_hw(dev, mac);
1565
1566 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1567
1568 return 0;
1569 }
1570
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	/* TODO: EEE and the egress-rate limitation still need setting up */
	return 0;
}
1576
1577 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1578 {
1579 int i;
1580
1581 if (priv->family_id == 0x8390)
1582 return rtl8390_init_mac(priv);
1583
1584 /* At present we do not know how to set up EEE on any other SoC than RTL8380 */
1585 if (priv->family_id != 0x8380)
1586 return 0;
1587
1588 pr_info("%s\n", __func__);
1589 /* fix timer for EEE */
1590 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1591 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1592
1593 /* Init VLAN. TODO: Understand what is being done, here */
1594 if (priv->id == 0x8382) {
1595 for (i = 0; i <= 28; i++)
1596 sw_w32(0, 0xd57c + i * 0x80);
1597 }
1598 if (priv->id == 0x8380) {
1599 for (i = 8; i <= 28; i++)
1600 sw_w32(0, 0xd57c + i * 0x80);
1601 }
1602
1603 return 0;
1604 }
1605
1606 static int rtl838x_get_link_ksettings(struct net_device *ndev,
1607 struct ethtool_link_ksettings *cmd)
1608 {
1609 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1610
1611 pr_debug("%s called\n", __func__);
1612
1613 return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1614 }
1615
1616 static int rtl838x_set_link_ksettings(struct net_device *ndev,
1617 const struct ethtool_link_ksettings *cmd)
1618 {
1619 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1620
1621 pr_debug("%s called\n", __func__);
1622
1623 return phylink_ethtool_ksettings_set(priv->phylink, cmd);
1624 }
1625
1626 static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1627 {
1628 u32 val;
1629 int err;
1630 struct rtl838x_eth_priv *priv = bus->priv;
1631
1632 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1633 return rtl838x_read_sds_phy(mii_id, regnum);
1634
1635 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1636 err = rtl838x_read_mmd_phy(mii_id,
1637 mdiobus_c45_devad(regnum),
1638 regnum, &val);
1639 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1640 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1641 val, err);
1642 } else {
1643 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1644 err = rtl838x_read_phy(mii_id, page, regnum, &val);
1645 }
1646 if (err)
1647 return err;
1648
1649 return val;
1650 }
1651
/* Plain (non-paged) MDIO read: paged read with the default page 0 */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1656
1657 static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1658 {
1659 u32 val;
1660 int err;
1661 struct rtl838x_eth_priv *priv = bus->priv;
1662
1663 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1664 return rtl839x_read_sds_phy(mii_id, regnum);
1665
1666 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1667 err = rtl839x_read_mmd_phy(mii_id,
1668 mdiobus_c45_devad(regnum),
1669 regnum, &val);
1670 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1671 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1672 val, err);
1673 } else {
1674 err = rtl839x_read_phy(mii_id, page, regnum, &val);
1675 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1676 }
1677
1678 if (err)
1679 return err;
1680
1681 return val;
1682 }
1683
/* Plain (non-paged) MDIO read: paged read with the default page 0 */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1688
1689 static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1690 {
1691 u32 val;
1692 int err;
1693 struct rtl838x_eth_priv *priv = bus->priv;
1694
1695 if (priv->phy_is_internal[mii_id])
1696 return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
1697
1698 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1699 err = rtl930x_read_mmd_phy(mii_id,
1700 mdiobus_c45_devad(regnum),
1701 regnum, &val);
1702 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1703 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1704 val, err);
1705 } else {
1706 err = rtl930x_read_phy(mii_id, page, regnum, &val);
1707 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1708 }
1709
1710 if (err)
1711 return err;
1712
1713 return val;
1714 }
1715
/* Plain (non-paged) MDIO read: paged read with the default page 0 */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1720
1721 static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1722 {
1723 u32 val;
1724 int err, v;
1725 struct rtl838x_eth_priv *priv = bus->priv;
1726
1727 pr_debug("%s: In here, port %d\n", __func__, mii_id);
1728 if (priv->phy_is_internal[mii_id]) {
1729 v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
1730 if (v < 0) {
1731 err = v;
1732 } else {
1733 err = 0;
1734 val = v;
1735 }
1736 } else {
1737 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1738 err = rtl931x_read_mmd_phy(mii_id,
1739 mdiobus_c45_devad(regnum),
1740 regnum, &val);
1741 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1742 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1743 val, err);
1744 } else {
1745 err = rtl931x_read_phy(mii_id, page, regnum, &val);
1746 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1747 }
1748 }
1749
1750 if (err)
1751 return err;
1752
1753 return val;
1754 }
1755
/* Plain (non-paged) MDIO read: paged read with the default page 0 */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1760
1761 static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1762 int regnum, u16 value)
1763 {
1764 u32 offset = 0;
1765 struct rtl838x_eth_priv *priv = bus->priv;
1766 int err;
1767
1768 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1769 if (mii_id == 26)
1770 offset = 0x100;
1771 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1772 return 0;
1773 }
1774
1775 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1776 err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1777 regnum, value);
1778 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1779 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1780 value, err);
1781
1782 return err;
1783 }
1784 err = rtl838x_write_phy(mii_id, page, regnum, value);
1785 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1786
1787 return err;
1788 }
1789
/* Plain (non-paged) MDIO write: paged write with the default page 0 */
static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1795
1796 static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1797 int regnum, u16 value)
1798 {
1799 struct rtl838x_eth_priv *priv = bus->priv;
1800 int err;
1801
1802 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1803 return rtl839x_write_sds_phy(mii_id, regnum, value);
1804
1805 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1806 err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1807 regnum, value);
1808 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1809 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1810 value, err);
1811
1812 return err;
1813 }
1814
1815 err = rtl839x_write_phy(mii_id, page, regnum, value);
1816 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1817
1818 return err;
1819 }
1820
/* Plain (non-paged) MDIO write: paged write with the default page 0 */
static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1826
1827 static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1828 int regnum, u16 value)
1829 {
1830 struct rtl838x_eth_priv *priv = bus->priv;
1831 int err;
1832
1833 if (priv->phy_is_internal[mii_id])
1834 return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
1835
1836 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
1837 return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1838 regnum, value);
1839
1840 err = rtl930x_write_phy(mii_id, page, regnum, value);
1841 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1842
1843 return err;
1844 }
1845
/* Plain (non-paged) MDIO write: paged write with the default page 0 */
static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1851
1852 static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1853 int regnum, u16 value)
1854 {
1855 struct rtl838x_eth_priv *priv = bus->priv;
1856 int err;
1857
1858 if (priv->phy_is_internal[mii_id])
1859 return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
1860
1861 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1862 err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1863 regnum, value);
1864 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1865 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1866 value, err);
1867
1868 return err;
1869 }
1870
1871 err = rtl931x_write_phy(mii_id, page, regnum, value);
1872 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1873
1874 return err;
1875 }
1876
/* Plain (non-paged) MDIO write: paged write with the default page 0 */
static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1882
static int rtl838x_mdio_reset(struct mii_bus *bus)
{
	/* mii_bus reset callback: take PHY management away from the MAC's
	 * automatic polling so the SoC can configure the PHYs directly.
	 */
	pr_debug("%s called\n", __func__);
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);

	/* Enable PHY control via SoC */
	sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1895
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	/* mii_bus reset callback for RTL839x. The early return deliberately
	 * disables the code below: per the BUG note, turning off polling
	 * this way does not work on this family, so the reset is a no-op.
	 */
	return 0;

	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
}
1911
/* Per-port bit offset of the MAC-type field in RTL930X_SMI_MAC_TYPE_CTRL;
 * used by rtl930x_mdio_reset() to program each port's MAC type.
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
1914
/* Reset/initialize the RTL930x SMI (MDIO) master.
 *
 * Maps each front-panel port to an SMI bus and PHY address from the
 * device tree data cached in priv, selects which busses are polled
 * via clause 45, programs the per-port MAC type derived from the
 * configured phy-interface mode, and finally sets up the polling
 * "magic" registers for USXGMII/HiSGMII PHYs.
 */
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[2];
	u32 poll_ctrl = 0;
	u32 private_poll_mask = 0;
	u32 v;
	bool uses_usxgmii = false; /* For the Aquantia PHYs */
	bool uses_hisgmii = false; /* For the RTL8221/8226 */

	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = 0;
	for (i = 0; i < RTL930X_CPU_PORT; i++) {
		/* smi_bus > 3 marks a port without an SMI-attached PHY */
		if (priv->smi_bus[i] > 3)
			continue;
		/* 5-bit PHY address per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
			    RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);

		/* 2-bit bus select per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
	}

	/* Configure which SMI bus is behind which port number */
	sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
	sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);

	/* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
	sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);

	/* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
	for (i = 0; i < 4; i++)
		if (priv->smi_bus_isc45[i])
			c45_mask |= BIT(i + 16);

	pr_info("c45_mask: %08x\n", c45_mask);
	sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);

	/* Set the MAC type of each port according to the PHY-interface
	 * Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0
	 */
	v = 0;
	for (i = 0; i < RTL930X_CPU_PORT; i++) {
		switch (priv->interfaces[i]) {
		case PHY_INTERFACE_MODE_10GBASER:
			break; /* Serdes: Value = 0 */
		case PHY_INTERFACE_MODE_HSGMII:
			private_poll_mask |= BIT(i);
			/* fallthrough */
		case PHY_INTERFACE_MODE_USXGMII:
			v |= BIT(mac_type_bit[i]);
			uses_usxgmii = true;
			break;
		case PHY_INTERFACE_MODE_QSGMII:
			private_poll_mask |= BIT(i);
			v |= 3 << mac_type_bit[i];
			break;
		default:
			break;
		}
	}
	sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);

	/* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
	sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);

	/* The following magic values are found in the port configuration, they seem to
	 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
	 * the XGS1250 and the RTL8226 of the XGS1210
	 */
	if (uses_usxgmii) {
		sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}
	/* NOTE(review): uses_hisgmii is never set to true anywhere in this
	 * function, so the block below is currently dead code. Presumably
	 * the HSGMII case above was meant to set it — confirm against the
	 * RTL8221/8226 bring-up sequence before changing behavior.
	 */
	if (uses_hisgmii) {
		sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
		sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
		sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
	}

	pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_GLB_CTRL));
	pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
	pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
	pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
		 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
	pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
		 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));

	return 0;
}
2018
/* Reset/initialize the RTL931x SMI (MDIO) master.
 *
 * Disables port polling, maps each of the 56 front-panel ports to an
 * SMI bus and PHY address, enables MDC output for each bus actually in
 * use, and selects clause-45 access for busses carrying c45 PHYs.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	/* Disable port polling for configuration purposes */
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	/* Mapping of port to phy-addresses on an SMI bus */
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (i = 0; i < 56; i++) {
		/* 5-bit PHY address per port, 6 ports per register */
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
		/* 2-bit bus select per port, 16 ports per register */
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		mdc_on[priv->smi_bus[i]] = true;
	}

	/* Configure which SMI bus is behind which port number */
	for (i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	/* Configure which SMI busses */
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (i = 0; i < 4; i++) {
		/* bus is polled in c45 */
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
		/* Enable bus access via MDC */
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	 * sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	 * sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	 * sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	 */
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
2077
/* One-time RTL931x switch-core memory initialization: triggers the
 * self-init of the ENCAP, MIB, ACL (PIE) and ALE memories and busy-waits
 * for each to complete, then enables ESD auto recovery and sets up SPI.
 *
 * NOTE(review): the completion polls below spin without a timeout; if
 * the hardware never clears the init bit, this loops forever. Consider
 * a bounded wait — confirm expected init latency first.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	/* Initialize Encapsulation memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	/* Initialize Management Information Base memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	/* Initialize ACL (PIE) memory and wait until finished */
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	/* Initialize ALE memory and wait until finished */
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	/* Enable ESD auto recovery */
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	/* Init SPI, is this for thermal control or what? */
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
2113
2114 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
2115 {
2116 struct device_node *mii_np, *dn;
2117 u32 pn;
2118 int ret;
2119
2120 pr_debug("%s called\n", __func__);
2121 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
2122
2123 if (!mii_np) {
2124 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
2125 return -ENODEV;
2126 }
2127
2128 if (!of_device_is_available(mii_np)) {
2129 ret = -ENODEV;
2130 goto err_put_node;
2131 }
2132
2133 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
2134 if (!priv->mii_bus) {
2135 ret = -ENOMEM;
2136 goto err_put_node;
2137 }
2138
2139 switch(priv->family_id) {
2140 case RTL8380_FAMILY_ID:
2141 priv->mii_bus->name = "rtl838x-eth-mdio";
2142 priv->mii_bus->read = rtl838x_mdio_read;
2143 priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
2144 priv->mii_bus->write = rtl838x_mdio_write;
2145 priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
2146 priv->mii_bus->reset = rtl838x_mdio_reset;
2147 break;
2148 case RTL8390_FAMILY_ID:
2149 priv->mii_bus->name = "rtl839x-eth-mdio";
2150 priv->mii_bus->read = rtl839x_mdio_read;
2151 priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
2152 priv->mii_bus->write = rtl839x_mdio_write;
2153 priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
2154 priv->mii_bus->reset = rtl839x_mdio_reset;
2155 break;
2156 case RTL9300_FAMILY_ID:
2157 priv->mii_bus->name = "rtl930x-eth-mdio";
2158 priv->mii_bus->read = rtl930x_mdio_read;
2159 priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
2160 priv->mii_bus->write = rtl930x_mdio_write;
2161 priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
2162 priv->mii_bus->reset = rtl930x_mdio_reset;
2163 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2164 break;
2165 case RTL9310_FAMILY_ID:
2166 priv->mii_bus->name = "rtl931x-eth-mdio";
2167 priv->mii_bus->read = rtl931x_mdio_read;
2168 priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
2169 priv->mii_bus->write = rtl931x_mdio_write;
2170 priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
2171 priv->mii_bus->reset = rtl931x_mdio_reset;
2172 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2173 break;
2174 }
2175 priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
2176 priv->mii_bus->priv = priv;
2177 priv->mii_bus->parent = &priv->pdev->dev;
2178
2179 for_each_node_by_name(dn, "ethernet-phy") {
2180 u32 smi_addr[2];
2181
2182 if (of_property_read_u32(dn, "reg", &pn))
2183 continue;
2184
2185 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
2186 smi_addr[0] = 0;
2187 smi_addr[1] = pn;
2188 }
2189
2190 if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
2191 priv->sds_id[pn] = -1;
2192 else {
2193 pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
2194 }
2195
2196 if (pn < MAX_PORTS) {
2197 priv->smi_bus[pn] = smi_addr[0];
2198 priv->smi_addr[pn] = smi_addr[1];
2199 } else {
2200 pr_err("%s: illegal port number %d\n", __func__, pn);
2201 }
2202
2203 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
2204 priv->smi_bus_isc45[smi_addr[0]] = true;
2205
2206 if (of_property_read_bool(dn, "phy-is-integrated")) {
2207 priv->phy_is_internal[pn] = true;
2208 }
2209 }
2210
2211 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
2212 if (!dn) {
2213 dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
2214 return -ENODEV;
2215 }
2216
2217 for_each_node_by_name(dn, "port") {
2218 if (of_property_read_u32(dn, "reg", &pn))
2219 continue;
2220 pr_debug("%s Looking at port %d\n", __func__, pn);
2221 if (pn > priv->cpu_port)
2222 continue;
2223 if (of_get_phy_mode(dn, &priv->interfaces[pn]))
2224 priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
2225 pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
2226 }
2227
2228 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
2229 ret = of_mdiobus_register(priv->mii_bus, mii_np);
2230
2231 err_put_node:
2232 of_node_put(mii_np);
2233
2234 return ret;
2235 }
2236
2237 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
2238 {
2239 pr_debug("%s called\n", __func__);
2240 if (!priv->mii_bus)
2241 return 0;
2242
2243 mdiobus_unregister(priv->mii_bus);
2244 mdiobus_free(priv->mii_bus);
2245
2246 return 0;
2247 }
2248
/* ndo_fix_features callback: no per-device constraints, so the
 * requested feature set is accepted unchanged.
 */
static netdev_features_t rtl838x_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	return features;
}
2254
2255 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
2256 {
2257 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2258
2259 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2260 if (!(features & NETIF_F_RXCSUM))
2261 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2262 else
2263 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
2264 }
2265
2266 return 0;
2267 }
2268
2269 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
2270 {
2271 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2272
2273 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2274 if (!(features & NETIF_F_RXCSUM))
2275 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2276 else
2277 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2278 }
2279
2280 return 0;
2281 }
2282
/* netdev callbacks for the RTL838x family */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2296
/* netdev callbacks for the RTL839x family; differs from RTL838x only
 * in the multicast-list handler
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2310
/* netdev callbacks for the RTL930x family (93xx queue selection and
 * feature handling)
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2324
/* netdev callbacks for the RTL931x family.
 * NOTE(review): unlike the other three families this table has no
 * .ndo_setup_tc entry — confirm whether tc offload is intentionally
 * unsupported on RTL931x.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
2337
/* phylink MAC operations shared by all supported SoC families */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
2346
/* ethtool operations: only link settings get/set are implemented */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
2351
2352 static int __init rtl838x_eth_probe(struct platform_device *pdev)
2353 {
2354 struct net_device *dev;
2355 struct device_node *dn = pdev->dev.of_node;
2356 struct rtl838x_eth_priv *priv;
2357 struct resource *res, *mem;
2358 phy_interface_t phy_mode;
2359 struct phylink *phylink;
2360 int err = 0, i, rxrings, rxringlen;
2361 struct ring_b *ring;
2362
2363 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2364 (u32)pdev, (u32)(&(pdev->dev)));
2365
2366 if (!dn) {
2367 dev_err(&pdev->dev, "No DT found\n");
2368 return -EINVAL;
2369 }
2370
2371 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2372 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2373 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2374 rxringlen = MAX_ENTRIES / rxrings;
2375 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2376
2377 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2378 if (!dev) {
2379 err = -ENOMEM;
2380 goto err_free;
2381 }
2382 SET_NETDEV_DEV(dev, &pdev->dev);
2383 priv = netdev_priv(dev);
2384
2385 /* obtain buffer memory space */
2386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2387 if (res) {
2388 mem = devm_request_mem_region(&pdev->dev, res->start,
2389 resource_size(res), res->name);
2390 if (!mem) {
2391 dev_err(&pdev->dev, "cannot request memory space\n");
2392 err = -ENXIO;
2393 goto err_free;
2394 }
2395
2396 dev->mem_start = mem->start;
2397 dev->mem_end = mem->end;
2398 } else {
2399 dev_err(&pdev->dev, "cannot request IO resource\n");
2400 err = -ENXIO;
2401 goto err_free;
2402 }
2403
2404 /* Allocate buffer memory */
2405 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER +
2406 sizeof(struct ring_b) + sizeof(struct notify_b),
2407 (void *)&dev->mem_start, GFP_KERNEL);
2408 if (!priv->membase) {
2409 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2410 err = -ENOMEM;
2411 goto err_free;
2412 }
2413
2414 // Allocate ring-buffer space at the end of the allocated memory
2415 ring = priv->membase;
2416 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2417
2418 spin_lock_init(&priv->lock);
2419
2420 dev->ethtool_ops = &rtl838x_ethtool_ops;
2421 dev->min_mtu = ETH_ZLEN;
2422 dev->max_mtu = 1536;
2423 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2424 dev->hw_features = NETIF_F_RXCSUM;
2425
2426 priv->id = soc_info.id;
2427 priv->family_id = soc_info.family;
2428 if (priv->id) {
2429 pr_info("Found SoC ID: %4x: %s, family %x\n",
2430 priv->id, soc_info.name, priv->family_id);
2431 } else {
2432 pr_err("Unknown chip id (%04x)\n", priv->id);
2433 return -ENODEV;
2434 }
2435
2436 switch (priv->family_id) {
2437 case RTL8380_FAMILY_ID:
2438 priv->cpu_port = RTL838X_CPU_PORT;
2439 priv->r = &rtl838x_reg;
2440 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2441 break;
2442 case RTL8390_FAMILY_ID:
2443 priv->cpu_port = RTL839X_CPU_PORT;
2444 priv->r = &rtl839x_reg;
2445 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2446 break;
2447 case RTL9300_FAMILY_ID:
2448 priv->cpu_port = RTL930X_CPU_PORT;
2449 priv->r = &rtl930x_reg;
2450 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2451 break;
2452 case RTL9310_FAMILY_ID:
2453 priv->cpu_port = RTL931X_CPU_PORT;
2454 priv->r = &rtl931x_reg;
2455 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2456 rtl931x_chip_init(priv);
2457 break;
2458 default:
2459 pr_err("Unknown SoC family\n");
2460 return -ENODEV;
2461 }
2462 priv->rxringlen = rxringlen;
2463 priv->rxrings = rxrings;
2464
2465 /* Obtain device IRQ number */
2466 dev->irq = platform_get_irq(pdev, 0);
2467 if (dev->irq < 0) {
2468 dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
2469 goto err_free;
2470 }
2471
2472 err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
2473 IRQF_SHARED, dev->name, dev);
2474 if (err) {
2475 dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
2476 __func__, err);
2477 goto err_free;
2478 }
2479
2480 rtl8380_init_mac(priv);
2481
2482 /* Try to get mac address in the following order:
2483 * 1) from device tree data
2484 * 2) from internal registers set by bootloader
2485 */
2486 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2487 if (is_valid_ether_addr(dev->dev_addr)) {
2488 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2489 } else {
2490 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2491 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2492 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2493 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2494 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2495 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2496 }
2497 /* if the address is invalid, use a random value */
2498 if (!is_valid_ether_addr(dev->dev_addr)) {
2499 struct sockaddr sa = { AF_UNSPEC };
2500
2501 netdev_warn(dev, "Invalid MAC address, using random\n");
2502 eth_hw_addr_random(dev);
2503 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2504 if (rtl838x_set_mac_address(dev, &sa))
2505 netdev_warn(dev, "Failed to set MAC address.\n");
2506 }
2507 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2508 sw_r32(priv->r->mac + 4));
2509 strcpy(dev->name, "eth%d");
2510 priv->pdev = pdev;
2511 priv->netdev = dev;
2512
2513 err = rtl838x_mdio_init(priv);
2514 if (err)
2515 goto err_free;
2516
2517 err = register_netdev(dev);
2518 if (err)
2519 goto err_free;
2520
2521 for (i = 0; i < priv->rxrings; i++) {
2522 priv->rx_qs[i].id = i;
2523 priv->rx_qs[i].priv = priv;
2524 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2525 }
2526
2527 platform_set_drvdata(pdev, dev);
2528
2529 phy_mode = PHY_INTERFACE_MODE_NA;
2530 err = of_get_phy_mode(dn, &phy_mode);
2531 if (err < 0) {
2532 dev_err(&pdev->dev, "incorrect phy-mode\n");
2533 err = -EINVAL;
2534 goto err_free;
2535 }
2536 priv->phylink_config.dev = &dev->dev;
2537 priv->phylink_config.type = PHYLINK_NETDEV;
2538
2539 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2540 phy_mode, &rtl838x_phylink_ops);
2541
2542 if (IS_ERR(phylink)) {
2543 err = PTR_ERR(phylink);
2544 goto err_free;
2545 }
2546 priv->phylink = phylink;
2547
2548 return 0;
2549
2550 err_free:
2551 pr_err("Error setting up netdev, freeing it again.\n");
2552 free_netdev(dev);
2553
2554 return err;
2555 }
2556
2557 static int rtl838x_eth_remove(struct platform_device *pdev)
2558 {
2559 struct net_device *dev = platform_get_drvdata(pdev);
2560 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2561 int i;
2562
2563 if (dev) {
2564 pr_info("Removing platform driver for rtl838x-eth\n");
2565 rtl838x_mdio_remove(priv);
2566 rtl838x_hw_stop(priv);
2567
2568 netif_tx_stop_all_queues(dev);
2569
2570 for (i = 0; i < priv->rxrings; i++)
2571 netif_napi_del(&priv->rx_qs[i].napi);
2572
2573 unregister_netdev(dev);
2574 free_netdev(dev);
2575 }
2576
2577 return 0;
2578 }
2579
/* Device-tree match table; also exported for module autoloading */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2585
/* Platform driver registration.
 * NOTE(review): .probe points at a function marked __init; for a
 * built-in driver the probe code may be discarded after boot while the
 * pointer remains live. Confirm whether platform_driver_probe() or
 * dropping __init is the intended fix.
 */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};

module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");