target/realtek: use netif_receive_skb_list
[openwrt/staging/neocturne.git] / target / linux / realtek / files-5.10 / drivers / net / ethernet / rtl838x_eth.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
20 #include <net/dsa.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
23
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
26
27 extern struct rtl83xx_soc_info soc_info;
28
29 /*
30 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
31 * The ring is assigned by switch based on packet/port priortity
32 * Maximum number of TX rings is 2, Ring 2 being the high priority
33 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
34 * for an RX ring, MAX_ENTRIES the maximum number of entries
35 * available in total for all queues.
36 */
37 #define MAX_RXRINGS 32
38 #define MAX_RXLEN 300
39 #define MAX_ENTRIES (300 * 8)
40 #define TXRINGS 2
41 #define TXRINGLEN 160
42 #define NOTIFY_EVENTS 10
43 #define NOTIFY_BLOCKS 10
44 #define TX_EN 0x8
45 #define RX_EN 0x4
46 #define TX_EN_93XX 0x20
47 #define RX_EN_93XX 0x10
48 #define TX_DO 0x2
49 #define WRAP 0x2
50 #define MAX_PORTS 57
51 #define MAX_SMI_BUSSES 4
52
53 #define RING_BUFFER 1600
54
/* DMA packet header shared with the switch ASIC, one per ring descriptor.
 * The layout is fixed by hardware; keep it packed and unaligned. */
struct p_hdr {
	uint8_t *buf;		/* pointer to the packet data buffer */
	uint16_t reserved;
	uint16_t size;		/* buffer size */
	uint16_t offset;
	uint16_t len;		/* pkt len */
	/* cpu_tag[0] is a reserved uint16_t on RTL83xx */
	uint16_t cpu_tag[10];
} __packed __aligned(1);
64
/* One L2 FDB notification event as written by the RTL839x notification
 * hardware. Layout is fixed by hardware; keep it packed. */
struct n_event {
	uint32_t type:2;	/* event type; non-zero is treated as an FDB "add" by the handler */
	uint32_t fidVid:12;
	uint64_t mac:48;	/* MAC address this event refers to */
	uint32_t slp:6;
	uint32_t valid:1;	/* set when the entry holds a valid event */
	uint32_t reserved:27;
} __packed __aligned(1);
73
/* All DMA ring state: RX/TX descriptor rings, their packet headers,
 * per-ring cursors and the TX packet buffers. Placed at priv->membase. */
struct ring_b {
	uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN];	/* RX descriptor rings */
	uint32_t tx_r[TXRINGS][TXRINGLEN];	/* TX descriptor rings */
	struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN];
	struct p_hdr tx_header[TXRINGS][TXRINGLEN];
	uint32_t c_rx[MAX_RXRINGS];		/* current position in each RX ring */
	uint32_t c_tx[TXRINGS];			/* current position in each TX ring */
	uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER];	/* TX packet buffers */
	uint8_t *rx_space;			/* base of the RX packet buffers (set up elsewhere) */
};
84
/* One block of L2 notification events (RTL839x) */
struct notify_block {
	struct n_event events[NOTIFY_EVENTS];
};

/* Notification area handed to the RTL839x hardware; follows struct ring_b
 * in priv->membase. ring[] entries point at blocks[]; bit 0 set means the
 * entry is owned by the switch, cleared by hardware once filled. */
struct notify_b {
	struct notify_block blocks[NOTIFY_BLOCKS];
	u32 reserved1[8];
	u32 ring[NOTIFY_BLOCKS];
	u32 reserved2[8];
};
95
96 static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
97 {
98 // cpu_tag[0] is reserved on the RTL83XX SoCs
99 h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on
100 h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
101 h->cpu_tag[3] = 0x0000;
102 h->cpu_tag[4] = BIT(dest_port) >> 16;
103 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
104
105 /* Set internal priority (PRI) and enable (AS_PRI) */
106 if (prio >= 0)
107 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12;
108 }
109
110 static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
111 {
112 // cpu_tag[0] is reserved on the RTL83XX SoCs
113 h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
114 h->cpu_tag[2] = BIT(4) | BIT(7); /* AS_DPM (4) and L2LEARNING (7) flags */
115 h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
116 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
117 if (dest_port >= 32) {
118 dest_port -= 32;
119 h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
120 h->cpu_tag[3] = BIT(dest_port) & 0xffff;
121 } else {
122 h->cpu_tag[4] = BIT(dest_port) >> 16;
123 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
124 }
125
126 /* Set internal priority (PRI) and enable (AS_PRI) */
127 if (prio >= 0)
128 h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8;
129 }
130
131 static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
132 {
133 h->cpu_tag[0] = 0x8000; // CPU tag marker
134 h->cpu_tag[1] = h->cpu_tag[2] = 0;
135 h->cpu_tag[3] = 0;
136 h->cpu_tag[4] = 0;
137 h->cpu_tag[5] = 0;
138 h->cpu_tag[6] = BIT(dest_port) >> 16;
139 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
140
141 /* Enable (AS_QID) and set priority queue (QID) */
142 if (prio >= 0)
143 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
144 }
145
146 static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
147 {
148 h->cpu_tag[0] = 0x8000; // CPU tag marker
149 h->cpu_tag[1] = h->cpu_tag[2] = 0;
150 h->cpu_tag[3] = 0;
151 h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
152 if (dest_port >= 32) {
153 dest_port -= 32;
154 h->cpu_tag[4] = BIT(dest_port) >> 16;
155 h->cpu_tag[5] = BIT(dest_port) & 0xffff;
156 } else {
157 h->cpu_tag[6] = BIT(dest_port) >> 16;
158 h->cpu_tag[7] = BIT(dest_port) & 0xffff;
159 }
160
161 /* Enable (AS_QID) and set priority queue (QID) */
162 if (prio >= 0)
163 h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8;
164 }
165
166 static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
167 {
168 h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
169 h->cpu_tag[2] |= (vlan >> 8) & 0xf;
170 h->cpu_tag[3] |= (vlan & 0xff) << 8;
171 }
172
/* Per-RX-ring NAPI context */
struct rtl838x_rx_q {
	int id;				/* ring index */
	struct rtl838x_eth_priv *priv;	/* back-pointer to the owning device */
	struct napi_struct napi;
};
178
/* Driver private state, one per ethernet platform device */
struct rtl838x_eth_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	void *membase;			/* DMA area: struct ring_b followed by struct notify_b */
	spinlock_t lock;
	struct mii_bus *mii_bus;
	struct rtl838x_rx_q rx_qs[MAX_RXRINGS];	/* per-ring NAPI contexts */
	struct phylink *phylink;
	struct phylink_config phylink_config;
	u16 id;
	u16 family_id;			/* RTL83xx/93xx SoC family identifier */
	const struct rtl838x_eth_reg *r;	/* family-specific register map and accessors */
	u8 cpu_port;			/* switch port connected to the CPU */
	u32 lastEvent;			/* next L2 notification ring index to process */
	u16 rxrings;			/* number of RX rings in use */
	u16 rxringlen;			/* entries per RX ring */
	u8 smi_bus[MAX_PORTS];
	u8 smi_addr[MAX_PORTS];
	u32 sds_id[MAX_PORTS];
	bool smi_bus_isc45[MAX_SMI_BUSSES];
	bool phy_is_internal[MAX_PORTS];
	phy_interface_t interfaces[MAX_PORTS];
};
202
203 extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
204 extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg);
205 extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg);
206 extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v);
207 extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
208 extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
209 extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg);
210 extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
211 extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
212 extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
213 extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
214 extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val);
215
216 /*
217 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
218 * the rings. Writing x into these registers substracts x from its content.
219 * When the content reaches the ring size, the ASIC no longer adds
220 * packets to this receive queue.
221 */
/* Return ring entries to the hardware fill-level counter; no-op here. */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
226
/* Return ring entries to the hardware fill-level counter; no-op here. */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
231
/* Subtract 'released' processed entries from RX ring r's fill-level
 * counter so the ASIC resumes adding packets to the queue. */
void rtl930x_update_cntr(int r, int released)
{
	/* Counters are packed three rings per 32-bit register, 10 bits each */
	int pos = (r % 3) * 10;
	u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;
	pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg);
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): writing the old (unshifted) counter value back after
	 * the masked write looks suspicious given the subtract-on-write
	 * semantics of this register — verify against the SoC datasheet. */
	sw_w32(v, reg);
}
243
/* Subtract 'released' processed entries from RX ring r's fill-level
 * counter so the ASIC resumes adding packets to the queue. */
void rtl931x_update_cntr(int r, int released)
{
	/* Counters are packed three rings per 32-bit register, 10 bits each */
	int pos = (r % 3) * 10;
	u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2);
	u32 v = sw_r32(reg);

	v = (v >> pos) & 0x3ff;
	sw_w32_mask(0x3ff << pos, released << pos, reg);
	/* NOTE(review): same double write as rtl930x_update_cntr() — the old
	 * unshifted counter value is written back after the masked write;
	 * verify against the SoC datasheet. */
	sw_w32(v, reg);
}
254
/* Decoded CPU RX tag fields, filled by the per-family decode_tag hooks */
struct dsa_tag {
	u8 reason;		/* hardware trap/forwarding reason code */
	u8 queue;		/* RX queue the packet arrived on */
	u16 port;		/* source switch port */
	u8 l2_offloaded;	/* non-zero: packet was L2-forwarded in hardware */
	u8 prio;
	bool crc_error;		/* packet arrived with a CRC error */
};
263
264 bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
265 {
266 /* cpu_tag[0] is reserved. Fields are off-by-one */
267 t->reason = h->cpu_tag[4] & 0xf;
268 t->queue = (h->cpu_tag[1] & 0xe0) >> 5;
269 t->port = h->cpu_tag[1] & 0x1f;
270 t->crc_error = t->reason == 13;
271
272 pr_debug("Reason: %d\n", t->reason);
273 if (t->reason != 6) // NIC_RX_REASON_SPECIAL_TRAP
274 t->l2_offloaded = 1;
275 else
276 t->l2_offloaded = 0;
277
278 return t->l2_offloaded;
279 }
280
281 bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
282 {
283 /* cpu_tag[0] is reserved. Fields are off-by-one */
284 t->reason = h->cpu_tag[5] & 0x1f;
285 t->queue = (h->cpu_tag[4] & 0xe000) >> 13;
286 t->port = h->cpu_tag[1] & 0x3f;
287 t->crc_error = h->cpu_tag[4] & BIT(6);
288
289 pr_debug("Reason: %d\n", t->reason);
290 if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
291 (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
292 t->l2_offloaded = 0;
293 else
294 t->l2_offloaded = 1;
295
296 return t->l2_offloaded;
297 }
298
299 bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
300 {
301 t->reason = h->cpu_tag[7] & 0x3f;
302 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
303 t->port = (h->cpu_tag[0] >> 8) & 0x1f;
304 t->crc_error = h->cpu_tag[1] & BIT(6);
305
306 pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
307 if (t->reason >= 19 && t->reason <= 27)
308 t->l2_offloaded = 0;
309 else
310 t->l2_offloaded = 1;
311
312 return t->l2_offloaded;
313 }
314
315 bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
316 {
317 t->reason = h->cpu_tag[7] & 0x3f;
318 t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
319 t->port = (h->cpu_tag[0] >> 8) & 0x3f;
320 t->crc_error = h->cpu_tag[1] & BIT(6);
321
322 if (t->reason != 63)
323 pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
324 if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA
325 t->l2_offloaded = 0;
326 else
327 t->l2_offloaded = 1;
328
329 return t->l2_offloaded;
330 }
331
332 /*
333 * Discard the RX ring-buffers, called as part of the net-ISR
334 * when the buffer runs over
335 */
336 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
337 {
338 int r;
339 u32 *last;
340 struct p_hdr *h;
341 struct ring_b *ring = priv->membase;
342
343 for (r = 0; r < priv->rxrings; r++) {
344 pr_debug("In %s working on r: %d\n", __func__, r);
345 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
346 do {
347 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1))
348 break;
349 pr_debug("Got something: %d\n", ring->c_rx[r]);
350 h = &ring->rx_header[r][ring->c_rx[r]];
351 memset(h, 0, sizeof(struct p_hdr));
352 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
353 + r * priv->rxringlen * RING_BUFFER
354 + ring->c_rx[r] * RING_BUFFER);
355 h->size = RING_BUFFER;
356 /* make sure the header is visible to the ASIC */
357 mb();
358
359 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
360 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
361 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
362 } while (&ring->rx_r[r][ring->c_rx[r]] != last);
363 }
364 }
365
/* Deferred-work container carrying a zero-terminated list of FDB events;
 * bit 63 of each entry encodes add (set) vs delete (clear), the low 48
 * bits hold the MAC address. */
struct fdb_update_work {
	struct work_struct work;
	struct net_device *ndev;
	u64 macs[NOTIFY_EVENTS + 1];	/* +1 slot for the zero terminator */
};
371
372 void rtl838x_fdb_sync(struct work_struct *work)
373 {
374 const struct fdb_update_work *uw =
375 container_of(work, struct fdb_update_work, work);
376 struct switchdev_notifier_fdb_info info;
377 u8 addr[ETH_ALEN];
378 int i = 0;
379 int action;
380
381 while (uw->macs[i]) {
382 action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
383 : SWITCHDEV_FDB_DEL_TO_BRIDGE;
384 u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
385 info.addr = &addr[0];
386 info.vid = 0;
387 info.offloaded = 1;
388 pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
389 call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
390 i++;
391 }
392 kfree(work);
393 }
394
/*
 * Drain the RTL839x L2 notification ring: package each pending event
 * block into an fdb_update_work item and schedule it for processing
 * by rtl838x_fdb_sync(). Called from interrupt context (GFP_ATOMIC).
 */
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
	/* The notification area follows the DMA rings in membase */
	struct notify_b *nb = priv->membase + sizeof(struct ring_b);
	u32 e = priv->lastEvent;
	struct n_event *event;
	int i;
	u64 mac;
	struct fdb_update_work *w;

	/* Bit 0 clear means the block was filled by the hardware */
	while (!(nb->ring[e] & 1)) {
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w) {
			pr_err("Out of memory: %s", __func__);
			return;
		}
		INIT_WORK(&w->work, rtl838x_fdb_sync);

		for (i = 0; i < NOTIFY_EVENTS; i++) {
			event = &nb->blocks[e].events[i];
			/* NOTE(review): skipping an invalid event leaves a zero
			 * gap in w->macs[], and rtl838x_fdb_sync() stops at the
			 * first zero — any later valid events in this block
			 * would be dropped. Verify invalid entries can only
			 * appear at the tail. */
			if (!event->valid)
				continue;
			mac = event->mac;
			if (event->type)	/* non-zero type encodes an FDB "add" */
				mac |= 1ULL << 63;
			w->ndev = priv->netdev;
			w->macs[i] = mac;
		}

		/* Hand the ring entry back to the switch */
		nb->ring[e] = nb->ring[e] | 1;
		e = (e + 1) % NOTIFY_BLOCKS;

		/* Terminate the list (macs[] has NOTIFY_EVENTS + 1 slots) */
		w->macs[i] = 0ULL;
		schedule_work(&w->work);
	}
	priv->lastEvent = e;
}
432
/*
 * Interrupt handler for the RTL838x/RTL839x families: acks TX-done,
 * schedules NAPI for every ring with pending RX work, recovers from
 * RX ring overruns and dispatches RTL839x L2 notification events.
 */
static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status = sw_r32(priv->r->dma_if_intr_sts);
	int i;

	pr_debug("IRQ: %08x\n", status);

	/* Ignore TX interrupt */
	if ((status & 0xf0000)) {
		/* Clear ISR */
		sw_w32(0x000f0000, priv->r->dma_if_intr_sts);
	}

	/* RX interrupt (one status bit per ring, bits 8..15) */
	if (status & 0x0ff00) {
		/* ACK and disable RX interrupt for this ring */
		sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
		sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
		for (i = 0; i < priv->rxrings; i++) {
			if (status & BIT(i + 8)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun (bits 0..7): re-arm the affected rings */
	if (status & 0x000ff) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status, sw_r32(priv->r->dma_if_intr_msk));
		sw_w32(status, priv->r->dma_if_intr_sts);
		rtl838x_rb_cleanup(priv, status & 0xff);
	}

	/* RTL839x L2 notification interrupts, bits 20..22 — each is
	 * acked individually before draining the notification ring */
	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) {
		sw_w32(0x00100000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) {
		sw_w32(0x00200000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) {
		sw_w32(0x00400000, priv->r->dma_if_intr_sts);
		rtl839x_l2_notification_handler(priv);
	}

	return IRQ_HANDLED;
}
486
/*
 * Interrupt handler for the RTL930x/RTL931x families. Unlike the
 * RTL83xx, TX-done, RX-done and RX-runout each have their own
 * status/mask registers.
 */
static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
	u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
	u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
	int i;

	pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
		 __func__, status_tx, status_rx, status_rx_r);

	/* Ignore TX interrupt */
	if (status_tx) {
		/* Clear ISR */
		pr_debug("TX done\n");
		sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts);
	}

	/* RX interrupt (one status bit per ring) */
	if (status_rx) {
		pr_debug("RX IRQ\n");
		/* ACK and disable RX interrupt for given rings */
		sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
		sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
		for (i = 0; i < priv->rxrings; i++) {
			if (status_rx & BIT(i)) {
				pr_debug("Scheduling queue: %d\n", i);
				napi_schedule(&priv->rx_qs[i].napi);
			}
		}
	}

	/* RX buffer overrun: ack and re-arm the affected rings */
	if (status_rx_r) {
		pr_debug("RX buffer overrun: status %x, mask: %x\n",
			 status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk));
		sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts);
		rtl838x_rb_cleanup(priv, status_rx_r);
	}

	return IRQ_HANDLED;
}
530
/* Register addresses and per-family accessor hooks for RTL838x */
static const struct rtl838x_eth_reg rtl838x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl838x_mac_port_ctrl,
	.dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL838X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL838X_DMA_RX_BASE,
	.dma_tx_base = RTL838X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl838x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts,
	.mac = RTL838X_MAC,
	.l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl838x_update_cntr,
	.create_tx_header = rtl838x_create_tx_header,
	.decode_tag = rtl838x_decode_tag,
};
555
/* Register addresses and per-family accessor hooks for RTL839x */
static const struct rtl838x_eth_reg rtl839x_reg = {
	.net_irq = rtl83xx_net_irq,
	.mac_port_ctrl = rtl839x_mac_port_ctrl,
	.dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS,
	.dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK,
	.dma_if_ctrl = RTL839X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL839X_DMA_RX_BASE,
	.dma_tx_base = RTL839X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL839X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl839x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts,
	.mac = RTL839X_MAC,
	.l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl839x_update_cntr,
	.create_tx_header = rtl839x_create_tx_header,
	.decode_tag = rtl839x_decode_tag,
};
580
/* Register addresses and per-family accessor hooks for RTL930x;
 * the 93xx families split the DMA interrupt registers by cause. */
static const struct rtl838x_eth_reg rtl930x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl930x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL930X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL930X_DMA_RX_BASE,
	.dma_tx_base = RTL930X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL930X_RST_GLB_CTRL_0,
	.get_mac_link_sts = rtl930x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts,
	.mac = RTL930X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl930x_update_cntr,
	.create_tx_header = rtl930x_create_tx_header,
	.decode_tag = rtl930x_decode_tag,
};
611
/* Register addresses and per-family accessor hooks for RTL931x */
static const struct rtl838x_eth_reg rtl931x_reg = {
	.net_irq = rtl93xx_net_irq,
	.mac_port_ctrl = rtl931x_mac_port_ctrl,
	.dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS,
	.dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS,
	.dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS,
	.dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK,
	.dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK,
	.dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK,
	.l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS,
	.l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK,
	.dma_if_ctrl = RTL931X_DMA_IF_CTRL,
	.mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL,
	.dma_rx_base = RTL931X_DMA_RX_BASE,
	.dma_tx_base = RTL931X_DMA_TX_BASE,
	.dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size,
	.dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr,
	.dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR,
	.rst_glb_ctrl = RTL931X_RST_GLB_CTRL,
	.get_mac_link_sts = rtl931x_get_mac_link_sts,
	.get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts,
	.get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts,
	.get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts,
	.get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts,
	.mac = RTL931X_MAC_L2_ADDR_CTRL,
	.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
	.update_cntr = rtl931x_update_cntr,
	.create_tx_header = rtl931x_create_tx_header,
	.decode_tag = rtl931x_decode_tag,
};
642
643 static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv)
644 {
645 u32 int_saved, nbuf;
646 u32 reset_mask;
647 int i, pos;
648
649 pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
650 sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
651 mdelay(100);
652
653 /* Disable and clear interrupts */
654 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
655 sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
656 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
657 sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
658 sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
659 sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
660 sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
661 } else {
662 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
663 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
664 }
665
666 if (priv->family_id == RTL8390_FAMILY_ID) {
667 /* Preserve L2 notification and NBUF settings */
668 int_saved = sw_r32(priv->r->dma_if_intr_msk);
669 nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
670
671 /* Disable link change interrupt on RTL839x */
672 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG);
673 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
674
675 sw_w32(0x00000000, priv->r->dma_if_intr_msk);
676 sw_w32(0xffffffff, priv->r->dma_if_intr_sts);
677 }
678
679 /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
680 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
681 reset_mask = 0x6;
682 else
683 reset_mask = 0xc;
684
685 sw_w32(reset_mask, priv->r->rst_glb_ctrl);
686
687 do { /* Wait for reset of NIC and Queues done */
688 udelay(20);
689 } while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask);
690 mdelay(100);
691
692 /* Setup Head of Line */
693 if (priv->family_id == RTL8380_FAMILY_ID)
694 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
695 if (priv->family_id == RTL8390_FAMILY_ID)
696 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
697 if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
698 for (i = 0; i < priv->rxrings; i++) {
699 pos = (i % 3) * 10;
700 sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
701 sw_w32_mask(0x3ff << pos, priv->rxringlen,
702 priv->r->dma_if_rx_ring_cntr(i));
703 }
704 }
705
706 /* Re-enable link change interrupt */
707 if (priv->family_id == RTL8390_FAMILY_ID) {
708 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG);
709 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4);
710 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG);
711 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4);
712
713 /* Restore notification settings: on RTL838x these bits are null */
714 sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk);
715 sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
716 }
717 }
718
719 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
720 {
721 int i;
722 struct ring_b *ring = priv->membase;
723
724 for (i = 0; i < priv->rxrings; i++)
725 sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
726
727 for (i = 0; i < TXRINGS; i++)
728 sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
729 }
730
/*
 * Enable packet DMA and CPU-port TX/RX on RTL838x and force the
 * CPU-port MAC settings (speed, duplex, flow control).
 */
static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Disable Head of Line features for all RX rings */
	sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));

	/* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
	sw_w32(0x06400020, priv->r->dma_if_ctrl);

	/* Enable RX done, RX overflow and TX done interrupts */
	sw_w32(0xfffff, priv->r->dma_if_intr_msk);

	/* Enable DMA, engine expects empty FCS field */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port */
	sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
	/* Set Speed, duplex, flow control
	 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
	 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
	 * | MEDIA_SEL
	 */
	sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);

	/* Enable CRC checks on CPU-port */
	sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
757
/*
 * Enable packet DMA and CPU-port TX/RX on RTL839x, add the CPU port
 * to the lookup-miss flooding portmask and force its link up.
 */
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	/* Setup CPU-Port: RX Buffer */
	sw_w32(0x0000c808, priv->r->dma_if_ctrl);

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!

	/* Enable DMA */
	sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins Lookup Miss Flooding Portmask */
	// TODO: The code below should also work for the RTL838x
	sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
	sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
	sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);

	/* Force CPU port link up */
	sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
781
/*
 * Enable packet DMA and CPU-port TX/RX on RTL930x/931x: set the ring
 * sizes and fill-level counters, unmask the DMA interrupts and add
 * the CPU port to the unknown-unicast flooding portmask.
 */
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
	int i, pos;
	u32 v;

	/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
	sw_w32(0x06400040, priv->r->dma_if_ctrl);

	/* Ring sizes/counters pack 3 rings per register, 10 bits each */
	for (i = 0; i < priv->rxrings; i++) {
		pos = (i % 3) * 10;
		sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));

		// Some SoCs have issues with missing underflow protection
		v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
		sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
	}

	/* Enable Notify, RX done, RX overflow and TX done interrupts */
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk);
	sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
	sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk);

	/* Enable DMA */
	sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);

	/* Restart TX/RX to CPU port, enable CRC checking */
	sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));

	/* CPU port joins the unknown-unicast flooding portmask */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
	else
		sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK);

	/* Force CPU-port MAC mode (family-specific value) */
	if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else
		sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
}
820
821 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
822 {
823 int i, j;
824
825 struct p_hdr *h;
826
827 for (i = 0; i < priv->rxrings; i++) {
828 for (j = 0; j < priv->rxringlen; j++) {
829 h = &ring->rx_header[i][j];
830 memset(h, 0, sizeof(struct p_hdr));
831 h->buf = (u8 *)KSEG1ADDR(ring->rx_space
832 + i * priv->rxringlen * RING_BUFFER
833 + j * RING_BUFFER);
834 h->size = RING_BUFFER;
835 /* All rings owned by switch, last one wraps */
836 ring->rx_r[i][j] = KSEG1ADDR(h) | 1
837 | (j == (priv->rxringlen - 1) ? WRAP : 0);
838 }
839 ring->c_rx[i] = 0;
840 }
841
842 for (i = 0; i < TXRINGS; i++) {
843 for (j = 0; j < TXRINGLEN; j++) {
844 h = &ring->tx_header[i][j];
845 memset(h, 0, sizeof(struct p_hdr));
846 h->buf = (u8 *)KSEG1ADDR(ring->tx_space
847 + i * TXRINGLEN * RING_BUFFER
848 + j * RING_BUFFER);
849 h->size = RING_BUFFER;
850 ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]);
851 }
852 /* Last header is wrapping around */
853 ring->tx_r[i][j-1] |= WRAP;
854 ring->c_tx[i] = 0;
855 }
856 }
857
/*
 * Initialize the RTL839x L2 notification ring (located right behind
 * struct ring_b in membase), point the hardware at it and enable
 * notification generation.
 */
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
	int i;
	struct notify_b *b = priv->membase + sizeof(struct ring_b);

	/* All entries owned by the switch (bit 0 set), last one wraps */
	for (i = 0; i < NOTIFY_BLOCKS; i++)
		b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);

	sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
	/* 10-bit field at bit 2 set to 100 — presumably a notification
	 * threshold/interval; verify against the SoC datasheet */
	sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);

	/* Setup notification events */
	sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
	sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN

	/* Enable Notification */
	sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
	priv->lastEvent = 0;
}
877
/*
 * ndo_open callback: reset the hardware, build the DMA rings, start
 * phylink and NAPI, enable RX/TX per SoC family and open the TX
 * queues. Returns 0 (no failure paths in the hardware bring-up).
 */
static int rtl838x_eth_open(struct net_device *ndev)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);
	struct ring_b *ring = priv->membase;
	int i;

	pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
		 __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);

	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_reset(priv);
	rtl838x_setup_ring_buffer(priv, ring);
	if (priv->family_id == RTL8390_FAMILY_ID) {
		rtl839x_setup_notify_ring_buffer(priv);
		/* Make sure the ring structure is visible to the ASIC */
		mb();
		flush_cache_all();
	}

	rtl838x_hw_ring_setup(priv);
	phylink_start(priv->phylink);

	for (i = 0; i < priv->rxrings; i++)
		napi_enable(&priv->rx_qs[i].napi);

	/* Family-specific DMA enable and trap configuration */
	switch (priv->family_id) {
	case RTL8380_FAMILY_ID:
		rtl838x_hw_en_rxtx(priv);
		/* Trap IGMP/MLD traffic to CPU-Port */
		sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
		break;

	case RTL8390_FAMILY_ID:
		rtl839x_hw_en_rxtx(priv);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
		break;

	case RTL9300_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);
		/* Flush learned FDB entries on link down of a port */
		sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
		break;

	case RTL9310_FAMILY_ID:
		rtl93xx_hw_en_rxtx(priv);

		// Trap MLD and IGMP messages to CPU_PORT
		sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);

		// Disable External CPU access to switch, clear EXT_CPU_EN
		sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);

		// Set PCIE_PWR_DOWN
		sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
		break;
	}

	netif_tx_start_all_queues(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
949
/*
 * Quiesce the switch data path: stop CPU-port RX/TX, disable the DMA
 * engine, block/flush L2 tables, force the CPU-port link down and
 * mask/acknowledge all DMA interrupts.
 * Called from ndo_stop and from the TX timeout handler.
 */
static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv)
{
	/* Force-mode value that keeps the CPU-port MAC configured with the
	 * link bit cleared — presumably 0x6192D with the link-up bit off,
	 * cf. rtl838x_mac_an_restart(); verify against the datasheet. */
	u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
	/* All-ones masks covering the interrupt status bits of each family
	 * (20 bits on RTL838x, 23 bits on RTL839x) */
	u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
	int i;

	// Disable RX/TX from/to CPU-port
	sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));

	/* Disable traffic */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
	else
		sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
	mdelay(200); // Test, whether this is needed

	/* Block all ports */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
		sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
		sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0);
	}

	/* Flush L2 address cache, one port at a time; the busy bit
	 * (bit 26 on RTL838x, bit 28 on RTL839x) clears on completion */
	if (priv->family_id == RTL8380_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
		}
	} else if (priv->family_id == RTL8390_FAMILY_ID) {
		for (i = 0; i <= priv->cpu_port; i++) {
			sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
			do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
		}
	}
	// TODO: L2 flush register is 64 bit on RTL931X and 930X

	/* CPU-Port: Link down */
	if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
		sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
	else if (priv->family_id == RTL9300_FAMILY_ID)
		sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	else if (priv->family_id == RTL9310_FAMILY_ID)
		sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4);
	mdelay(100);

	/* Disable all TX/RX interrupts: clear the masks, then acknowledge
	 * any pending status by writing the status registers back */
	if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk);
		sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts);
		sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk);
		sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts);
	} else {
		sw_w32(0x00000000, priv->r->dma_if_intr_msk);
		sw_w32(clear_irq, priv->r->dma_if_intr_sts);
	}

	/* Disable TX/RX DMA */
	sw_w32(0x00000000, priv->r->dma_if_ctrl);
	mdelay(200);
}
1013
1014 static int rtl838x_eth_stop(struct net_device *ndev)
1015 {
1016 unsigned long flags;
1017 int i;
1018 struct rtl838x_eth_priv *priv = netdev_priv(ndev);
1019
1020 pr_info("in %s\n", __func__);
1021
1022 phylink_stop(priv->phylink);
1023 rtl838x_hw_stop(priv);
1024
1025 for (i = 0; i < priv->rxrings; i++)
1026 napi_disable(&priv->rx_qs[i].napi);
1027
1028 netif_tx_stop_all_queues(ndev);
1029
1030 return 0;
1031 }
1032
1033 static void rtl838x_eth_set_multicast_list(struct net_device *ndev)
1034 {
1035 /*
1036 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1037 * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
1038 */
1039 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1040 sw_w32(0x0, RTL838X_RMA_CTRL_0);
1041 sw_w32(0x0, RTL838X_RMA_CTRL_1);
1042 }
1043 if (ndev->flags & IFF_ALLMULTI)
1044 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1045 if (ndev->flags & IFF_PROMISC) {
1046 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0);
1047 sw_w32(0x7fff, RTL838X_RMA_CTRL_1);
1048 }
1049 }
1050
1051 static void rtl839x_eth_set_multicast_list(struct net_device *ndev)
1052 {
1053 /*
1054 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1055 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1056 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1057 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1058 */
1059 if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1060 sw_w32(0x0, RTL839X_RMA_CTRL_0);
1061 sw_w32(0x0, RTL839X_RMA_CTRL_1);
1062 sw_w32(0x0, RTL839X_RMA_CTRL_2);
1063 sw_w32(0x0, RTL839X_RMA_CTRL_3);
1064 }
1065 if (ndev->flags & IFF_ALLMULTI) {
1066 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1067 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1068 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1069 }
1070 if (ndev->flags & IFF_PROMISC) {
1071 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0);
1072 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1);
1073 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2);
1074 sw_w32(0x3ff, RTL839X_RMA_CTRL_3);
1075 }
1076 }
1077
1078 static void rtl930x_eth_set_multicast_list(struct net_device *ndev)
1079 {
1080 /*
1081 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1082 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1083 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1084 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1085 */
1086 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1087 sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0);
1088 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1);
1089 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2);
1090 } else {
1091 sw_w32(0x0, RTL930X_RMA_CTRL_0);
1092 sw_w32(0x0, RTL930X_RMA_CTRL_1);
1093 sw_w32(0x0, RTL930X_RMA_CTRL_2);
1094 }
1095 }
1096
1097 static void rtl931x_eth_set_multicast_list(struct net_device *ndev)
1098 {
1099 /*
1100 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1101 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1102 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
1103 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1104 */
1105 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1106 sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0);
1107 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1);
1108 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2);
1109 } else {
1110 sw_w32(0x0, RTL931X_RMA_CTRL_0);
1111 sw_w32(0x0, RTL931X_RMA_CTRL_1);
1112 sw_w32(0x0, RTL931X_RMA_CTRL_2);
1113 }
1114 }
1115
/*
 * ndo_tx_timeout: recover from a stalled TX path by stopping the DMA
 * engine, re-initializing the rings and re-enabling RX/TX, all under
 * priv->lock with IRQs off.
 * NOTE(review): rtl838x_hw_en_rxtx() is called unconditionally here,
 * while rtl838x_eth_open() uses family-specific enable functions —
 * presumably this should match; verify on RTL839x/93xx hardware.
 */
static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	unsigned long flags;
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_warn("%s\n", __func__);
	spin_lock_irqsave(&priv->lock, flags);
	rtl838x_hw_stop(priv);
	rtl838x_hw_ring_setup(priv);
	rtl838x_hw_en_rxtx(priv);
	/* Reset the trans_start timestamp and let the stack resume TX */
	netif_trans_update(ndev);
	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1130
/*
 * ndo_start_xmit: transmit one skb on TX ring q.
 * If the frame ends in a trailer tag in the format this driver's RX
 * path generates (0x80, port, 0x10, 0x00), the tag is stripped and the
 * destination port is conveyed to the ASIC via the descriptor's CPU
 * header instead. The packet data is copied into the ring's own DMA
 * buffer, so the skb is consumed whenever NETDEV_TX_OK is returned.
 */
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
	struct rtl838x_eth_priv *priv = netdev_priv(dev);
	struct ring_b *ring = priv->membase;
	uint32_t val;
	int ret;
	unsigned long flags;
	struct p_hdr *h;
	int dest_port = -1;  /* -1: no trailer tag found, let the switch forward */
	int q = skb_get_queue_mapping(skb) % TXRINGS;

	if (q) // Check for high prio queue
		pr_debug("SKB priority: %d\n", skb->priority);

	spin_lock_irqsave(&priv->lock, flags);
	len = skb->len;

	/* Check for DSA tagging at the end of the buffer */
	/* NOTE(review): assumes len >= 4 for any DSA-tagged frame; this
	 * holds for real Ethernet frames but is not checked here. */
	if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80
			&& skb->data[len-3] < priv->cpu_port
			&& skb->data[len-2] == 0x10
			&& skb->data[len-1] == 0x00) {
		/* Reuse tag space for CRC if possible */
		dest_port = skb->data[len-3];
		skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
		len -= 4;
	}

	len += 4; // Add space for CRC

	/* skb_padto() frees the skb on failure, so report TX_OK then */
	if (skb_padto(skb, len)) {
		ret = NETDEV_TX_OK;
		goto txdone;
	}

	/* We can send this packet if CPU owns the descriptor */
	if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) {

		/* Set descriptor for tx */
		h = &ring->tx_header[q][ring->c_tx[q]];
		h->size = len;
		h->len = len;
		// On RTL8380 SoCs, small packet lengths being sent need adjustments
		if (priv->family_id == RTL8380_FAMILY_ID) {
			if (len < ETH_ZLEN - 4)
				h->len -= 4;
		}

		/* A trailer tag was found above: build the CPU header */
		if (dest_port >= 0)
			priv->r->create_tx_header(h, dest_port, skb->priority >> 1);

		/* Copy packet data to tx buffer */
		memcpy((void *)KSEG1ADDR(h->buf), skb->data, len);
		/* Make sure packet data is visible to ASIC */
		wmb();

		/* Hand over to switch (set the ownership bit) */
		ring->tx_r[q][ring->c_tx[q]] |= 1;

		// Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
		if (priv->family_id == RTL8380_FAMILY_ID) {
			for (i = 0; i < 10; i++) {
				val = sw_r32(priv->r->dma_if_ctrl);
				if ((val & 0xc) == 0xc)
					break;
			}
		}

		/* Tell switch to send data */
		if (priv->family_id == RTL9310_FAMILY_ID
			|| priv->family_id == RTL9300_FAMILY_ID) {
			// Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
			if (!q)
				sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
			else
				sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl);
		} else {
			sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		dev_kfree_skb(skb);
		ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN;
		ret = NETDEV_TX_OK;
	} else {
		/* Descriptor still owned by the ASIC: ask the stack to retry */
		dev_warn(&priv->pdev->dev, "Data is owned by switch\n");
		ret = NETDEV_TX_BUSY;
	}
txdone:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
1225
1226 /*
1227 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1228 * so we do round-robin
1229 */
1230 u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1231 struct net_device *sb_dev)
1232 {
1233 static u8 last = 0;
1234
1235 last++;
1236 return last % TXRINGS;
1237 }
1238
1239 /*
1240 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1241 */
1242 u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
1243 struct net_device *sb_dev)
1244 {
1245 if (skb->priority >= TC_PRIO_CONTROL)
1246 return 1;
1247 return 0;
1248 }
1249
1250 static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
1251 {
1252 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1253 struct ring_b *ring = priv->membase;
1254 struct sk_buff *skb;
1255 LIST_HEAD(rx_list);
1256 unsigned long flags;
1257 int i, len, work_done = 0;
1258 u8 *data, *skb_data;
1259 unsigned int val;
1260 u32 *last;
1261 struct p_hdr *h;
1262 bool dsa = netdev_uses_dsa(dev);
1263 struct dsa_tag tag;
1264
1265 pr_debug("---------------------------------------------------------- RX - %d\n", r);
1266 spin_lock_irqsave(&priv->lock, flags);
1267 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1268
1269 do {
1270 if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
1271 if (&ring->rx_r[r][ring->c_rx[r]] != last) {
1272 netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
1273 r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]);
1274 }
1275 break;
1276 }
1277
1278 h = &ring->rx_header[r][ring->c_rx[r]];
1279 data = (u8 *)KSEG1ADDR(h->buf);
1280 len = h->len;
1281 if (!len)
1282 break;
1283 work_done++;
1284
1285 len -= 4; /* strip the CRC */
1286 /* Add 4 bytes for cpu_tag */
1287 if (dsa)
1288 len += 4;
1289
1290 skb = netdev_alloc_skb(dev, len + 4);
1291 skb_reserve(skb, NET_IP_ALIGN);
1292
1293 if (likely(skb)) {
1294 /* BUG: Prevent bug on RTL838x SoCs*/
1295 if (priv->family_id == RTL8380_FAMILY_ID) {
1296 sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
1297 for (i = 0; i < priv->rxrings; i++) {
1298 /* Update each ring cnt */
1299 val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
1300 sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
1301 }
1302 }
1303
1304 skb_data = skb_put(skb, len);
1305 /* Make sure data is visible */
1306 mb();
1307 memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
1308 /* Overwrite CRC with cpu_tag */
1309 if (dsa) {
1310 priv->r->decode_tag(h, &tag);
1311 skb->data[len-4] = 0x80;
1312 skb->data[len-3] = tag.port;
1313 skb->data[len-2] = 0x10;
1314 skb->data[len-1] = 0x00;
1315 if (tag.l2_offloaded)
1316 skb->data[len-3] |= 0x40;
1317 }
1318
1319 if (tag.queue >= 0)
1320 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1321 tag.queue, len, tag.reason, tag.port);
1322
1323 skb->protocol = eth_type_trans(skb, dev);
1324 if (dev->features & NETIF_F_RXCSUM) {
1325 if (tag.crc_error)
1326 skb_checksum_none_assert(skb);
1327 else
1328 skb->ip_summed = CHECKSUM_UNNECESSARY;
1329 }
1330 dev->stats.rx_packets++;
1331 dev->stats.rx_bytes += len;
1332
1333 list_add_tail(&skb->list, &rx_list);
1334 } else {
1335 if (net_ratelimit())
1336 dev_warn(&dev->dev, "low on memory - packet dropped\n");
1337 dev->stats.rx_dropped++;
1338 }
1339
1340 /* Reset header structure */
1341 memset(h, 0, sizeof(struct p_hdr));
1342 h->buf = data;
1343 h->size = RING_BUFFER;
1344
1345 ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1
1346 | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1);
1347 ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen;
1348 last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
1349 } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget);
1350
1351 netif_receive_skb_list(&rx_list);
1352
1353 // Update counters
1354 priv->r->update_cntr(r, 0);
1355
1356 spin_unlock_irqrestore(&priv->lock, flags);
1357
1358 return work_done;
1359 }
1360
/*
 * NAPI poll function for RX ring rx_q->id. Reaps packets until the ring
 * is empty or the budget is exhausted; only when the budget was NOT
 * exhausted is polling completed and the ring's RX interrupt re-enabled.
 */
static int rtl838x_poll_rx(struct napi_struct *napi, int budget)
{
	struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi);
	struct rtl838x_eth_priv *priv = rx_q->priv;
	int work_done = 0;
	int r = rx_q->id;
	int work;

	while (work_done < budget) {
		work = rtl838x_hw_receive(priv->netdev, r, budget - work_done);
		if (!work)
			break;
		work_done += work;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Enable RX interrupt */
		if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID)
			sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk);
		else
			/* NOTE(review): 0xf00ff unmasks more than this ring's
			 * done bit (BIT(r + 8)) — presumably run-out and
			 * notification bits; verify against the datasheet. */
			sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk);
	}
	return work_done;
}
1387
1388
1389 static void rtl838x_validate(struct phylink_config *config,
1390 unsigned long *supported,
1391 struct phylink_link_state *state)
1392 {
1393 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1394
1395 pr_debug("In %s\n", __func__);
1396
1397 if (!phy_interface_mode_is_rgmii(state->interface) &&
1398 state->interface != PHY_INTERFACE_MODE_1000BASEX &&
1399 state->interface != PHY_INTERFACE_MODE_MII &&
1400 state->interface != PHY_INTERFACE_MODE_REVMII &&
1401 state->interface != PHY_INTERFACE_MODE_GMII &&
1402 state->interface != PHY_INTERFACE_MODE_QSGMII &&
1403 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1404 state->interface != PHY_INTERFACE_MODE_SGMII) {
1405 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1406 pr_err("Unsupported interface: %d\n", state->interface);
1407 return;
1408 }
1409
1410 /* Allow all the expected bits */
1411 phylink_set(mask, Autoneg);
1412 phylink_set_port_modes(mask);
1413 phylink_set(mask, Pause);
1414 phylink_set(mask, Asym_Pause);
1415
1416 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1417 * including Half duplex
1418 */
1419 if (state->interface != PHY_INTERFACE_MODE_MII &&
1420 state->interface != PHY_INTERFACE_MODE_REVMII) {
1421 phylink_set(mask, 1000baseT_Full);
1422 phylink_set(mask, 1000baseT_Half);
1423 }
1424
1425 phylink_set(mask, 10baseT_Half);
1426 phylink_set(mask, 10baseT_Full);
1427 phylink_set(mask, 100baseT_Half);
1428 phylink_set(mask, 100baseT_Full);
1429
1430 bitmap_and(supported, supported, mask,
1431 __ETHTOOL_LINK_MODE_MASK_NBITS);
1432 bitmap_and(state->advertising, state->advertising, mask,
1433 __ETHTOOL_LINK_MODE_MASK_NBITS);
1434 }
1435
1436
/*
 * phylink mac_config callback. This is only being called for the
 * master device, i.e. the CPU port; nothing needs to be configured
 * here, so we only log the requested mode.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	pr_info("In %s, mode %x\n", __func__, mode);
}
1447
1448 static void rtl838x_mac_an_restart(struct phylink_config *config)
1449 {
1450 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1451 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1452
1453 /* This works only on RTL838x chips */
1454 if (priv->family_id != RTL8380_FAMILY_ID)
1455 return;
1456
1457 pr_debug("In %s\n", __func__);
1458 /* Restart by disabling and re-enabling link */
1459 sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
1460 mdelay(20);
1461 sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
1462 }
1463
1464 static void rtl838x_mac_pcs_get_state(struct phylink_config *config,
1465 struct phylink_link_state *state)
1466 {
1467 u32 speed;
1468 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1469 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1470 int port = priv->cpu_port;
1471
1472 pr_info("In %s\n", __func__);
1473
1474 state->link = priv->r->get_mac_link_sts(port) ? 1 : 0;
1475 state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0;
1476
1477 pr_info("%s link status is %d\n", __func__, state->link);
1478 speed = priv->r->get_mac_link_spd_sts(port);
1479 switch (speed) {
1480 case 0:
1481 state->speed = SPEED_10;
1482 break;
1483 case 1:
1484 state->speed = SPEED_100;
1485 break;
1486 case 2:
1487 state->speed = SPEED_1000;
1488 break;
1489 case 5:
1490 state->speed = SPEED_2500;
1491 break;
1492 case 6:
1493 state->speed = SPEED_5000;
1494 break;
1495 case 4:
1496 state->speed = SPEED_10000;
1497 break;
1498 default:
1499 state->speed = SPEED_UNKNOWN;
1500 break;
1501 }
1502
1503 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
1504 if (priv->r->get_mac_rx_pause_sts(port))
1505 state->pause |= MLO_PAUSE_RX;
1506 if (priv->r->get_mac_tx_pause_sts(port))
1507 state->pause |= MLO_PAUSE_TX;
1508 }
1509
1510 static void rtl838x_mac_link_down(struct phylink_config *config,
1511 unsigned int mode,
1512 phy_interface_t interface)
1513 {
1514 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1515 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1516
1517 pr_debug("In %s\n", __func__);
1518 /* Stop TX/RX to port */
1519 sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port));
1520 }
1521
1522 static void rtl838x_mac_link_up(struct phylink_config *config,
1523 struct phy_device *phy, unsigned int mode,
1524 phy_interface_t interface, int speed, int duplex,
1525 bool tx_pause, bool rx_pause)
1526 {
1527 struct net_device *dev = container_of(config->dev, struct net_device, dev);
1528 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1529
1530 pr_debug("In %s\n", __func__);
1531 /* Restart TX/RX to port */
1532 sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port));
1533 }
1534
1535 static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac)
1536 {
1537 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1538 unsigned long flags;
1539
1540 spin_lock_irqsave(&priv->lock, flags);
1541 pr_debug("In %s\n", __func__);
1542 sw_w32((mac[0] << 8) | mac[1], priv->r->mac);
1543 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4);
1544
1545 if (priv->family_id == RTL8380_FAMILY_ID) {
1546 /* 2 more registers, ALE/MAC block */
1547 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE);
1548 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1549 (RTL838X_MAC_ALE + 4));
1550
1551 sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2);
1552 sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1553 RTL838X_MAC2 + 4);
1554 }
1555 spin_unlock_irqrestore(&priv->lock, flags);
1556 }
1557
1558 static int rtl838x_set_mac_address(struct net_device *dev, void *p)
1559 {
1560 struct rtl838x_eth_priv *priv = netdev_priv(dev);
1561 const struct sockaddr *addr = p;
1562 u8 *mac = (u8 *) (addr->sa_data);
1563
1564 if (!is_valid_ether_addr(addr->sa_data))
1565 return -EADDRNOTAVAIL;
1566
1567 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1568 rtl838x_set_mac_hw(dev, mac);
1569
1570 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4));
1571 return 0;
1572 }
1573
/* MAC init for the RTL8390 family: not implemented yet.
 * We will need to set-up EEE and the egress-rate limitation here. */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	return 0;
}
1579
1580 static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
1581 {
1582 int i;
1583
1584 if (priv->family_id == 0x8390)
1585 return rtl8390_init_mac(priv);
1586
1587 // At present we do not know how to set up EEE on any other SoC than RTL8380
1588 if (priv->family_id != 0x8380)
1589 return 0;
1590
1591 pr_info("%s\n", __func__);
1592 /* fix timer for EEE */
1593 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
1594 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
1595
1596 /* Init VLAN. TODO: Understand what is being done, here */
1597 if (priv->id == 0x8382) {
1598 for (i = 0; i <= 28; i++)
1599 sw_w32(0, 0xd57c + i * 0x80);
1600 }
1601 if (priv->id == 0x8380) {
1602 for (i = 8; i <= 28; i++)
1603 sw_w32(0, 0xd57c + i * 0x80);
1604 }
1605 return 0;
1606 }
1607
/* ethtool get_link_ksettings: delegate entirely to phylink */
static int rtl838x_get_link_ksettings(struct net_device *ndev,
				      struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);
	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
1616
/* ethtool set_link_ksettings: delegate entirely to phylink */
static int rtl838x_set_link_ksettings(struct net_device *ndev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct rtl838x_eth_priv *priv = netdev_priv(ndev);

	pr_debug("%s called\n", __func__);
	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
1625
1626 static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1627 {
1628 u32 val;
1629 int err;
1630 struct rtl838x_eth_priv *priv = bus->priv;
1631
1632 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380)
1633 return rtl838x_read_sds_phy(mii_id, regnum);
1634
1635 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1636 err = rtl838x_read_mmd_phy(mii_id,
1637 mdiobus_c45_devad(regnum),
1638 regnum, &val);
1639 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1640 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1641 val, err);
1642 } else {
1643 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1644 err = rtl838x_read_phy(mii_id, page, regnum, &val);
1645 }
1646 if (err)
1647 return err;
1648 return val;
1649 }
1650
/* Un-paged MDIO read: read from the default page 0 */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1655
1656 static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1657 {
1658 u32 val;
1659 int err;
1660 struct rtl838x_eth_priv *priv = bus->priv;
1661
1662 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1663 return rtl839x_read_sds_phy(mii_id, regnum);
1664
1665 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1666 err = rtl839x_read_mmd_phy(mii_id,
1667 mdiobus_c45_devad(regnum),
1668 regnum, &val);
1669 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1670 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1671 val, err);
1672 } else {
1673 err = rtl839x_read_phy(mii_id, page, regnum, &val);
1674 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1675 }
1676 if (err)
1677 return err;
1678 return val;
1679 }
1680
/* Un-paged MDIO read: read from the default page 0 */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1685
1686 static int rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1687 {
1688 u32 val;
1689 int err;
1690 struct rtl838x_eth_priv *priv = bus->priv;
1691
1692 if (priv->phy_is_internal[mii_id])
1693 return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
1694
1695 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1696 err = rtl930x_read_mmd_phy(mii_id,
1697 mdiobus_c45_devad(regnum),
1698 regnum, &val);
1699 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1700 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1701 val, err);
1702 } else {
1703 err = rtl930x_read_phy(mii_id, page, regnum, &val);
1704 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1705 }
1706 if (err)
1707 return err;
1708 return val;
1709 }
1710
/* Un-paged MDIO read: read from the default page 0 */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1715
1716 static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum)
1717 {
1718 u32 val;
1719 int err, v;
1720 struct rtl838x_eth_priv *priv = bus->priv;
1721
1722 pr_debug("%s: In here, port %d\n", __func__, mii_id);
1723 if (priv->phy_is_internal[mii_id]) {
1724 v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum);
1725 if (v < 0) {
1726 err = v;
1727 } else {
1728 err = 0;
1729 val = v;
1730 }
1731 } else {
1732 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1733 err = rtl931x_read_mmd_phy(mii_id,
1734 mdiobus_c45_devad(regnum),
1735 regnum, &val);
1736 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id,
1737 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1738 val, err);
1739 } else {
1740 err = rtl931x_read_phy(mii_id, page, regnum, &val);
1741 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err);
1742 }
1743 }
1744
1745 if (err)
1746 return err;
1747 return val;
1748 }
1749
/* Un-paged MDIO read: read from the default page 0 */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1754
1755 static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1756 int regnum, u16 value)
1757 {
1758 u32 offset = 0;
1759 struct rtl838x_eth_priv *priv = bus->priv;
1760 int err;
1761
1762 if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) {
1763 if (mii_id == 26)
1764 offset = 0x100;
1765 sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2));
1766 return 0;
1767 }
1768
1769 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1770 err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1771 regnum, value);
1772 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1773 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1774 value, err);
1775
1776 return err;
1777 }
1778 err = rtl838x_write_phy(mii_id, page, regnum, value);
1779 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1780 return err;
1781 }
1782
/* Un-paged MDIO write: write to the default page 0 */
static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1788
1789 static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1790 int regnum, u16 value)
1791 {
1792 struct rtl838x_eth_priv *priv = bus->priv;
1793 int err;
1794
1795 if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1796 return rtl839x_write_sds_phy(mii_id, regnum, value);
1797
1798 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1799 err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1800 regnum, value);
1801 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1802 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1803 value, err);
1804
1805 return err;
1806 }
1807
1808 err = rtl839x_write_phy(mii_id, page, regnum, value);
1809 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1810 return err;
1811 }
1812
/* Un-paged MDIO write: write to the default page 0 */
static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1818
1819 static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1820 int regnum, u16 value)
1821 {
1822 struct rtl838x_eth_priv *priv = bus->priv;
1823 int err;
1824
1825 if (priv->phy_is_internal[mii_id])
1826 return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
1827
1828 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD))
1829 return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1830 regnum, value);
1831
1832 err = rtl930x_write_phy(mii_id, page, regnum, value);
1833 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1834 return err;
1835 }
1836
/* Un-paged MDIO write: write to the default page 0 */
static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1842
1843 static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page,
1844 int regnum, u16 value)
1845 {
1846 struct rtl838x_eth_priv *priv = bus->priv;
1847 int err;
1848
1849 if (priv->phy_is_internal[mii_id])
1850 return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value);
1851
1852 if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) {
1853 err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum),
1854 regnum, value);
1855 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id,
1856 mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum),
1857 value, err);
1858
1859 return err;
1860 }
1861
1862 err = rtl931x_write_phy(mii_id, page, regnum, value);
1863 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err);
1864 return err;
1865 }
1866
/* Un-paged MDIO write: write to the default page 0 */
static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id,
			      int regnum, u16 value)
{
	return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value);
}
1872
1873 static int rtl838x_mdio_reset(struct mii_bus *bus)
1874 {
1875 pr_debug("%s called\n", __func__);
1876 /* Disable MAC polling the PHY so that we can start configuration */
1877 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL);
1878
1879 /* Enable PHY control via SoC */
1880 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
1881
1882 // Probably should reset all PHYs here...
1883 return 0;
1884 }
1885
/*
 * MDIO bus reset for RTL839x: intentionally a no-op.
 * Disabling PHY polling (as done for RTL838x) does not work on this
 * family. The non-functional register sequence is kept below, disabled
 * via #if 0 instead of dead code after an early return, so it no longer
 * triggers unreachable-code warnings but remains as documentation.
 */
static int rtl839x_mdio_reset(struct mii_bus *bus)
{
	return 0;

#if 0
	pr_debug("%s called\n", __func__);
	/* BUG: The following does not work, but should! */
	/* Disable MAC polling the PHY so that we can start configuration */
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL);
	sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4);
	/* Disable PHY polling via SoC */
	sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);

	// Probably should reset all PHYs here...
	return 0;
#endif
}
1901
/* Bit offset of each port's MAC-type field in RTL930X_SMI_MAC_TYPE_CTRL.
 * Ports 0-23 share one offset per group of four ports; ports 24-27 have
 * individual offsets (12, 15, 18, 21). Indexed by port number. */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
1904
1905 static int rtl930x_mdio_reset(struct mii_bus *bus)
1906 {
1907 int i;
1908 int pos;
1909 struct rtl838x_eth_priv *priv = bus->priv;
1910 u32 c45_mask = 0;
1911 u32 poll_sel[2];
1912 u32 poll_ctrl = 0;
1913 u32 private_poll_mask = 0;
1914 u32 v;
1915 bool uses_usxgmii = false; // For the Aquantia PHYs
1916 bool uses_hisgmii = false; // For the RTL8221/8226
1917
1918 // Mapping of port to phy-addresses on an SMI bus
1919 poll_sel[0] = poll_sel[1] = 0;
1920 for (i = 0; i < RTL930X_CPU_PORT; i++) {
1921 if (priv->smi_bus[i] > 3)
1922 continue;
1923 pos = (i % 6) * 5;
1924 sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
1925 RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
1926
1927 pos = (i * 2) % 32;
1928 poll_sel[i / 16] |= priv->smi_bus[i] << pos;
1929 poll_ctrl |= BIT(20 + priv->smi_bus[i]);
1930 }
1931
1932 // Configure which SMI bus is behind which port number
1933 sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
1934 sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
1935
1936 // Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+)
1937 sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
1938
1939 // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
1940 for (i = 0; i < 4; i++)
1941 if (priv->smi_bus_isc45[i])
1942 c45_mask |= BIT(i + 16);
1943
1944 pr_info("c45_mask: %08x\n", c45_mask);
1945 sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
1946
1947 // Set the MAC type of each port according to the PHY-interface
1948 // Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0
1949 v = 0;
1950 for (i = 0; i < RTL930X_CPU_PORT; i++) {
1951 switch (priv->interfaces[i]) {
1952 case PHY_INTERFACE_MODE_10GBASER:
1953 break; // Serdes: Value = 0
1954
1955 case PHY_INTERFACE_MODE_HSGMII:
1956 private_poll_mask |= BIT(i);
1957 // fallthrough
1958 case PHY_INTERFACE_MODE_USXGMII:
1959 v |= BIT(mac_type_bit[i]);
1960 uses_usxgmii = true;
1961 break;
1962
1963 case PHY_INTERFACE_MODE_QSGMII:
1964 private_poll_mask |= BIT(i);
1965 v |= 3 << mac_type_bit[i];
1966 break;
1967
1968 default:
1969 break;
1970 }
1971 }
1972 sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);
1973
1974 // Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones)
1975 sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);
1976
1977 /* The following magic values are found in the port configuration, they seem to
1978 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
1979 * the XGS1250 and the RTL8226 of the XGS1210 */
1980 if (uses_usxgmii) {
1981 sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
1982 sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
1983 sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
1984 }
1985 if (uses_hisgmii) {
1986 sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG);
1987 sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG);
1988 sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG);
1989 }
1990
1991 pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__,
1992 sw_r32(RTL930X_SMI_GLB_CTRL));
1993 pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__,
1994 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL));
1995 pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__,
1996 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
1997 pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__,
1998 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL));
1999 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__,
2000 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG));
2001 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__,
2002 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG));
2003 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__,
2004 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG));
2005 pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__,
2006 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL));
2007 return 0;
2008 }
2009
/* mii_bus reset callback for the RTL931x family.
 *
 * Maps each of the 56 front-panel ports to its SMI bus and PHY address,
 * enables the MDC master for every SMI bus that has at least one port
 * mapped to it, and switches busses carrying Clause-45 PHYs to c45
 * polling. Always returns 0.
 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
	int i;
	int pos;
	struct rtl838x_eth_priv *priv = bus->priv;
	u32 c45_mask = 0;
	u32 poll_sel[4];
	u32 poll_ctrl = 0;
	bool mdc_on[4];

	pr_info("%s called\n", __func__);
	// Disable port polling for configuration purposes
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
	sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
	msleep(100);

	mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
	// Mapping of port to phy-addresses on an SMI bus
	poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
	for (i = 0; i < 56; i++) {
		// 5 address bits per port, 6 ports per 32-bit register
		pos = (i % 6) * 5;
		sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
		// 2 bus-select bits per port, 16 ports per register
		pos = (i * 2) % 32;
		poll_sel[i / 16] |= priv->smi_bus[i] << pos;
		poll_ctrl |= BIT(20 + priv->smi_bus[i]);
		// NOTE(review): smi_bus[i] indexes mdc_on[4] unchecked;
		// assumes the DT never assigns a bus number > 3 — verify.
		mdc_on[priv->smi_bus[i]] = true;
	}

	// Configure which SMI bus is behind which port number
	for (i = 0; i < 4; i++) {
		pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
		sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
	}

	// Configure which SMI busses
	pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	// NOTE(review): c45_mask is still 0 at this point; this print shows
	// the pre-loop value, the print after the loop shows the real mask.
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
	for (i = 0; i < 4; i++) {
		// bus is polled in c45
		if (priv->smi_bus_isc45[i])
			c45_mask |= 0x2 << (i * 2); // Std. C45, non-standard is 0x3
		// Enable bus access via MDC
		if (mdc_on[i])
			sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
	}

	pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
	pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));

	/* We have a 10G PHY enable polling
	sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
	sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
	sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
	*/
	sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1);

	return 0;
}
2068
/* One-time RTL931x switch memory initialization.
 *
 * Triggers the hardware self-init of the ENCAP, MIB, ACL (PIE) and ALE
 * memories and busy-waits until each completes, then enables ESD auto
 * recovery and configures the SPI controller. Always returns 0.
 * NOTE(review): the busy-wait loops have no timeout; a hardware fault
 * that never clears an init bit hangs the boot here — consider bounding.
 */
static int rtl931x_chip_init(struct rtl838x_eth_priv *priv)
{
	pr_info("In %s\n", __func__);

	// Initialize Encapsulation memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
	do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
	pr_info("%s: init ENCAP done\n", __func__);

	// Initialize Management Information Base memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_MIB_INIT);
	do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
	pr_info("%s: init MIB done\n", __func__);

	// Initialize ACL (PIE) memory and wait until finished
	sw_w32(0x1, RTL931X_MEM_ACL_INIT);
	do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
	pr_info("%s: init ACL done\n", __func__);

	// Initialize ALE memory and wait until finished
	sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
	sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
	sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2);
	do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
	pr_info("%s: init ALE done\n", __func__);

	// Enable ESD auto recovery
	sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);

	// Init SPI, is this for thermal control or what?
	sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);

	return 0;
}
2104
2105 static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
2106 {
2107 struct device_node *mii_np, *dn;
2108 u32 pn;
2109 int ret;
2110
2111 pr_debug("%s called\n", __func__);
2112 mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus");
2113
2114 if (!mii_np) {
2115 dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus");
2116 return -ENODEV;
2117 }
2118
2119 if (!of_device_is_available(mii_np)) {
2120 ret = -ENODEV;
2121 goto err_put_node;
2122 }
2123
2124 priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev);
2125 if (!priv->mii_bus) {
2126 ret = -ENOMEM;
2127 goto err_put_node;
2128 }
2129
2130 switch(priv->family_id) {
2131 case RTL8380_FAMILY_ID:
2132 priv->mii_bus->name = "rtl838x-eth-mdio";
2133 priv->mii_bus->read = rtl838x_mdio_read;
2134 priv->mii_bus->read_paged = rtl838x_mdio_read_paged;
2135 priv->mii_bus->write = rtl838x_mdio_write;
2136 priv->mii_bus->write_paged = rtl838x_mdio_write_paged;
2137 priv->mii_bus->reset = rtl838x_mdio_reset;
2138 break;
2139 case RTL8390_FAMILY_ID:
2140 priv->mii_bus->name = "rtl839x-eth-mdio";
2141 priv->mii_bus->read = rtl839x_mdio_read;
2142 priv->mii_bus->read_paged = rtl839x_mdio_read_paged;
2143 priv->mii_bus->write = rtl839x_mdio_write;
2144 priv->mii_bus->write_paged = rtl839x_mdio_write_paged;
2145 priv->mii_bus->reset = rtl839x_mdio_reset;
2146 break;
2147 case RTL9300_FAMILY_ID:
2148 priv->mii_bus->name = "rtl930x-eth-mdio";
2149 priv->mii_bus->read = rtl930x_mdio_read;
2150 priv->mii_bus->read_paged = rtl930x_mdio_read_paged;
2151 priv->mii_bus->write = rtl930x_mdio_write;
2152 priv->mii_bus->write_paged = rtl930x_mdio_write_paged;
2153 priv->mii_bus->reset = rtl930x_mdio_reset;
2154 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2155 break;
2156 case RTL9310_FAMILY_ID:
2157 priv->mii_bus->name = "rtl931x-eth-mdio";
2158 priv->mii_bus->read = rtl931x_mdio_read;
2159 priv->mii_bus->read_paged = rtl931x_mdio_read_paged;
2160 priv->mii_bus->write = rtl931x_mdio_write;
2161 priv->mii_bus->write_paged = rtl931x_mdio_write_paged;
2162 priv->mii_bus->reset = rtl931x_mdio_reset;
2163 priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
2164 break;
2165 }
2166 priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD;
2167 priv->mii_bus->priv = priv;
2168 priv->mii_bus->parent = &priv->pdev->dev;
2169
2170 for_each_node_by_name(dn, "ethernet-phy") {
2171 u32 smi_addr[2];
2172
2173 if (of_property_read_u32(dn, "reg", &pn))
2174 continue;
2175
2176 if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
2177 smi_addr[0] = 0;
2178 smi_addr[1] = pn;
2179 }
2180
2181 if (of_property_read_u32(dn, "sds", &priv->sds_id[pn]))
2182 priv->sds_id[pn] = -1;
2183 else {
2184 pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]);
2185 }
2186
2187 if (pn < MAX_PORTS) {
2188 priv->smi_bus[pn] = smi_addr[0];
2189 priv->smi_addr[pn] = smi_addr[1];
2190 } else {
2191 pr_err("%s: illegal port number %d\n", __func__, pn);
2192 }
2193
2194 if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
2195 priv->smi_bus_isc45[smi_addr[0]] = true;
2196
2197 if (of_property_read_bool(dn, "phy-is-integrated")) {
2198 priv->phy_is_internal[pn] = true;
2199 }
2200 }
2201
2202 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
2203 if (!dn) {
2204 dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n");
2205 return -ENODEV;
2206 }
2207
2208 for_each_node_by_name(dn, "port") {
2209 if (of_property_read_u32(dn, "reg", &pn))
2210 continue;
2211 pr_debug("%s Looking at port %d\n", __func__, pn);
2212 if (pn > priv->cpu_port)
2213 continue;
2214 if (of_get_phy_mode(dn, &priv->interfaces[pn]))
2215 priv->interfaces[pn] = PHY_INTERFACE_MODE_NA;
2216 pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn]));
2217 }
2218
2219 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
2220 ret = of_mdiobus_register(priv->mii_bus, mii_np);
2221
2222 err_put_node:
2223 of_node_put(mii_np);
2224 return ret;
2225 }
2226
2227 static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
2228 {
2229 pr_debug("%s called\n", __func__);
2230 if (!priv->mii_bus)
2231 return 0;
2232
2233 mdiobus_unregister(priv->mii_bus);
2234 mdiobus_free(priv->mii_bus);
2235
2236 return 0;
2237 }
2238
/* ndo_fix_features callback: no feature combinations need fixing up on
 * this hardware, so the requested set is accepted unchanged.
 */
static netdev_features_t rtl838x_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	return features;
}
2244
2245 static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
2246 {
2247 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2248
2249 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2250 if (!(features & NETIF_F_RXCSUM))
2251 sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2252 else
2253 sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
2254 }
2255
2256 return 0;
2257 }
2258
2259 static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
2260 {
2261 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2262
2263 if ((features ^ dev->features) & NETIF_F_RXCSUM) {
2264 if (!(features & NETIF_F_RXCSUM))
2265 sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
2266 else
2267 sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
2268 }
2269
2270 return 0;
2271 }
2272
/* net_device callbacks, RTL838x family: 83xx queue selection, 838x
 * multicast handling and the 83xx RX-checksum feature bit.
 */
static const struct net_device_ops rtl838x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2286
/* net_device callbacks, RTL839x family: differs from the 838x table only
 * in the multicast-list handler.
 */
static const struct net_device_ops rtl839x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl83xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl83xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2300
/* net_device callbacks, RTL930x family: 93xx queue selection and feature
 * handling, 930x multicast handling.
 */
static const struct net_device_ops rtl930x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
	.ndo_setup_tc = rtl83xx_setup_tc,
};
2314
/* net_device callbacks, RTL931x family.
 * NOTE(review): unlike the other families, no .ndo_setup_tc is wired up
 * here — confirm whether tc offload is intentionally unsupported on 931x.
 */
static const struct net_device_ops rtl931x_eth_netdev_ops = {
	.ndo_open = rtl838x_eth_open,
	.ndo_stop = rtl838x_eth_stop,
	.ndo_start_xmit = rtl838x_eth_tx,
	.ndo_select_queue = rtl93xx_pick_tx_queue,
	.ndo_set_mac_address = rtl838x_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
	.ndo_tx_timeout = rtl838x_eth_tx_timeout,
	.ndo_set_features = rtl93xx_set_features,
	.ndo_fix_features = rtl838x_fix_features,
};
2327
/* phylink MAC operations shared by all supported SoC families. */
static const struct phylink_mac_ops rtl838x_phylink_ops = {
	.validate = rtl838x_validate,
	.mac_pcs_get_state = rtl838x_mac_pcs_get_state,
	.mac_an_restart = rtl838x_mac_an_restart,
	.mac_config = rtl838x_mac_config,
	.mac_link_down = rtl838x_mac_link_down,
	.mac_link_up = rtl838x_mac_link_up,
};
2336
/* ethtool operations: only link-settings get/set are implemented. */
static const struct ethtool_ops rtl838x_ethtool_ops = {
	.get_link_ksettings = rtl838x_get_link_ksettings,
	.set_link_ksettings = rtl838x_set_link_ksettings,
};
2341
2342 static int __init rtl838x_eth_probe(struct platform_device *pdev)
2343 {
2344 struct net_device *dev;
2345 struct device_node *dn = pdev->dev.of_node;
2346 struct rtl838x_eth_priv *priv;
2347 struct resource *res, *mem;
2348 phy_interface_t phy_mode;
2349 struct phylink *phylink;
2350 int err = 0, i, rxrings, rxringlen;
2351 struct ring_b *ring;
2352
2353 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2354 (u32)pdev, (u32)(&(pdev->dev)));
2355
2356 if (!dn) {
2357 dev_err(&pdev->dev, "No DT found\n");
2358 return -EINVAL;
2359 }
2360
2361 rxrings = (soc_info.family == RTL8380_FAMILY_ID
2362 || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32;
2363 rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings;
2364 rxringlen = MAX_ENTRIES / rxrings;
2365 rxringlen = rxringlen > MAX_RXLEN ? MAX_RXLEN : rxringlen;
2366
2367 dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings);
2368 if (!dev) {
2369 err = -ENOMEM;
2370 goto err_free;
2371 }
2372 SET_NETDEV_DEV(dev, &pdev->dev);
2373 priv = netdev_priv(dev);
2374
2375 /* obtain buffer memory space */
2376 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2377 if (res) {
2378 mem = devm_request_mem_region(&pdev->dev, res->start,
2379 resource_size(res), res->name);
2380 if (!mem) {
2381 dev_err(&pdev->dev, "cannot request memory space\n");
2382 err = -ENXIO;
2383 goto err_free;
2384 }
2385
2386 dev->mem_start = mem->start;
2387 dev->mem_end = mem->end;
2388 } else {
2389 dev_err(&pdev->dev, "cannot request IO resource\n");
2390 err = -ENXIO;
2391 goto err_free;
2392 }
2393
2394 /* Allocate buffer memory */
2395 priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
2396 + sizeof(struct ring_b) + sizeof(struct notify_b),
2397 (void *)&dev->mem_start, GFP_KERNEL);
2398 if (!priv->membase) {
2399 dev_err(&pdev->dev, "cannot allocate DMA buffer\n");
2400 err = -ENOMEM;
2401 goto err_free;
2402 }
2403
2404 // Allocate ring-buffer space at the end of the allocated memory
2405 ring = priv->membase;
2406 ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
2407
2408 spin_lock_init(&priv->lock);
2409
2410 dev->ethtool_ops = &rtl838x_ethtool_ops;
2411 dev->min_mtu = ETH_ZLEN;
2412 dev->max_mtu = 1536;
2413 dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
2414 dev->hw_features = NETIF_F_RXCSUM;
2415
2416 priv->id = soc_info.id;
2417 priv->family_id = soc_info.family;
2418 if (priv->id) {
2419 pr_info("Found SoC ID: %4x: %s, family %x\n",
2420 priv->id, soc_info.name, priv->family_id);
2421 } else {
2422 pr_err("Unknown chip id (%04x)\n", priv->id);
2423 return -ENODEV;
2424 }
2425
2426 switch (priv->family_id) {
2427 case RTL8380_FAMILY_ID:
2428 priv->cpu_port = RTL838X_CPU_PORT;
2429 priv->r = &rtl838x_reg;
2430 dev->netdev_ops = &rtl838x_eth_netdev_ops;
2431 break;
2432 case RTL8390_FAMILY_ID:
2433 priv->cpu_port = RTL839X_CPU_PORT;
2434 priv->r = &rtl839x_reg;
2435 dev->netdev_ops = &rtl839x_eth_netdev_ops;
2436 break;
2437 case RTL9300_FAMILY_ID:
2438 priv->cpu_port = RTL930X_CPU_PORT;
2439 priv->r = &rtl930x_reg;
2440 dev->netdev_ops = &rtl930x_eth_netdev_ops;
2441 break;
2442 case RTL9310_FAMILY_ID:
2443 priv->cpu_port = RTL931X_CPU_PORT;
2444 priv->r = &rtl931x_reg;
2445 dev->netdev_ops = &rtl931x_eth_netdev_ops;
2446 rtl931x_chip_init(priv);
2447 break;
2448 default:
2449 pr_err("Unknown SoC family\n");
2450 return -ENODEV;
2451 }
2452 priv->rxringlen = rxringlen;
2453 priv->rxrings = rxrings;
2454
2455 /* Obtain device IRQ number */
2456 dev->irq = platform_get_irq(pdev, 0);
2457 if (dev->irq < 0) {
2458 dev_err(&pdev->dev, "cannot obtain network-device IRQ\n");
2459 goto err_free;
2460 }
2461
2462 err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq,
2463 IRQF_SHARED, dev->name, dev);
2464 if (err) {
2465 dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n",
2466 __func__, err);
2467 goto err_free;
2468 }
2469
2470 rtl8380_init_mac(priv);
2471
2472 /* try to get mac address in the following order:
2473 * 1) from device tree data
2474 * 2) from internal registers set by bootloader
2475 */
2476 of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
2477 if (is_valid_ether_addr(dev->dev_addr)) {
2478 rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr);
2479 } else {
2480 dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff;
2481 dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff;
2482 dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff;
2483 dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff;
2484 dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff;
2485 dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff;
2486 }
2487 /* if the address is invalid, use a random value */
2488 if (!is_valid_ether_addr(dev->dev_addr)) {
2489 struct sockaddr sa = { AF_UNSPEC };
2490
2491 netdev_warn(dev, "Invalid MAC address, using random\n");
2492 eth_hw_addr_random(dev);
2493 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
2494 if (rtl838x_set_mac_address(dev, &sa))
2495 netdev_warn(dev, "Failed to set MAC address.\n");
2496 }
2497 pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac),
2498 sw_r32(priv->r->mac + 4));
2499 strcpy(dev->name, "eth%d");
2500 priv->pdev = pdev;
2501 priv->netdev = dev;
2502
2503 err = rtl838x_mdio_init(priv);
2504 if (err)
2505 goto err_free;
2506
2507 err = register_netdev(dev);
2508 if (err)
2509 goto err_free;
2510
2511 for (i = 0; i < priv->rxrings; i++) {
2512 priv->rx_qs[i].id = i;
2513 priv->rx_qs[i].priv = priv;
2514 netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
2515 }
2516
2517 platform_set_drvdata(pdev, dev);
2518
2519 phy_mode = PHY_INTERFACE_MODE_NA;
2520 err = of_get_phy_mode(dn, &phy_mode);
2521 if (err < 0) {
2522 dev_err(&pdev->dev, "incorrect phy-mode\n");
2523 err = -EINVAL;
2524 goto err_free;
2525 }
2526 priv->phylink_config.dev = &dev->dev;
2527 priv->phylink_config.type = PHYLINK_NETDEV;
2528
2529 phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode,
2530 phy_mode, &rtl838x_phylink_ops);
2531
2532 if (IS_ERR(phylink)) {
2533 err = PTR_ERR(phylink);
2534 goto err_free;
2535 }
2536 priv->phylink = phylink;
2537
2538 return 0;
2539
2540 err_free:
2541 pr_err("Error setting up netdev, freeing it again.\n");
2542 free_netdev(dev);
2543 return err;
2544 }
2545
2546 static int rtl838x_eth_remove(struct platform_device *pdev)
2547 {
2548 struct net_device *dev = platform_get_drvdata(pdev);
2549 struct rtl838x_eth_priv *priv = netdev_priv(dev);
2550 int i;
2551
2552 if (dev) {
2553 pr_info("Removing platform driver for rtl838x-eth\n");
2554 rtl838x_mdio_remove(priv);
2555 rtl838x_hw_stop(priv);
2556
2557 netif_tx_stop_all_queues(dev);
2558
2559 for (i = 0; i < priv->rxrings; i++)
2560 netif_napi_del(&priv->rx_qs[i].napi);
2561
2562 unregister_netdev(dev);
2563 free_netdev(dev);
2564 }
2565 return 0;
2566 }
2567
/* Devicetree match table; one compatible covers all supported families. */
static const struct of_device_id rtl838x_eth_of_ids[] = {
	{ .compatible = "realtek,rtl838x-eth"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids);
2573
/* Platform driver glue; no power-management callbacks are provided. */
static struct platform_driver rtl838x_eth_driver = {
	.probe = rtl838x_eth_probe,
	.remove = rtl838x_eth_remove,
	.driver = {
		.name = "rtl838x-eth",
		.pm = NULL,
		.of_match_table = rtl838x_eth_of_ids,
	},
};
2583
/* Register the driver with the platform bus at module init/exit. */
module_platform_driver(rtl838x_eth_driver);

MODULE_AUTHOR("B. Koblitz");
MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
MODULE_LICENSE("GPL");