1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
27 extern struct rtl83xx_soc_info soc_info
;
/*
 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX.
 * The ring is assigned by the switch based on packet/port priority.
 * Maximum number of TX rings is 2, Ring 2 being the high priority
 * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length
 * for an RX ring, MAX_ENTRIES the maximum number of entries
 * available in total for all queues.
 */
37 #define MAX_RXRINGS 32
39 #define MAX_ENTRIES (300 * 8)
42 #define NOTIFY_EVENTS 10
43 #define NOTIFY_BLOCKS 10
46 #define TX_EN_93XX 0x20
47 #define RX_EN_93XX 0x10
51 #define MAX_SMI_BUSSES 4
53 #define RING_BUFFER 1600
58 uint16_t size
; /* buffer size */
60 uint16_t len
; /* pkt len */
61 /* cpu_tag[0] is a reserved uint16_t on RTL83xx */
63 } __packed
__aligned(1);
72 } __packed
__aligned(1);
75 uint32_t rx_r
[MAX_RXRINGS
][MAX_RXLEN
];
76 uint32_t tx_r
[TXRINGS
][TXRINGLEN
];
77 struct p_hdr rx_header
[MAX_RXRINGS
][MAX_RXLEN
];
78 struct p_hdr tx_header
[TXRINGS
][TXRINGLEN
];
79 uint32_t c_rx
[MAX_RXRINGS
];
80 uint32_t c_tx
[TXRINGS
];
81 uint8_t tx_space
[TXRINGS
* TXRINGLEN
* RING_BUFFER
];
86 struct n_event events
[NOTIFY_EVENTS
];
90 struct notify_block blocks
[NOTIFY_BLOCKS
];
92 u32 ring
[NOTIFY_BLOCKS
];
96 static void rtl838x_create_tx_header(struct p_hdr
*h
, unsigned int dest_port
, int prio
)
98 // cpu_tag[0] is reserved on the RTL83XX SoCs
99 h
->cpu_tag
[1] = 0x0400; // BIT 10: RTL8380_CPU_TAG
100 h
->cpu_tag
[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
101 h
->cpu_tag
[3] = 0x0000;
102 h
->cpu_tag
[4] = BIT(dest_port
) >> 16;
103 h
->cpu_tag
[5] = BIT(dest_port
) & 0xffff;
105 /* Set internal priority (PRI) and enable (AS_PRI) */
107 h
->cpu_tag
[2] |= ((prio
& 0x7) | BIT(3)) << 12;
110 static void rtl839x_create_tx_header(struct p_hdr
*h
, unsigned int dest_port
, int prio
)
112 // cpu_tag[0] is reserved on the RTL83XX SoCs
113 h
->cpu_tag
[1] = 0x0100; // RTL8390_CPU_TAG marker
114 h
->cpu_tag
[2] = BIT(4); /* AS_DPM flag */
115 h
->cpu_tag
[3] = h
->cpu_tag
[4] = h
->cpu_tag
[5] = 0;
116 // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
117 if (dest_port
>= 32) {
119 h
->cpu_tag
[2] |= (BIT(dest_port
) >> 16) & 0xf;
120 h
->cpu_tag
[3] = BIT(dest_port
) & 0xffff;
122 h
->cpu_tag
[4] = BIT(dest_port
) >> 16;
123 h
->cpu_tag
[5] = BIT(dest_port
) & 0xffff;
126 /* Set internal priority (PRI) and enable (AS_PRI) */
128 h
->cpu_tag
[2] |= ((prio
& 0x7) | BIT(3)) << 8;
131 static void rtl930x_create_tx_header(struct p_hdr
*h
, unsigned int dest_port
, int prio
)
133 h
->cpu_tag
[0] = 0x8000; // CPU tag marker
134 h
->cpu_tag
[1] = h
->cpu_tag
[2] = 0;
138 h
->cpu_tag
[6] = BIT(dest_port
) >> 16;
139 h
->cpu_tag
[7] = BIT(dest_port
) & 0xffff;
141 /* Enable (AS_QID) and set priority queue (QID) */
143 h
->cpu_tag
[2] = (BIT(5) | (prio
& 0x1f)) << 8;
146 static void rtl931x_create_tx_header(struct p_hdr
*h
, unsigned int dest_port
, int prio
)
148 h
->cpu_tag
[0] = 0x8000; // CPU tag marker
149 h
->cpu_tag
[1] = h
->cpu_tag
[2] = 0;
151 h
->cpu_tag
[4] = h
->cpu_tag
[5] = h
->cpu_tag
[6] = h
->cpu_tag
[7] = 0;
152 if (dest_port
>= 32) {
154 h
->cpu_tag
[4] = BIT(dest_port
) >> 16;
155 h
->cpu_tag
[5] = BIT(dest_port
) & 0xffff;
157 h
->cpu_tag
[6] = BIT(dest_port
) >> 16;
158 h
->cpu_tag
[7] = BIT(dest_port
) & 0xffff;
161 /* Enable (AS_QID) and set priority queue (QID) */
163 h
->cpu_tag
[2] = (BIT(5) | (prio
& 0x1f)) << 8;
166 static void rtl93xx_header_vlan_set(struct p_hdr
*h
, int vlan
)
168 h
->cpu_tag
[2] |= BIT(4); // Enable VLAN forwarding offload
169 h
->cpu_tag
[2] |= (vlan
>> 8) & 0xf;
170 h
->cpu_tag
[3] |= (vlan
& 0xff) << 8;
173 struct rtl838x_rx_q
{
175 struct rtl838x_eth_priv
*priv
;
176 struct napi_struct napi
;
179 struct rtl838x_eth_priv
{
180 struct net_device
*netdev
;
181 struct platform_device
*pdev
;
184 struct mii_bus
*mii_bus
;
185 struct rtl838x_rx_q rx_qs
[MAX_RXRINGS
];
186 struct phylink
*phylink
;
187 struct phylink_config phylink_config
;
190 const struct rtl838x_eth_reg
*r
;
195 u8 smi_bus
[MAX_PORTS
];
196 u8 smi_addr
[MAX_PORTS
];
197 u32 sds_id
[MAX_PORTS
];
198 bool smi_bus_isc45
[MAX_SMI_BUSSES
];
199 bool phy_is_internal
[MAX_PORTS
];
200 phy_interface_t interfaces
[MAX_PORTS
];
203 extern int rtl838x_phy_init(struct rtl838x_eth_priv
*priv
);
204 extern int rtl838x_read_sds_phy(int phy_addr
, int phy_reg
);
205 extern int rtl839x_read_sds_phy(int phy_addr
, int phy_reg
);
206 extern int rtl839x_write_sds_phy(int phy_addr
, int phy_reg
, u16 v
);
207 extern int rtl930x_read_sds_phy(int phy_addr
, int page
, int phy_reg
);
208 extern int rtl930x_write_sds_phy(int phy_addr
, int page
, int phy_reg
, u16 v
);
209 extern int rtl931x_read_sds_phy(int phy_addr
, int page
, int phy_reg
);
210 extern int rtl931x_write_sds_phy(int phy_addr
, int page
, int phy_reg
, u16 v
);
211 extern int rtl930x_read_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32
*val
);
212 extern int rtl930x_write_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32 val
);
213 extern int rtl931x_read_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32
*val
);
214 extern int rtl931x_write_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32 val
);
/*
 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR registers track the fill
 * level of the rings. Writing x into these registers subtracts x from their
 * content. When the content reaches the ring size, the ASIC no longer adds
 * packets to this receive queue.
 */
222 void rtl838x_update_cntr(int r
, int released
)
224 // This feature is not available on RTL838x SoCs
227 void rtl839x_update_cntr(int r
, int released
)
229 // This feature is not available on RTL839x SoCs
232 void rtl930x_update_cntr(int r
, int released
)
234 int pos
= (r
% 3) * 10;
235 u32 reg
= RTL930X_DMA_IF_RX_RING_CNTR
+ ((r
/ 3) << 2);
238 v
= (v
>> pos
) & 0x3ff;
239 pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released
, v
, pos
, reg
);
240 sw_w32_mask(0x3ff << pos
, released
<< pos
, reg
);
244 void rtl931x_update_cntr(int r
, int released
)
246 int pos
= (r
% 3) * 10;
247 u32 reg
= RTL931X_DMA_IF_RX_RING_CNTR
+ ((r
/ 3) << 2);
250 v
= (v
>> pos
) & 0x3ff;
251 sw_w32_mask(0x3ff << pos
, released
<< pos
, reg
);
264 bool rtl838x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
266 /* cpu_tag[0] is reserved. Fields are off-by-one */
267 t
->reason
= h
->cpu_tag
[4] & 0xf;
268 t
->queue
= (h
->cpu_tag
[1] & 0xe0) >> 5;
269 t
->port
= h
->cpu_tag
[1] & 0x1f;
270 t
->crc_error
= t
->reason
== 13;
272 pr_debug("Reason: %d\n", t
->reason
);
273 if (t
->reason
!= 6) // NIC_RX_REASON_SPECIAL_TRAP
278 return t
->l2_offloaded
;
281 bool rtl839x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
283 /* cpu_tag[0] is reserved. Fields are off-by-one */
284 t
->reason
= h
->cpu_tag
[5] & 0x1f;
285 t
->queue
= (h
->cpu_tag
[4] & 0xe000) >> 13;
286 t
->port
= h
->cpu_tag
[1] & 0x3f;
287 t
->crc_error
= h
->cpu_tag
[4] & BIT(6);
289 pr_debug("Reason: %d\n", t
->reason
);
290 if ((t
->reason
>= 7 && t
->reason
<= 13) || // NIC_RX_REASON_RMA
291 (t
->reason
>= 23 && t
->reason
<= 25)) // NIC_RX_REASON_SPECIAL_TRAP
296 return t
->l2_offloaded
;
299 bool rtl930x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
301 t
->reason
= h
->cpu_tag
[7] & 0x3f;
302 t
->queue
= (h
->cpu_tag
[2] >> 11) & 0x1f;
303 t
->port
= (h
->cpu_tag
[0] >> 8) & 0x1f;
304 t
->crc_error
= h
->cpu_tag
[1] & BIT(6);
306 pr_debug("Reason %d, port %d, queue %d\n", t
->reason
, t
->port
, t
->queue
);
307 if (t
->reason
>= 19 && t
->reason
<= 27)
312 return t
->l2_offloaded
;
315 bool rtl931x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
317 t
->reason
= h
->cpu_tag
[7] & 0x3f;
318 t
->queue
= (h
->cpu_tag
[2] >> 11) & 0x1f;
319 t
->port
= (h
->cpu_tag
[0] >> 8) & 0x3f;
320 t
->crc_error
= h
->cpu_tag
[1] & BIT(6);
323 pr_info("%s: Reason %d, port %d, queue %d\n", __func__
, t
->reason
, t
->port
, t
->queue
);
324 if (t
->reason
>= 19 && t
->reason
<= 27) // NIC_RX_REASON_RMA
329 return t
->l2_offloaded
;
/*
 * Discard the RX ring-buffers, called as part of the net-ISR
 * when the buffer runs over.
 */
336 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv
*priv
, int status
)
341 struct ring_b
*ring
= priv
->membase
;
343 for (r
= 0; r
< priv
->rxrings
; r
++) {
344 pr_debug("In %s working on r: %d\n", __func__
, r
);
345 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
347 if ((ring
->rx_r
[r
][ring
->c_rx
[r
]] & 0x1))
349 pr_debug("Got something: %d\n", ring
->c_rx
[r
]);
350 h
= &ring
->rx_header
[r
][ring
->c_rx
[r
]];
351 memset(h
, 0, sizeof(struct p_hdr
));
352 h
->buf
= (u8
*)KSEG1ADDR(ring
->rx_space
353 + r
* priv
->rxringlen
* RING_BUFFER
354 + ring
->c_rx
[r
] * RING_BUFFER
);
355 h
->size
= RING_BUFFER
;
356 /* make sure the header is visible to the ASIC */
359 ring
->rx_r
[r
][ring
->c_rx
[r
]] = KSEG1ADDR(h
) | 0x1
360 | (ring
->c_rx
[r
] == (priv
->rxringlen
- 1) ? WRAP
: 0x1);
361 ring
->c_rx
[r
] = (ring
->c_rx
[r
] + 1) % priv
->rxringlen
;
362 } while (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
);
366 struct fdb_update_work
{
367 struct work_struct work
;
368 struct net_device
*ndev
;
369 u64 macs
[NOTIFY_EVENTS
+ 1];
372 void rtl838x_fdb_sync(struct work_struct
*work
)
374 const struct fdb_update_work
*uw
=
375 container_of(work
, struct fdb_update_work
, work
);
376 struct switchdev_notifier_fdb_info info
;
381 while (uw
->macs
[i
]) {
382 action
= (uw
->macs
[i
] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
383 : SWITCHDEV_FDB_DEL_TO_BRIDGE
;
384 u64_to_ether_addr(uw
->macs
[i
] & 0xffffffffffffULL
, addr
);
385 info
.addr
= &addr
[0];
388 pr_debug("FDB entry %d: %llx, action %d\n", i
, uw
->macs
[0], action
);
389 call_switchdev_notifiers(action
, uw
->ndev
, &info
.info
, NULL
);
395 static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv
*priv
)
397 struct notify_b
*nb
= priv
->membase
+ sizeof(struct ring_b
);
398 u32 e
= priv
->lastEvent
;
399 struct n_event
*event
;
402 struct fdb_update_work
*w
;
404 while (!(nb
->ring
[e
] & 1)) {
405 w
= kzalloc(sizeof(*w
), GFP_ATOMIC
);
407 pr_err("Out of memory: %s", __func__
);
410 INIT_WORK(&w
->work
, rtl838x_fdb_sync
);
412 for (i
= 0; i
< NOTIFY_EVENTS
; i
++) {
413 event
= &nb
->blocks
[e
].events
[i
];
419 w
->ndev
= priv
->netdev
;
423 /* Hand the ring entry back to the switch */
424 nb
->ring
[e
] = nb
->ring
[e
] | 1;
425 e
= (e
+ 1) % NOTIFY_BLOCKS
;
428 schedule_work(&w
->work
);
433 static irqreturn_t
rtl83xx_net_irq(int irq
, void *dev_id
)
435 struct net_device
*dev
= dev_id
;
436 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
437 u32 status
= sw_r32(priv
->r
->dma_if_intr_sts
);
440 pr_debug("IRQ: %08x\n", status
);
442 /* Ignore TX interrupt */
443 if ((status
& 0xf0000)) {
445 sw_w32(0x000f0000, priv
->r
->dma_if_intr_sts
);
449 if (status
& 0x0ff00) {
450 /* ACK and disable RX interrupt for this ring */
451 sw_w32_mask(0xff00 & status
, 0, priv
->r
->dma_if_intr_msk
);
452 sw_w32(0x0000ff00 & status
, priv
->r
->dma_if_intr_sts
);
453 for (i
= 0; i
< priv
->rxrings
; i
++) {
454 if (status
& BIT(i
+ 8)) {
455 pr_debug("Scheduling queue: %d\n", i
);
456 napi_schedule(&priv
->rx_qs
[i
].napi
);
461 /* RX buffer overrun */
462 if (status
& 0x000ff) {
463 pr_debug("RX buffer overrun: status %x, mask: %x\n",
464 status
, sw_r32(priv
->r
->dma_if_intr_msk
));
465 sw_w32(status
, priv
->r
->dma_if_intr_sts
);
466 rtl838x_rb_cleanup(priv
, status
& 0xff);
469 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00100000) {
470 sw_w32(0x00100000, priv
->r
->dma_if_intr_sts
);
471 rtl839x_l2_notification_handler(priv
);
474 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00200000) {
475 sw_w32(0x00200000, priv
->r
->dma_if_intr_sts
);
476 rtl839x_l2_notification_handler(priv
);
479 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00400000) {
480 sw_w32(0x00400000, priv
->r
->dma_if_intr_sts
);
481 rtl839x_l2_notification_handler(priv
);
487 static irqreturn_t
rtl93xx_net_irq(int irq
, void *dev_id
)
489 struct net_device
*dev
= dev_id
;
490 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
491 u32 status_rx_r
= sw_r32(priv
->r
->dma_if_intr_rx_runout_sts
);
492 u32 status_rx
= sw_r32(priv
->r
->dma_if_intr_rx_done_sts
);
493 u32 status_tx
= sw_r32(priv
->r
->dma_if_intr_tx_done_sts
);
496 pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
497 __func__
, status_tx
, status_rx
, status_rx_r
);
499 /* Ignore TX interrupt */
502 pr_debug("TX done\n");
503 sw_w32(status_tx
, priv
->r
->dma_if_intr_tx_done_sts
);
508 pr_debug("RX IRQ\n");
509 /* ACK and disable RX interrupt for given rings */
510 sw_w32(status_rx
, priv
->r
->dma_if_intr_rx_done_sts
);
511 sw_w32_mask(status_rx
, 0, priv
->r
->dma_if_intr_rx_done_msk
);
512 for (i
= 0; i
< priv
->rxrings
; i
++) {
513 if (status_rx
& BIT(i
)) {
514 pr_debug("Scheduling queue: %d\n", i
);
515 napi_schedule(&priv
->rx_qs
[i
].napi
);
520 /* RX buffer overrun */
522 pr_debug("RX buffer overrun: status %x, mask: %x\n",
523 status_rx_r
, sw_r32(priv
->r
->dma_if_intr_rx_runout_msk
));
524 sw_w32(status_rx_r
, priv
->r
->dma_if_intr_rx_runout_sts
);
525 rtl838x_rb_cleanup(priv
, status_rx_r
);
531 static const struct rtl838x_eth_reg rtl838x_reg
= {
532 .net_irq
= rtl83xx_net_irq
,
533 .mac_port_ctrl
= rtl838x_mac_port_ctrl
,
534 .dma_if_intr_sts
= RTL838X_DMA_IF_INTR_STS
,
535 .dma_if_intr_msk
= RTL838X_DMA_IF_INTR_MSK
,
536 .dma_if_ctrl
= RTL838X_DMA_IF_CTRL
,
537 .mac_force_mode_ctrl
= RTL838X_MAC_FORCE_MODE_CTRL
,
538 .dma_rx_base
= RTL838X_DMA_RX_BASE
,
539 .dma_tx_base
= RTL838X_DMA_TX_BASE
,
540 .dma_if_rx_ring_size
= rtl838x_dma_if_rx_ring_size
,
541 .dma_if_rx_ring_cntr
= rtl838x_dma_if_rx_ring_cntr
,
542 .dma_if_rx_cur
= RTL838X_DMA_IF_RX_CUR
,
543 .rst_glb_ctrl
= RTL838X_RST_GLB_CTRL_0
,
544 .get_mac_link_sts
= rtl838x_get_mac_link_sts
,
545 .get_mac_link_dup_sts
= rtl838x_get_mac_link_dup_sts
,
546 .get_mac_link_spd_sts
= rtl838x_get_mac_link_spd_sts
,
547 .get_mac_rx_pause_sts
= rtl838x_get_mac_rx_pause_sts
,
548 .get_mac_tx_pause_sts
= rtl838x_get_mac_tx_pause_sts
,
550 .l2_tbl_flush_ctrl
= RTL838X_L2_TBL_FLUSH_CTRL
,
551 .update_cntr
= rtl838x_update_cntr
,
552 .create_tx_header
= rtl838x_create_tx_header
,
553 .decode_tag
= rtl838x_decode_tag
,
556 static const struct rtl838x_eth_reg rtl839x_reg
= {
557 .net_irq
= rtl83xx_net_irq
,
558 .mac_port_ctrl
= rtl839x_mac_port_ctrl
,
559 .dma_if_intr_sts
= RTL839X_DMA_IF_INTR_STS
,
560 .dma_if_intr_msk
= RTL839X_DMA_IF_INTR_MSK
,
561 .dma_if_ctrl
= RTL839X_DMA_IF_CTRL
,
562 .mac_force_mode_ctrl
= RTL839X_MAC_FORCE_MODE_CTRL
,
563 .dma_rx_base
= RTL839X_DMA_RX_BASE
,
564 .dma_tx_base
= RTL839X_DMA_TX_BASE
,
565 .dma_if_rx_ring_size
= rtl839x_dma_if_rx_ring_size
,
566 .dma_if_rx_ring_cntr
= rtl839x_dma_if_rx_ring_cntr
,
567 .dma_if_rx_cur
= RTL839X_DMA_IF_RX_CUR
,
568 .rst_glb_ctrl
= RTL839X_RST_GLB_CTRL
,
569 .get_mac_link_sts
= rtl839x_get_mac_link_sts
,
570 .get_mac_link_dup_sts
= rtl839x_get_mac_link_dup_sts
,
571 .get_mac_link_spd_sts
= rtl839x_get_mac_link_spd_sts
,
572 .get_mac_rx_pause_sts
= rtl839x_get_mac_rx_pause_sts
,
573 .get_mac_tx_pause_sts
= rtl839x_get_mac_tx_pause_sts
,
575 .l2_tbl_flush_ctrl
= RTL839X_L2_TBL_FLUSH_CTRL
,
576 .update_cntr
= rtl839x_update_cntr
,
577 .create_tx_header
= rtl839x_create_tx_header
,
578 .decode_tag
= rtl839x_decode_tag
,
581 static const struct rtl838x_eth_reg rtl930x_reg
= {
582 .net_irq
= rtl93xx_net_irq
,
583 .mac_port_ctrl
= rtl930x_mac_port_ctrl
,
584 .dma_if_intr_rx_runout_sts
= RTL930X_DMA_IF_INTR_RX_RUNOUT_STS
,
585 .dma_if_intr_rx_done_sts
= RTL930X_DMA_IF_INTR_RX_DONE_STS
,
586 .dma_if_intr_tx_done_sts
= RTL930X_DMA_IF_INTR_TX_DONE_STS
,
587 .dma_if_intr_rx_runout_msk
= RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK
,
588 .dma_if_intr_rx_done_msk
= RTL930X_DMA_IF_INTR_RX_DONE_MSK
,
589 .dma_if_intr_tx_done_msk
= RTL930X_DMA_IF_INTR_TX_DONE_MSK
,
590 .l2_ntfy_if_intr_sts
= RTL930X_L2_NTFY_IF_INTR_STS
,
591 .l2_ntfy_if_intr_msk
= RTL930X_L2_NTFY_IF_INTR_MSK
,
592 .dma_if_ctrl
= RTL930X_DMA_IF_CTRL
,
593 .mac_force_mode_ctrl
= RTL930X_MAC_FORCE_MODE_CTRL
,
594 .dma_rx_base
= RTL930X_DMA_RX_BASE
,
595 .dma_tx_base
= RTL930X_DMA_TX_BASE
,
596 .dma_if_rx_ring_size
= rtl930x_dma_if_rx_ring_size
,
597 .dma_if_rx_ring_cntr
= rtl930x_dma_if_rx_ring_cntr
,
598 .dma_if_rx_cur
= RTL930X_DMA_IF_RX_CUR
,
599 .rst_glb_ctrl
= RTL930X_RST_GLB_CTRL_0
,
600 .get_mac_link_sts
= rtl930x_get_mac_link_sts
,
601 .get_mac_link_dup_sts
= rtl930x_get_mac_link_dup_sts
,
602 .get_mac_link_spd_sts
= rtl930x_get_mac_link_spd_sts
,
603 .get_mac_rx_pause_sts
= rtl930x_get_mac_rx_pause_sts
,
604 .get_mac_tx_pause_sts
= rtl930x_get_mac_tx_pause_sts
,
605 .mac
= RTL930X_MAC_L2_ADDR_CTRL
,
606 .l2_tbl_flush_ctrl
= RTL930X_L2_TBL_FLUSH_CTRL
,
607 .update_cntr
= rtl930x_update_cntr
,
608 .create_tx_header
= rtl930x_create_tx_header
,
609 .decode_tag
= rtl930x_decode_tag
,
612 static const struct rtl838x_eth_reg rtl931x_reg
= {
613 .net_irq
= rtl93xx_net_irq
,
614 .mac_port_ctrl
= rtl931x_mac_port_ctrl
,
615 .dma_if_intr_rx_runout_sts
= RTL931X_DMA_IF_INTR_RX_RUNOUT_STS
,
616 .dma_if_intr_rx_done_sts
= RTL931X_DMA_IF_INTR_RX_DONE_STS
,
617 .dma_if_intr_tx_done_sts
= RTL931X_DMA_IF_INTR_TX_DONE_STS
,
618 .dma_if_intr_rx_runout_msk
= RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK
,
619 .dma_if_intr_rx_done_msk
= RTL931X_DMA_IF_INTR_RX_DONE_MSK
,
620 .dma_if_intr_tx_done_msk
= RTL931X_DMA_IF_INTR_TX_DONE_MSK
,
621 .l2_ntfy_if_intr_sts
= RTL931X_L2_NTFY_IF_INTR_STS
,
622 .l2_ntfy_if_intr_msk
= RTL931X_L2_NTFY_IF_INTR_MSK
,
623 .dma_if_ctrl
= RTL931X_DMA_IF_CTRL
,
624 .mac_force_mode_ctrl
= RTL931X_MAC_FORCE_MODE_CTRL
,
625 .dma_rx_base
= RTL931X_DMA_RX_BASE
,
626 .dma_tx_base
= RTL931X_DMA_TX_BASE
,
627 .dma_if_rx_ring_size
= rtl931x_dma_if_rx_ring_size
,
628 .dma_if_rx_ring_cntr
= rtl931x_dma_if_rx_ring_cntr
,
629 .dma_if_rx_cur
= RTL931X_DMA_IF_RX_CUR
,
630 .rst_glb_ctrl
= RTL931X_RST_GLB_CTRL
,
631 .get_mac_link_sts
= rtl931x_get_mac_link_sts
,
632 .get_mac_link_dup_sts
= rtl931x_get_mac_link_dup_sts
,
633 .get_mac_link_spd_sts
= rtl931x_get_mac_link_spd_sts
,
634 .get_mac_rx_pause_sts
= rtl931x_get_mac_rx_pause_sts
,
635 .get_mac_tx_pause_sts
= rtl931x_get_mac_tx_pause_sts
,
636 .mac
= RTL931X_MAC_L2_ADDR_CTRL
,
637 .l2_tbl_flush_ctrl
= RTL931X_L2_TBL_FLUSH_CTRL
,
638 .update_cntr
= rtl931x_update_cntr
,
639 .create_tx_header
= rtl931x_create_tx_header
,
640 .decode_tag
= rtl931x_decode_tag
,
643 static void rtl838x_hw_reset(struct rtl838x_eth_priv
*priv
)
649 pr_info("RESETTING %x, CPU_PORT %d\n", priv
->family_id
, priv
->cpu_port
);
650 sw_w32_mask(0x3, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
653 /* Disable and clear interrupts */
654 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
) {
655 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_runout_msk
);
656 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_sts
);
657 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_done_msk
);
658 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_sts
);
659 sw_w32(0x00000000, priv
->r
->dma_if_intr_tx_done_msk
);
660 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_sts
);
662 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
663 sw_w32(0xffffffff, priv
->r
->dma_if_intr_sts
);
666 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
667 /* Preserve L2 notification and NBUF settings */
668 int_saved
= sw_r32(priv
->r
->dma_if_intr_msk
);
669 nbuf
= sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
671 /* Disable link change interrupt on RTL839x */
672 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG
);
673 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG
+ 4);
675 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
676 sw_w32(0xffffffff, priv
->r
->dma_if_intr_sts
);
679 /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */
680 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
685 sw_w32_mask(0, reset_mask
, priv
->r
->rst_glb_ctrl
);
687 do { /* Wait for reset of NIC and Queues done */
689 } while (sw_r32(priv
->r
->rst_glb_ctrl
) & reset_mask
);
692 /* Setup Head of Line */
693 if (priv
->family_id
== RTL8380_FAMILY_ID
)
694 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE
); // Disabled on RTL8380
695 if (priv
->family_id
== RTL8390_FAMILY_ID
)
696 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR
);
697 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
) {
698 for (i
= 0; i
< priv
->rxrings
; i
++) {
700 sw_w32_mask(0x3ff << pos
, 0, priv
->r
->dma_if_rx_ring_size(i
));
701 sw_w32_mask(0x3ff << pos
, priv
->rxringlen
,
702 priv
->r
->dma_if_rx_ring_cntr(i
));
706 /* Re-enable link change interrupt */
707 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
708 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG
);
709 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG
+ 4);
710 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG
);
711 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG
+ 4);
713 /* Restore notification settings: on RTL838x these bits are null */
714 sw_w32_mask(7 << 20, int_saved
& (7 << 20), priv
->r
->dma_if_intr_msk
);
715 sw_w32(nbuf
, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
719 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv
*priv
)
722 struct ring_b
*ring
= priv
->membase
;
724 for (i
= 0; i
< priv
->rxrings
; i
++)
725 sw_w32(KSEG1ADDR(&ring
->rx_r
[i
]), priv
->r
->dma_rx_base
+ i
* 4);
727 for (i
= 0; i
< TXRINGS
; i
++)
728 sw_w32(KSEG1ADDR(&ring
->tx_r
[i
]), priv
->r
->dma_tx_base
+ i
* 4);
731 static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
733 /* Disable Head of Line features for all RX rings */
734 sw_w32(0xffffffff, priv
->r
->dma_if_rx_ring_size(0));
736 /* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
737 sw_w32(0x06400020, priv
->r
->dma_if_ctrl
);
739 /* Enable RX done, RX overflow and TX done interrupts */
740 sw_w32(0xfffff, priv
->r
->dma_if_intr_msk
);
742 /* Enable DMA, engine expects empty FCS field */
743 sw_w32_mask(0, RX_EN
| TX_EN
, priv
->r
->dma_if_ctrl
);
745 /* Restart TX/RX to CPU port */
746 sw_w32_mask(0x0, 0x3, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
747 /* Set Speed, duplex, flow control
748 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
749 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
752 sw_w32(0x6192F, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
754 /* Enable CRC checks on CPU-port */
755 sw_w32_mask(0, BIT(3), priv
->r
->mac_port_ctrl(priv
->cpu_port
));
758 static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
760 /* Setup CPU-Port: RX Buffer */
761 sw_w32(0x0000c808, priv
->r
->dma_if_ctrl
);
763 /* Enable Notify, RX done, RX overflow and TX done interrupts */
764 sw_w32(0x007fffff, priv
->r
->dma_if_intr_msk
); // Notify IRQ!
767 sw_w32_mask(0, RX_EN
| TX_EN
, priv
->r
->dma_if_ctrl
);
769 /* Restart TX/RX to CPU port, enable CRC checking */
770 sw_w32_mask(0x0, 0x3 | BIT(3), priv
->r
->mac_port_ctrl(priv
->cpu_port
));
772 /* CPU port joins Lookup Miss Flooding Portmask */
773 // TODO: The code below should also work for the RTL838x
774 sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL
);
775 sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
776 sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL
);
778 /* Force CPU port link up */
779 sw_w32_mask(0, 3, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
782 static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
787 /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
788 sw_w32(0x06400040, priv
->r
->dma_if_ctrl
);
790 for (i
= 0; i
< priv
->rxrings
; i
++) {
792 sw_w32_mask(0x3ff << pos
, priv
->rxringlen
<< pos
, priv
->r
->dma_if_rx_ring_size(i
));
794 // Some SoCs have issues with missing underflow protection
795 v
= (sw_r32(priv
->r
->dma_if_rx_ring_cntr(i
)) >> pos
) & 0x3ff;
796 sw_w32_mask(0x3ff << pos
, v
, priv
->r
->dma_if_rx_ring_cntr(i
));
799 /* Enable Notify, RX done, RX overflow and TX done interrupts */
800 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_msk
);
801 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_msk
);
802 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_msk
);
805 sw_w32_mask(0, RX_EN_93XX
| TX_EN_93XX
, priv
->r
->dma_if_ctrl
);
807 /* Restart TX/RX to CPU port, enable CRC checking */
808 sw_w32_mask(0x0, 0x3 | BIT(4), priv
->r
->mac_port_ctrl(priv
->cpu_port
));
810 if (priv
->family_id
== RTL9300_FAMILY_ID
)
811 sw_w32_mask(0, BIT(priv
->cpu_port
), RTL930X_L2_UNKN_UC_FLD_PMSK
);
813 sw_w32_mask(0, BIT(priv
->cpu_port
), RTL931X_L2_UNKN_UC_FLD_PMSK
);
815 if (priv
->family_id
== RTL9300_FAMILY_ID
)
816 sw_w32(0x217, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
818 sw_w32(0x2a1d, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
821 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv
*priv
, struct ring_b
*ring
)
827 for (i
= 0; i
< priv
->rxrings
; i
++) {
828 for (j
= 0; j
< priv
->rxringlen
; j
++) {
829 h
= &ring
->rx_header
[i
][j
];
830 memset(h
, 0, sizeof(struct p_hdr
));
831 h
->buf
= (u8
*)KSEG1ADDR(ring
->rx_space
832 + i
* priv
->rxringlen
* RING_BUFFER
834 h
->size
= RING_BUFFER
;
835 /* All rings owned by switch, last one wraps */
836 ring
->rx_r
[i
][j
] = KSEG1ADDR(h
) | 1
837 | (j
== (priv
->rxringlen
- 1) ? WRAP
: 0);
842 for (i
= 0; i
< TXRINGS
; i
++) {
843 for (j
= 0; j
< TXRINGLEN
; j
++) {
844 h
= &ring
->tx_header
[i
][j
];
845 memset(h
, 0, sizeof(struct p_hdr
));
846 h
->buf
= (u8
*)KSEG1ADDR(ring
->tx_space
847 + i
* TXRINGLEN
* RING_BUFFER
849 h
->size
= RING_BUFFER
;
850 ring
->tx_r
[i
][j
] = KSEG1ADDR(&ring
->tx_header
[i
][j
]);
852 /* Last header is wrapping around */
853 ring
->tx_r
[i
][j
-1] |= WRAP
;
858 static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv
*priv
)
861 struct notify_b
*b
= priv
->membase
+ sizeof(struct ring_b
);
863 for (i
= 0; i
< NOTIFY_BLOCKS
; i
++)
864 b
->ring
[i
] = KSEG1ADDR(&b
->blocks
[i
]) | 1 | (i
== (NOTIFY_BLOCKS
- 1) ? WRAP
: 0);
866 sw_w32((u32
) b
->ring
, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
867 sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL
);
869 /* Setup notification events */
870 sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0
); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
871 sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL
); // SUSPEND_NOTIFICATION_EN
873 /* Enable Notification */
874 sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL
);
878 static int rtl838x_eth_open(struct net_device
*ndev
)
881 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
882 struct ring_b
*ring
= priv
->membase
;
885 pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
886 __func__
, priv
->rxrings
, priv
->rxringlen
, TXRINGS
, TXRINGLEN
);
888 spin_lock_irqsave(&priv
->lock
, flags
);
889 rtl838x_hw_reset(priv
);
890 rtl838x_setup_ring_buffer(priv
, ring
);
891 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
892 rtl839x_setup_notify_ring_buffer(priv
);
893 /* Make sure the ring structure is visible to the ASIC */
898 rtl838x_hw_ring_setup(priv
);
899 phylink_start(priv
->phylink
);
901 for (i
= 0; i
< priv
->rxrings
; i
++)
902 napi_enable(&priv
->rx_qs
[i
].napi
);
904 switch (priv
->family_id
) {
905 case RTL8380_FAMILY_ID
:
906 rtl838x_hw_en_rxtx(priv
);
907 /* Trap IGMP/MLD traffic to CPU-Port */
908 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL
);
909 /* Flush learned FDB entries on link down of a port */
910 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0
);
913 case RTL8390_FAMILY_ID
:
914 rtl839x_hw_en_rxtx(priv
);
915 // Trap MLD and IGMP messages to CPU_PORT
916 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL
);
917 /* Flush learned FDB entries on link down of a port */
918 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0
);
921 case RTL9300_FAMILY_ID
:
922 rtl93xx_hw_en_rxtx(priv
);
923 /* Flush learned FDB entries on link down of a port */
924 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL
);
925 // Trap MLD and IGMP messages to CPU_PORT
926 sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL
);
929 case RTL9310_FAMILY_ID
:
930 rtl93xx_hw_en_rxtx(priv
);
932 // Trap MLD and IGMP messages to CPU_PORT
933 sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL
);
935 // Disable External CPU access to switch, clear EXT_CPU_EN
936 sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2
);
939 sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL
);
943 netif_tx_start_all_queues(ndev
);
945 spin_unlock_irqrestore(&priv
->lock
, flags
);
950 static void rtl838x_hw_stop(struct rtl838x_eth_priv
*priv
)
952 u32 force_mac
= priv
->family_id
== RTL8380_FAMILY_ID
? 0x6192C : 0x75;
953 u32 clear_irq
= priv
->family_id
== RTL8380_FAMILY_ID
? 0x000fffff : 0x007fffff;
956 // Disable RX/TX from/to CPU-port
957 sw_w32_mask(0x3, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
959 /* Disable traffic */
960 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
961 sw_w32_mask(RX_EN_93XX
| TX_EN_93XX
, 0, priv
->r
->dma_if_ctrl
);
963 sw_w32_mask(RX_EN
| TX_EN
, 0, priv
->r
->dma_if_ctrl
);
964 mdelay(200); // Test, whether this is needed
966 /* Block all ports */
967 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
968 sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
969 sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
970 sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0
);
973 /* Flush L2 address cache */
974 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
975 for (i
= 0; i
<= priv
->cpu_port
; i
++) {
976 sw_w32(1 << 26 | 1 << 23 | i
<< 5, priv
->r
->l2_tbl_flush_ctrl
);
977 do { } while (sw_r32(priv
->r
->l2_tbl_flush_ctrl
) & (1 << 26));
979 } else if (priv
->family_id
== RTL8390_FAMILY_ID
) {
980 for (i
= 0; i
<= priv
->cpu_port
; i
++) {
981 sw_w32(1 << 28 | 1 << 25 | i
<< 5, priv
->r
->l2_tbl_flush_ctrl
);
982 do { } while (sw_r32(priv
->r
->l2_tbl_flush_ctrl
) & (1 << 28));
985 // TODO: L2 flush register is 64 bit on RTL931X and 930X
987 /* CPU-Port: Link down */
988 if (priv
->family_id
== RTL8380_FAMILY_ID
|| priv
->family_id
== RTL8390_FAMILY_ID
)
989 sw_w32(force_mac
, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
990 else if (priv
->family_id
== RTL9300_FAMILY_ID
)
991 sw_w32_mask(0x3, 0, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
*4);
992 else if (priv
->family_id
== RTL9310_FAMILY_ID
)
993 sw_w32_mask(BIT(0) | BIT(9), 0, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
*4);
996 /* Disable all TX/RX interrupts */
997 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
) {
998 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_runout_msk
);
999 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_sts
);
1000 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_done_msk
);
1001 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_sts
);
1002 sw_w32(0x00000000, priv
->r
->dma_if_intr_tx_done_msk
);
1003 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_sts
);
1005 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
1006 sw_w32(clear_irq
, priv
->r
->dma_if_intr_sts
);
1009 /* Disable TX/RX DMA */
1010 sw_w32(0x00000000, priv
->r
->dma_if_ctrl
);
1014 static int rtl838x_eth_stop(struct net_device
*ndev
)
1016 unsigned long flags
;
1018 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1020 pr_info("in %s\n", __func__
);
1022 phylink_stop(priv
->phylink
);
1023 rtl838x_hw_stop(priv
);
1025 for (i
= 0; i
< priv
->rxrings
; i
++)
1026 napi_disable(&priv
->rx_qs
[i
].napi
);
1028 netif_tx_stop_all_queues(ndev
);
1033 static void rtl838x_eth_set_multicast_list(struct net_device
*ndev
)
1036 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1037 * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF
1039 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1040 sw_w32(0x0, RTL838X_RMA_CTRL_0
);
1041 sw_w32(0x0, RTL838X_RMA_CTRL_1
);
1043 if (ndev
->flags
& IFF_ALLMULTI
)
1044 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0
);
1045 if (ndev
->flags
& IFF_PROMISC
) {
1046 sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0
);
1047 sw_w32(0x7fff, RTL838X_RMA_CTRL_1
);
1051 static void rtl839x_eth_set_multicast_list(struct net_device
*ndev
)
1054 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1055 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1056 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1057 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1059 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1060 sw_w32(0x0, RTL839X_RMA_CTRL_0
);
1061 sw_w32(0x0, RTL839X_RMA_CTRL_1
);
1062 sw_w32(0x0, RTL839X_RMA_CTRL_2
);
1063 sw_w32(0x0, RTL839X_RMA_CTRL_3
);
1065 if (ndev
->flags
& IFF_ALLMULTI
) {
1066 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0
);
1067 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1
);
1068 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2
);
1070 if (ndev
->flags
& IFF_PROMISC
) {
1071 sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0
);
1072 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1
);
1073 sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2
);
1074 sw_w32(0x3ff, RTL839X_RMA_CTRL_3
);
1078 static void rtl930x_eth_set_multicast_list(struct net_device
*ndev
)
1081 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1082 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1083 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00
1084 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1086 if (ndev
->flags
& (IFF_ALLMULTI
| IFF_PROMISC
)) {
1087 sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0
);
1088 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1
);
1089 sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2
);
1091 sw_w32(0x0, RTL930X_RMA_CTRL_0
);
1092 sw_w32(0x0, RTL930X_RMA_CTRL_1
);
1093 sw_w32(0x0, RTL930X_RMA_CTRL_2
);
1097 static void rtl931x_eth_set_multicast_list(struct net_device
*ndev
)
1100 * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F})
1101 * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC
1102 * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00.
1103 * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0)
1105 if (ndev
->flags
& (IFF_ALLMULTI
| IFF_PROMISC
)) {
1106 sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0
);
1107 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1
);
1108 sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2
);
1110 sw_w32(0x0, RTL931X_RMA_CTRL_0
);
1111 sw_w32(0x0, RTL931X_RMA_CTRL_1
);
1112 sw_w32(0x0, RTL931X_RMA_CTRL_2
);
1116 static void rtl838x_eth_tx_timeout(struct net_device
*ndev
, unsigned int txqueue
)
1118 unsigned long flags
;
1119 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1121 pr_warn("%s\n", __func__
);
1122 spin_lock_irqsave(&priv
->lock
, flags
);
1123 rtl838x_hw_stop(priv
);
1124 rtl838x_hw_ring_setup(priv
);
1125 rtl838x_hw_en_rxtx(priv
);
1126 netif_trans_update(ndev
);
1127 netif_start_queue(ndev
);
1128 spin_unlock_irqrestore(&priv
->lock
, flags
);
1131 static int rtl838x_eth_tx(struct sk_buff
*skb
, struct net_device
*dev
)
1134 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1135 struct ring_b
*ring
= priv
->membase
;
1138 unsigned long flags
;
1141 int q
= skb_get_queue_mapping(skb
) % TXRINGS
;
1143 if (q
) // Check for high prio queue
1144 pr_debug("SKB priority: %d\n", skb
->priority
);
1146 spin_lock_irqsave(&priv
->lock
, flags
);
1149 /* Check for DSA tagging at the end of the buffer */
1150 if (netdev_uses_dsa(dev
) && skb
->data
[len
-4] == 0x80
1151 && skb
->data
[len
-3] < priv
->cpu_port
1152 && skb
->data
[len
-2] == 0x10
1153 && skb
->data
[len
-1] == 0x00) {
1154 /* Reuse tag space for CRC if possible */
1155 dest_port
= skb
->data
[len
-3];
1156 skb
->data
[len
-4] = skb
->data
[len
-3] = skb
->data
[len
-2] = skb
->data
[len
-1] = 0x00;
1160 len
+= 4; // Add space for CRC
1162 if (skb_padto(skb
, len
)) {
1167 /* We can send this packet if CPU owns the descriptor */
1168 if (!(ring
->tx_r
[q
][ring
->c_tx
[q
]] & 0x1)) {
1170 /* Set descriptor for tx */
1171 h
= &ring
->tx_header
[q
][ring
->c_tx
[q
]];
1174 // On RTL8380 SoCs, small packet lengths being sent need adjustments
1175 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1176 if (len
< ETH_ZLEN
- 4)
1181 priv
->r
->create_tx_header(h
, dest_port
, skb
->priority
>> 1);
1183 /* Copy packet data to tx buffer */
1184 memcpy((void *)KSEG1ADDR(h
->buf
), skb
->data
, len
);
1185 /* Make sure packet data is visible to ASIC */
1188 /* Hand over to switch */
1189 ring
->tx_r
[q
][ring
->c_tx
[q
]] |= 1;
1191 // Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
1192 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1193 for (i
= 0; i
< 10; i
++) {
1194 val
= sw_r32(priv
->r
->dma_if_ctrl
);
1195 if ((val
& 0xc) == 0xc)
1200 /* Tell switch to send data */
1201 if (priv
->family_id
== RTL9310_FAMILY_ID
1202 || priv
->family_id
== RTL9300_FAMILY_ID
) {
1203 // Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
1205 sw_w32_mask(0, BIT(2), priv
->r
->dma_if_ctrl
);
1207 sw_w32_mask(0, BIT(3), priv
->r
->dma_if_ctrl
);
1209 sw_w32_mask(0, TX_DO
, priv
->r
->dma_if_ctrl
);
1212 dev
->stats
.tx_packets
++;
1213 dev
->stats
.tx_bytes
+= len
;
1215 ring
->c_tx
[q
] = (ring
->c_tx
[q
] + 1) % TXRINGLEN
;
1218 dev_warn(&priv
->pdev
->dev
, "Data is owned by switch\n");
1219 ret
= NETDEV_TX_BUSY
;
1222 spin_unlock_irqrestore(&priv
->lock
, flags
);
1227 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1228 * so we do round-robin
1230 u16
rtl83xx_pick_tx_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1231 struct net_device
*sb_dev
)
1236 return last
% TXRINGS
;
1240 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1242 u16
rtl93xx_pick_tx_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1243 struct net_device
*sb_dev
)
1245 if (skb
->priority
>= TC_PRIO_CONTROL
)
1250 static int rtl838x_hw_receive(struct net_device
*dev
, int r
, int budget
)
1252 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1253 struct ring_b
*ring
= priv
->membase
;
1254 struct sk_buff
*skb
;
1256 unsigned long flags
;
1257 int i
, len
, work_done
= 0;
1258 u8
*data
, *skb_data
;
1262 bool dsa
= netdev_uses_dsa(dev
);
1265 pr_debug("---------------------------------------------------------- RX - %d\n", r
);
1266 spin_lock_irqsave(&priv
->lock
, flags
);
1267 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
1270 if ((ring
->rx_r
[r
][ring
->c_rx
[r
]] & 0x1)) {
1271 if (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
) {
1272 netdev_warn(dev
, "Ring contention: r: %x, last %x, cur %x\n",
1273 r
, (uint32_t)last
, (u32
) &ring
->rx_r
[r
][ring
->c_rx
[r
]]);
1278 h
= &ring
->rx_header
[r
][ring
->c_rx
[r
]];
1279 data
= (u8
*)KSEG1ADDR(h
->buf
);
1285 len
-= 4; /* strip the CRC */
1286 /* Add 4 bytes for cpu_tag */
1290 skb
= netdev_alloc_skb(dev
, len
+ 4);
1291 skb_reserve(skb
, NET_IP_ALIGN
);
1294 /* BUG: Prevent bug on RTL838x SoCs*/
1295 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1296 sw_w32(0xffffffff, priv
->r
->dma_if_rx_ring_size(0));
1297 for (i
= 0; i
< priv
->rxrings
; i
++) {
1298 /* Update each ring cnt */
1299 val
= sw_r32(priv
->r
->dma_if_rx_ring_cntr(i
));
1300 sw_w32(val
, priv
->r
->dma_if_rx_ring_cntr(i
));
1304 skb_data
= skb_put(skb
, len
);
1305 /* Make sure data is visible */
1307 memcpy(skb
->data
, (u8
*)KSEG1ADDR(data
), len
);
1308 /* Overwrite CRC with cpu_tag */
1310 priv
->r
->decode_tag(h
, &tag
);
1311 skb
->data
[len
-4] = 0x80;
1312 skb
->data
[len
-3] = tag
.port
;
1313 skb
->data
[len
-2] = 0x10;
1314 skb
->data
[len
-1] = 0x00;
1315 if (tag
.l2_offloaded
)
1316 skb
->data
[len
-3] |= 0x40;
1320 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1321 tag
.queue
, len
, tag
.reason
, tag
.port
);
1323 skb
->protocol
= eth_type_trans(skb
, dev
);
1324 if (dev
->features
& NETIF_F_RXCSUM
) {
1326 skb_checksum_none_assert(skb
);
1328 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1330 dev
->stats
.rx_packets
++;
1331 dev
->stats
.rx_bytes
+= len
;
1333 list_add_tail(&skb
->list
, &rx_list
);
1335 if (net_ratelimit())
1336 dev_warn(&dev
->dev
, "low on memory - packet dropped\n");
1337 dev
->stats
.rx_dropped
++;
1340 /* Reset header structure */
1341 memset(h
, 0, sizeof(struct p_hdr
));
1343 h
->size
= RING_BUFFER
;
1345 ring
->rx_r
[r
][ring
->c_rx
[r
]] = KSEG1ADDR(h
) | 0x1
1346 | (ring
->c_rx
[r
] == (priv
->rxringlen
- 1) ? WRAP
: 0x1);
1347 ring
->c_rx
[r
] = (ring
->c_rx
[r
] + 1) % priv
->rxringlen
;
1348 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
1349 } while (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
&& work_done
< budget
);
1351 netif_receive_skb_list(&rx_list
);
1354 priv
->r
->update_cntr(r
, 0);
1356 spin_unlock_irqrestore(&priv
->lock
, flags
);
1361 static int rtl838x_poll_rx(struct napi_struct
*napi
, int budget
)
1363 struct rtl838x_rx_q
*rx_q
= container_of(napi
, struct rtl838x_rx_q
, napi
);
1364 struct rtl838x_eth_priv
*priv
= rx_q
->priv
;
1369 while (work_done
< budget
) {
1370 work
= rtl838x_hw_receive(priv
->netdev
, r
, budget
- work_done
);
1376 if (work_done
< budget
) {
1377 napi_complete_done(napi
, work_done
);
1379 /* Enable RX interrupt */
1380 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
1381 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_msk
);
1383 sw_w32_mask(0, 0xf00ff | BIT(r
+ 8), priv
->r
->dma_if_intr_msk
);
1389 static void rtl838x_validate(struct phylink_config
*config
,
1390 unsigned long *supported
,
1391 struct phylink_link_state
*state
)
1393 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask
) = { 0, };
1395 pr_debug("In %s\n", __func__
);
1397 if (!phy_interface_mode_is_rgmii(state
->interface
) &&
1398 state
->interface
!= PHY_INTERFACE_MODE_1000BASEX
&&
1399 state
->interface
!= PHY_INTERFACE_MODE_MII
&&
1400 state
->interface
!= PHY_INTERFACE_MODE_REVMII
&&
1401 state
->interface
!= PHY_INTERFACE_MODE_GMII
&&
1402 state
->interface
!= PHY_INTERFACE_MODE_QSGMII
&&
1403 state
->interface
!= PHY_INTERFACE_MODE_INTERNAL
&&
1404 state
->interface
!= PHY_INTERFACE_MODE_SGMII
) {
1405 bitmap_zero(supported
, __ETHTOOL_LINK_MODE_MASK_NBITS
);
1406 pr_err("Unsupported interface: %d\n", state
->interface
);
1410 /* Allow all the expected bits */
1411 phylink_set(mask
, Autoneg
);
1412 phylink_set_port_modes(mask
);
1413 phylink_set(mask
, Pause
);
1414 phylink_set(mask
, Asym_Pause
);
1416 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1417 * including Half duplex
1419 if (state
->interface
!= PHY_INTERFACE_MODE_MII
&&
1420 state
->interface
!= PHY_INTERFACE_MODE_REVMII
) {
1421 phylink_set(mask
, 1000baseT_Full
);
1422 phylink_set(mask
, 1000baseT_Half
);
1425 phylink_set(mask
, 10baseT_Half
);
1426 phylink_set(mask
, 10baseT_Full
);
1427 phylink_set(mask
, 100baseT_Half
);
1428 phylink_set(mask
, 100baseT_Full
);
1430 bitmap_and(supported
, supported
, mask
,
1431 __ETHTOOL_LINK_MODE_MASK_NBITS
);
1432 bitmap_and(state
->advertising
, state
->advertising
, mask
,
1433 __ETHTOOL_LINK_MODE_MASK_NBITS
);
/* phylink mac_config callback.
 * This is only being called for the master device, i.e. the CPU port.
 * No hardware configuration is required here; log the mode for debugging.
 */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	pr_info("In %s, mode %x\n", __func__, mode);
}
1448 static void rtl838x_mac_an_restart(struct phylink_config
*config
)
1450 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1451 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1453 /* This works only on RTL838x chips */
1454 if (priv
->family_id
!= RTL8380_FAMILY_ID
)
1457 pr_debug("In %s\n", __func__
);
1458 /* Restart by disabling and re-enabling link */
1459 sw_w32(0x6192D, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
1461 sw_w32(0x6192F, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
1464 static void rtl838x_mac_pcs_get_state(struct phylink_config
*config
,
1465 struct phylink_link_state
*state
)
1468 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1469 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1470 int port
= priv
->cpu_port
;
1472 pr_info("In %s\n", __func__
);
1474 state
->link
= priv
->r
->get_mac_link_sts(port
) ? 1 : 0;
1475 state
->duplex
= priv
->r
->get_mac_link_dup_sts(port
) ? 1 : 0;
1477 pr_info("%s link status is %d\n", __func__
, state
->link
);
1478 speed
= priv
->r
->get_mac_link_spd_sts(port
);
1481 state
->speed
= SPEED_10
;
1484 state
->speed
= SPEED_100
;
1487 state
->speed
= SPEED_1000
;
1490 state
->speed
= SPEED_2500
;
1493 state
->speed
= SPEED_5000
;
1496 state
->speed
= SPEED_10000
;
1499 state
->speed
= SPEED_UNKNOWN
;
1503 state
->pause
&= (MLO_PAUSE_RX
| MLO_PAUSE_TX
);
1504 if (priv
->r
->get_mac_rx_pause_sts(port
))
1505 state
->pause
|= MLO_PAUSE_RX
;
1506 if (priv
->r
->get_mac_tx_pause_sts(port
))
1507 state
->pause
|= MLO_PAUSE_TX
;
1510 static void rtl838x_mac_link_down(struct phylink_config
*config
,
1512 phy_interface_t interface
)
1514 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1515 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1517 pr_debug("In %s\n", __func__
);
1518 /* Stop TX/RX to port */
1519 sw_w32_mask(0x03, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
1522 static void rtl838x_mac_link_up(struct phylink_config
*config
,
1523 struct phy_device
*phy
, unsigned int mode
,
1524 phy_interface_t interface
, int speed
, int duplex
,
1525 bool tx_pause
, bool rx_pause
)
1527 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1528 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1530 pr_debug("In %s\n", __func__
);
1531 /* Restart TX/RX to port */
1532 sw_w32_mask(0, 0x03, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
1535 static void rtl838x_set_mac_hw(struct net_device
*dev
, u8
*mac
)
1537 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1538 unsigned long flags
;
1540 spin_lock_irqsave(&priv
->lock
, flags
);
1541 pr_debug("In %s\n", __func__
);
1542 sw_w32((mac
[0] << 8) | mac
[1], priv
->r
->mac
);
1543 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5], priv
->r
->mac
+ 4);
1545 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1546 /* 2 more registers, ALE/MAC block */
1547 sw_w32((mac
[0] << 8) | mac
[1], RTL838X_MAC_ALE
);
1548 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
1549 (RTL838X_MAC_ALE
+ 4));
1551 sw_w32((mac
[0] << 8) | mac
[1], RTL838X_MAC2
);
1552 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
1555 spin_unlock_irqrestore(&priv
->lock
, flags
);
1558 static int rtl838x_set_mac_address(struct net_device
*dev
, void *p
)
1560 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1561 const struct sockaddr
*addr
= p
;
1562 u8
*mac
= (u8
*) (addr
->sa_data
);
1564 if (!is_valid_ether_addr(addr
->sa_data
))
1565 return -EADDRNOTAVAIL
;
1567 memcpy(dev
->dev_addr
, addr
->sa_data
, ETH_ALEN
);
1568 rtl838x_set_mac_hw(dev
, mac
);
1570 pr_info("Using MAC %08x%08x\n", sw_r32(priv
->r
->mac
), sw_r32(priv
->r
->mac
+ 4));
/* MAC init for RTL839x — currently a stub.
 * We will need to set up EEE and the egress-rate limitation here.
 */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	return 0;
}
1580 static int rtl8380_init_mac(struct rtl838x_eth_priv
*priv
)
1584 if (priv
->family_id
== 0x8390)
1585 return rtl8390_init_mac(priv
);
1587 // At present we do not know how to set up EEE on any other SoC than RTL8380
1588 if (priv
->family_id
!= 0x8380)
1591 pr_info("%s\n", __func__
);
1592 /* fix timer for EEE */
1593 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL
);
1594 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL
);
1596 /* Init VLAN. TODO: Understand what is being done, here */
1597 if (priv
->id
== 0x8382) {
1598 for (i
= 0; i
<= 28; i
++)
1599 sw_w32(0, 0xd57c + i
* 0x80);
1601 if (priv
->id
== 0x8380) {
1602 for (i
= 8; i
<= 28; i
++)
1603 sw_w32(0, 0xd57c + i
* 0x80);
1608 static int rtl838x_get_link_ksettings(struct net_device
*ndev
,
1609 struct ethtool_link_ksettings
*cmd
)
1611 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1613 pr_debug("%s called\n", __func__
);
1614 return phylink_ethtool_ksettings_get(priv
->phylink
, cmd
);
1617 static int rtl838x_set_link_ksettings(struct net_device
*ndev
,
1618 const struct ethtool_link_ksettings
*cmd
)
1620 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1622 pr_debug("%s called\n", __func__
);
1623 return phylink_ethtool_ksettings_set(priv
->phylink
, cmd
);
1626 static int rtl838x_mdio_read_paged(struct mii_bus
*bus
, int mii_id
, u16 page
, int regnum
)
1630 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1632 if (mii_id
>= 24 && mii_id
<= 27 && priv
->id
== 0x8380)
1633 return rtl838x_read_sds_phy(mii_id
, regnum
);
1635 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1636 err
= rtl838x_read_mmd_phy(mii_id
,
1637 mdiobus_c45_devad(regnum
),
1639 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id
,
1640 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1643 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id
, regnum
, val
, err
);
1644 err
= rtl838x_read_phy(mii_id
, page
, regnum
, &val
);
/* Unpaged MDIO read for RTL838x: page 0 of the paged accessor. */
static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1656 static int rtl839x_mdio_read_paged(struct mii_bus
*bus
, int mii_id
, u16 page
, int regnum
)
1660 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1662 if (mii_id
>= 48 && mii_id
<= 49 && priv
->id
== 0x8393)
1663 return rtl839x_read_sds_phy(mii_id
, regnum
);
1665 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1666 err
= rtl839x_read_mmd_phy(mii_id
,
1667 mdiobus_c45_devad(regnum
),
1669 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id
,
1670 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1673 err
= rtl839x_read_phy(mii_id
, page
, regnum
, &val
);
1674 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id
, regnum
, val
, err
);
/* Unpaged MDIO read for RTL839x: page 0 of the paged accessor. */
static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1686 static int rtl930x_mdio_read_paged(struct mii_bus
*bus
, int mii_id
, u16 page
, int regnum
)
1690 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1692 if (priv
->phy_is_internal
[mii_id
])
1693 return rtl930x_read_sds_phy(priv
->sds_id
[mii_id
], page
, regnum
);
1695 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1696 err
= rtl930x_read_mmd_phy(mii_id
,
1697 mdiobus_c45_devad(regnum
),
1699 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id
,
1700 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1703 err
= rtl930x_read_phy(mii_id
, page
, regnum
, &val
);
1704 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id
, regnum
, val
, err
);
/* Unpaged MDIO read for RTL930x: page 0 of the paged accessor. */
static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1716 static int rtl931x_mdio_read_paged(struct mii_bus
*bus
, int mii_id
, u16 page
, int regnum
)
1720 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1722 pr_debug("%s: In here, port %d\n", __func__
, mii_id
);
1723 if (priv
->phy_is_internal
[mii_id
]) {
1724 v
= rtl931x_read_sds_phy(priv
->sds_id
[mii_id
], page
, regnum
);
1732 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1733 err
= rtl931x_read_mmd_phy(mii_id
,
1734 mdiobus_c45_devad(regnum
),
1736 pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id
,
1737 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1740 err
= rtl931x_read_phy(mii_id
, page
, regnum
, &val
);
1741 pr_debug("PHY: %d register %x read %x, err %d\n", mii_id
, regnum
, val
, err
);
/* Unpaged MDIO read for RTL931x: page 0 of the paged accessor. */
static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum);
}
1755 static int rtl838x_mdio_write_paged(struct mii_bus
*bus
, int mii_id
, u16 page
,
1756 int regnum
, u16 value
)
1759 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1762 if (mii_id
>= 24 && mii_id
<= 27 && priv
->id
== 0x8380) {
1765 sw_w32(value
, RTL838X_SDS4_FIB_REG0
+ offset
+ (regnum
<< 2));
1769 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1770 err
= rtl838x_write_mmd_phy(mii_id
, mdiobus_c45_devad(regnum
),
1772 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id
,
1773 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1778 err
= rtl838x_write_phy(mii_id
, page
, regnum
, value
);
1779 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id
, regnum
, value
, err
);
1783 static int rtl838x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1784 int regnum
, u16 value
)
1786 return rtl838x_mdio_write_paged(bus
, mii_id
, 0, regnum
, value
);
1789 static int rtl839x_mdio_write_paged(struct mii_bus
*bus
, int mii_id
, u16 page
,
1790 int regnum
, u16 value
)
1792 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1795 if (mii_id
>= 48 && mii_id
<= 49 && priv
->id
== 0x8393)
1796 return rtl839x_write_sds_phy(mii_id
, regnum
, value
);
1798 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1799 err
= rtl839x_write_mmd_phy(mii_id
, mdiobus_c45_devad(regnum
),
1801 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id
,
1802 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1808 err
= rtl839x_write_phy(mii_id
, page
, regnum
, value
);
1809 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id
, regnum
, value
, err
);
1813 static int rtl839x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1814 int regnum
, u16 value
)
1816 return rtl839x_mdio_write_paged(bus
, mii_id
, 0, regnum
, value
);
1819 static int rtl930x_mdio_write_paged(struct mii_bus
*bus
, int mii_id
, u16 page
,
1820 int regnum
, u16 value
)
1822 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1825 if (priv
->phy_is_internal
[mii_id
])
1826 return rtl930x_write_sds_phy(priv
->sds_id
[mii_id
], page
, regnum
, value
);
1828 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
))
1829 return rtl930x_write_mmd_phy(mii_id
, mdiobus_c45_devad(regnum
),
1832 err
= rtl930x_write_phy(mii_id
, page
, regnum
, value
);
1833 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id
, regnum
, value
, err
);
1837 static int rtl930x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1838 int regnum
, u16 value
)
1840 return rtl930x_mdio_write_paged(bus
, mii_id
, 0, regnum
, value
);
1843 static int rtl931x_mdio_write_paged(struct mii_bus
*bus
, int mii_id
, u16 page
,
1844 int regnum
, u16 value
)
1846 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1849 if (priv
->phy_is_internal
[mii_id
])
1850 return rtl931x_write_sds_phy(priv
->sds_id
[mii_id
], page
, regnum
, value
);
1852 if (regnum
& (MII_ADDR_C45
| MII_ADDR_C22_MMD
)) {
1853 err
= rtl931x_write_mmd_phy(mii_id
, mdiobus_c45_devad(regnum
),
1855 pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id
,
1856 mdiobus_c45_devad(regnum
), mdiobus_c45_regad(regnum
),
1862 err
= rtl931x_write_phy(mii_id
, page
, regnum
, value
);
1863 pr_debug("PHY: %d register %x write %x, err %d\n", mii_id
, regnum
, value
, err
);
1867 static int rtl931x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1868 int regnum
, u16 value
)
1870 return rtl931x_mdio_write_paged(bus
, mii_id
, 0, regnum
, value
);
1873 static int rtl838x_mdio_reset(struct mii_bus
*bus
)
1875 pr_debug("%s called\n", __func__
);
1876 /* Disable MAC polling the PHY so that we can start configuration */
1877 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL
);
1879 /* Enable PHY control via SoC */
1880 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL
);
1882 // Probably should reset all PHYs here...
1886 static int rtl839x_mdio_reset(struct mii_bus
*bus
)
1890 pr_debug("%s called\n", __func__
);
1891 /* BUG: The following does not work, but should! */
1892 /* Disable MAC polling the PHY so that we can start configuration */
1893 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL
);
1894 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL
+ 4);
1895 /* Disable PHY polling via SoC */
1896 sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL
);
1898 // Probably should reset all PHYs here...
/* Per-port bit position of the MAC-type field within
 * RTL930X_SMI_MAC_TYPE_CTRL: ports 0-23 share 2-bit fields in groups of
 * four, ports 24-27 get individual wider fields at bits 12/15/18/21.
 * Indexed by port number; used by rtl930x_mdio_reset().
 */
u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6,
				     8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21};
1905 static int rtl930x_mdio_reset(struct mii_bus
*bus
)
1909 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1913 u32 private_poll_mask
= 0;
1915 bool uses_usxgmii
= false; // For the Aquantia PHYs
1916 bool uses_hisgmii
= false; // For the RTL8221/8226
1918 // Mapping of port to phy-addresses on an SMI bus
1919 poll_sel
[0] = poll_sel
[1] = 0;
1920 for (i
= 0; i
< RTL930X_CPU_PORT
; i
++) {
1921 if (priv
->smi_bus
[i
] > 3)
1924 sw_w32_mask(0x1f << pos
, priv
->smi_addr
[i
] << pos
,
1925 RTL930X_SMI_PORT0_5_ADDR
+ (i
/ 6) * 4);
1928 poll_sel
[i
/ 16] |= priv
->smi_bus
[i
] << pos
;
1929 poll_ctrl
|= BIT(20 + priv
->smi_bus
[i
]);
1932 // Configure which SMI bus is behind which port number
1933 sw_w32(poll_sel
[0], RTL930X_SMI_PORT0_15_POLLING_SEL
);
1934 sw_w32(poll_sel
[1], RTL930X_SMI_PORT16_27_POLLING_SEL
);
1936 // Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+)
1937 sw_w32_mask(poll_ctrl
, 0, RTL930X_SMI_GLB_CTRL
);
1939 // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
1940 for (i
= 0; i
< 4; i
++)
1941 if (priv
->smi_bus_isc45
[i
])
1942 c45_mask
|= BIT(i
+ 16);
1944 pr_info("c45_mask: %08x\n", c45_mask
);
1945 sw_w32_mask(0, c45_mask
, RTL930X_SMI_GLB_CTRL
);
1947 // Set the MAC type of each port according to the PHY-interface
1948 // Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0
1950 for (i
= 0; i
< RTL930X_CPU_PORT
; i
++) {
1951 switch (priv
->interfaces
[i
]) {
1952 case PHY_INTERFACE_MODE_10GBASER
:
1953 break; // Serdes: Value = 0
1955 case PHY_INTERFACE_MODE_HSGMII
:
1956 private_poll_mask
|= BIT(i
);
1958 case PHY_INTERFACE_MODE_USXGMII
:
1959 v
|= BIT(mac_type_bit
[i
]);
1960 uses_usxgmii
= true;
1963 case PHY_INTERFACE_MODE_QSGMII
:
1964 private_poll_mask
|= BIT(i
);
1965 v
|= 3 << mac_type_bit
[i
];
1972 sw_w32(v
, RTL930X_SMI_MAC_TYPE_CTRL
);
1974 // Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones)
1975 sw_w32(private_poll_mask
, RTL930X_SMI_PRVTE_POLLING_CTRL
);
1977 /* The following magic values are found in the port configuration, they seem to
1978 * define different ways of polling a PHY. The below is for the Aquantia PHYs of
1979 * the XGS1250 and the RTL8226 of the XGS1210 */
1981 sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG
);
1982 sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG
);
1983 sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG
);
1986 sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG
);
1987 sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG
);
1988 sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG
);
1991 pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__
,
1992 sw_r32(RTL930X_SMI_GLB_CTRL
));
1993 pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__
,
1994 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL
));
1995 pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__
,
1996 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL
));
1997 pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__
,
1998 sw_r32(RTL930X_SMI_MAC_TYPE_CTRL
));
1999 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__
,
2000 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG
));
2001 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__
,
2002 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG
));
2003 pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__
,
2004 sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG
));
2005 pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__
,
2006 sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL
));
2010 static int rtl931x_mdio_reset(struct mii_bus
*bus
)
2014 struct rtl838x_eth_priv
*priv
= bus
->priv
;
2020 pr_info("%s called\n", __func__
);
2021 // Disable port polling for configuration purposes
2022 sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL
);
2023 sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL
+ 4);
2026 mdc_on
[0] = mdc_on
[1] = mdc_on
[2] = mdc_on
[3] = false;
2027 // Mapping of port to phy-addresses on an SMI bus
2028 poll_sel
[0] = poll_sel
[1] = poll_sel
[2] = poll_sel
[3] = 0;
2029 for (i
= 0; i
< 56; i
++) {
2031 sw_w32_mask(0x1f << pos
, priv
->smi_addr
[i
] << pos
, RTL931X_SMI_PORT_ADDR
+ (i
/ 6) * 4);
2033 poll_sel
[i
/ 16] |= priv
->smi_bus
[i
] << pos
;
2034 poll_ctrl
|= BIT(20 + priv
->smi_bus
[i
]);
2035 mdc_on
[priv
->smi_bus
[i
]] = true;
2038 // Configure which SMI bus is behind which port number
2039 for (i
= 0; i
< 4; i
++) {
2040 pr_info("poll sel %d, %08x\n", i
, poll_sel
[i
]);
2041 sw_w32(poll_sel
[i
], RTL931X_SMI_PORT_POLLING_SEL
+ (i
* 4));
2044 // Configure which SMI busses
2045 pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__
, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2
));
2046 pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask
, sw_r32(RTL931X_SMI_GLB_CTRL0
));
2047 for (i
= 0; i
< 4; i
++) {
2048 // bus is polled in c45
2049 if (priv
->smi_bus_isc45
[i
])
2050 c45_mask
|= 0x2 << (i
* 2); // Std. C45, non-standard is 0x3
2051 // Enable bus access via MDC
2053 sw_w32_mask(0, BIT(9 + i
), RTL931X_MAC_L2_GLOBAL_CTRL2
);
2056 pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__
, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2
));
2057 pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask
, sw_r32(RTL931X_SMI_GLB_CTRL0
));
2059 /* We have a 10G PHY enable polling
2060 sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2);
2061 sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3);
2062 sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4);
2064 sw_w32_mask(0xff, c45_mask
, RTL931X_SMI_GLB_CTRL1
);
2069 static int rtl931x_chip_init(struct rtl838x_eth_priv
*priv
)
2071 pr_info("In %s\n", __func__
);
2073 // Initialize Encapsulation memory and wait until finished
2074 sw_w32(0x1, RTL931X_MEM_ENCAP_INIT
);
2075 do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT
) & 1);
2076 pr_info("%s: init ENCAP done\n", __func__
);
2078 // Initialize Managemen Information Base memory and wait until finished
2079 sw_w32(0x1, RTL931X_MEM_MIB_INIT
);
2080 do { } while (sw_r32(RTL931X_MEM_MIB_INIT
) & 1);
2081 pr_info("%s: init MIB done\n", __func__
);
2083 // Initialize ACL (PIE) memory and wait until finished
2084 sw_w32(0x1, RTL931X_MEM_ACL_INIT
);
2085 do { } while (sw_r32(RTL931X_MEM_ACL_INIT
) & 1);
2086 pr_info("%s: init ACL done\n", __func__
);
2088 // Initialize ALE memory and wait until finished
2089 sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0
);
2090 do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0
));
2091 sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1
);
2092 sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2
);
2093 do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2
) & 0x7ff);
2094 pr_info("%s: init ALE done\n", __func__
);
2096 // Enable ESD auto recovery
2097 sw_w32(0x1, RTL931X_MDX_CTRL_RSVD
);
2099 // Init SPI, is this for thermal control or what?
2100 sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0
);
2105 static int rtl838x_mdio_init(struct rtl838x_eth_priv
*priv
)
2107 struct device_node
*mii_np
, *dn
;
2111 pr_debug("%s called\n", __func__
);
2112 mii_np
= of_get_child_by_name(priv
->pdev
->dev
.of_node
, "mdio-bus");
2115 dev_err(&priv
->pdev
->dev
, "no %s child node found", "mdio-bus");
2119 if (!of_device_is_available(mii_np
)) {
2124 priv
->mii_bus
= devm_mdiobus_alloc(&priv
->pdev
->dev
);
2125 if (!priv
->mii_bus
) {
2130 switch(priv
->family_id
) {
2131 case RTL8380_FAMILY_ID
:
2132 priv
->mii_bus
->name
= "rtl838x-eth-mdio";
2133 priv
->mii_bus
->read
= rtl838x_mdio_read
;
2134 priv
->mii_bus
->read_paged
= rtl838x_mdio_read_paged
;
2135 priv
->mii_bus
->write
= rtl838x_mdio_write
;
2136 priv
->mii_bus
->write_paged
= rtl838x_mdio_write_paged
;
2137 priv
->mii_bus
->reset
= rtl838x_mdio_reset
;
2139 case RTL8390_FAMILY_ID
:
2140 priv
->mii_bus
->name
= "rtl839x-eth-mdio";
2141 priv
->mii_bus
->read
= rtl839x_mdio_read
;
2142 priv
->mii_bus
->read_paged
= rtl839x_mdio_read_paged
;
2143 priv
->mii_bus
->write
= rtl839x_mdio_write
;
2144 priv
->mii_bus
->write_paged
= rtl839x_mdio_write_paged
;
2145 priv
->mii_bus
->reset
= rtl839x_mdio_reset
;
2147 case RTL9300_FAMILY_ID
:
2148 priv
->mii_bus
->name
= "rtl930x-eth-mdio";
2149 priv
->mii_bus
->read
= rtl930x_mdio_read
;
2150 priv
->mii_bus
->read_paged
= rtl930x_mdio_read_paged
;
2151 priv
->mii_bus
->write
= rtl930x_mdio_write
;
2152 priv
->mii_bus
->write_paged
= rtl930x_mdio_write_paged
;
2153 priv
->mii_bus
->reset
= rtl930x_mdio_reset
;
2154 priv
->mii_bus
->probe_capabilities
= MDIOBUS_C22_C45
;
2156 case RTL9310_FAMILY_ID
:
2157 priv
->mii_bus
->name
= "rtl931x-eth-mdio";
2158 priv
->mii_bus
->read
= rtl931x_mdio_read
;
2159 priv
->mii_bus
->read_paged
= rtl931x_mdio_read_paged
;
2160 priv
->mii_bus
->write
= rtl931x_mdio_write
;
2161 priv
->mii_bus
->write_paged
= rtl931x_mdio_write_paged
;
2162 priv
->mii_bus
->reset
= rtl931x_mdio_reset
;
2163 priv
->mii_bus
->probe_capabilities
= MDIOBUS_C22_C45
;
2166 priv
->mii_bus
->access_capabilities
= MDIOBUS_ACCESS_C22_MMD
;
2167 priv
->mii_bus
->priv
= priv
;
2168 priv
->mii_bus
->parent
= &priv
->pdev
->dev
;
2170 for_each_node_by_name(dn
, "ethernet-phy") {
2173 if (of_property_read_u32(dn
, "reg", &pn
))
2176 if (of_property_read_u32_array(dn
, "rtl9300,smi-address", &smi_addr
[0], 2)) {
2181 if (of_property_read_u32(dn
, "sds", &priv
->sds_id
[pn
]))
2182 priv
->sds_id
[pn
] = -1;
2184 pr_info("set sds port %d to %d\n", pn
, priv
->sds_id
[pn
]);
2187 if (pn
< MAX_PORTS
) {
2188 priv
->smi_bus
[pn
] = smi_addr
[0];
2189 priv
->smi_addr
[pn
] = smi_addr
[1];
2191 pr_err("%s: illegal port number %d\n", __func__
, pn
);
2194 if (of_device_is_compatible(dn
, "ethernet-phy-ieee802.3-c45"))
2195 priv
->smi_bus_isc45
[smi_addr
[0]] = true;
2197 if (of_property_read_bool(dn
, "phy-is-integrated")) {
2198 priv
->phy_is_internal
[pn
] = true;
2202 dn
= of_find_compatible_node(NULL
, NULL
, "realtek,rtl83xx-switch");
2204 dev_err(&priv
->pdev
->dev
, "No RTL switch node in DTS\n");
2208 for_each_node_by_name(dn
, "port") {
2209 if (of_property_read_u32(dn
, "reg", &pn
))
2211 pr_debug("%s Looking at port %d\n", __func__
, pn
);
2212 if (pn
> priv
->cpu_port
)
2214 if (of_get_phy_mode(dn
, &priv
->interfaces
[pn
]))
2215 priv
->interfaces
[pn
] = PHY_INTERFACE_MODE_NA
;
2216 pr_debug("%s phy mode of port %d is %s\n", __func__
, pn
, phy_modes(priv
->interfaces
[pn
]));
2219 snprintf(priv
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%pOFn", mii_np
);
2220 ret
= of_mdiobus_register(priv
->mii_bus
, mii_np
);
2223 of_node_put(mii_np
);
2227 static int rtl838x_mdio_remove(struct rtl838x_eth_priv
*priv
)
2229 pr_debug("%s called\n", __func__
);
2233 mdiobus_unregister(priv
->mii_bus
);
2234 mdiobus_free(priv
->mii_bus
);
2239 static netdev_features_t
rtl838x_fix_features(struct net_device
*dev
,
2240 netdev_features_t features
)
2245 static int rtl83xx_set_features(struct net_device
*dev
, netdev_features_t features
)
2247 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
2249 if ((features
^ dev
->features
) & NETIF_F_RXCSUM
) {
2250 if (!(features
& NETIF_F_RXCSUM
))
2251 sw_w32_mask(BIT(3), 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
2253 sw_w32_mask(0, BIT(3), priv
->r
->mac_port_ctrl(priv
->cpu_port
));
2259 static int rtl93xx_set_features(struct net_device
*dev
, netdev_features_t features
)
2261 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
2263 if ((features
^ dev
->features
) & NETIF_F_RXCSUM
) {
2264 if (!(features
& NETIF_F_RXCSUM
))
2265 sw_w32_mask(BIT(4), 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
2267 sw_w32_mask(0, BIT(4), priv
->r
->mac_port_ctrl(priv
->cpu_port
));
2273 static const struct net_device_ops rtl838x_eth_netdev_ops
= {
2274 .ndo_open
= rtl838x_eth_open
,
2275 .ndo_stop
= rtl838x_eth_stop
,
2276 .ndo_start_xmit
= rtl838x_eth_tx
,
2277 .ndo_select_queue
= rtl83xx_pick_tx_queue
,
2278 .ndo_set_mac_address
= rtl838x_set_mac_address
,
2279 .ndo_validate_addr
= eth_validate_addr
,
2280 .ndo_set_rx_mode
= rtl838x_eth_set_multicast_list
,
2281 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
2282 .ndo_set_features
= rtl83xx_set_features
,
2283 .ndo_fix_features
= rtl838x_fix_features
,
2284 .ndo_setup_tc
= rtl83xx_setup_tc
,
2287 static const struct net_device_ops rtl839x_eth_netdev_ops
= {
2288 .ndo_open
= rtl838x_eth_open
,
2289 .ndo_stop
= rtl838x_eth_stop
,
2290 .ndo_start_xmit
= rtl838x_eth_tx
,
2291 .ndo_select_queue
= rtl83xx_pick_tx_queue
,
2292 .ndo_set_mac_address
= rtl838x_set_mac_address
,
2293 .ndo_validate_addr
= eth_validate_addr
,
2294 .ndo_set_rx_mode
= rtl839x_eth_set_multicast_list
,
2295 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
2296 .ndo_set_features
= rtl83xx_set_features
,
2297 .ndo_fix_features
= rtl838x_fix_features
,
2298 .ndo_setup_tc
= rtl83xx_setup_tc
,
2301 static const struct net_device_ops rtl930x_eth_netdev_ops
= {
2302 .ndo_open
= rtl838x_eth_open
,
2303 .ndo_stop
= rtl838x_eth_stop
,
2304 .ndo_start_xmit
= rtl838x_eth_tx
,
2305 .ndo_select_queue
= rtl93xx_pick_tx_queue
,
2306 .ndo_set_mac_address
= rtl838x_set_mac_address
,
2307 .ndo_validate_addr
= eth_validate_addr
,
2308 .ndo_set_rx_mode
= rtl930x_eth_set_multicast_list
,
2309 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
2310 .ndo_set_features
= rtl93xx_set_features
,
2311 .ndo_fix_features
= rtl838x_fix_features
,
2312 .ndo_setup_tc
= rtl83xx_setup_tc
,
2315 static const struct net_device_ops rtl931x_eth_netdev_ops
= {
2316 .ndo_open
= rtl838x_eth_open
,
2317 .ndo_stop
= rtl838x_eth_stop
,
2318 .ndo_start_xmit
= rtl838x_eth_tx
,
2319 .ndo_select_queue
= rtl93xx_pick_tx_queue
,
2320 .ndo_set_mac_address
= rtl838x_set_mac_address
,
2321 .ndo_validate_addr
= eth_validate_addr
,
2322 .ndo_set_rx_mode
= rtl931x_eth_set_multicast_list
,
2323 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
2324 .ndo_set_features
= rtl93xx_set_features
,
2325 .ndo_fix_features
= rtl838x_fix_features
,
2328 static const struct phylink_mac_ops rtl838x_phylink_ops
= {
2329 .validate
= rtl838x_validate
,
2330 .mac_pcs_get_state
= rtl838x_mac_pcs_get_state
,
2331 .mac_an_restart
= rtl838x_mac_an_restart
,
2332 .mac_config
= rtl838x_mac_config
,
2333 .mac_link_down
= rtl838x_mac_link_down
,
2334 .mac_link_up
= rtl838x_mac_link_up
,
2337 static const struct ethtool_ops rtl838x_ethtool_ops
= {
2338 .get_link_ksettings
= rtl838x_get_link_ksettings
,
2339 .set_link_ksettings
= rtl838x_set_link_ksettings
,
2342 static int __init
rtl838x_eth_probe(struct platform_device
*pdev
)
2344 struct net_device
*dev
;
2345 struct device_node
*dn
= pdev
->dev
.of_node
;
2346 struct rtl838x_eth_priv
*priv
;
2347 struct resource
*res
, *mem
;
2348 phy_interface_t phy_mode
;
2349 struct phylink
*phylink
;
2350 int err
= 0, i
, rxrings
, rxringlen
;
2351 struct ring_b
*ring
;
2353 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
2354 (u32
)pdev
, (u32
)(&(pdev
->dev
)));
2357 dev_err(&pdev
->dev
, "No DT found\n");
2361 rxrings
= (soc_info
.family
== RTL8380_FAMILY_ID
2362 || soc_info
.family
== RTL8390_FAMILY_ID
) ? 8 : 32;
2363 rxrings
= rxrings
> MAX_RXRINGS
? MAX_RXRINGS
: rxrings
;
2364 rxringlen
= MAX_ENTRIES
/ rxrings
;
2365 rxringlen
= rxringlen
> MAX_RXLEN
? MAX_RXLEN
: rxringlen
;
2367 dev
= alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv
), TXRINGS
, rxrings
);
2372 SET_NETDEV_DEV(dev
, &pdev
->dev
);
2373 priv
= netdev_priv(dev
);
2375 /* obtain buffer memory space */
2376 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2378 mem
= devm_request_mem_region(&pdev
->dev
, res
->start
,
2379 resource_size(res
), res
->name
);
2381 dev_err(&pdev
->dev
, "cannot request memory space\n");
2386 dev
->mem_start
= mem
->start
;
2387 dev
->mem_end
= mem
->end
;
2389 dev_err(&pdev
->dev
, "cannot request IO resource\n");
2394 /* Allocate buffer memory */
2395 priv
->membase
= dmam_alloc_coherent(&pdev
->dev
, rxrings
* rxringlen
* RING_BUFFER
2396 + sizeof(struct ring_b
) + sizeof(struct notify_b
),
2397 (void *)&dev
->mem_start
, GFP_KERNEL
);
2398 if (!priv
->membase
) {
2399 dev_err(&pdev
->dev
, "cannot allocate DMA buffer\n");
2404 // Allocate ring-buffer space at the end of the allocated memory
2405 ring
= priv
->membase
;
2406 ring
->rx_space
= priv
->membase
+ sizeof(struct ring_b
) + sizeof(struct notify_b
);
2408 spin_lock_init(&priv
->lock
);
2410 dev
->ethtool_ops
= &rtl838x_ethtool_ops
;
2411 dev
->min_mtu
= ETH_ZLEN
;
2412 dev
->max_mtu
= 1536;
2413 dev
->features
= NETIF_F_RXCSUM
| NETIF_F_HW_CSUM
;
2414 dev
->hw_features
= NETIF_F_RXCSUM
;
2416 priv
->id
= soc_info
.id
;
2417 priv
->family_id
= soc_info
.family
;
2419 pr_info("Found SoC ID: %4x: %s, family %x\n",
2420 priv
->id
, soc_info
.name
, priv
->family_id
);
2422 pr_err("Unknown chip id (%04x)\n", priv
->id
);
2426 switch (priv
->family_id
) {
2427 case RTL8380_FAMILY_ID
:
2428 priv
->cpu_port
= RTL838X_CPU_PORT
;
2429 priv
->r
= &rtl838x_reg
;
2430 dev
->netdev_ops
= &rtl838x_eth_netdev_ops
;
2432 case RTL8390_FAMILY_ID
:
2433 priv
->cpu_port
= RTL839X_CPU_PORT
;
2434 priv
->r
= &rtl839x_reg
;
2435 dev
->netdev_ops
= &rtl839x_eth_netdev_ops
;
2437 case RTL9300_FAMILY_ID
:
2438 priv
->cpu_port
= RTL930X_CPU_PORT
;
2439 priv
->r
= &rtl930x_reg
;
2440 dev
->netdev_ops
= &rtl930x_eth_netdev_ops
;
2442 case RTL9310_FAMILY_ID
:
2443 priv
->cpu_port
= RTL931X_CPU_PORT
;
2444 priv
->r
= &rtl931x_reg
;
2445 dev
->netdev_ops
= &rtl931x_eth_netdev_ops
;
2446 rtl931x_chip_init(priv
);
2449 pr_err("Unknown SoC family\n");
2452 priv
->rxringlen
= rxringlen
;
2453 priv
->rxrings
= rxrings
;
2455 /* Obtain device IRQ number */
2456 dev
->irq
= platform_get_irq(pdev
, 0);
2458 dev_err(&pdev
->dev
, "cannot obtain network-device IRQ\n");
2462 err
= devm_request_irq(&pdev
->dev
, dev
->irq
, priv
->r
->net_irq
,
2463 IRQF_SHARED
, dev
->name
, dev
);
2465 dev_err(&pdev
->dev
, "%s: could not acquire interrupt: %d\n",
2470 rtl8380_init_mac(priv
);
2472 /* try to get mac address in the following order:
2473 * 1) from device tree data
2474 * 2) from internal registers set by bootloader
2476 of_get_mac_address(pdev
->dev
.of_node
, dev
->dev_addr
);
2477 if (is_valid_ether_addr(dev
->dev_addr
)) {
2478 rtl838x_set_mac_hw(dev
, (u8
*)dev
->dev_addr
);
2480 dev
->dev_addr
[0] = (sw_r32(priv
->r
->mac
) >> 8) & 0xff;
2481 dev
->dev_addr
[1] = sw_r32(priv
->r
->mac
) & 0xff;
2482 dev
->dev_addr
[2] = (sw_r32(priv
->r
->mac
+ 4) >> 24) & 0xff;
2483 dev
->dev_addr
[3] = (sw_r32(priv
->r
->mac
+ 4) >> 16) & 0xff;
2484 dev
->dev_addr
[4] = (sw_r32(priv
->r
->mac
+ 4) >> 8) & 0xff;
2485 dev
->dev_addr
[5] = sw_r32(priv
->r
->mac
+ 4) & 0xff;
2487 /* if the address is invalid, use a random value */
2488 if (!is_valid_ether_addr(dev
->dev_addr
)) {
2489 struct sockaddr sa
= { AF_UNSPEC
};
2491 netdev_warn(dev
, "Invalid MAC address, using random\n");
2492 eth_hw_addr_random(dev
);
2493 memcpy(sa
.sa_data
, dev
->dev_addr
, ETH_ALEN
);
2494 if (rtl838x_set_mac_address(dev
, &sa
))
2495 netdev_warn(dev
, "Failed to set MAC address.\n");
2497 pr_info("Using MAC %08x%08x\n", sw_r32(priv
->r
->mac
),
2498 sw_r32(priv
->r
->mac
+ 4));
2499 strcpy(dev
->name
, "eth%d");
2503 err
= rtl838x_mdio_init(priv
);
2507 err
= register_netdev(dev
);
2511 for (i
= 0; i
< priv
->rxrings
; i
++) {
2512 priv
->rx_qs
[i
].id
= i
;
2513 priv
->rx_qs
[i
].priv
= priv
;
2514 netif_napi_add(dev
, &priv
->rx_qs
[i
].napi
, rtl838x_poll_rx
, 64);
2517 platform_set_drvdata(pdev
, dev
);
2519 phy_mode
= PHY_INTERFACE_MODE_NA
;
2520 err
= of_get_phy_mode(dn
, &phy_mode
);
2522 dev_err(&pdev
->dev
, "incorrect phy-mode\n");
2526 priv
->phylink_config
.dev
= &dev
->dev
;
2527 priv
->phylink_config
.type
= PHYLINK_NETDEV
;
2529 phylink
= phylink_create(&priv
->phylink_config
, pdev
->dev
.fwnode
,
2530 phy_mode
, &rtl838x_phylink_ops
);
2532 if (IS_ERR(phylink
)) {
2533 err
= PTR_ERR(phylink
);
2536 priv
->phylink
= phylink
;
2541 pr_err("Error setting up netdev, freeing it again.\n");
2546 static int rtl838x_eth_remove(struct platform_device
*pdev
)
2548 struct net_device
*dev
= platform_get_drvdata(pdev
);
2549 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
2553 pr_info("Removing platform driver for rtl838x-eth\n");
2554 rtl838x_mdio_remove(priv
);
2555 rtl838x_hw_stop(priv
);
2557 netif_tx_stop_all_queues(dev
);
2559 for (i
= 0; i
< priv
->rxrings
; i
++)
2560 netif_napi_del(&priv
->rx_qs
[i
].napi
);
2562 unregister_netdev(dev
);
2568 static const struct of_device_id rtl838x_eth_of_ids
[] = {
2569 { .compatible
= "realtek,rtl838x-eth"},
2572 MODULE_DEVICE_TABLE(of
, rtl838x_eth_of_ids
);
2574 static struct platform_driver rtl838x_eth_driver
= {
2575 .probe
= rtl838x_eth_probe
,
2576 .remove
= rtl838x_eth_remove
,
2578 .name
= "rtl838x-eth",
2580 .of_match_table
= rtl838x_eth_of_ids
,
2584 module_platform_driver(rtl838x_eth_driver
);
2586 MODULE_AUTHOR("B. Koblitz");
2587 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
2588 MODULE_LICENSE("GPL");