1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/net/ethernet/rtl838x_eth.c
4 * Copyright (C) 2020 B. Koblitz
7 #include <linux/dma-mapping.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
11 #include <linux/platform_device.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/module.h>
18 #include <linux/phylink.h>
19 #include <linux/pkt_sched.h>
21 #include <net/switchdev.h>
22 #include <asm/cacheflush.h>
24 #include <asm/mach-rtl838x/mach-rtl83xx.h>
25 #include "rtl838x_eth.h"
27 extern struct rtl83xx_soc_info soc_info
;
30 * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX
31 * The ring is assigned by switch based on packet/port priortity
32 * Maximum number of TX rings is 2, Ring 2 being the high priority
33 * ring on the RTL93xx SoCs. MAX_RING_SIZE * RING_BUFFER gives
34 * the memory used for the ring buffer.
36 #define MAX_RXRINGS 32
38 #define MAX_ENTRIES (200 * 8)
40 // BUG: TXRINGLEN can be 160
42 #define NOTIFY_EVENTS 10
43 #define NOTIFY_BLOCKS 10
46 #define TX_EN_93XX 0x20
47 #define RX_EN_93XX 0x10
51 #define RING_BUFFER 1600
53 #define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
54 #define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
55 #define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
56 #define RTL838X_ATK_PRVNT_STS (0x5B1C)
61 uint16_t size
; /* buffer size */
63 uint16_t len
; /* pkt len */
65 } __packed
__aligned(1);
74 } __packed
__aligned(1);
77 uint32_t rx_r
[MAX_RXRINGS
][MAX_RXLEN
];
78 uint32_t tx_r
[TXRINGS
][TXRINGLEN
];
79 struct p_hdr rx_header
[MAX_RXRINGS
][MAX_RXLEN
];
80 struct p_hdr tx_header
[TXRINGS
][TXRINGLEN
];
81 uint32_t c_rx
[MAX_RXRINGS
];
82 uint32_t c_tx
[TXRINGS
];
83 uint8_t tx_space
[TXRINGS
* TXRINGLEN
* RING_BUFFER
];
88 struct n_event events
[NOTIFY_EVENTS
];
92 struct notify_block blocks
[NOTIFY_BLOCKS
];
94 u32 ring
[NOTIFY_BLOCKS
];
98 void rtl838x_create_tx_header(struct p_hdr
*h
, int dest_port
, int prio
)
103 // cpu_tag[0] is reserved on the RTL83XX SoCs
104 h
->cpu_tag
[1] = 0x0400;
105 h
->cpu_tag
[2] = 0x0200;
106 h
->cpu_tag
[3] = 0x0000;
107 h
->cpu_tag
[4] = BIT(dest_port
) >> 16;
108 h
->cpu_tag
[5] = BIT(dest_port
) & 0xffff;
109 // Set internal priority and AS_PRIO
111 h
->cpu_tag
[2] |= (prio
| 0x8) << 12;
115 void rtl839x_create_tx_header(struct p_hdr
*h
, int dest_port
, int prio
)
120 // cpu_tag[0] is reserved on the RTL83XX SoCs
121 h
->cpu_tag
[1] = 0x0100;
122 h
->cpu_tag
[2] = h
->cpu_tag
[3] = h
->cpu_tag
[4] = h
->cpu_tag
[5] = 0;
123 if (dest_port
>= 32) {
125 h
->cpu_tag
[2] = BIT(dest_port
) >> 16;
126 h
->cpu_tag
[3] = BIT(dest_port
) & 0xffff;
128 h
->cpu_tag
[4] = BIT(dest_port
) >> 16;
129 h
->cpu_tag
[5] = BIT(dest_port
) & 0xffff;
131 h
->cpu_tag
[6] |= BIT(21); // Enable destination port mask use
132 // Set internal priority and AS_PRIO
134 h
->cpu_tag
[1] |= prio
| BIT(3);
138 void rtl930x_create_tx_header(struct p_hdr
*h
, int dest_port
, int prio
)
140 h
->cpu_tag
[0] = 0x8000;
141 h
->cpu_tag
[1] = 0; // TODO: Fill port and prio
147 h
->cpu_tag
[7] = 0xffff;
150 void rtl931x_create_tx_header(struct p_hdr
*h
, int dest_port
, int prio
)
152 h
->cpu_tag
[0] = 0x8000;
153 h
->cpu_tag
[1] = 0; // TODO: Fill port and prio
159 h
->cpu_tag
[7] = 0xffff;
162 struct rtl838x_rx_q
{
164 struct rtl838x_eth_priv
*priv
;
165 struct napi_struct napi
;
168 struct rtl838x_eth_priv
{
169 struct net_device
*netdev
;
170 struct platform_device
*pdev
;
173 struct mii_bus
*mii_bus
;
174 struct rtl838x_rx_q rx_qs
[MAX_RXRINGS
];
175 struct phylink
*phylink
;
176 struct phylink_config phylink_config
;
179 const struct rtl838x_reg
*r
;
186 extern int rtl838x_phy_init(struct rtl838x_eth_priv
*priv
);
187 extern int rtl838x_read_sds_phy(int phy_addr
, int phy_reg
);
188 extern int rtl839x_read_sds_phy(int phy_addr
, int phy_reg
);
189 extern int rtl839x_write_sds_phy(int phy_addr
, int phy_reg
, u16 v
);
190 extern int rtl930x_read_sds_phy(int phy_addr
, int page
, int phy_reg
);
191 extern int rtl930x_write_sds_phy(int phy_addr
, int page
, int phy_reg
, u16 v
);
192 extern int rtl930x_read_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32
*val
);
193 extern int rtl930x_write_mmd_phy(u32 port
, u32 devnum
, u32 regnum
, u32 val
);
196 * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of
197 * the rings. Writing x into these registers substracts x from its content.
198 * When the content reaches the ring size, the ASIC no longer adds
199 * packets to this receive queue.
/* Return ring slots to the RX fill-level counter of ring @r.
 * No-op here: the fill-level counter hardware does not exist on RTL838x.
 */
void rtl838x_update_cntr(int r, int released)
{
	// This feature is not available on RTL838x SoCs
}
/* Return ring slots to the RX fill-level counter of ring @r.
 * No-op here: the fill-level counter hardware does not exist on RTL839x.
 */
void rtl839x_update_cntr(int r, int released)
{
	// This feature is not available on RTL839x SoCs
}
211 void rtl930x_update_cntr(int r
, int released
)
213 int pos
= (r
% 3) * 10;
214 u32 reg
= RTL930X_DMA_IF_RX_RING_CNTR
+ ((r
/ 3) << 2);
217 v
= (v
>> pos
) & 0x3ff;
218 pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released
, v
, pos
, reg
);
219 sw_w32_mask(0x3ff << pos
, released
<< pos
, reg
);
223 void rtl931x_update_cntr(int r
, int released
)
225 int pos
= (r
% 3) * 10;
226 u32 reg
= RTL931X_DMA_IF_RX_RING_CNTR
+ ((r
/ 3) << 2);
228 sw_w32_mask(0x3ff << pos
, released
<< pos
, reg
);
239 bool rtl838x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
241 t
->reason
= h
->cpu_tag
[3] & 0xf;
243 pr_debug("Reason: %d\n", t
->reason
);
244 t
->queue
= (h
->cpu_tag
[0] & 0xe0) >> 5;
245 if (t
->reason
!= 4) // NIC_RX_REASON_SPECIAL_TRAP
249 t
->port
= h
->cpu_tag
[1] & 0x1f;
251 return t
->l2_offloaded
;
254 bool rtl839x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
256 t
->reason
= h
->cpu_tag
[4] & 0x1f;
258 pr_debug("Reason: %d\n", t
->reason
);
259 t
->queue
= (h
->cpu_tag
[3] & 0xe000) >> 13;
260 if ((t
->reason
!= 7) && (t
->reason
!= 8)) // NIC_RX_REASON_RMA_USR
265 t
->port
= h
->cpu_tag
[1] & 0x3f;
267 return t
->l2_offloaded
;
270 bool rtl931x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
272 t
->reason
= h
->cpu_tag
[7] & 0x3f;
273 pr_debug("Reason %d\n", t
->reason
);
274 t
->queue
= (h
->cpu_tag
[2] >> 11) & 0x1f;
275 if (t
->reason
>= 19 && t
->reason
<= 27)
279 t
->port
= (h
->cpu_tag
[0] >> 8) & 0x3f;
281 return t
->l2_offloaded
;
284 bool rtl930x_decode_tag(struct p_hdr
*h
, struct dsa_tag
*t
)
286 rtl931x_decode_tag(h
, t
);
288 return t
->l2_offloaded
;
292 * Discard the RX ring-buffers, called as part of the net-ISR
293 * when the buffer runs over
294 * Caller needs to hold priv->lock
296 static void rtl838x_rb_cleanup(struct rtl838x_eth_priv
*priv
, int status
)
301 struct ring_b
*ring
= priv
->membase
;
303 for (r
= 0; r
< priv
->rxrings
; r
++) {
304 pr_debug("In %s working on r: %d\n", __func__
, r
);
305 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
307 if ((ring
->rx_r
[r
][ring
->c_rx
[r
]] & 0x1))
309 pr_debug("Got something: %d\n", ring
->c_rx
[r
]);
310 h
= &ring
->rx_header
[r
][ring
->c_rx
[r
]];
311 memset(h
, 0, sizeof(struct p_hdr
));
312 h
->buf
= (u8
*)KSEG1ADDR(ring
->rx_space
313 + r
* priv
->rxringlen
* RING_BUFFER
314 + ring
->c_rx
[r
] * RING_BUFFER
);
315 h
->size
= RING_BUFFER
;
316 /* make sure the header is visible to the ASIC */
319 ring
->rx_r
[r
][ring
->c_rx
[r
]] = KSEG1ADDR(h
) | 0x1
320 | (ring
->c_rx
[r
] == (priv
->rxringlen
- 1) ? WRAP
: 0x1);
321 ring
->c_rx
[r
] = (ring
->c_rx
[r
] + 1) % priv
->rxringlen
;
322 } while (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
);
326 struct fdb_update_work
{
327 struct work_struct work
;
328 struct net_device
*ndev
;
329 u64 macs
[NOTIFY_EVENTS
+ 1];
332 void rtl838x_fdb_sync(struct work_struct
*work
)
334 const struct fdb_update_work
*uw
=
335 container_of(work
, struct fdb_update_work
, work
);
336 struct switchdev_notifier_fdb_info info
;
341 while (uw
->macs
[i
]) {
342 action
= (uw
->macs
[i
] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
343 : SWITCHDEV_FDB_DEL_TO_BRIDGE
;
344 u64_to_ether_addr(uw
->macs
[i
] & 0xffffffffffffULL
, addr
);
345 info
.addr
= &addr
[0];
348 pr_debug("FDB entry %d: %llx, action %d\n", i
, uw
->macs
[0], action
);
349 call_switchdev_notifiers(action
, uw
->ndev
, &info
.info
, NULL
);
355 static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv
*priv
)
357 struct notify_b
*nb
= priv
->membase
+ sizeof(struct ring_b
);
358 u32 e
= priv
->lastEvent
;
359 struct n_event
*event
;
362 struct fdb_update_work
*w
;
364 while (!(nb
->ring
[e
] & 1)) {
365 w
= kzalloc(sizeof(*w
), GFP_ATOMIC
);
367 pr_err("Out of memory: %s", __func__
);
370 INIT_WORK(&w
->work
, rtl838x_fdb_sync
);
372 for (i
= 0; i
< NOTIFY_EVENTS
; i
++) {
373 event
= &nb
->blocks
[e
].events
[i
];
379 w
->ndev
= priv
->netdev
;
383 /* Hand the ring entry back to the switch */
384 nb
->ring
[e
] = nb
->ring
[e
] | 1;
385 e
= (e
+ 1) % NOTIFY_BLOCKS
;
388 schedule_work(&w
->work
);
393 static irqreturn_t
rtl83xx_net_irq(int irq
, void *dev_id
)
395 struct net_device
*dev
= dev_id
;
396 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
397 u32 status
= sw_r32(priv
->r
->dma_if_intr_sts
);
398 bool triggered
= false;
399 u32 atk
= sw_r32(RTL838X_ATK_PRVNT_STS
);
401 u32 storm_uc
= sw_r32(RTL838X_STORM_CTRL_PORT_UC_EXCEED
);
402 u32 storm_mc
= sw_r32(RTL838X_STORM_CTRL_PORT_MC_EXCEED
);
403 u32 storm_bc
= sw_r32(RTL838X_STORM_CTRL_PORT_BC_EXCEED
);
405 pr_debug("IRQ: %08x\n", status
);
406 if (storm_uc
|| storm_mc
|| storm_bc
) {
407 pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
408 storm_uc
, storm_mc
, storm_bc
);
410 sw_w32(storm_uc
, RTL838X_STORM_CTRL_PORT_UC_EXCEED
);
411 sw_w32(storm_mc
, RTL838X_STORM_CTRL_PORT_MC_EXCEED
);
412 sw_w32(storm_bc
, RTL838X_STORM_CTRL_PORT_BC_EXCEED
);
418 pr_debug("Attack prevention triggered: %08x\n", atk
);
419 sw_w32(atk
, RTL838X_ATK_PRVNT_STS
);
422 spin_lock(&priv
->lock
);
423 /* Ignore TX interrupt */
424 if ((status
& 0xf0000)) {
426 sw_w32(0x000f0000, priv
->r
->dma_if_intr_sts
);
430 if (status
& 0x0ff00) {
431 /* ACK and disable RX interrupt for this ring */
432 sw_w32_mask(0xff00 & status
, 0, priv
->r
->dma_if_intr_msk
);
433 sw_w32(0x0000ff00 & status
, priv
->r
->dma_if_intr_sts
);
434 for (i
= 0; i
< priv
->rxrings
; i
++) {
435 if (status
& BIT(i
+ 8)) {
436 pr_debug("Scheduling queue: %d\n", i
);
437 napi_schedule(&priv
->rx_qs
[i
].napi
);
442 /* RX buffer overrun */
443 if (status
& 0x000ff) {
444 pr_info("RX buffer overrun: status %x, mask: %x\n",
445 status
, sw_r32(priv
->r
->dma_if_intr_msk
));
446 sw_w32(status
, priv
->r
->dma_if_intr_sts
);
447 rtl838x_rb_cleanup(priv
, status
& 0xff);
450 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00100000) {
451 sw_w32(0x00100000, priv
->r
->dma_if_intr_sts
);
452 rtl839x_l2_notification_handler(priv
);
455 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00200000) {
456 sw_w32(0x00200000, priv
->r
->dma_if_intr_sts
);
457 rtl839x_l2_notification_handler(priv
);
460 if (priv
->family_id
== RTL8390_FAMILY_ID
&& status
& 0x00400000) {
461 sw_w32(0x00400000, priv
->r
->dma_if_intr_sts
);
462 rtl839x_l2_notification_handler(priv
);
465 spin_unlock(&priv
->lock
);
469 static irqreturn_t
rtl93xx_net_irq(int irq
, void *dev_id
)
471 struct net_device
*dev
= dev_id
;
472 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
473 u32 status_rx_r
= sw_r32(priv
->r
->dma_if_intr_rx_runout_sts
);
474 u32 status_rx
= sw_r32(priv
->r
->dma_if_intr_rx_done_sts
);
475 u32 status_tx
= sw_r32(priv
->r
->dma_if_intr_tx_done_sts
);
478 pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
479 __func__
, status_tx
, status_rx
, status_rx_r
);
480 spin_lock(&priv
->lock
);
482 /* Ignore TX interrupt */
485 pr_debug("TX done\n");
486 sw_w32(status_tx
, priv
->r
->dma_if_intr_tx_done_sts
);
491 pr_debug("RX IRQ\n");
492 /* ACK and disable RX interrupt for given rings */
493 sw_w32(status_rx
, priv
->r
->dma_if_intr_rx_done_sts
);
494 sw_w32_mask(status_rx
, 0, priv
->r
->dma_if_intr_rx_done_msk
);
495 for (i
= 0; i
< priv
->rxrings
; i
++) {
496 if (status_rx
& BIT(i
)) {
497 pr_debug("Scheduling queue: %d\n", i
);
498 napi_schedule(&priv
->rx_qs
[i
].napi
);
503 /* RX buffer overrun */
505 pr_debug("RX buffer overrun: status %x, mask: %x\n",
506 status_rx_r
, sw_r32(priv
->r
->dma_if_intr_rx_runout_msk
));
507 sw_w32(status_rx_r
, priv
->r
->dma_if_intr_rx_runout_sts
);
508 rtl838x_rb_cleanup(priv
, status_rx_r
);
511 spin_unlock(&priv
->lock
);
515 static const struct rtl838x_reg rtl838x_reg
= {
516 .net_irq
= rtl83xx_net_irq
,
517 .mac_port_ctrl
= rtl838x_mac_port_ctrl
,
518 .dma_if_intr_sts
= RTL838X_DMA_IF_INTR_STS
,
519 .dma_if_intr_msk
= RTL838X_DMA_IF_INTR_MSK
,
520 .dma_if_ctrl
= RTL838X_DMA_IF_CTRL
,
521 .mac_force_mode_ctrl
= RTL838X_MAC_FORCE_MODE_CTRL
,
522 .dma_rx_base
= RTL838X_DMA_RX_BASE
,
523 .dma_tx_base
= RTL838X_DMA_TX_BASE
,
524 .dma_if_rx_ring_size
= rtl838x_dma_if_rx_ring_size
,
525 .dma_if_rx_ring_cntr
= rtl838x_dma_if_rx_ring_cntr
,
526 .dma_if_rx_cur
= RTL838X_DMA_IF_RX_CUR
,
527 .rst_glb_ctrl
= RTL838X_RST_GLB_CTRL_0
,
528 .get_mac_link_sts
= rtl838x_get_mac_link_sts
,
529 .get_mac_link_dup_sts
= rtl838x_get_mac_link_dup_sts
,
530 .get_mac_link_spd_sts
= rtl838x_get_mac_link_spd_sts
,
531 .get_mac_rx_pause_sts
= rtl838x_get_mac_rx_pause_sts
,
532 .get_mac_tx_pause_sts
= rtl838x_get_mac_tx_pause_sts
,
534 .l2_tbl_flush_ctrl
= RTL838X_L2_TBL_FLUSH_CTRL
,
535 .update_cntr
= rtl838x_update_cntr
,
536 .create_tx_header
= rtl838x_create_tx_header
,
537 .decode_tag
= rtl838x_decode_tag
,
540 static const struct rtl838x_reg rtl839x_reg
= {
541 .net_irq
= rtl83xx_net_irq
,
542 .mac_port_ctrl
= rtl839x_mac_port_ctrl
,
543 .dma_if_intr_sts
= RTL839X_DMA_IF_INTR_STS
,
544 .dma_if_intr_msk
= RTL839X_DMA_IF_INTR_MSK
,
545 .dma_if_ctrl
= RTL839X_DMA_IF_CTRL
,
546 .mac_force_mode_ctrl
= RTL839X_MAC_FORCE_MODE_CTRL
,
547 .dma_rx_base
= RTL839X_DMA_RX_BASE
,
548 .dma_tx_base
= RTL839X_DMA_TX_BASE
,
549 .dma_if_rx_ring_size
= rtl839x_dma_if_rx_ring_size
,
550 .dma_if_rx_ring_cntr
= rtl839x_dma_if_rx_ring_cntr
,
551 .dma_if_rx_cur
= RTL839X_DMA_IF_RX_CUR
,
552 .rst_glb_ctrl
= RTL839X_RST_GLB_CTRL
,
553 .get_mac_link_sts
= rtl839x_get_mac_link_sts
,
554 .get_mac_link_dup_sts
= rtl839x_get_mac_link_dup_sts
,
555 .get_mac_link_spd_sts
= rtl839x_get_mac_link_spd_sts
,
556 .get_mac_rx_pause_sts
= rtl839x_get_mac_rx_pause_sts
,
557 .get_mac_tx_pause_sts
= rtl839x_get_mac_tx_pause_sts
,
559 .l2_tbl_flush_ctrl
= RTL839X_L2_TBL_FLUSH_CTRL
,
560 .update_cntr
= rtl839x_update_cntr
,
561 .create_tx_header
= rtl839x_create_tx_header
,
562 .decode_tag
= rtl839x_decode_tag
,
565 static const struct rtl838x_reg rtl930x_reg
= {
566 .net_irq
= rtl93xx_net_irq
,
567 .mac_port_ctrl
= rtl930x_mac_port_ctrl
,
568 .dma_if_intr_rx_runout_sts
= RTL930X_DMA_IF_INTR_RX_RUNOUT_STS
,
569 .dma_if_intr_rx_done_sts
= RTL930X_DMA_IF_INTR_RX_DONE_STS
,
570 .dma_if_intr_tx_done_sts
= RTL930X_DMA_IF_INTR_TX_DONE_STS
,
571 .dma_if_intr_rx_runout_msk
= RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK
,
572 .dma_if_intr_rx_done_msk
= RTL930X_DMA_IF_INTR_RX_DONE_MSK
,
573 .dma_if_intr_tx_done_msk
= RTL930X_DMA_IF_INTR_TX_DONE_MSK
,
574 .l2_ntfy_if_intr_sts
= RTL930X_L2_NTFY_IF_INTR_STS
,
575 .l2_ntfy_if_intr_msk
= RTL930X_L2_NTFY_IF_INTR_MSK
,
576 .dma_if_ctrl
= RTL930X_DMA_IF_CTRL
,
577 .mac_force_mode_ctrl
= RTL930X_MAC_FORCE_MODE_CTRL
,
578 .dma_rx_base
= RTL930X_DMA_RX_BASE
,
579 .dma_tx_base
= RTL930X_DMA_TX_BASE
,
580 .dma_if_rx_ring_size
= rtl930x_dma_if_rx_ring_size
,
581 .dma_if_rx_ring_cntr
= rtl930x_dma_if_rx_ring_cntr
,
582 .dma_if_rx_cur
= RTL930X_DMA_IF_RX_CUR
,
583 .rst_glb_ctrl
= RTL930X_RST_GLB_CTRL_0
,
584 .get_mac_link_sts
= rtl930x_get_mac_link_sts
,
585 .get_mac_link_dup_sts
= rtl930x_get_mac_link_dup_sts
,
586 .get_mac_link_spd_sts
= rtl930x_get_mac_link_spd_sts
,
587 .get_mac_rx_pause_sts
= rtl930x_get_mac_rx_pause_sts
,
588 .get_mac_tx_pause_sts
= rtl930x_get_mac_tx_pause_sts
,
589 .mac
= RTL930X_MAC_L2_ADDR_CTRL
,
590 .l2_tbl_flush_ctrl
= RTL930X_L2_TBL_FLUSH_CTRL
,
591 .update_cntr
= rtl930x_update_cntr
,
592 .create_tx_header
= rtl930x_create_tx_header
,
593 .decode_tag
= rtl930x_decode_tag
,
596 static const struct rtl838x_reg rtl931x_reg
= {
597 .net_irq
= rtl93xx_net_irq
,
598 .mac_port_ctrl
= rtl931x_mac_port_ctrl
,
599 .dma_if_intr_rx_runout_sts
= RTL931X_DMA_IF_INTR_RX_RUNOUT_STS
,
600 .dma_if_intr_rx_done_sts
= RTL931X_DMA_IF_INTR_RX_DONE_STS
,
601 .dma_if_intr_tx_done_sts
= RTL931X_DMA_IF_INTR_TX_DONE_STS
,
602 .dma_if_intr_rx_runout_msk
= RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK
,
603 .dma_if_intr_rx_done_msk
= RTL931X_DMA_IF_INTR_RX_DONE_MSK
,
604 .dma_if_intr_tx_done_msk
= RTL931X_DMA_IF_INTR_TX_DONE_MSK
,
605 .l2_ntfy_if_intr_sts
= RTL931X_L2_NTFY_IF_INTR_STS
,
606 .l2_ntfy_if_intr_msk
= RTL931X_L2_NTFY_IF_INTR_MSK
,
607 .dma_if_ctrl
= RTL931X_DMA_IF_CTRL
,
608 .mac_force_mode_ctrl
= RTL931X_MAC_FORCE_MODE_CTRL
,
609 .dma_rx_base
= RTL931X_DMA_RX_BASE
,
610 .dma_tx_base
= RTL931X_DMA_TX_BASE
,
611 .dma_if_rx_ring_size
= rtl931x_dma_if_rx_ring_size
,
612 .dma_if_rx_ring_cntr
= rtl931x_dma_if_rx_ring_cntr
,
613 .dma_if_rx_cur
= RTL931X_DMA_IF_RX_CUR
,
614 .rst_glb_ctrl
= RTL931X_RST_GLB_CTRL
,
615 .get_mac_link_sts
= rtl931x_get_mac_link_sts
,
616 .get_mac_link_dup_sts
= rtl931x_get_mac_link_dup_sts
,
617 .get_mac_link_spd_sts
= rtl931x_get_mac_link_spd_sts
,
618 .get_mac_rx_pause_sts
= rtl931x_get_mac_rx_pause_sts
,
619 .get_mac_tx_pause_sts
= rtl931x_get_mac_tx_pause_sts
,
620 .mac
= RTL931X_MAC_L2_ADDR_CTRL
,
621 .l2_tbl_flush_ctrl
= RTL931X_L2_TBL_FLUSH_CTRL
,
622 .update_cntr
= rtl931x_update_cntr
,
623 .create_tx_header
= rtl931x_create_tx_header
,
624 .decode_tag
= rtl931x_decode_tag
,
627 static void rtl838x_hw_reset(struct rtl838x_eth_priv
*priv
)
632 pr_info("RESETTING %x, CPU_PORT %d\n", priv
->family_id
, priv
->cpu_port
);
633 sw_w32_mask(0x3, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
636 /* Disable and clear interrupts */
637 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
) {
638 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_runout_msk
);
639 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_sts
);
640 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_done_msk
);
641 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_sts
);
642 sw_w32(0x00000000, priv
->r
->dma_if_intr_tx_done_msk
);
643 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_sts
);
645 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
646 sw_w32(0xffffffff, priv
->r
->dma_if_intr_sts
);
649 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
650 /* Preserve L2 notification and NBUF settings */
651 int_saved
= sw_r32(priv
->r
->dma_if_intr_msk
);
652 nbuf
= sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
654 /* Disable link change interrupt on RTL839x */
655 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG
);
656 sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG
+ 4);
658 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
659 sw_w32(0xffffffff, priv
->r
->dma_if_intr_sts
);
663 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
664 sw_w32(0x4, priv
->r
->rst_glb_ctrl
);
666 sw_w32(0x8, priv
->r
->rst_glb_ctrl
);
668 do { /* Wait for reset of NIC and Queues done */
670 } while (sw_r32(priv
->r
->rst_glb_ctrl
) & 0xc);
673 /* Setup Head of Line */
674 if (priv
->family_id
== RTL8380_FAMILY_ID
)
675 sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE
); // Disabled on RTL8380
676 if (priv
->family_id
== RTL8390_FAMILY_ID
)
677 sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR
);
678 if (priv
->family_id
== RTL9300_FAMILY_ID
) {
679 for (i
= 0; i
< priv
->rxrings
; i
++) {
681 sw_w32_mask(0x3ff << pos
, 0, priv
->r
->dma_if_rx_ring_size(i
));
682 sw_w32_mask(0x3ff << pos
, priv
->rxringlen
,
683 priv
->r
->dma_if_rx_ring_cntr(i
));
687 /* Re-enable link change interrupt */
688 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
689 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG
);
690 sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG
+ 4);
691 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG
);
692 sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG
+ 4);
694 /* Restore notification settings: on RTL838x these bits are null */
695 sw_w32_mask(7 << 20, int_saved
& (7 << 20), priv
->r
->dma_if_intr_msk
);
696 sw_w32(nbuf
, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
700 static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv
*priv
)
703 struct ring_b
*ring
= priv
->membase
;
705 for (i
= 0; i
< priv
->rxrings
; i
++)
706 sw_w32(KSEG1ADDR(&ring
->rx_r
[i
]), priv
->r
->dma_rx_base
+ i
* 4);
708 for (i
= 0; i
< TXRINGS
; i
++)
709 sw_w32(KSEG1ADDR(&ring
->tx_r
[i
]), priv
->r
->dma_tx_base
+ i
* 4);
712 static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
714 /* Disable Head of Line features for all RX rings */
715 sw_w32(0xffffffff, priv
->r
->dma_if_rx_ring_size(0));
717 /* Truncate RX buffer to 0x640 (1600) bytes, pad TX */
718 sw_w32(0x06400020, priv
->r
->dma_if_ctrl
);
720 /* Enable RX done, RX overflow and TX done interrupts */
721 sw_w32(0xfffff, priv
->r
->dma_if_intr_msk
);
723 /* Enable DMA, engine expects empty FCS field */
724 sw_w32_mask(0, RX_EN
| TX_EN
, priv
->r
->dma_if_ctrl
);
726 /* Restart TX/RX to CPU port */
727 sw_w32_mask(0x0, 0x3, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
728 /* Set Speed, duplex, flow control
729 * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
730 * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
733 sw_w32(0x6192F, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
734 /* allow CRC errors on CPU-port */
735 sw_w32_mask(0, 0x8, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
738 static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
740 /* Setup CPU-Port: RX Buffer */
741 sw_w32(0x0000c808, priv
->r
->dma_if_ctrl
);
743 /* Enable Notify, RX done, RX overflow and TX done interrupts */
744 sw_w32(0x007fffff, priv
->r
->dma_if_intr_msk
); // Notify IRQ!
747 sw_w32_mask(0, RX_EN
| TX_EN
, priv
->r
->dma_if_ctrl
);
749 /* Restart TX/RX to CPU port */
750 sw_w32_mask(0x0, 0x3, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
752 /* CPU port joins Lookup Miss Flooding Portmask */
753 // TODO: The code below should also work for the RTL838x
754 sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL
);
755 sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
756 sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL
);
758 /* Force CPU port link up */
759 sw_w32_mask(0, 3, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
762 static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv
*priv
)
767 /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
768 sw_w32(0x06400040, priv
->r
->dma_if_ctrl
);
770 for (i
= 0; i
< priv
->rxrings
; i
++) {
772 sw_w32_mask(0x3ff << pos
, priv
->rxringlen
<< pos
, priv
->r
->dma_if_rx_ring_size(i
));
774 // Some SoCs have issues with missing underflow protection
775 v
= (sw_r32(priv
->r
->dma_if_rx_ring_cntr(i
)) >> pos
) & 0x3ff;
776 sw_w32_mask(0x3ff << pos
, v
, priv
->r
->dma_if_rx_ring_cntr(i
));
779 /* Enable Notify, RX done, RX overflow and TX done interrupts */
780 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_msk
);
781 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_msk
);
782 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_msk
);
785 sw_w32_mask(0, RX_EN_93XX
| TX_EN_93XX
, priv
->r
->dma_if_ctrl
);
787 /* Restart TX/RX to CPU port */
788 sw_w32_mask(0x0, 0x3, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
790 sw_w32_mask(0, BIT(priv
->cpu_port
), RTL930X_L2_UNKN_UC_FLD_PMSK
);
791 sw_w32(0x217, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
794 static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv
*priv
, struct ring_b
*ring
)
800 for (i
= 0; i
< priv
->rxrings
; i
++) {
801 for (j
= 0; j
< priv
->rxringlen
; j
++) {
802 h
= &ring
->rx_header
[i
][j
];
803 memset(h
, 0, sizeof(struct p_hdr
));
804 h
->buf
= (u8
*)KSEG1ADDR(ring
->rx_space
805 + i
* priv
->rxringlen
* RING_BUFFER
807 h
->size
= RING_BUFFER
;
808 /* All rings owned by switch, last one wraps */
809 ring
->rx_r
[i
][j
] = KSEG1ADDR(h
) | 1
810 | (j
== (priv
->rxringlen
- 1) ? WRAP
: 0);
815 for (i
= 0; i
< TXRINGS
; i
++) {
816 for (j
= 0; j
< TXRINGLEN
; j
++) {
817 h
= &ring
->tx_header
[i
][j
];
818 memset(h
, 0, sizeof(struct p_hdr
));
819 h
->buf
= (u8
*)KSEG1ADDR(ring
->tx_space
820 + i
* TXRINGLEN
* RING_BUFFER
822 h
->size
= RING_BUFFER
;
823 ring
->tx_r
[i
][j
] = KSEG1ADDR(&ring
->tx_header
[i
][j
]);
825 /* Last header is wrapping around */
826 ring
->tx_r
[i
][j
-1] |= WRAP
;
831 static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv
*priv
)
834 struct notify_b
*b
= priv
->membase
+ sizeof(struct ring_b
);
836 for (i
= 0; i
< NOTIFY_BLOCKS
; i
++)
837 b
->ring
[i
] = KSEG1ADDR(&b
->blocks
[i
]) | 1 | (i
== (NOTIFY_BLOCKS
- 1) ? WRAP
: 0);
839 sw_w32((u32
) b
->ring
, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL
);
840 sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL
);
842 /* Setup notification events */
843 sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0
); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
844 sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL
); // SUSPEND_NOTIFICATION_EN
846 /* Enable Notification */
847 sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL
);
851 static int rtl838x_eth_open(struct net_device
*ndev
)
854 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
855 struct ring_b
*ring
= priv
->membase
;
858 pr_info("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
859 __func__
, priv
->rxrings
, priv
->rxringlen
, TXRINGS
, TXRINGLEN
);
861 spin_lock_irqsave(&priv
->lock
, flags
);
862 rtl838x_hw_reset(priv
);
863 rtl838x_setup_ring_buffer(priv
, ring
);
864 if (priv
->family_id
== RTL8390_FAMILY_ID
) {
865 rtl839x_setup_notify_ring_buffer(priv
);
866 /* Make sure the ring structure is visible to the ASIC */
871 rtl838x_hw_ring_setup(priv
);
872 err
= request_irq(ndev
->irq
, priv
->r
->net_irq
, IRQF_SHARED
, ndev
->name
, ndev
);
874 netdev_err(ndev
, "%s: could not acquire interrupt: %d\n",
878 phylink_start(priv
->phylink
);
880 for (i
= 0; i
< priv
->rxrings
; i
++)
881 napi_enable(&priv
->rx_qs
[i
].napi
);
883 switch (priv
->family_id
) {
884 case RTL8380_FAMILY_ID
:
885 rtl838x_hw_en_rxtx(priv
);
886 /* Trap IGMP traffic to CPU-Port */
887 sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL
);
888 /* Flush learned FDB entries on link down of a port */
889 sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0
);
891 case RTL8390_FAMILY_ID
:
892 rtl839x_hw_en_rxtx(priv
);
893 sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL
);
894 /* Flush learned FDB entries on link down of a port */
895 sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0
);
897 case RTL9300_FAMILY_ID
:
898 rtl93xx_hw_en_rxtx(priv
);
899 /* Flush learned FDB entries on link down of a port */
900 sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL
);
901 sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_SABLK_CTRL
);
902 sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_DABLK_CTRL
);
905 case RTL9310_FAMILY_ID
:
906 rtl93xx_hw_en_rxtx(priv
);
907 // TODO: Add trapping of IGMP frames to CPU-port
911 netif_tx_start_all_queues(ndev
);
913 spin_unlock_irqrestore(&priv
->lock
, flags
);
918 static void rtl838x_hw_stop(struct rtl838x_eth_priv
*priv
)
920 u32 force_mac
= priv
->family_id
== RTL8380_FAMILY_ID
? 0x6192C : 0x75;
921 u32 clear_irq
= priv
->family_id
== RTL8380_FAMILY_ID
? 0x000fffff : 0x007fffff;
924 // Disable RX/TX from/to CPU-port
925 sw_w32_mask(0x3, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
927 /* Disable traffic */
928 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
929 sw_w32_mask(RX_EN_93XX
| TX_EN_93XX
, 0, priv
->r
->dma_if_ctrl
);
931 sw_w32_mask(RX_EN
| TX_EN
, 0, priv
->r
->dma_if_ctrl
);
932 mdelay(200); // Test, whether this is needed
934 /* Block all ports */
935 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
936 sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0));
937 sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1));
938 sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0
);
941 /* Flush L2 address cache */
942 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
943 for (i
= 0; i
<= priv
->cpu_port
; i
++) {
944 sw_w32(1 << 26 | 1 << 23 | i
<< 5, priv
->r
->l2_tbl_flush_ctrl
);
945 do { } while (sw_r32(priv
->r
->l2_tbl_flush_ctrl
) & (1 << 26));
947 } else if (priv
->family_id
== RTL8390_FAMILY_ID
) {
948 for (i
= 0; i
<= priv
->cpu_port
; i
++) {
949 sw_w32(1 << 28 | 1 << 25 | i
<< 5, priv
->r
->l2_tbl_flush_ctrl
);
950 do { } while (sw_r32(priv
->r
->l2_tbl_flush_ctrl
) & (1 << 28));
953 // TODO: L2 flush register is 64 bit on RTL931X and 930X
955 /* CPU-Port: Link down */
956 if (priv
->family_id
== RTL8380_FAMILY_ID
|| priv
->family_id
== RTL8390_FAMILY_ID
)
957 sw_w32(force_mac
, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
959 sw_w32_mask(0x3, 0, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
*4);
962 /* Disable all TX/RX interrupts */
963 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
) {
964 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_runout_msk
);
965 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_runout_sts
);
966 sw_w32(0x00000000, priv
->r
->dma_if_intr_rx_done_msk
);
967 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_sts
);
968 sw_w32(0x00000000, priv
->r
->dma_if_intr_tx_done_msk
);
969 sw_w32(0x0000000f, priv
->r
->dma_if_intr_tx_done_sts
);
971 sw_w32(0x00000000, priv
->r
->dma_if_intr_msk
);
972 sw_w32(clear_irq
, priv
->r
->dma_if_intr_sts
);
975 /* Disable TX/RX DMA */
976 sw_w32(0x00000000, priv
->r
->dma_if_ctrl
);
980 static int rtl838x_eth_stop(struct net_device
*ndev
)
984 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
986 pr_info("in %s\n", __func__
);
988 spin_lock_irqsave(&priv
->lock
, flags
);
989 phylink_stop(priv
->phylink
);
990 rtl838x_hw_stop(priv
);
991 free_irq(ndev
->irq
, ndev
);
993 for (i
= 0; i
< priv
->rxrings
; i
++)
994 napi_disable(&priv
->rx_qs
[i
].napi
);
996 netif_tx_stop_all_queues(ndev
);
998 spin_unlock_irqrestore(&priv
->lock
, flags
);
1003 static void rtl839x_eth_set_multicast_list(struct net_device
*ndev
)
1005 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1006 sw_w32(0x0, RTL839X_RMA_CTRL_0
);
1007 sw_w32(0x0, RTL839X_RMA_CTRL_1
);
1008 sw_w32(0x0, RTL839X_RMA_CTRL_2
);
1009 sw_w32(0x0, RTL839X_RMA_CTRL_3
);
1011 if (ndev
->flags
& IFF_ALLMULTI
) {
1012 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0
);
1013 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1
);
1014 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2
);
1016 if (ndev
->flags
& IFF_PROMISC
) {
1017 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_0
);
1018 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_1
);
1019 sw_w32(0x7fffffff, RTL839X_RMA_CTRL_2
);
1020 sw_w32(0x3ff, RTL839X_RMA_CTRL_3
);
1024 static void rtl838x_eth_set_multicast_list(struct net_device
*ndev
)
1026 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1028 if (priv
->family_id
== RTL8390_FAMILY_ID
)
1029 return rtl839x_eth_set_multicast_list(ndev
);
1031 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1032 sw_w32(0x0, RTL838X_RMA_CTRL_0
);
1033 sw_w32(0x0, RTL838X_RMA_CTRL_1
);
1035 if (ndev
->flags
& IFF_ALLMULTI
)
1036 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0
);
1037 if (ndev
->flags
& IFF_PROMISC
) {
1038 sw_w32(0x1fffff, RTL838X_RMA_CTRL_0
);
1039 sw_w32(0x7fff, RTL838X_RMA_CTRL_1
);
1043 static void rtl930x_eth_set_multicast_list(struct net_device
*ndev
)
1045 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1046 sw_w32(0x0, RTL930X_RMA_CTRL_0
);
1047 sw_w32(0x0, RTL930X_RMA_CTRL_1
);
1048 sw_w32(0x0, RTL930X_RMA_CTRL_2
);
1050 if (ndev
->flags
& IFF_ALLMULTI
) {
1051 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0
);
1052 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1
);
1053 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2
);
1055 if (ndev
->flags
& IFF_PROMISC
) {
1056 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_0
);
1057 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_1
);
1058 sw_w32(0x7fffffff, RTL930X_RMA_CTRL_2
);
1062 static void rtl931x_eth_set_multicast_list(struct net_device
*ndev
)
1064 if (!(ndev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
))) {
1065 sw_w32(0x0, RTL931X_RMA_CTRL_0
);
1066 sw_w32(0x0, RTL931X_RMA_CTRL_1
);
1067 sw_w32(0x0, RTL931X_RMA_CTRL_2
);
1069 if (ndev
->flags
& IFF_ALLMULTI
) {
1070 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0
);
1071 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1
);
1072 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2
);
1074 if (ndev
->flags
& IFF_PROMISC
) {
1075 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_0
);
1076 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_1
);
1077 sw_w32(0x7fffffff, RTL931X_RMA_CTRL_2
);
1081 static void rtl838x_eth_tx_timeout(struct net_device
*ndev
)
1083 unsigned long flags
;
1084 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1086 pr_warn("%s\n", __func__
);
1087 spin_lock_irqsave(&priv
->lock
, flags
);
1088 rtl838x_hw_stop(priv
);
1089 rtl838x_hw_ring_setup(priv
);
1090 rtl838x_hw_en_rxtx(priv
);
1091 netif_trans_update(ndev
);
1092 netif_start_queue(ndev
);
1093 spin_unlock_irqrestore(&priv
->lock
, flags
);
1096 static int rtl838x_eth_tx(struct sk_buff
*skb
, struct net_device
*dev
)
1099 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1100 struct ring_b
*ring
= priv
->membase
;
1103 unsigned long flags
;
1106 int q
= skb_get_queue_mapping(skb
) % TXRINGS
;
1108 if (q
) // Check for high prio queue
1109 pr_debug("SKB priority: %d\n", skb
->priority
);
1111 spin_lock_irqsave(&priv
->lock
, flags
);
1114 /* Check for DSA tagging at the end of the buffer */
1115 if (netdev_uses_dsa(dev
) && skb
->data
[len
-4] == 0x80 && skb
->data
[len
-3] > 0
1116 && skb
->data
[len
-3] < 28 && skb
->data
[len
-2] == 0x10
1117 && skb
->data
[len
-1] == 0x00) {
1118 /* Reuse tag space for CRC */
1119 dest_port
= skb
->data
[len
-3];
1125 /* ASIC expects that packet includes CRC, so we extend by 4 bytes */
1128 if (skb_padto(skb
, len
)) {
1133 /* We can send this packet if CPU owns the descriptor */
1134 if (!(ring
->tx_r
[q
][ring
->c_tx
[q
]] & 0x1)) {
1136 /* Set descriptor for tx */
1137 h
= &ring
->tx_header
[q
][ring
->c_tx
[q
]];
1141 priv
->r
->create_tx_header(h
, dest_port
, skb
->priority
>> 1);
1143 /* Copy packet data to tx buffer */
1144 memcpy((void *)KSEG1ADDR(h
->buf
), skb
->data
, len
);
1145 /* Make sure packet data is visible to ASIC */
1148 /* Hand over to switch */
1149 ring
->tx_r
[q
][ring
->c_tx
[q
]] |= 1;
1151 // Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
1152 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1153 for (i
= 0; i
< 10; i
++) {
1154 val
= sw_r32(priv
->r
->dma_if_ctrl
);
1155 if ((val
& 0xc) == 0xc)
1160 /* Tell switch to send data */
1161 if (priv
->family_id
== RTL9310_FAMILY_ID
1162 || priv
->family_id
== RTL9300_FAMILY_ID
) {
1163 // Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
1165 sw_w32_mask(0, BIT(2), priv
->r
->dma_if_ctrl
);
1167 sw_w32_mask(0, BIT(3), priv
->r
->dma_if_ctrl
);
1169 sw_w32_mask(0, TX_DO
, priv
->r
->dma_if_ctrl
);
1172 dev
->stats
.tx_packets
++;
1173 dev
->stats
.tx_bytes
+= len
;
1175 ring
->c_tx
[q
] = (ring
->c_tx
[q
] + 1) % TXRINGLEN
;
1178 dev_warn(&priv
->pdev
->dev
, "Data is owned by switch\n");
1179 ret
= NETDEV_TX_BUSY
;
1182 spin_unlock_irqrestore(&priv
->lock
, flags
);
1187 * Return queue number for TX. On the RTL83XX, these queues have equal priority
1188 * so we do round-robin
1190 u16
rtl83xx_pick_tx_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1191 struct net_device
*sb_dev
)
1196 return last
% TXRINGS
;
1200 * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue
1202 u16
rtl93xx_pick_tx_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1203 struct net_device
*sb_dev
)
1205 if (skb
->priority
>= TC_PRIO_CONTROL
)
1210 static int rtl838x_hw_receive(struct net_device
*dev
, int r
, int budget
)
1212 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1213 struct ring_b
*ring
= priv
->membase
;
1214 struct sk_buff
*skb
;
1215 unsigned long flags
;
1216 int i
, len
, work_done
= 0;
1217 u8
*data
, *skb_data
;
1221 bool dsa
= netdev_uses_dsa(dev
);
1224 spin_lock_irqsave(&priv
->lock
, flags
);
1225 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
1226 pr_debug("---------------------------------------------------------- RX - %d\n", r
);
1229 if ((ring
->rx_r
[r
][ring
->c_rx
[r
]] & 0x1)) {
1230 if (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
) {
1231 netdev_warn(dev
, "Ring contention: r: %x, last %x, cur %x\n",
1232 r
, (uint32_t)last
, (u32
) &ring
->rx_r
[r
][ring
->c_rx
[r
]]);
1237 h
= &ring
->rx_header
[r
][ring
->c_rx
[r
]];
1238 data
= (u8
*)KSEG1ADDR(h
->buf
);
1244 len
-= 4; /* strip the CRC */
1245 /* Add 4 bytes for cpu_tag */
1249 skb
= alloc_skb(len
+ 4, GFP_KERNEL
);
1250 skb_reserve(skb
, NET_IP_ALIGN
);
1253 /* BUG: Prevent bug on RTL838x SoCs*/
1254 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1255 sw_w32(0xffffffff, priv
->r
->dma_if_rx_ring_size(0));
1256 for (i
= 0; i
< priv
->rxrings
; i
++) {
1257 /* Update each ring cnt */
1258 val
= sw_r32(priv
->r
->dma_if_rx_ring_cntr(i
));
1259 sw_w32(val
, priv
->r
->dma_if_rx_ring_cntr(i
));
1263 skb_data
= skb_put(skb
, len
);
1264 /* Make sure data is visible */
1266 memcpy(skb
->data
, (u8
*)KSEG1ADDR(data
), len
);
1267 /* Overwrite CRC with cpu_tag */
1269 priv
->r
->decode_tag(h
, &tag
);
1270 skb
->data
[len
-4] = 0x80;
1271 skb
->data
[len
-3] = tag
.port
;
1272 skb
->data
[len
-2] = 0x10;
1273 skb
->data
[len
-1] = 0x00;
1274 if (tag
.l2_offloaded
)
1275 skb
->data
[len
-3] |= 0x40;
1279 pr_debug("Queue: %d, len: %d, reason %d port %d\n",
1280 tag
.queue
, len
, tag
.reason
, tag
.port
);
1282 skb
->protocol
= eth_type_trans(skb
, dev
);
1283 dev
->stats
.rx_packets
++;
1284 dev
->stats
.rx_bytes
+= len
;
1286 netif_receive_skb(skb
);
1288 if (net_ratelimit())
1289 dev_warn(&dev
->dev
, "low on memory - packet dropped\n");
1290 dev
->stats
.rx_dropped
++;
1293 /* Reset header structure */
1294 memset(h
, 0, sizeof(struct p_hdr
));
1296 h
->size
= RING_BUFFER
;
1298 ring
->rx_r
[r
][ring
->c_rx
[r
]] = KSEG1ADDR(h
) | 0x1
1299 | (ring
->c_rx
[r
] == (priv
->rxringlen
- 1) ? WRAP
: 0x1);
1300 ring
->c_rx
[r
] = (ring
->c_rx
[r
] + 1) % priv
->rxringlen
;
1301 last
= (u32
*)KSEG1ADDR(sw_r32(priv
->r
->dma_if_rx_cur
+ r
* 4));
1302 } while (&ring
->rx_r
[r
][ring
->c_rx
[r
]] != last
&& work_done
< budget
);
1305 priv
->r
->update_cntr(r
, 0);
1307 spin_unlock_irqrestore(&priv
->lock
, flags
);
1311 static int rtl838x_poll_rx(struct napi_struct
*napi
, int budget
)
1313 struct rtl838x_rx_q
*rx_q
= container_of(napi
, struct rtl838x_rx_q
, napi
);
1314 struct rtl838x_eth_priv
*priv
= rx_q
->priv
;
1319 while (work_done
< budget
) {
1320 work
= rtl838x_hw_receive(priv
->netdev
, r
, budget
- work_done
);
1326 if (work_done
< budget
) {
1327 napi_complete_done(napi
, work_done
);
1329 /* Enable RX interrupt */
1330 if (priv
->family_id
== RTL9300_FAMILY_ID
|| priv
->family_id
== RTL9310_FAMILY_ID
)
1331 sw_w32(0xffffffff, priv
->r
->dma_if_intr_rx_done_msk
);
1333 sw_w32_mask(0, 0xf00ff | BIT(r
+ 8), priv
->r
->dma_if_intr_msk
);
1339 static void rtl838x_validate(struct phylink_config
*config
,
1340 unsigned long *supported
,
1341 struct phylink_link_state
*state
)
1343 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask
) = { 0, };
1345 pr_info("In %s\n", __func__
);
1347 if (!phy_interface_mode_is_rgmii(state
->interface
) &&
1348 state
->interface
!= PHY_INTERFACE_MODE_1000BASEX
&&
1349 state
->interface
!= PHY_INTERFACE_MODE_MII
&&
1350 state
->interface
!= PHY_INTERFACE_MODE_REVMII
&&
1351 state
->interface
!= PHY_INTERFACE_MODE_GMII
&&
1352 state
->interface
!= PHY_INTERFACE_MODE_QSGMII
&&
1353 state
->interface
!= PHY_INTERFACE_MODE_INTERNAL
&&
1354 state
->interface
!= PHY_INTERFACE_MODE_SGMII
) {
1355 bitmap_zero(supported
, __ETHTOOL_LINK_MODE_MASK_NBITS
);
1356 pr_err("Unsupported interface: %d\n", state
->interface
);
1360 /* Allow all the expected bits */
1361 phylink_set(mask
, Autoneg
);
1362 phylink_set_port_modes(mask
);
1363 phylink_set(mask
, Pause
);
1364 phylink_set(mask
, Asym_Pause
);
1366 /* With the exclusion of MII and Reverse MII, we support Gigabit,
1367 * including Half duplex
1369 if (state
->interface
!= PHY_INTERFACE_MODE_MII
&&
1370 state
->interface
!= PHY_INTERFACE_MODE_REVMII
) {
1371 phylink_set(mask
, 1000baseT_Full
);
1372 phylink_set(mask
, 1000baseT_Half
);
1375 phylink_set(mask
, 10baseT_Half
);
1376 phylink_set(mask
, 10baseT_Full
);
1377 phylink_set(mask
, 100baseT_Half
);
1378 phylink_set(mask
, 100baseT_Full
);
1380 bitmap_and(supported
, supported
, mask
,
1381 __ETHTOOL_LINK_MODE_MASK_NBITS
);
1382 bitmap_and(state
->advertising
, state
->advertising
, mask
,
1383 __ETHTOOL_LINK_MODE_MASK_NBITS
);
/* phylink .mac_config: intentionally a no-op for the CPU port. */
static void rtl838x_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* This is only being called for the master device,
	 * i.e. the CPU-Port. We don't need to do anything.
	 */

	pr_info("In %s, mode %x\n", __func__, mode);
}
1398 static void rtl838x_mac_an_restart(struct phylink_config
*config
)
1400 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1401 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1403 /* This works only on RTL838x chips */
1404 if (priv
->family_id
!= RTL8380_FAMILY_ID
)
1407 pr_info("In %s\n", __func__
);
1408 /* Restart by disabling and re-enabling link */
1409 sw_w32(0x6192D, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
1411 sw_w32(0x6192F, priv
->r
->mac_force_mode_ctrl
+ priv
->cpu_port
* 4);
1414 static int rtl838x_mac_pcs_get_state(struct phylink_config
*config
,
1415 struct phylink_link_state
*state
)
1418 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1419 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1420 int port
= priv
->cpu_port
;
1422 pr_info("In %s\n", __func__
);
1424 state
->link
= priv
->r
->get_mac_link_sts(port
) ? 1 : 0;
1425 state
->duplex
= priv
->r
->get_mac_link_dup_sts(port
) ? 1 : 0;
1427 speed
= priv
->r
->get_mac_link_spd_sts(port
);
1430 state
->speed
= SPEED_10
;
1433 state
->speed
= SPEED_100
;
1436 state
->speed
= SPEED_1000
;
1439 state
->speed
= SPEED_UNKNOWN
;
1443 state
->pause
&= (MLO_PAUSE_RX
| MLO_PAUSE_TX
);
1444 if (priv
->r
->get_mac_rx_pause_sts(port
))
1445 state
->pause
|= MLO_PAUSE_RX
;
1446 if (priv
->r
->get_mac_tx_pause_sts(port
))
1447 state
->pause
|= MLO_PAUSE_TX
;
1452 static void rtl838x_mac_link_down(struct phylink_config
*config
,
1454 phy_interface_t interface
)
1456 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1457 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1459 pr_info("In %s\n", __func__
);
1460 /* Stop TX/RX to port */
1461 sw_w32_mask(0x03, 0, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
1464 static void rtl838x_mac_link_up(struct phylink_config
*config
, unsigned int mode
,
1465 phy_interface_t interface
,
1466 struct phy_device
*phy
)
1468 struct net_device
*dev
= container_of(config
->dev
, struct net_device
, dev
);
1469 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1471 pr_info("In %s\n", __func__
);
1472 /* Restart TX/RX to port */
1473 sw_w32_mask(0, 0x03, priv
->r
->mac_port_ctrl(priv
->cpu_port
));
1476 static void rtl838x_set_mac_hw(struct net_device
*dev
, u8
*mac
)
1478 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1479 unsigned long flags
;
1481 spin_lock_irqsave(&priv
->lock
, flags
);
1482 pr_info("In %s\n", __func__
);
1483 sw_w32((mac
[0] << 8) | mac
[1], priv
->r
->mac
);
1484 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5], priv
->r
->mac
+ 4);
1486 if (priv
->family_id
== RTL8380_FAMILY_ID
) {
1487 /* 2 more registers, ALE/MAC block */
1488 sw_w32((mac
[0] << 8) | mac
[1], RTL838X_MAC_ALE
);
1489 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
1490 (RTL838X_MAC_ALE
+ 4));
1492 sw_w32((mac
[0] << 8) | mac
[1], RTL838X_MAC2
);
1493 sw_w32((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
1496 spin_unlock_irqrestore(&priv
->lock
, flags
);
1499 static int rtl838x_set_mac_address(struct net_device
*dev
, void *p
)
1501 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
1502 const struct sockaddr
*addr
= p
;
1503 u8
*mac
= (u8
*) (addr
->sa_data
);
1505 if (!is_valid_ether_addr(addr
->sa_data
))
1506 return -EADDRNOTAVAIL
;
1508 memcpy(dev
->dev_addr
, addr
->sa_data
, ETH_ALEN
);
1509 rtl838x_set_mac_hw(dev
, mac
);
1511 pr_info("Using MAC %08x%08x\n", sw_r32(priv
->r
->mac
), sw_r32(priv
->r
->mac
+ 4));
/* MAC init placeholder for RTL839x. */
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
	// We will need to set-up EEE and the egress-rate limitation
	return 0;
}
1521 static int rtl8380_init_mac(struct rtl838x_eth_priv
*priv
)
1525 if (priv
->family_id
== 0x8390)
1526 return rtl8390_init_mac(priv
);
1528 pr_info("%s\n", __func__
);
1529 /* fix timer for EEE */
1530 sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL
);
1531 sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL
);
1534 if (priv
->id
== 0x8382) {
1535 for (i
= 0; i
<= 28; i
++)
1536 sw_w32(0, 0xd57c + i
* 0x80);
1538 if (priv
->id
== 0x8380) {
1539 for (i
= 8; i
<= 28; i
++)
1540 sw_w32(0, 0xd57c + i
* 0x80);
1545 static int rtl838x_get_link_ksettings(struct net_device
*ndev
,
1546 struct ethtool_link_ksettings
*cmd
)
1548 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1550 pr_info("%s called\n", __func__
);
1551 return phylink_ethtool_ksettings_get(priv
->phylink
, cmd
);
1554 static int rtl838x_set_link_ksettings(struct net_device
*ndev
,
1555 const struct ethtool_link_ksettings
*cmd
)
1557 struct rtl838x_eth_priv
*priv
= netdev_priv(ndev
);
1559 pr_info("%s called\n", __func__
);
1560 return phylink_ethtool_ksettings_set(priv
->phylink
, cmd
);
1563 static int rtl838x_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
1567 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1569 if (mii_id
>= 24 && mii_id
<= 27 && priv
->id
== 0x8380)
1570 return rtl838x_read_sds_phy(mii_id
, regnum
);
1571 err
= rtl838x_read_phy(mii_id
, 0, regnum
, &val
);
1577 static int rtl839x_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
1581 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1583 if (mii_id
>= 48 && mii_id
<= 49 && priv
->id
== 0x8393)
1584 return rtl839x_read_sds_phy(mii_id
, regnum
);
1586 err
= rtl839x_read_phy(mii_id
, 0, regnum
, &val
);
1592 static int rtl930x_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
1597 // TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
1598 if (mii_id
>= 26 && mii_id
<= 27)
1599 return rtl930x_read_sds_phy(mii_id
- 18, 0, regnum
);
1601 if (regnum
& MII_ADDR_C45
) {
1602 regnum
&= ~MII_ADDR_C45
;
1603 err
= rtl930x_read_mmd_phy(mii_id
, regnum
>> 16, regnum
& 0xffff, &val
);
1605 err
= rtl930x_read_phy(mii_id
, 0, regnum
, &val
);
1612 static int rtl931x_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
1616 // struct rtl838x_eth_priv *priv = bus->priv;
1618 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1619 // return rtl839x_read_sds_phy(mii_id, regnum);
1621 err
= rtl931x_read_phy(mii_id
, 0, regnum
, &val
);
1627 static int rtl838x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1628 int regnum
, u16 value
)
1631 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1633 if (mii_id
>= 24 && mii_id
<= 27 && priv
->id
== 0x8380) {
1636 sw_w32(value
, RTL838X_SDS4_FIB_REG0
+ offset
+ (regnum
<< 2));
1639 return rtl838x_write_phy(mii_id
, 0, regnum
, value
);
1642 static int rtl839x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1643 int regnum
, u16 value
)
1645 struct rtl838x_eth_priv
*priv
= bus
->priv
;
1647 if (mii_id
>= 48 && mii_id
<= 49 && priv
->id
== 0x8393)
1648 return rtl839x_write_sds_phy(mii_id
, regnum
, value
);
1650 return rtl839x_write_phy(mii_id
, 0, regnum
, value
);
1653 static int rtl930x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1654 int regnum
, u16 value
)
1656 // struct rtl838x_eth_priv *priv = bus->priv;
1658 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1659 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1660 if (regnum
& MII_ADDR_C45
) {
1661 regnum
&= ~MII_ADDR_C45
;
1662 return rtl930x_write_mmd_phy(mii_id
, regnum
>> 16, regnum
& 0xffff, value
);
1665 return rtl930x_write_phy(mii_id
, 0, regnum
, value
);
1668 static int rtl931x_mdio_write(struct mii_bus
*bus
, int mii_id
,
1669 int regnum
, u16 value
)
1671 // struct rtl838x_eth_priv *priv = bus->priv;
1673 // if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393)
1674 // return rtl839x_write_sds_phy(mii_id, regnum, value);
1676 return rtl931x_write_phy(mii_id
, 0, regnum
, value
);
1679 static int rtl838x_mdio_reset(struct mii_bus
*bus
)
1681 pr_info("%s called\n", __func__
);
1682 /* Disable MAC polling the PHY so that we can start configuration */
1683 sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL
);
1685 /* Enable PHY control via SoC */
1686 sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL
);
1688 // Probably should reset all PHYs here...
1692 static int rtl839x_mdio_reset(struct mii_bus
*bus
)
1696 pr_info("%s called\n", __func__
);
1697 /* BUG: The following does not work, but should! */
1698 /* Disable MAC polling the PHY so that we can start configuration */
1699 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL
);
1700 sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL
+ 4);
1701 /* Disable PHY polling via SoC */
1702 sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL
);
1704 // Probably should reset all PHYs here...
1708 static int rtl931x_mdio_reset(struct mii_bus
*bus
)
1710 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL
);
1711 sw_w32(0x00000000, RTL931X_SMI_PORT_POLLING_CTRL
+ 4);
1713 pr_info("%s called\n", __func__
);
1718 static int rtl930x_mdio_reset(struct mii_bus
*bus
)
1723 pr_info("RTL930X_SMI_PORT0_15_POLLING_SEL %08x 16-27: %08x\n",
1724 sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL
),
1725 sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL
));
1727 pr_info("%s: Enable SMI polling on SMI bus 0, SMI1, SMI2, disable on SMI3\n", __func__
);
1728 sw_w32_mask(BIT(20) | BIT(21) | BIT(22), BIT(23), RTL930X_SMI_GLB_CTRL
);
1730 pr_info("RTL9300 Powering on SerDes ports\n");
1731 rtl9300_sds_power(24, 1);
1732 rtl9300_sds_power(25, 1);
1733 rtl9300_sds_power(26, 1);
1734 rtl9300_sds_power(27, 1);
1737 // RTL930X_SMI_PORT0_15_POLLING_SEL 55550000 16-27: 00f9aaaa
1738 // i.e SMI=0 for all ports
1739 for (i
= 0; i
< 5; i
++)
1740 pr_info("port phy: %08x\n", sw_r32(RTL930X_SMI_PORT0_5_ADDR
+ i
*4));
1742 // 1-to-1 mapping of port to phy-address
1743 for (i
= 0; i
< 24; i
++) {
1745 sw_w32_mask(0x1f << pos
, i
<< pos
, RTL930X_SMI_PORT0_5_ADDR
+ (i
/ 6) * 4);
1748 // ports 24 and 25 have PHY addresses 8 and 9, ports 26/27 PHY 26/27
1749 sw_w32(8 | 9 << 5 | 26 << 10 | 27 << 15, RTL930X_SMI_PORT0_5_ADDR
+ 4 * 4);
1751 // Ports 24 and 25 live on SMI bus 1 and 2
1752 sw_w32_mask(0x3 << 16, 0x1 << 16, RTL930X_SMI_PORT16_27_POLLING_SEL
);
1753 sw_w32_mask(0x3 << 18, 0x2 << 18, RTL930X_SMI_PORT16_27_POLLING_SEL
);
1755 // SMI bus 1 and 2 speak Clause 45 TODO: Configure from .dts
1756 sw_w32_mask(0, BIT(17) | BIT(18), RTL930X_SMI_GLB_CTRL
);
1758 // Ports 24 and 25 are 2.5 Gig, set this type (1)
1759 sw_w32_mask(0x7 << 12, 1 << 12, RTL930X_SMI_MAC_TYPE_CTRL
);
1760 sw_w32_mask(0x7 << 15, 1 << 15, RTL930X_SMI_MAC_TYPE_CTRL
);
1765 static int rtl838x_mdio_init(struct rtl838x_eth_priv
*priv
)
1767 struct device_node
*mii_np
;
1770 pr_info("%s called\n", __func__
);
1771 mii_np
= of_get_child_by_name(priv
->pdev
->dev
.of_node
, "mdio-bus");
1774 dev_err(&priv
->pdev
->dev
, "no %s child node found", "mdio-bus");
1778 if (!of_device_is_available(mii_np
)) {
1783 priv
->mii_bus
= devm_mdiobus_alloc(&priv
->pdev
->dev
);
1784 if (!priv
->mii_bus
) {
1789 switch(priv
->family_id
) {
1790 case RTL8380_FAMILY_ID
:
1791 priv
->mii_bus
->name
= "rtl838x-eth-mdio";
1792 priv
->mii_bus
->read
= rtl838x_mdio_read
;
1793 priv
->mii_bus
->write
= rtl838x_mdio_write
;
1794 priv
->mii_bus
->reset
= rtl838x_mdio_reset
;
1796 case RTL8390_FAMILY_ID
:
1797 priv
->mii_bus
->name
= "rtl839x-eth-mdio";
1798 priv
->mii_bus
->read
= rtl839x_mdio_read
;
1799 priv
->mii_bus
->write
= rtl839x_mdio_write
;
1800 priv
->mii_bus
->reset
= rtl839x_mdio_reset
;
1802 case RTL9300_FAMILY_ID
:
1803 priv
->mii_bus
->name
= "rtl930x-eth-mdio";
1804 priv
->mii_bus
->read
= rtl930x_mdio_read
;
1805 priv
->mii_bus
->write
= rtl930x_mdio_write
;
1806 priv
->mii_bus
->reset
= rtl930x_mdio_reset
;
1807 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1809 case RTL9310_FAMILY_ID
:
1810 priv
->mii_bus
->name
= "rtl931x-eth-mdio";
1811 priv
->mii_bus
->read
= rtl931x_mdio_read
;
1812 priv
->mii_bus
->write
= rtl931x_mdio_write
;
1813 priv
->mii_bus
->reset
= rtl931x_mdio_reset
;
1814 // priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; TODO for linux 5.9
1817 priv
->mii_bus
->priv
= priv
;
1818 priv
->mii_bus
->parent
= &priv
->pdev
->dev
;
1820 snprintf(priv
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%pOFn", mii_np
);
1821 ret
= of_mdiobus_register(priv
->mii_bus
, mii_np
);
1824 of_node_put(mii_np
);
1828 static int rtl838x_mdio_remove(struct rtl838x_eth_priv
*priv
)
1830 pr_info("%s called\n", __func__
);
1834 mdiobus_unregister(priv
->mii_bus
);
1835 mdiobus_free(priv
->mii_bus
);
1840 static const struct net_device_ops rtl838x_eth_netdev_ops
= {
1841 .ndo_open
= rtl838x_eth_open
,
1842 .ndo_stop
= rtl838x_eth_stop
,
1843 .ndo_start_xmit
= rtl838x_eth_tx
,
1844 .ndo_select_queue
= rtl83xx_pick_tx_queue
,
1845 .ndo_set_mac_address
= rtl838x_set_mac_address
,
1846 .ndo_validate_addr
= eth_validate_addr
,
1847 .ndo_set_rx_mode
= rtl838x_eth_set_multicast_list
,
1848 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
1851 static const struct net_device_ops rtl839x_eth_netdev_ops
= {
1852 .ndo_open
= rtl838x_eth_open
,
1853 .ndo_stop
= rtl838x_eth_stop
,
1854 .ndo_start_xmit
= rtl838x_eth_tx
,
1855 .ndo_select_queue
= rtl83xx_pick_tx_queue
,
1856 .ndo_set_mac_address
= rtl838x_set_mac_address
,
1857 .ndo_validate_addr
= eth_validate_addr
,
1858 .ndo_set_rx_mode
= rtl839x_eth_set_multicast_list
,
1859 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
1862 static const struct net_device_ops rtl930x_eth_netdev_ops
= {
1863 .ndo_open
= rtl838x_eth_open
,
1864 .ndo_stop
= rtl838x_eth_stop
,
1865 .ndo_start_xmit
= rtl838x_eth_tx
,
1866 .ndo_select_queue
= rtl93xx_pick_tx_queue
,
1867 .ndo_set_mac_address
= rtl838x_set_mac_address
,
1868 .ndo_validate_addr
= eth_validate_addr
,
1869 .ndo_set_rx_mode
= rtl930x_eth_set_multicast_list
,
1870 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
1873 static const struct net_device_ops rtl931x_eth_netdev_ops
= {
1874 .ndo_open
= rtl838x_eth_open
,
1875 .ndo_stop
= rtl838x_eth_stop
,
1876 .ndo_start_xmit
= rtl838x_eth_tx
,
1877 .ndo_select_queue
= rtl93xx_pick_tx_queue
,
1878 .ndo_set_mac_address
= rtl838x_set_mac_address
,
1879 .ndo_validate_addr
= eth_validate_addr
,
1880 .ndo_set_rx_mode
= rtl931x_eth_set_multicast_list
,
1881 .ndo_tx_timeout
= rtl838x_eth_tx_timeout
,
1884 static const struct phylink_mac_ops rtl838x_phylink_ops
= {
1885 .validate
= rtl838x_validate
,
1886 .mac_link_state
= rtl838x_mac_pcs_get_state
,
1887 .mac_an_restart
= rtl838x_mac_an_restart
,
1888 .mac_config
= rtl838x_mac_config
,
1889 .mac_link_down
= rtl838x_mac_link_down
,
1890 .mac_link_up
= rtl838x_mac_link_up
,
1893 static const struct ethtool_ops rtl838x_ethtool_ops
= {
1894 .get_link_ksettings
= rtl838x_get_link_ksettings
,
1895 .set_link_ksettings
= rtl838x_set_link_ksettings
,
1898 static int __init
rtl838x_eth_probe(struct platform_device
*pdev
)
1900 struct net_device
*dev
;
1901 struct device_node
*dn
= pdev
->dev
.of_node
;
1902 struct rtl838x_eth_priv
*priv
;
1903 struct resource
*res
, *mem
;
1905 phy_interface_t phy_mode
;
1906 struct phylink
*phylink
;
1907 int err
= 0, i
, rxrings
, rxringlen
;
1908 struct ring_b
*ring
;
1910 pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
1911 (u32
)pdev
, (u32
)(&(pdev
->dev
)));
1914 dev_err(&pdev
->dev
, "No DT found\n");
1918 rxrings
= (soc_info
.family
== RTL8380_FAMILY_ID
1919 || soc_info
.family
== RTL8390_FAMILY_ID
) ? 8 : 32;
1920 rxrings
= rxrings
> MAX_RXRINGS
? MAX_RXRINGS
: rxrings
;
1921 rxringlen
= MAX_ENTRIES
/ rxrings
;
1922 rxringlen
= rxringlen
> MAX_RXLEN
? MAX_RXLEN
: rxringlen
;
1924 dev
= alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv
), TXRINGS
, rxrings
);
1929 SET_NETDEV_DEV(dev
, &pdev
->dev
);
1930 priv
= netdev_priv(dev
);
1932 /* obtain buffer memory space */
1933 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1935 mem
= devm_request_mem_region(&pdev
->dev
, res
->start
,
1936 resource_size(res
), res
->name
);
1938 dev_err(&pdev
->dev
, "cannot request memory space\n");
1943 dev
->mem_start
= mem
->start
;
1944 dev
->mem_end
= mem
->end
;
1946 dev_err(&pdev
->dev
, "cannot request IO resource\n");
1951 /* Allocate buffer memory */
1952 priv
->membase
= dmam_alloc_coherent(&pdev
->dev
, rxrings
* rxringlen
* RING_BUFFER
1953 + sizeof(struct ring_b
) + sizeof(struct notify_b
),
1954 (void *)&dev
->mem_start
, GFP_KERNEL
);
1955 if (!priv
->membase
) {
1956 dev_err(&pdev
->dev
, "cannot allocate DMA buffer\n");
1961 // Allocate ring-buffer space at the end of the allocated memory
1962 ring
= priv
->membase
;
1963 ring
->rx_space
= priv
->membase
+ sizeof(struct ring_b
) + sizeof(struct notify_b
);
1965 spin_lock_init(&priv
->lock
);
1967 /* obtain device IRQ number */
1968 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1970 dev_err(&pdev
->dev
, "cannot obtain IRQ, using default 24\n");
1973 dev
->irq
= res
->start
;
1975 dev
->ethtool_ops
= &rtl838x_ethtool_ops
;
1976 dev
->min_mtu
= ETH_ZLEN
;
1977 dev
->max_mtu
= 1536;
1979 priv
->id
= soc_info
.id
;
1980 priv
->family_id
= soc_info
.family
;
1982 pr_info("Found SoC ID: %4x: %s, family %x\n",
1983 priv
->id
, soc_info
.name
, priv
->family_id
);
1985 pr_err("Unknown chip id (%04x)\n", priv
->id
);
1989 switch (priv
->family_id
) {
1990 case RTL8380_FAMILY_ID
:
1991 priv
->cpu_port
= RTL838X_CPU_PORT
;
1992 priv
->r
= &rtl838x_reg
;
1993 dev
->netdev_ops
= &rtl838x_eth_netdev_ops
;
1995 case RTL8390_FAMILY_ID
:
1996 priv
->cpu_port
= RTL839X_CPU_PORT
;
1997 priv
->r
= &rtl839x_reg
;
1998 dev
->netdev_ops
= &rtl839x_eth_netdev_ops
;
2000 case RTL9300_FAMILY_ID
:
2001 priv
->cpu_port
= RTL930X_CPU_PORT
;
2002 priv
->r
= &rtl930x_reg
;
2003 dev
->netdev_ops
= &rtl930x_eth_netdev_ops
;
2005 case RTL9310_FAMILY_ID
:
2006 priv
->cpu_port
= RTL931X_CPU_PORT
;
2007 priv
->r
= &rtl931x_reg
;
2008 dev
->netdev_ops
= &rtl931x_eth_netdev_ops
;
2011 pr_err("Unknown SoC family\n");
2014 priv
->rxringlen
= rxringlen
;
2015 priv
->rxrings
= rxrings
;
2017 rtl8380_init_mac(priv
);
2019 /* try to get mac address in the following order:
2020 * 1) from device tree data
2021 * 2) from internal registers set by bootloader
2023 mac
= of_get_mac_address(pdev
->dev
.of_node
);
2025 memcpy(dev
->dev_addr
, mac
, ETH_ALEN
);
2026 rtl838x_set_mac_hw(dev
, (u8
*)mac
);
2028 dev
->dev_addr
[0] = (sw_r32(priv
->r
->mac
) >> 8) & 0xff;
2029 dev
->dev_addr
[1] = sw_r32(priv
->r
->mac
) & 0xff;
2030 dev
->dev_addr
[2] = (sw_r32(priv
->r
->mac
+ 4) >> 24) & 0xff;
2031 dev
->dev_addr
[3] = (sw_r32(priv
->r
->mac
+ 4) >> 16) & 0xff;
2032 dev
->dev_addr
[4] = (sw_r32(priv
->r
->mac
+ 4) >> 8) & 0xff;
2033 dev
->dev_addr
[5] = sw_r32(priv
->r
->mac
+ 4) & 0xff;
2035 /* if the address is invalid, use a random value */
2036 if (!is_valid_ether_addr(dev
->dev_addr
)) {
2037 struct sockaddr sa
= { AF_UNSPEC
};
2039 netdev_warn(dev
, "Invalid MAC address, using random\n");
2040 eth_hw_addr_random(dev
);
2041 memcpy(sa
.sa_data
, dev
->dev_addr
, ETH_ALEN
);
2042 if (rtl838x_set_mac_address(dev
, &sa
))
2043 netdev_warn(dev
, "Failed to set MAC address.\n");
2045 pr_info("Using MAC %08x%08x\n", sw_r32(priv
->r
->mac
),
2046 sw_r32(priv
->r
->mac
+ 4));
2047 strcpy(dev
->name
, "eth%d");
2051 err
= rtl838x_mdio_init(priv
);
2055 err
= register_netdev(dev
);
2059 for (i
= 0; i
< priv
->rxrings
; i
++) {
2060 priv
->rx_qs
[i
].id
= i
;
2061 priv
->rx_qs
[i
].priv
= priv
;
2062 netif_napi_add(dev
, &priv
->rx_qs
[i
].napi
, rtl838x_poll_rx
, 64);
2065 platform_set_drvdata(pdev
, dev
);
2067 phy_mode
= of_get_phy_mode(dn
);
2069 dev_err(&pdev
->dev
, "incorrect phy-mode\n");
2073 priv
->phylink_config
.dev
= &dev
->dev
;
2074 priv
->phylink_config
.type
= PHYLINK_NETDEV
;
2076 phylink
= phylink_create(&priv
->phylink_config
, pdev
->dev
.fwnode
,
2077 phy_mode
, &rtl838x_phylink_ops
);
2078 if (IS_ERR(phylink
)) {
2079 err
= PTR_ERR(phylink
);
2082 priv
->phylink
= phylink
;
2087 pr_err("Error setting up netdev, freeing it again.\n");
2092 static int rtl838x_eth_remove(struct platform_device
*pdev
)
2094 struct net_device
*dev
= platform_get_drvdata(pdev
);
2095 struct rtl838x_eth_priv
*priv
= netdev_priv(dev
);
2099 pr_info("Removing platform driver for rtl838x-eth\n");
2100 rtl838x_mdio_remove(priv
);
2101 rtl838x_hw_stop(priv
);
2103 netif_tx_stop_all_queues(dev
);
2105 for (i
= 0; i
< priv
->rxrings
; i
++)
2106 netif_napi_del(&priv
->rx_qs
[i
].napi
);
2108 unregister_netdev(dev
);
2114 static const struct of_device_id rtl838x_eth_of_ids
[] = {
2115 { .compatible
= "realtek,rtl838x-eth"},
2118 MODULE_DEVICE_TABLE(of
, rtl838x_eth_of_ids
);
2120 static struct platform_driver rtl838x_eth_driver
= {
2121 .probe
= rtl838x_eth_probe
,
2122 .remove
= rtl838x_eth_remove
,
2124 .name
= "rtl838x-eth",
2126 .of_match_table
= rtl838x_eth_of_ids
,
2130 module_platform_driver(rtl838x_eth_driver
);
2132 MODULE_AUTHOR("B. Koblitz");
2133 MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver");
2134 MODULE_LICENSE("GPL");