// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BCM6368 Ethernet Switch Controller Driver
 *
 * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/of_clk.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#define ENETSW_TAG_SIZE		6
#define ENETSW_MTU_OVERHEAD	(VLAN_ETH_HLEN + VLAN_HLEN + \
				 ENETSW_TAG_SIZE)

/* default number of descriptors */
#define ENETSW_DEF_RX_DESC	64
#define ENETSW_DEF_TX_DESC	32
#define ENETSW_DEF_CPY_BREAK	128

/* maximum burst len for dma (4 byte units) */
#define ENETSW_DMA_MAXBURST	8
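/*
 * Note: received frames shorter than the copybreak threshold are copied into
 * a fresh skb so the original DMA buffer can stay mapped and be rearmed; see
 * bcm6368_enetsw_receive_queue().
 */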
#define DMA_CHAN_WIDTH			0x10

/* Controller Configuration Register */
#define DMA_CFG_REG			0x0
#define DMA_CFG_EN_SHIFT		0
#define DMA_CFG_EN_MASK			(1 << DMA_CFG_EN_SHIFT)
#define DMA_CFG_FLOWCH_MASK(x)		(1 << ((x >> 1) + 1))

/* Flow Control Descriptor Low Threshold register */
#define DMA_FLOWCL_REG(x)		(0x4 + (x) * 6)

/* Flow Control Descriptor High Threshold register */
#define DMA_FLOWCH_REG(x)		(0x8 + (x) * 6)

/* Flow Control Descriptor Buffer Alloca Threshold register */
#define DMA_BUFALLOC_REG(x)		(0xc + (x) * 6)
#define DMA_BUFALLOC_FORCE_SHIFT	31
#define DMA_BUFALLOC_FORCE_MASK		(1 << DMA_BUFALLOC_FORCE_SHIFT)

/* Channel Configuration register */
#define DMAC_CHANCFG_REG		0x0
#define DMAC_CHANCFG_EN_SHIFT		0
#define DMAC_CHANCFG_EN_MASK		(1 << DMAC_CHANCFG_EN_SHIFT)
#define DMAC_CHANCFG_PKTHALT_SHIFT	1
#define DMAC_CHANCFG_PKTHALT_MASK	(1 << DMAC_CHANCFG_PKTHALT_SHIFT)
#define DMAC_CHANCFG_BUFHALT_SHIFT	2
#define DMAC_CHANCFG_BUFHALT_MASK	(1 << DMAC_CHANCFG_BUFHALT_SHIFT)
#define DMAC_CHANCFG_CHAINING_SHIFT	2
#define DMAC_CHANCFG_CHAINING_MASK	(1 << DMAC_CHANCFG_CHAINING_SHIFT)
#define DMAC_CHANCFG_WRAP_EN_SHIFT	3
#define DMAC_CHANCFG_WRAP_EN_MASK	(1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
#define DMAC_CHANCFG_FLOWC_EN_SHIFT	4
#define DMAC_CHANCFG_FLOWC_EN_MASK	(1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)

/* Interrupt Control/Status register */
#define DMAC_IR_REG			0x4
#define DMAC_IR_BUFDONE_MASK		(1 << 0)
#define DMAC_IR_PKTDONE_MASK		(1 << 1)
#define DMAC_IR_NOTOWNER_MASK		(1 << 2)

/* Interrupt Mask register */
#define DMAC_IRMASK_REG			0x8

/* Maximum Burst Length */
#define DMAC_MAXBURST_REG		0xc

/* Ring Start Address register */
#define DMAS_RSTART_REG			0x0

/* State Ram Word 2 */
#define DMAS_SRAM2_REG			0x4

/* State Ram Word 3 */
#define DMAS_SRAM3_REG			0x8

/* State Ram Word 4 */
#define DMAS_SRAM4_REG			0xc
struct bcm6368_enetsw_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)
#define DMADESC_USB_NOZERO_MASK	(1 << 1)
#define DMADESC_USB_ZERO_MASK	(1 << 0)

#define DMADESC_UNDER_MASK	(1 << 9)
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
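/*
 * Descriptor handshake as used below: the driver fills ->address and
 * ->len_stat and sets DMADESC_OWNER_MASK to hand a descriptor to the DMA
 * engine; the engine clears the owner bit once it is done with the buffer.
 * DMADESC_WRAP_MASK marks the last descriptor of a ring so the engine wraps
 * back to the ring start.
 */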
struct bcm6368_enetsw {
	void __iomem *dma_base;
	void __iomem *dma_chan;
	void __iomem *dma_sram;

	struct device **pm;
	struct device_link **link_pm;
	unsigned int num_pms;

	struct clk **clock;
	unsigned int num_clocks;

	struct reset_control **reset;
	unsigned int num_resets;

	int copybreak;

	int irq_rx;
	int irq_tx;

	/* hw view of rx & tx dma ring */
	dma_addr_t rx_desc_dma;
	dma_addr_t tx_desc_dma;

	/* allocated size (in bytes) for rx & tx dma ring */
	unsigned int rx_desc_alloc_size;
	unsigned int tx_desc_alloc_size;

	struct napi_struct napi;

	/* dma channel id for rx */
	u32 rx_chan;

	/* number of dma desc in rx ring */
	int rx_ring_size;

	/* cpu view of rx dma ring */
	struct bcm6368_enetsw_desc *rx_desc_cpu;

	/* current number of armed descriptors given to hardware for rx */
	int rx_desc_count;

	/* next rx descriptor to fetch from hardware */
	int rx_curr_desc;

	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

	/* size of allocated rx skbs */
	unsigned int rx_skb_size;

	/* list of skbs given to hw for rx */
	struct sk_buff **rx_skb;

	/* used when rx skb allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;

	/* dma channel id for tx */
	u32 tx_chan;

	/* number of dma desc in tx ring */
	int tx_ring_size;

	/* maximum dma burst size */
	int dma_maxburst;

	/* cpu view of tx dma ring */
	struct bcm6368_enetsw_desc *tx_desc_cpu;

	/* number of available descriptors for tx */
	int tx_desc_count;

	/* next tx descriptor available */
	int tx_curr_desc;

	/* next dirty tx descriptor to reclaim */
	int tx_dirty_desc;

	/* list of skbs given to hw for tx */
	struct sk_buff **tx_skb;

	/* lock used by tx reclaim and xmit */
	spinlock_t tx_lock;

	/* network device reference */
	struct net_device *net_dev;

	/* platform device reference */
	struct platform_device *pdev;

	/* dma channel enable mask */
	u32 dma_chan_en_mask;

	/* dma channel interrupt mask */
	u32 dma_chan_int_mask;

	/* dma channel width */
	unsigned int dma_chan_width;
};
static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off)
{
	__raw_writel(val, priv->dma_base + off);
}

static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan)
{
	return __raw_readl(priv->dma_chan + off + chan * priv->dma_chan_width);
}

static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val, u32 off,
			       int chan)
{
	__raw_writel(val, priv->dma_chan + off + chan * priv->dma_chan_width);
}

static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val, u32 off,
			       int chan)
{
	__raw_writel(val, priv->dma_sram + off + chan * priv->dma_chan_width);
}
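/*
 * Three register windows are used throughout: dma_base holds the global
 * controller registers (DMA_*), dma_chan the per-channel configuration
 * registers (DMAC_*) and dma_sram the per-channel state RAM (DMAS_*).
 * Per-channel accesses are offset by chan * dma_chan_width.
 */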
static int bcm6368_enetsw_refill_rx(struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;

			priv->rx_skb[desc_idx] = skb;
			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}

		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		dma_writel(priv, 1, DMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t)
{
	struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm6368_enetsw_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}
/*
 * extract packets from rx queue
 */
static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	int processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm6368_enetsw_desc *desc;
		struct sk_buff *skb;
		unsigned int len;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop iteration */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK)
		      >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= ETH_FCS_LEN;

		if (len < priv->copybreak) {
			struct sk_buff *nskb;

			nskb = napi_alloc_skb(&priv->napi, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);
	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm6368_enetsw_refill_rx(dev);

		/* kick rx dma */
		dmac_writel(priv, priv->dma_chan_en_mask,
			    DMAC_CHANCFG_REG, priv->rx_chan);
	}

	return processed;
}
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	int released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm6368_enetsw_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
/*
 * poll func, called by network core
 */
static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget)
{
	struct bcm6368_enetsw *priv = container_of(napi, struct bcm6368_enetsw, napi);
	struct net_device *dev = priv->net_dev;
	int rx_work_done;

	/* ack rx/tx interrupts */
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IR_REG, priv->tx_chan);

	/* reclaim sent skbs */
	bcm6368_enetsw_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm6368_enetsw_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupts */
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_chan_int_mask,
		    DMAC_IRMASK_REG, priv->tx_chan);

	return rx_work_done;
}
/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm6368_enetsw *priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
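/*
 * Interrupt/NAPI handshake: the handler above masks both channel interrupts
 * and schedules NAPI; bcm6368_enetsw_poll() acks the pending bits, does the
 * actual rx/tx work and only re-enables the interrupt masks once it has
 * completed within its budget.
 */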
/*
 * tx request callback
 */
static netdev_tx_t
bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct bcm6368_enetsw_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets */
	if (skb->len < (ETH_ZLEN + ETH_FCS_LEN)) {
		int needed = (ETH_ZLEN + ETH_FCS_LEN) - skb->len;
		void *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}

			dev_kfree_skb(skb);
			skb = nskb;
		}

		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
		    DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG,
		    priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);

	return ret;
}
/*
 * disable dma in given channel
 */
static void bcm6368_enetsw_disable_dma(struct bcm6368_enetsw *priv, int chan)
{
	int limit = 1000;

	dmac_writel(priv, 0, DMAC_CHANCFG_REG, chan);

	do {
		u32 val;

		val = dma_readl(priv, DMAC_CHANCFG_REG, chan);
		if (!(val & DMAC_CHANCFG_EN_MASK))
			break;

		udelay(1);
	} while (limit--);
}
static int bcm6368_enetsw_open(struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	/* mask all interrupts and request them */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0,
		   DMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm6368_enetsw_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	dmas_writel(priv, priv->rx_desc_dma,
		    DMAS_RSTART_REG, priv->rx_chan);
	dmas_writel(priv, priv->tx_desc_dma,
		    DMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->tx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->rx_chan);
	dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	dmac_writel(priv, priv->dma_maxburst,
		    DMAC_MAXBURST_REG, priv->rx_chan);
	dmac_writel(priv, priv->dma_maxburst,
		    DMAC_MAXBURST_REG, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	dma_writel(priv, val, DMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	dma_writel(priv, val, DMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	dma_writel(priv, DMA_CFG_EN_MASK, DMA_CFG_REG);
	dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
		    DMAC_CHANCFG_REG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}
static int bcm6368_enetsw_stop(struct net_device *dev)
{
	struct bcm6368_enetsw *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	int i;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
	dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);

	/* disable dma & mac */
	bcm6368_enetsw_disable_dma(priv, priv->tx_chan);
	bcm6368_enetsw_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm6368_enetsw_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm6368_enetsw_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single_attrs(kdev, desc->address, priv->rx_skb_size,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}
static const struct net_device_ops bcm6368_enetsw_ops = {
	.ndo_open = bcm6368_enetsw_open,
	.ndo_stop = bcm6368_enetsw_stop,
	.ndo_start_xmit = bcm6368_enetsw_start_xmit,
};
static int bcm6368_enetsw_probe(struct platform_device *pdev)
{
	struct bcm6368_enetsw *priv;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct net_device *ndev;
	struct resource *res;
	unsigned int i;
	int ret;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	priv->num_pms = of_count_phandle_with_args(node, "power-domains",
						   "#power-domain-cells");
	if (priv->num_pms > 1) {
		priv->pm = devm_kcalloc(dev, priv->num_pms,
					sizeof(struct device *), GFP_KERNEL);
		if (!priv->pm)
			return -ENOMEM;

		priv->link_pm = devm_kcalloc(dev, priv->num_pms,
					     sizeof(struct device_link *),
					     GFP_KERNEL);
		if (!priv->link_pm)
			return -ENOMEM;

		for (i = 0; i < priv->num_pms; i++) {
			priv->pm[i] = genpd_dev_pm_attach_by_id(dev, i);
			if (IS_ERR(priv->pm[i])) {
				dev_err(dev, "error getting pm %d\n", i);
				return -EINVAL;
			}

			priv->link_pm[i] = device_link_add(dev, priv->pm[i],
				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				DL_FLAG_RPM_ACTIVE);
		}
	}

	pm_runtime_enable(dev);
	pm_runtime_no_callbacks(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		dev_info(dev, "PM prober defer: ret=%d\n", ret);
		return -EPROBE_DEFER;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
	priv->dma_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_base))
		return PTR_ERR(priv->dma_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dma-channels");
	priv->dma_chan = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_chan))
		return PTR_ERR(priv->dma_chan);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram");
	priv->dma_sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dma_sram))
		return PTR_ERR(priv->dma_sram);

	priv->irq_rx = platform_get_irq_byname(pdev, "rx");
	if (!priv->irq_rx)
		return -ENODEV;

	priv->irq_tx = platform_get_irq_byname(pdev, "tx");
	if (!priv->irq_tx)
		return -ENODEV;
	else if (priv->irq_tx < 0)
		priv->irq_tx = -1;

	if (device_property_read_u32(dev, "dma-rx", &priv->rx_chan))
		return -ENODEV;

	if (device_property_read_u32(dev, "dma-tx", &priv->tx_chan))
		return -ENODEV;

	priv->rx_ring_size = ENETSW_DEF_RX_DESC;
	priv->tx_ring_size = ENETSW_DEF_TX_DESC;
	priv->dma_maxburst = ENETSW_DMA_MAXBURST;
	priv->copybreak = ENETSW_DEF_CPY_BREAK;

	priv->dma_chan_en_mask = DMAC_CHANCFG_EN_MASK;
	priv->dma_chan_int_mask = DMAC_IR_PKTDONE_MASK;
	priv->dma_chan_width = DMA_CHAN_WIDTH;

	of_get_mac_address(node, ndev->dev_addr);
	if (is_valid_ether_addr(ndev->dev_addr)) {
		dev_info(dev, "mtd mac %pM\n", ndev->dev_addr);
	} else {
		random_ether_addr(ndev->dev_addr);
		dev_info(dev, "random mac %pM\n", ndev->dev_addr);
	}

	priv->rx_skb_size = ALIGN(ndev->mtu + ENETSW_MTU_OVERHEAD,
				  priv->dma_maxburst * 4);

	priv->num_clocks = of_clk_get_parent_count(node);
	if (priv->num_clocks) {
		priv->clock = devm_kcalloc(dev, priv->num_clocks,
					   sizeof(struct clk *), GFP_KERNEL);
		if (!priv->clock)
			return -ENOMEM;
	}
	for (i = 0; i < priv->num_clocks; i++) {
		priv->clock[i] = of_clk_get(node, i);
		if (IS_ERR(priv->clock[i])) {
			dev_err(dev, "error getting clock %d\n", i);
			return -EINVAL;
		}

		ret = clk_prepare_enable(priv->clock[i]);
		if (ret) {
			dev_err(dev, "error enabling clock %d\n", i);
			return ret;
		}
	}

	priv->num_resets = of_count_phandle_with_args(node, "resets",
						      "#reset-cells");
	if (priv->num_resets) {
		priv->reset = devm_kcalloc(dev, priv->num_resets,
					   sizeof(struct reset_control *),
					   GFP_KERNEL);
		if (!priv->reset)
			return -ENOMEM;
	}
	for (i = 0; i < priv->num_resets; i++) {
		priv->reset[i] = devm_reset_control_get_by_index(dev, i);
		if (IS_ERR(priv->reset[i])) {
			dev_err(dev, "error getting reset %d\n", i);
			return -EINVAL;
		}

		ret = reset_control_reset(priv->reset[i]);
		if (ret) {
			dev_err(dev, "error performing reset %d\n", i);
			return ret;
		}
	}

	spin_lock_init(&priv->rx_lock);

	timer_setup(&priv->rx_timeout, bcm6368_enetsw_refill_rx_timer, 0);

	/* register netdevice */
	ndev->netdev_ops = &bcm6368_enetsw_ops;
	ndev->min_mtu = ETH_ZLEN;
	ndev->mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
	ndev->max_mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
	netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16);
	SET_NETDEV_DEV(ndev, dev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(ndev);
	platform_set_drvdata(pdev, ndev);
	priv->pdev = pdev;
	priv->net_dev = ndev;

	return 0;

out_disable_clk:
	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	return ret;
}
static int bcm6368_enetsw_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bcm6368_enetsw *priv = netdev_priv(ndev);
	unsigned int i;

	unregister_netdev(ndev);

	pm_runtime_put_sync(dev);
	for (i = 0; priv->pm && i < priv->num_pms; i++) {
		dev_pm_domain_detach(priv->pm[i], true);
		device_link_del(priv->link_pm[i]);
	}

	for (i = 0; i < priv->num_resets; i++)
		reset_control_assert(priv->reset[i]);

	for (i = 0; i < priv->num_clocks; i++)
		clk_disable_unprepare(priv->clock[i]);

	free_netdev(ndev);

	return 0;
}
static const struct of_device_id bcm6368_enetsw_of_match[] = {
	{ .compatible = "brcm,bcm6318-enetsw", },
	{ .compatible = "brcm,bcm6328-enetsw", },
	{ .compatible = "brcm,bcm6362-enetsw", },
	{ .compatible = "brcm,bcm6368-enetsw", },
	{ .compatible = "brcm,bcm63268-enetsw", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm6368_enetsw_of_match);
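/*
 * Illustrative device tree node for this driver.  Addresses, interrupt
 * numbers and DMA channel ids below are placeholders; the reg names, the
 * "rx"/"tx" interrupt names and the "dma-rx"/"dma-tx" properties follow what
 * bcm6368_enetsw_probe() looks up.
 *
 *	ethernet@10006800 {
 *		compatible = "brcm,bcm6368-enetsw";
 *		reg = <0x10006800 0x80>,
 *		      <0x10006a00 0x80>,
 *		      <0x10006c00 0x80>;
 *		reg-names = "dma", "dma-channels", "dma-sram";
 *		interrupts = <29>, <30>;
 *		interrupt-names = "rx", "tx";
 *		dma-rx = <0>;
 *		dma-tx = <1>;
 *	};
 */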
static struct platform_driver bcm6368_enetsw_driver = {
	.driver = {
		.name = "bcm6368-enetsw",
		.of_match_table = of_match_ptr(bcm6368_enetsw_of_match),
	},
	.probe = bcm6368_enetsw_probe,
	.remove = bcm6368_enetsw_remove,
};
module_platform_driver(bcm6368_enetsw_driver);