// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BCM6348 Ethernet Controller Driver
 *
 * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define DMA_CHAN_WIDTH			0x10

/* Controller Configuration Register */
#define DMA_CFG_REG			0x0
#define DMA_CFG_EN_SHIFT		0
#define DMA_CFG_EN_MASK			(1 << DMA_CFG_EN_SHIFT)
#define DMA_CFG_FLOWCH_MASK(x)		(1 << ((x >> 1) + 1))

/* Flow Control Descriptor Low Threshold register */
#define DMA_FLOWCL_REG(x)		(0x4 + (x) * 6)

/* Flow Control Descriptor High Threshold register */
#define DMA_FLOWCH_REG(x)		(0x8 + (x) * 6)

/* Flow Control Descriptor Buffer Alloc Threshold register */
#define DMA_BUFALLOC_REG(x)		(0xc + (x) * 6)
#define DMA_BUFALLOC_FORCE_SHIFT	31
#define DMA_BUFALLOC_FORCE_MASK		(1 << DMA_BUFALLOC_FORCE_SHIFT)
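/*
 * The IUDMA block exposes three register spaces, each mapped separately
 * in probe: global controller registers (dma_base), per-channel
 * configuration registers (dma_chan) and per-channel state RAM
 * (dma_sram). The per-channel spaces are indexed with a stride of
 * DMA_CHAN_WIDTH (0x10) bytes; see the dmac_/dmas_ accessors below.
 */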
/* Channel Configuration register */
#define DMAC_CHANCFG_REG		0x0
#define DMAC_CHANCFG_EN_SHIFT		0
#define DMAC_CHANCFG_EN_MASK		(1 << DMAC_CHANCFG_EN_SHIFT)
#define DMAC_CHANCFG_PKTHALT_SHIFT	1
#define DMAC_CHANCFG_PKTHALT_MASK	(1 << DMAC_CHANCFG_PKTHALT_SHIFT)
#define DMAC_CHANCFG_BUFHALT_SHIFT	2
#define DMAC_CHANCFG_BUFHALT_MASK	(1 << DMAC_CHANCFG_BUFHALT_SHIFT)
#define DMAC_CHANCFG_CHAINING_SHIFT	2
#define DMAC_CHANCFG_CHAINING_MASK	(1 << DMAC_CHANCFG_CHAINING_SHIFT)
#define DMAC_CHANCFG_WRAP_EN_SHIFT	3
#define DMAC_CHANCFG_WRAP_EN_MASK	(1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
#define DMAC_CHANCFG_FLOWC_EN_SHIFT	4
#define DMAC_CHANCFG_FLOWC_EN_MASK	(1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)
/* Interrupt Control/Status register */
#define DMAC_IR_REG			0x4
#define DMAC_IR_BUFDONE_MASK		(1 << 0)
#define DMAC_IR_PKTDONE_MASK		(1 << 1)
#define DMAC_IR_NOTOWNER_MASK		(1 << 2)

/* Interrupt Mask register */
#define DMAC_IRMASK_REG			0x8

/* Maximum Burst Length */
#define DMAC_MAXBURST_REG		0xc

/* Ring Start Address register */
#define DMAS_RSTART_REG			0x0

/* State Ram Word 2 */
#define DMAS_SRAM2_REG			0x4

/* State Ram Word 3 */
#define DMAS_SRAM3_REG			0x8

/* State Ram Word 4 */
#define DMAS_SRAM4_REG			0xc
struct bcm6348_iudma_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)

#define DMADESC_UNDER_MASK	(1 << 9)
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
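/*
 * Descriptor ownership follows the usual ring convention: the CPU sets
 * DMADESC_OWNER_MASK to hand a descriptor to the DMA engine, and the
 * engine clears it on completion. DMADESC_WRAP_MASK tags the last
 * descriptor of a ring so the engine wraps back to the ring start.
 */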
struct bcm6348_iudma {
	void __iomem *dma_base;
	void __iomem *dma_chan;
	void __iomem *dma_sram;

	spinlock_t dma_base_lock;

	struct clk **clock;
	unsigned int num_clocks;

	struct reset_control **reset;
	unsigned int num_resets;

	unsigned int dma_channels;
};
static inline u32 dma_readl(struct bcm6348_iudma *iudma, u32 off)
{
	u32 val;

	spin_lock(&iudma->dma_base_lock);
	val = __raw_readl(iudma->dma_base + off);
	spin_unlock(&iudma->dma_base_lock);

	return val;
}
static inline void dma_writel(struct bcm6348_iudma *iudma, u32 val, u32 off)
{
	spin_lock(&iudma->dma_base_lock);
	__raw_writel(val, iudma->dma_base + off);
	spin_unlock(&iudma->dma_base_lock);
}
static inline u32 dmac_readl(struct bcm6348_iudma *iudma, u32 off, int chan)
{
	return __raw_readl(iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
}
static inline void dmac_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
			       int chan)
{
	__raw_writel(val, iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
}
static inline void dmas_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
			       int chan)
{
	__raw_writel(val, iudma->dma_sram + chan * DMA_CHAN_WIDTH + off);
}
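/*
 * Only the global registers behind dma_base take the spinlock: they
 * hold shared read-modify-write state such as DMA_CFG_REG, which
 * several channels (and thus several EMACs) may update concurrently,
 * while each per-channel configuration/state RAM register has a single
 * owner.
 */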
static void bcm6348_iudma_chan_stop(struct bcm6348_iudma *iudma, int chan)
{
	int limit = 1000;

	dmac_writel(iudma, 0, DMAC_CHANCFG_REG, chan);

	do {
		u32 val;

		val = dmac_readl(iudma, DMAC_CHANCFG_REG, chan);
		if (!(val & DMAC_CHANCFG_EN_MASK))
			break;

		udelay(1);
	} while (limit--);
}
static int bcm6348_iudma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct bcm6348_iudma *iudma;
	unsigned int i;
	int num_resets;
	int ret;

	iudma = devm_kzalloc(dev, sizeof(*iudma), GFP_KERNEL);
	if (!iudma)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-channels", &iudma->dma_channels))
		return -ENODEV;
	iudma->dma_base = devm_platform_ioremap_resource_byname(pdev, "dma");
	if (IS_ERR_OR_NULL(iudma->dma_base))
		return PTR_ERR(iudma->dma_base);

	iudma->dma_chan = devm_platform_ioremap_resource_byname(pdev,
								"dma-channels");
	if (IS_ERR_OR_NULL(iudma->dma_chan))
		return PTR_ERR(iudma->dma_chan);

	iudma->dma_sram = devm_platform_ioremap_resource_byname(pdev,
								"dma-sram");
	if (IS_ERR_OR_NULL(iudma->dma_sram))
		return PTR_ERR(iudma->dma_sram);
	iudma->num_clocks = of_clk_get_parent_count(node);
	if (iudma->num_clocks) {
		iudma->clock = devm_kcalloc(dev, iudma->num_clocks,
					    sizeof(struct clk *), GFP_KERNEL);
		if (IS_ERR_OR_NULL(iudma->clock))
			return PTR_ERR(iudma->clock);

		for (i = 0; i < iudma->num_clocks; i++) {
			iudma->clock[i] = of_clk_get(node, i);
			if (IS_ERR_OR_NULL(iudma->clock[i])) {
				dev_err(dev, "error getting iudma clock %d\n", i);
				return PTR_ERR(iudma->clock[i]);
			}

			ret = clk_prepare_enable(iudma->clock[i]);
			if (ret) {
				dev_err(dev, "error enabling iudma clock %d\n", i);
				return ret;
			}
		}
	}
	num_resets = of_count_phandle_with_args(node, "resets",
						"#reset-cells");
	if (num_resets > 0)
		iudma->num_resets = num_resets;
	else
		iudma->num_resets = 0;

	if (iudma->num_resets) {
		iudma->reset = devm_kcalloc(dev, iudma->num_resets,
					    sizeof(struct reset_control *),
					    GFP_KERNEL);
		if (IS_ERR_OR_NULL(iudma->reset))
			return PTR_ERR(iudma->reset);

		for (i = 0; i < iudma->num_resets; i++) {
			iudma->reset[i] = devm_reset_control_get_by_index(dev, i);
			if (IS_ERR_OR_NULL(iudma->reset[i])) {
				dev_err(dev, "error getting iudma reset %d\n", i);
				return PTR_ERR(iudma->reset[i]);
			}

			ret = reset_control_reset(iudma->reset[i]);
			if (ret) {
				dev_err(dev, "error performing iudma reset %d\n", i);
				return ret;
			}
		}
	}
	dma_writel(iudma, 0, DMA_CFG_REG);
	for (i = 0; i < iudma->dma_channels; i++)
		bcm6348_iudma_chan_stop(iudma, i);
	dma_writel(iudma, DMA_CFG_EN_MASK, DMA_CFG_REG);

	spin_lock_init(&iudma->dma_base_lock);

	dev_info(dev, "bcm6348-iudma @ 0x%px\n", iudma->dma_base);

	platform_set_drvdata(pdev, iudma);

	return 0;
}
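/*
 * The EMACs below find this controller through their "brcm,iudma"
 * phandle and fetch the bcm6348_iudma pointer from the drvdata set
 * above; an EMAC that probes before the IUDMA sees NULL drvdata and
 * defers with -EPROBE_DEFER.
 */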
static const struct of_device_id bcm6348_iudma_of_match[] = {
	{ .compatible = "brcm,bcm6338-iudma", },
	{ .compatible = "brcm,bcm6348-iudma", },
	{ .compatible = "brcm,bcm6358-iudma", },
	{ /* sentinel */ },
};

static struct platform_driver bcm6348_iudma_driver = {
	.driver = {
		.name = "bcm6348-iudma",
		.of_match_table = of_match_ptr(bcm6348_iudma_of_match),
	},
	.probe = bcm6348_iudma_probe,
};
builtin_platform_driver(bcm6348_iudma_driver);
/*
 * BCM6348 Ethernet MACs
 */

#define ENET_MAX_MTU		2046

#define ENET_TAG_SIZE		6
#define ENET_MTU_OVERHEAD	(VLAN_ETH_HLEN + VLAN_HLEN + \
				 ENET_TAG_SIZE)

/* Default number of descriptors */
#define ENET_DEF_RX_DESC	64
#define ENET_DEF_TX_DESC	32
#define ENET_DEF_CPY_BREAK	128

/* Maximum burst len for dma (4 bytes unit) */
#define ENET_DMA_MAXBURST	8
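/*
 * ENET_MTU_OVERHEAD budgets the per-frame bytes carried on top of the
 * MTU: VLAN_ETH_HLEN (18) + VLAN_HLEN (4) + ENET_TAG_SIZE (6) = 28.
 * The rx buffer size in probe is ALIGN(mtu + 28, ENET_DMA_MAXBURST * 4),
 * i.e. rounded up to a whole number of maximum-length DMA bursts.
 */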
/* Receiver Configuration register */
#define ENET_RXCFG_REG			0x0
#define ENET_RXCFG_ALLMCAST_SHIFT	1
#define ENET_RXCFG_ALLMCAST_MASK	(1 << ENET_RXCFG_ALLMCAST_SHIFT)
#define ENET_RXCFG_PROMISC_SHIFT	3
#define ENET_RXCFG_PROMISC_MASK		(1 << ENET_RXCFG_PROMISC_SHIFT)
#define ENET_RXCFG_LOOPBACK_SHIFT	4
#define ENET_RXCFG_LOOPBACK_MASK	(1 << ENET_RXCFG_LOOPBACK_SHIFT)
#define ENET_RXCFG_ENFLOW_SHIFT		5
#define ENET_RXCFG_ENFLOW_MASK		(1 << ENET_RXCFG_ENFLOW_SHIFT)
/* Receive Maximum Length register */
#define ENET_RXMAXLEN_REG	0x4
#define ENET_RXMAXLEN_SHIFT	0
#define ENET_RXMAXLEN_MASK	(0x7ff << ENET_RXMAXLEN_SHIFT)

/* Transmit Maximum Length register */
#define ENET_TXMAXLEN_REG	0x8
#define ENET_TXMAXLEN_SHIFT	0
#define ENET_TXMAXLEN_MASK	(0x7ff << ENET_TXMAXLEN_SHIFT)
/* MII Status/Control register */
#define ENET_MIISC_REG			0x10
#define ENET_MIISC_MDCFREQDIV_SHIFT	0
#define ENET_MIISC_MDCFREQDIV_MASK	(0x7f << ENET_MIISC_MDCFREQDIV_SHIFT)
#define ENET_MIISC_PREAMBLEEN_SHIFT	7
#define ENET_MIISC_PREAMBLEEN_MASK	(1 << ENET_MIISC_PREAMBLEEN_SHIFT)

/* MII Data register */
#define ENET_MIID_REG		0x14
#define ENET_MIID_DATA_SHIFT	0
#define ENET_MIID_DATA_MASK	(0xffff << ENET_MIID_DATA_SHIFT)
#define ENET_MIID_TA_SHIFT	16
#define ENET_MIID_TA_MASK	(0x3 << ENET_MIID_TA_SHIFT)
#define ENET_MIID_REG_SHIFT	18
#define ENET_MIID_REG_MASK	(0x1f << ENET_MIID_REG_SHIFT)
#define ENET_MIID_PHY_SHIFT	23
#define ENET_MIID_PHY_MASK	(0x1f << ENET_MIID_PHY_SHIFT)
#define ENET_MIID_OP_SHIFT	28
#define ENET_MIID_OP_WRITE	(0x5 << ENET_MIID_OP_SHIFT)
#define ENET_MIID_OP_READ	(0x6 << ENET_MIID_OP_SHIFT)
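/*
 * The MII Data register layout appears to mirror a clause-22 MDIO
 * frame: the 4-bit field at ENET_MIID_OP_SHIFT combines the start bits
 * (01) with the opcode, giving 0b0101 (0x5) for write and 0b0110 (0x6)
 * for read, and the mdio helpers below load 0x2 (10) as the turnaround.
 */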
/* Ethernet Interrupt Mask register */
#define ENET_IRMASK_REG		0x18

/* Ethernet Interrupt register */
#define ENET_IR_REG		0x1c
#define ENET_IR_MII		BIT(0)
#define ENET_IR_MIB		BIT(1)
#define ENET_IR_FLOWC		BIT(2)

/* Ethernet Control register */
#define ENET_CTL_REG		0x2c
#define ENET_CTL_ENABLE_SHIFT	0
#define ENET_CTL_ENABLE_MASK	(1 << ENET_CTL_ENABLE_SHIFT)
#define ENET_CTL_DISABLE_SHIFT	1
#define ENET_CTL_DISABLE_MASK	(1 << ENET_CTL_DISABLE_SHIFT)
#define ENET_CTL_SRESET_SHIFT	2
#define ENET_CTL_SRESET_MASK	(1 << ENET_CTL_SRESET_SHIFT)
#define ENET_CTL_EPHYSEL_SHIFT	3
#define ENET_CTL_EPHYSEL_MASK	(1 << ENET_CTL_EPHYSEL_SHIFT)
/* Transmit Control register */
#define ENET_TXCTL_REG		0x30
#define ENET_TXCTL_FD_SHIFT	0
#define ENET_TXCTL_FD_MASK	(1 << ENET_TXCTL_FD_SHIFT)

/* Transmit Watermark register */
#define ENET_TXWMARK_REG	0x34
#define ENET_TXWMARK_WM_SHIFT	0
#define ENET_TXWMARK_WM_MASK	(0x3f << ENET_TXWMARK_WM_SHIFT)

/* MIB Control register */
#define ENET_MIBCTL_REG			0x38
#define ENET_MIBCTL_RDCLEAR_SHIFT	0
#define ENET_MIBCTL_RDCLEAR_MASK	(1 << ENET_MIBCTL_RDCLEAR_SHIFT)
/* Perfect Match Data Low register */
#define ENET_PML_REG(x)			(0x58 + (x) * 8)
#define ENET_PMH_REG(x)			(0x5c + (x) * 8)
#define ENET_PMH_DATAVALID_SHIFT	16
#define ENET_PMH_DATAVALID_MASK		(1 << ENET_PMH_DATAVALID_SHIFT)

/* MIB registers */
#define ENET_MIB_REG(x)		(0x200 + (x) * 4)
#define ENET_MIB_REG_COUNT	55
/*
 * TX transmit threshold (4 bytes unit), FIFO is 256 bytes, the value
 * must be low enough so that a DMA transfer of the above burst length
 * cannot overflow the FIFO
 */
#define ENET_TX_FIFO_TRESH	32
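/*
 * One way to read that constraint: the threshold is 32 words
 * (128 bytes) and a further maximum-length burst adds at most
 * ENET_DMA_MAXBURST * 4 = 32 bytes, so 128 + 32 = 160 bytes stays well
 * inside the 256-byte FIFO.
 */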
struct bcm6348_emac {
	struct bcm6348_iudma *iudma;
	void __iomem *base;

	struct clk **clock;
	unsigned int num_clocks;

	struct reset_control **reset;
	unsigned int num_resets;

	int copybreak;

	int irq_rx;
	int irq_tx;

	/* hw view of rx & tx dma ring */
	dma_addr_t rx_desc_dma;
	dma_addr_t tx_desc_dma;

	/* allocated size (in bytes) for rx & tx dma ring */
	unsigned int rx_desc_alloc_size;
	unsigned int tx_desc_alloc_size;

	struct napi_struct napi;

	/* dma channel id for rx */
	unsigned int rx_chan;

	/* number of dma desc in rx ring */
	int rx_ring_size;

	/* cpu view of rx dma ring */
	struct bcm6348_iudma_desc *rx_desc_cpu;

	/* current number of armed descriptors given to hardware for rx */
	int rx_desc_count;

	/* next rx descriptor to fetch from hardware */
	int rx_curr_desc;

	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

	/* size of allocated rx skbs */
	unsigned int rx_skb_size;

	/* list of skb given to hw for rx */
	struct sk_buff **rx_skb;

	/* used when rx skb allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;

	/* dma channel id for tx */
	unsigned int tx_chan;

	/* number of dma desc in tx ring */
	int tx_ring_size;

	/* cpu view of tx dma ring */
	struct bcm6348_iudma_desc *tx_desc_cpu;

	/* number of available descriptors for tx */
	int tx_desc_count;

	/* next tx descriptor available */
	int tx_curr_desc;

	/* next dirty tx descriptor to reclaim */
	int tx_dirty_desc;

	/* list of skb given to hw for tx */
	struct sk_buff **tx_skb;

	/* lock used by tx reclaim and xmit */
	spinlock_t tx_lock;

	/* network device reference */
	struct net_device *net_dev;

	/* platform device reference */
	struct platform_device *pdev;

	/* external mii bus */
	bool ext_mii;

	/* phy link state bookkeeping */
	int old_link;
	int old_duplex;
	int old_pause;
};
static inline void emac_writel(struct bcm6348_emac *emac, u32 val, u32 off)
{
	__raw_writel(val, emac->base + off);
}

static inline u32 emac_readl(struct bcm6348_emac *emac, u32 off)
{
	return __raw_readl(emac->base + off);
}
static int bcm6348_emac_refill_rx(struct net_device *ndev)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;

	while (emac->rx_desc_count < emac->rx_ring_size) {
		struct bcm6348_iudma_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = emac->rx_dirty_desc;
		desc = &emac->rx_desc_cpu[desc_idx];

		if (!emac->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(ndev, emac->rx_skb_size);
			if (!skb)
				break;

			emac->rx_skb[desc_idx] = skb;
			p = dma_map_single(dev, skb->data, emac->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = emac->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (emac->rx_dirty_desc == emac->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			emac->rx_dirty_desc = 0;
		} else {
			emac->rx_dirty_desc++;
		}

		desc->len_stat = len_stat;

		emac->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		dma_writel(iudma, 1, DMA_BUFALLOC_REG(emac->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (emac->rx_desc_count == 0 && netif_running(ndev)) {
		dev_warn(dev, "unable to refill rx ring\n");
		emac->rx_timeout.expires = jiffies + HZ;
		add_timer(&emac->rx_timeout);
	}

	return 0;
}
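/*
 * Judging by its use here and at open time, each write of 1 to
 * DMA_BUFALLOC_REG credits the channel's flow-control buffer allocator
 * with one armed descriptor, while the DMA_BUFALLOC_FORCE_MASK write in
 * open (re)initializes the counter outright.
 */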
/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm6348_emac_refill_rx_timer(struct timer_list *t)
{
	struct bcm6348_emac *emac = from_timer(emac, t, rx_timeout);
	struct net_device *ndev = emac->net_dev;

	spin_lock(&emac->rx_lock);
	bcm6348_emac_refill_rx(ndev);
	spin_unlock(&emac->rx_lock);
}
/*
 * extract packet from rx queue
 */
static int bcm6348_emac_receive_queue(struct net_device *ndev, int budget)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	int processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > emac->rx_desc_count)
		budget = emac->rx_desc_count;

	do {
		struct bcm6348_iudma_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = emac->rx_curr_desc;
		desc = &emac->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		emac->rx_curr_desc++;
		if (emac->rx_curr_desc == emac->rx_ring_size)
			emac->rx_curr_desc = 0;
		emac->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			ndev->stats.rx_dropped++;
			continue;
		}

		skb = emac->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK)
		      >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < emac->copybreak) {
			struct sk_buff *nskb;

			nskb = napi_alloc_skb(&emac->napi, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				ndev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(dev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(dev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(dev, desc->address,
					 emac->rx_skb_size, DMA_FROM_DEVICE);
			emac->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		netif_receive_skb(skb);
	} while (--budget > 0);

	if (processed || !emac->rx_desc_count) {
		bcm6348_emac_refill_rx(ndev);

		/* kick rx dma */
		dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
			    emac->rx_chan);
	}

	return processed;
}
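/*
 * Frames shorter than copybreak (128 bytes by default) are copied into
 * a fresh napi skb so the original rx buffer stays mapped and armed;
 * larger frames hand the buffer itself up the stack and leave a hole
 * for the next refill pass to repopulate.
 */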
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm6348_emac_tx_reclaim(struct net_device *ndev, int force)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	int released = 0;

	while (emac->tx_desc_count < emac->tx_ring_size) {
		struct bcm6348_iudma_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&emac->tx_lock);

		desc = &emac->tx_desc_cpu[emac->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&emac->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = emac->tx_skb[emac->tx_dirty_desc];
		emac->tx_skb[emac->tx_dirty_desc] = NULL;
		dma_unmap_single(dev, desc->address, skb->len, DMA_TO_DEVICE);

		emac->tx_dirty_desc++;
		if (emac->tx_dirty_desc == emac->tx_ring_size)
			emac->tx_dirty_desc = 0;
		emac->tx_desc_count++;

		spin_unlock(&emac->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			ndev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(ndev) && released)
		netif_wake_queue(ndev);

	return released;
}
static int bcm6348_emac_poll(struct napi_struct *napi, int budget)
{
	struct bcm6348_emac *emac = container_of(napi, struct bcm6348_emac,
						 napi);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct net_device *ndev = emac->net_dev;
	int rx_work_done;

	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
		    emac->rx_chan);
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
		    emac->tx_chan);

	/* reclaim sent skb */
	bcm6348_emac_tx_reclaim(ndev, 0);

	spin_lock(&emac->rx_lock);
	rx_work_done = bcm6348_emac_receive_queue(ndev, budget);
	spin_unlock(&emac->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
		    emac->rx_chan);
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
		    emac->tx_chan);

	return rx_work_done;
}
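/*
 * NAPI flow: the dma isr masks PKTDONE and schedules the poll above,
 * which acks the interrupts, reclaims tx and drains rx within budget;
 * PKTDONE is unmasked again only after napi_complete_done(), so the
 * hardware cannot re-raise the interrupt while polling is in progress.
 */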
/*
 * emac interrupt handler
 */
static irqreturn_t bcm6348_emac_isr_mac(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bcm6348_emac *emac = netdev_priv(ndev);
	u32 stat;

	stat = emac_readl(emac, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
	emac_writel(emac, 0, ENET_IRMASK_REG);

	return IRQ_HANDLED;
}
/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm6348_emac_isr_dma(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;

	/* mask rx/tx interrupts */
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);

	napi_schedule(&emac->napi);

	return IRQ_HANDLED;
}
/*
 * tx request callback
 */
static netdev_tx_t bcm6348_emac_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	struct bcm6348_iudma_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	/* lock against tx reclaim */
	spin_lock(&emac->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!emac->tx_desc_count)) {
		netif_stop_queue(ndev);
		dev_err(dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &emac->tx_desc_cpu[emac->tx_curr_desc];
	emac->tx_skb[emac->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
		    DMADESC_OWNER_MASK;

	emac->tx_curr_desc++;
	if (emac->tx_curr_desc == emac->tx_ring_size) {
		emac->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	emac->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
		    emac->tx_chan);

	/* stop queue if no more desc available */
	if (!emac->tx_desc_count)
		netif_stop_queue(ndev);

	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&emac->tx_lock);

	return ret;
}
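/*
 * Every packet goes out as a single descriptor: SOP and EOP are always
 * set together via DMADESC_ESOP_MASK (no scatter-gather), and
 * DMADESC_APPEND_CRC makes the MAC append the FCS rather than the
 * stack.
 */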
/*
 * Change the interface's emac address.
 */
static int bcm6348_emac_set_mac_address(struct net_device *ndev, void *p)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct sockaddr *addr = p;
	u32 val;

	eth_hw_addr_set(ndev, addr->sa_data);

	/* use perfect match register 0 to store my emac address */
	val = (ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
	      (ndev->dev_addr[4] << 8) | ndev->dev_addr[5];
	emac_writel(emac, val, ENET_PML_REG(0));

	val = (ndev->dev_addr[0] << 8 | ndev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	emac_writel(emac, val, ENET_PMH_REG(0));

	return 0;
}
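/*
 * Worked example of the perfect-match packing: for 00:11:22:33:44:55
 * the low register receives 0x22334455 (bytes 2..5) and the high
 * register 0x00010011, i.e. bytes 0..1 in the low half-word plus the
 * DATAVALID bit at position 16.
 */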
/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm6348_emac_set_multicast_list(struct net_device *ndev)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 val;
	unsigned int i;

	val = emac_readl(emac, ENET_RXCFG_REG);

	if (ndev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((ndev->flags & IFF_ALLMULTI) || netdev_mc_count(ndev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		emac_writel(emac, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
		      (dmi_addr[4] << 8) | dmi_addr[5];
		emac_writel(emac, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		emac_writel(emac, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		emac_writel(emac, 0, ENET_PML_REG(i + 1));
		emac_writel(emac, 0, ENET_PMH_REG(i + 1));
	}

	emac_writel(emac, val, ENET_RXCFG_REG);
}
static void bcm6348_emac_disable_mac(struct bcm6348_emac *emac)
{
	int limit;
	u32 val;

	val = emac_readl(emac, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	emac_writel(emac, val, ENET_CTL_REG);

	limit = 1000;
	do {
		val = emac_readl(emac, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;

		udelay(1);
	} while (limit--);
}
/*
 * set emac duplex parameters
 */
static void bcm6348_emac_set_duplex(struct bcm6348_emac *emac, int fullduplex)
{
	u32 val;

	val = emac_readl(emac, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	emac_writel(emac, val, ENET_TXCTL_REG);
}
/*
 * set emac flow control parameters
 */
static void bcm6348_emac_set_flow(struct bcm6348_emac *emac, bool rx_en,
				  bool tx_en)
{
	struct bcm6348_iudma *iudma = emac->iudma;
	u32 val;

	/* rx flow control (pause frame handling) */
	val = emac_readl(emac, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	emac_writel(emac, val, ENET_RXCFG_REG);

	dmas_writel(iudma, emac->rx_desc_dma, DMAS_RSTART_REG, emac->rx_chan);
	dmas_writel(iudma, emac->tx_desc_dma, DMAS_RSTART_REG, emac->tx_chan);

	/* tx flow control (pause frame generation) */
	val = dma_readl(iudma, DMA_CFG_REG);
	if (tx_en)
		val |= DMA_CFG_FLOWCH_MASK(emac->rx_chan);
	else
		val &= ~DMA_CFG_FLOWCH_MASK(emac->rx_chan);
	dma_writel(iudma, val, DMA_CFG_REG);
}
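/*
 * DMA_CFG_FLOWCH_MASK(x) expands to 1 << ((x >> 1) + 1), so the enable
 * bit is selected per rx/tx channel pair: channels 0/1 map to bit 1,
 * channels 2/3 to bit 2, and so on. That is why only the rx channel
 * number is passed in above.
 */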
static void bcm6348_emac_adjust_phy(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	bool status_changed = false;

	if (emac->old_link != phydev->link) {
		status_changed = true;
		emac->old_link = phydev->link;
	}

	if (phydev->link && phydev->duplex != emac->old_duplex) {
		bcm6348_emac_set_duplex(emac, phydev->duplex == DUPLEX_FULL);
		status_changed = true;
		emac->old_duplex = phydev->duplex;
	}

	if (phydev->link && phydev->pause != emac->old_pause) {
		bool rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			rx_pause_en = true;
			tx_pause_en = true;
		} else {
			rx_pause_en = false;
			tx_pause_en = false;
		}

		bcm6348_emac_set_flow(emac, rx_pause_en, tx_pause_en);
		status_changed = true;
		emac->old_pause = phydev->pause;
	}

	if (status_changed)
		dev_info(dev, "%s: phy link %s %s/%s/%s/%s\n",
			 ndev->name,
			 phydev->link ? "UP" : "DOWN",
			 phy_modes(phydev->interface),
			 phy_speed_to_str(phydev->speed),
			 phy_duplex_to_str(phydev->duplex),
			 phydev->pause ? "rx/tx" : "off");
}
static int bcm6348_emac_open(struct net_device *ndev)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	struct sockaddr addr;
	unsigned int i, size;
	void *p;
	u32 val;
	int ret;
	/* mask all interrupts and request them */
	emac_writel(emac, 0, ENET_IRMASK_REG);
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);

	ret = request_irq(ndev->irq, bcm6348_emac_isr_mac, 0, ndev->name,
			  ndev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(emac->irq_rx, bcm6348_emac_isr_dma,
			  0, ndev->name, ndev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(emac->irq_tx, bcm6348_emac_isr_dma,
			  0, ndev->name, ndev);
	if (ret)
		goto out_freeirq_rx;
	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		emac_writel(emac, 0, ENET_PML_REG(i));
		emac_writel(emac, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, ndev->dev_addr, ETH_ALEN);
	bcm6348_emac_set_mac_address(ndev, &addr);
	/* allocate rx dma ring */
	size = emac->rx_ring_size * sizeof(struct bcm6348_iudma_desc);
	p = dma_alloc_coherent(dev, size, &emac->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(dev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	emac->rx_desc_alloc_size = size;
	emac->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = emac->tx_ring_size * sizeof(struct bcm6348_iudma_desc);
	p = dma_alloc_coherent(dev, size, &emac->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(dev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	emac->tx_desc_alloc_size = size;
	emac->tx_desc_cpu = p;

	emac->tx_skb = kzalloc(sizeof(struct sk_buff *) * emac->tx_ring_size,
			       GFP_KERNEL);
	if (!emac->tx_skb) {
		dev_err(dev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	emac->tx_desc_count = emac->tx_ring_size;
	emac->tx_dirty_desc = 0;
	emac->tx_curr_desc = 0;
	spin_lock_init(&emac->tx_lock);
1147 /* init & fill rx ring with skbs */
1148 emac
->rx_skb
= kzalloc(sizeof(struct sk_buff
*) * emac
->rx_ring_size
,
1150 if (!emac
->rx_skb
) {
1151 dev_err(dev
, "cannot allocate rx skb queue\n");
1153 goto out_free_tx_skb
;
1156 emac
->rx_desc_count
= 0;
1157 emac
->rx_dirty_desc
= 0;
1158 emac
->rx_curr_desc
= 0;
1160 /* initialize flow control buffer allocation */
1161 dma_writel(iudma
, DMA_BUFALLOC_FORCE_MASK
| 0,
1162 DMA_BUFALLOC_REG(emac
->rx_chan
));
1164 if (bcm6348_emac_refill_rx(ndev
)) {
1165 dev_err(dev
, "cannot allocate rx skb queue\n");
1170 /* write rx & tx ring addresses */
1171 dmas_writel(iudma
, emac
->rx_desc_dma
,
1172 DMAS_RSTART_REG
, emac
->rx_chan
);
1173 dmas_writel(iudma
, emac
->tx_desc_dma
,
1174 DMAS_RSTART_REG
, emac
->tx_chan
);
1176 /* clear remaining state ram for rx & tx channel */
1177 dmas_writel(iudma
, 0, DMAS_SRAM2_REG
, emac
->rx_chan
);
1178 dmas_writel(iudma
, 0, DMAS_SRAM2_REG
, emac
->tx_chan
);
1179 dmas_writel(iudma
, 0, DMAS_SRAM3_REG
, emac
->rx_chan
);
1180 dmas_writel(iudma
, 0, DMAS_SRAM3_REG
, emac
->tx_chan
);
1181 dmas_writel(iudma
, 0, DMAS_SRAM4_REG
, emac
->rx_chan
);
1182 dmas_writel(iudma
, 0, DMAS_SRAM4_REG
, emac
->tx_chan
);
	/* set max rx/tx length */
	emac_writel(emac, ndev->mtu, ENET_RXMAXLEN_REG);
	emac_writel(emac, ndev->mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	dmac_writel(iudma, ENET_DMA_MAXBURST,
		    DMAC_MAXBURST_REG, emac->rx_chan);
	dmac_writel(iudma, ENET_DMA_MAXBURST,
		    DMAC_MAXBURST_REG, emac->tx_chan);

	/* set correct transmit fifo watermark */
	emac_writel(emac, ENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = emac->rx_ring_size / 3;
	dma_writel(iudma, val, DMA_FLOWCL_REG(emac->rx_chan));
	val = (emac->rx_ring_size * 2) / 3;
	dma_writel(iudma, val, DMA_FLOWCH_REG(emac->rx_chan));
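	/*
	 * With the default 64-descriptor rx ring this programs thresholds
	 * of 21 and 42 buffers; presumably the hardware raises pause
	 * frames when the free-buffer count falls below the low threshold
	 * and drops them again above the high one.
	 */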
	/* all set, enable emac and interrupts, start dma engine and
	 * kick rx dma channel */
	val = emac_readl(emac, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	emac_writel(emac, val, ENET_CTL_REG);
	dmac_writel(iudma, DMAC_CHANCFG_EN_MASK,
		    DMAC_CHANCFG_REG, emac->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
	emac_writel(emac, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, emac->rx_chan);
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
		    DMAC_IR_REG, emac->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&emac->napi);

	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, emac->rx_chan);
	dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
		    DMAC_IRMASK_REG, emac->tx_chan);

	phy_start(ndev->phydev);

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
out:
	for (i = 0; i < emac->rx_ring_size; i++) {
		struct bcm6348_iudma_desc *desc;

		if (!emac->rx_skb[i])
			continue;

		desc = &emac->rx_desc_cpu[i];
		dma_unmap_single(dev, desc->address, emac->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(emac->rx_skb[i]);
	}
	kfree(emac->rx_skb);

out_free_tx_skb:
	kfree(emac->tx_skb);

out_free_tx_ring:
	dma_free_coherent(dev, emac->tx_desc_alloc_size,
			  emac->tx_desc_cpu, emac->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(dev, emac->rx_desc_alloc_size,
			  emac->rx_desc_cpu, emac->rx_desc_dma);

out_freeirq_tx:
	if (emac->irq_tx != -1)
		free_irq(emac->irq_tx, ndev);

out_freeirq_rx:
	free_irq(emac->irq_rx, ndev);

out_freeirq:
	free_irq(ndev->irq, ndev);

out_phy_disconnect:
	phy_disconnect(ndev->phydev);

	return ret;
}
static int bcm6348_emac_stop(struct net_device *ndev)
{
	struct bcm6348_emac *emac = netdev_priv(ndev);
	struct bcm6348_iudma *iudma = emac->iudma;
	struct device *dev = &emac->pdev->dev;
	unsigned int i;

	netif_stop_queue(ndev);
	napi_disable(&emac->napi);

	phy_stop(ndev->phydev);
	del_timer_sync(&emac->rx_timeout);

	/* mask all interrupts */
	emac_writel(emac, 0, ENET_IRMASK_REG);
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
	dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);

	/* disable dma & emac */
	bcm6348_iudma_chan_stop(iudma, emac->tx_chan);
	bcm6348_iudma_chan_stop(iudma, emac->rx_chan);
	bcm6348_emac_disable_mac(emac);

	/* force reclaim of all tx buffers */
	bcm6348_emac_tx_reclaim(ndev, 1);

	/* free the rx skb ring */
	for (i = 0; i < emac->rx_ring_size; i++) {
		struct bcm6348_iudma_desc *desc;

		if (!emac->rx_skb[i])
			continue;

		desc = &emac->rx_desc_cpu[i];
		dma_unmap_single_attrs(dev, desc->address, emac->rx_skb_size,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
		kfree_skb(emac->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(emac->rx_skb);
	kfree(emac->tx_skb);
	dma_free_coherent(dev, emac->rx_desc_alloc_size, emac->rx_desc_cpu,
			  emac->rx_desc_dma);
	dma_free_coherent(dev, emac->tx_desc_alloc_size, emac->tx_desc_cpu,
			  emac->tx_desc_dma);
	free_irq(emac->irq_tx, ndev);
	free_irq(emac->irq_rx, ndev);
	free_irq(ndev->irq, ndev);

	netdev_reset_queue(ndev);

	return 0;
}
static const struct net_device_ops bcm6348_emac_ops = {
	.ndo_open = bcm6348_emac_open,
	.ndo_stop = bcm6348_emac_stop,
	.ndo_start_xmit = bcm6348_emac_start_xmit,
	.ndo_set_mac_address = bcm6348_emac_set_mac_address,
	.ndo_set_rx_mode = bcm6348_emac_set_multicast_list,
};
static int bcm6348_emac_mdio_op(struct bcm6348_emac *emac, uint32_t data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	emac_writel(emac, ENET_IR_MII, ENET_IR_REG);

	emac_writel(emac, data, ENET_MIID_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (emac_readl(emac, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
static int bcm6348_emac_mdio_read(struct mii_bus *bus, int phy_id, int loc)
{
	struct bcm6348_emac *emac = bus->priv;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	u32 reg;

	reg = 0x2 << ENET_MIID_TA_SHIFT;
	reg |= loc << ENET_MIID_REG_SHIFT;
	reg |= phy_id << ENET_MIID_PHY_SHIFT;
	reg |= ENET_MIID_OP_READ;

	if (bcm6348_emac_mdio_op(emac, reg)) {
		dev_err(dev, "mdio_read: phy=%d loc=%x timeout!\n",
			phy_id, loc);
		return -EIO;
	}

	reg = emac_readl(emac, ENET_MIID_REG);
	reg = (reg >> ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;

	return reg;
}
static int bcm6348_emac_mdio_write(struct mii_bus *bus, int phy_id,
				   int loc, uint16_t val)
{
	struct bcm6348_emac *emac = bus->priv;
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	u32 reg;

	reg = (val << ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;
	reg |= 0x2 << ENET_MIID_TA_SHIFT;
	reg |= loc << ENET_MIID_REG_SHIFT;
	reg |= phy_id << ENET_MIID_PHY_SHIFT;
	reg |= ENET_MIID_OP_WRITE;

	if (bcm6348_emac_mdio_op(emac, reg)) {
		dev_err(dev, "mdio_write: phy=%d loc=%x timeout!\n",
			phy_id, loc);
		return -EIO;
	}

	bcm6348_emac_mdio_op(emac, reg);

	return 0;
}
static int bcm6348_emac_mdio_init(struct bcm6348_emac *emac,
				  struct device_node *np)
{
	struct platform_device *pdev = emac->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *mnp;
	struct mii_bus *mii_bus;
	int ret;

	mnp = of_get_child_by_name(np, "mdio");
	if (!mnp)
		return -ENODEV;

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		of_node_put(mnp);
		return -ENOMEM;
	}

	mii_bus->priv = emac;
	mii_bus->name = np->full_name;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
	mii_bus->parent = dev;
	mii_bus->read = bcm6348_emac_mdio_read;
	mii_bus->write = bcm6348_emac_mdio_write;
	mii_bus->phy_mask = 0x3f;

	ret = devm_of_mdiobus_register(dev, mii_bus, mnp);
	of_node_put(mnp);
	if (ret) {
		dev_err(dev, "MDIO bus registration failed\n");
		return ret;
	}

	dev_info(dev, "MDIO bus init\n");

	return 0;
}
/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm6348_emac_hw_preinit(struct bcm6348_emac *emac)
{
	u32 val;
	int limit;

	/* make sure emac is disabled */
	bcm6348_emac_disable_mac(emac);

	/* soft reset emac */
	val = ENET_CTL_SRESET_MASK;
	emac_writel(emac, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = emac_readl(emac, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;

		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = emac_readl(emac, ENET_CTL_REG);
	if (emac->ext_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	emac_writel(emac, val, ENET_CTL_REG);

	/* turn on mdc clock */
	emac_writel(emac, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = emac_readl(emac, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	emac_writel(emac, val, ENET_MIBCTL_REG);
}
static int bcm6348_emac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *dma_node;
	struct platform_device *dma_pdev;
	struct bcm6348_emac *emac;
	struct bcm6348_iudma *iudma;
	struct net_device *ndev;
	unsigned int i;
	int num_resets;
	int ret;

	dma_node = of_parse_phandle(node, "brcm,iudma", 0);
	if (!dma_node)
		return -ENODEV;

	dma_pdev = of_find_device_by_node(dma_node);
	of_node_put(dma_node);
	if (!dma_pdev)
		return -ENODEV;

	iudma = platform_get_drvdata(dma_pdev);
	if (!iudma)
		return -EPROBE_DEFER;

	ndev = devm_alloc_etherdev(dev, sizeof(*emac));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);

	emac = netdev_priv(ndev);
	emac->iudma = iudma;
	emac->pdev = pdev;
	emac->net_dev = ndev;

	emac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR_OR_NULL(emac->base))
		return PTR_ERR(emac->base);

	ndev->irq = of_irq_get_byname(node, "emac");
	if (!ndev->irq)
		return -ENODEV;

	emac->irq_rx = of_irq_get_byname(node, "rx");
	if (!emac->irq_rx)
		return -ENODEV;

	emac->irq_tx = of_irq_get_byname(node, "tx");
	if (!emac->irq_tx)
		return -ENODEV;

	if (of_property_read_u32(node, "dma-rx", &emac->rx_chan))
		return -ENODEV;

	if (of_property_read_u32(node, "dma-tx", &emac->tx_chan))
		return -ENODEV;
	emac->ext_mii = of_property_read_bool(node, "brcm,external-mii");

	emac->rx_ring_size = ENET_DEF_RX_DESC;
	emac->tx_ring_size = ENET_DEF_TX_DESC;
	emac->copybreak = ENET_DEF_CPY_BREAK;

	emac->old_link = 0;
	emac->old_duplex = -1;
	emac->old_pause = -1;

	of_get_mac_address(node, ndev->dev_addr);
	if (is_valid_ether_addr(ndev->dev_addr)) {
		dev_info(dev, "mtd mac %pM\n", ndev->dev_addr);
	} else {
		random_ether_addr(ndev->dev_addr);
		dev_info(dev, "random mac %pM\n", ndev->dev_addr);
	}

	emac->rx_skb_size = ALIGN(ndev->mtu + ENET_MTU_OVERHEAD,
				  ENET_DMA_MAXBURST * 4);
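	/*
	 * ndev->mtu still holds the 1500-byte ether_setup() default at
	 * this point, so the rx buffer size works out to
	 * ALIGN(1500 + 28, 32) = 1536 bytes, aligned to the 32-byte
	 * maximum DMA burst.
	 */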
	emac->num_clocks = of_clk_get_parent_count(node);
	if (emac->num_clocks) {
		emac->clock = devm_kcalloc(dev, emac->num_clocks,
					   sizeof(struct clk *), GFP_KERNEL);
		if (IS_ERR_OR_NULL(emac->clock))
			return PTR_ERR(emac->clock);

		for (i = 0; i < emac->num_clocks; i++) {
			emac->clock[i] = of_clk_get(node, i);
			if (IS_ERR_OR_NULL(emac->clock[i])) {
				dev_err(dev, "error getting emac clock %d\n", i);
				return PTR_ERR(emac->clock[i]);
			}

			ret = clk_prepare_enable(emac->clock[i]);
			if (ret) {
				dev_err(dev, "error enabling emac clock %d\n", i);
				return ret;
			}
		}
	}
	num_resets = of_count_phandle_with_args(node, "resets",
						"#reset-cells");
	if (num_resets > 0)
		emac->num_resets = num_resets;
	else
		emac->num_resets = 0;

	if (emac->num_resets) {
		emac->reset = devm_kcalloc(dev, emac->num_resets,
					   sizeof(struct reset_control *),
					   GFP_KERNEL);
		if (IS_ERR_OR_NULL(emac->reset))
			return PTR_ERR(emac->reset);

		for (i = 0; i < emac->num_resets; i++) {
			emac->reset[i] = devm_reset_control_get_by_index(dev, i);
			if (IS_ERR_OR_NULL(emac->reset[i])) {
				dev_err(dev, "error getting emac reset %d\n", i);
				return PTR_ERR(emac->reset[i]);
			}

			ret = reset_control_reset(emac->reset[i]);
			if (ret) {
				dev_err(dev, "error performing emac reset %d\n", i);
				return ret;
			}
		}
	}
	/* do minimal hardware init to be able to probe mii bus */
	bcm6348_emac_hw_preinit(emac);

	ret = bcm6348_emac_mdio_init(emac, node);
	if (ret)
		return ret;

	spin_lock_init(&emac->rx_lock);

	timer_setup(&emac->rx_timeout, bcm6348_emac_refill_rx_timer, 0);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		emac_writel(emac, 0, ENET_MIB_REG(i));

	/* register netdevice */
	ndev->netdev_ops = &bcm6348_emac_ops;
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->mtu = ETH_DATA_LEN - VLAN_ETH_HLEN;
	ndev->max_mtu = ENET_MAX_MTU - VLAN_ETH_HLEN;
	netif_napi_add(ndev, &emac->napi, bcm6348_emac_poll, 16);
	SET_NETDEV_DEV(ndev, dev);

	ret = devm_register_netdev(dev, ndev);
	if (ret)
		goto out_disable_clk;
;
1651 netif_carrier_off(ndev
);
1653 ndev
->phydev
= of_phy_get_and_connect(ndev
, node
,
1654 bcm6348_emac_adjust_phy
);
1655 if (IS_ERR_OR_NULL(ndev
->phydev
))
1656 dev_warn(dev
, "PHY not found!\n");
1658 dev_info(dev
, "%s at 0x%px, IRQ %d\n", ndev
->name
, emac
->base
,
1664 for (i
= 0; i
< emac
->num_resets
; i
++)
1665 reset_control_assert(emac
->reset
[i
]);
1667 for (i
= 0; i
< emac
->num_clocks
; i
++)
1668 clk_disable_unprepare(emac
->clock
[i
]);
static int bcm6348_emac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bcm6348_emac *emac = netdev_priv(ndev);
	unsigned int i;

	emac_writel(emac, 0, ENET_MIISC_REG);

	for (i = 0; i < emac->num_resets; i++)
		reset_control_assert(emac->reset[i]);

	for (i = 0; i < emac->num_clocks; i++)
		clk_disable_unprepare(emac->clock[i]);

	return 0;
}
static const struct of_device_id bcm6348_emac_of_match[] = {
	{ .compatible = "brcm,bcm6338-emac", },
	{ .compatible = "brcm,bcm6348-emac", },
	{ .compatible = "brcm,bcm6358-emac", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm6348_emac_of_match);

static struct platform_driver bcm6348_emac_driver = {
	.driver = {
		.name = "bcm6348-emac",
		.of_match_table = of_match_ptr(bcm6348_emac_of_match),
	},
	.probe = bcm6348_emac_probe,
	.remove = bcm6348_emac_remove,
};
module_platform_driver(bcm6348_emac_driver);