1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * BCM6348 Ethernet Controller Driver
5 * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
7 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/of_address.h>
20 #include <linux/of_clk.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_mdio.h>
23 #include <linux/of_net.h>
24 #include <linux/of_platform.h>
25 #include <linux/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/reset.h>
/* Register stride between per-channel DMA register banks */
#define DMA_CHAN_WIDTH			0x10

/* Controller Configuration Register */
#define DMA_CFG_REG			0x0
#define DMA_CFG_EN_SHIFT		0
#define DMA_CFG_EN_MASK			(1 << DMA_CFG_EN_SHIFT)
/* per-channel flow control enable bit; rx channels are even-numbered */
#define DMA_CFG_FLOWCH_MASK(x)		(1 << (((x) >> 1) + 1))

/* Flow Control Descriptor Low Threshold register */
#define DMA_FLOWCL_REG(x)		(0x4 + (x) * 6)

/* Flow Control Descriptor High Threshold register */
#define DMA_FLOWCH_REG(x)		(0x8 + (x) * 6)

/* Flow Control Descriptor Buffer Alloca Threshold register */
#define DMA_BUFALLOC_REG(x)		(0xc + (x) * 6)
#define DMA_BUFALLOC_FORCE_SHIFT	31
#define DMA_BUFALLOC_FORCE_MASK		(1 << DMA_BUFALLOC_FORCE_SHIFT)

/* Channel Configuration register */
#define DMAC_CHANCFG_REG		0x0
#define DMAC_CHANCFG_EN_SHIFT		0
#define DMAC_CHANCFG_EN_MASK		(1 << DMAC_CHANCFG_EN_SHIFT)
#define DMAC_CHANCFG_PKTHALT_SHIFT	1
#define DMAC_CHANCFG_PKTHALT_MASK	(1 << DMAC_CHANCFG_PKTHALT_SHIFT)
#define DMAC_CHANCFG_BUFHALT_SHIFT	2
#define DMAC_CHANCFG_BUFHALT_MASK	(1 << DMAC_CHANCFG_BUFHALT_SHIFT)
/* NOTE(review): CHAINING shares bit 2 with BUFHALT in this header —
 * matches the original source, but confirm against the datasheet */
#define DMAC_CHANCFG_CHAINING_SHIFT	2
#define DMAC_CHANCFG_CHAINING_MASK	(1 << DMAC_CHANCFG_CHAINING_SHIFT)
#define DMAC_CHANCFG_WRAP_EN_SHIFT	3
#define DMAC_CHANCFG_WRAP_EN_MASK	(1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
#define DMAC_CHANCFG_FLOWC_EN_SHIFT	4
#define DMAC_CHANCFG_FLOWC_EN_MASK	(1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)

/* Interrupt Control/Status register */
#define DMAC_IR_REG			0x4
#define DMAC_IR_BUFDONE_MASK		(1 << 0)
#define DMAC_IR_PKTDONE_MASK		(1 << 1)
#define DMAC_IR_NOTOWNER_MASK		(1 << 2)

/* Interrupt Mask register */
#define DMAC_IRMASK_REG			0x8

/* Maximum Burst Length */
#define DMAC_MAXBURST_REG		0xc

/* Ring Start Address register */
#define DMAS_RSTART_REG			0x0

/* State Ram Word 2 */
#define DMAS_SRAM2_REG			0x4

/* State Ram Word 3 */
#define DMAS_SRAM3_REG			0x8

/* State Ram Word 4 */
#define DMAS_SRAM4_REG			0xc
88 struct bcm6348_iudma_desc
{
/* len_stat field layout of struct bcm6348_iudma_desc */
#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)	/* set: owned by hardware */
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)	/* last descriptor in ring */

#define DMADESC_UNDER_MASK	(1 << 9)	/* tx underrun */
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
115 struct bcm6348_iudma
{
116 void __iomem
*dma_base
;
117 void __iomem
*dma_chan
;
118 void __iomem
*dma_sram
;
120 spinlock_t dma_base_lock
;
123 unsigned int num_clocks
;
125 struct reset_control
**reset
;
126 unsigned int num_resets
;
128 unsigned int dma_channels
;
131 int bcm6348_iudma_drivers_register(struct platform_device
*pdev
);
133 static inline u32
dma_readl(struct bcm6348_iudma
*iudma
, u32 off
)
137 spin_lock(&iudma
->dma_base_lock
);
138 val
= __raw_readl(iudma
->dma_base
+ off
);
139 spin_unlock(&iudma
->dma_base_lock
);
144 static inline void dma_writel(struct bcm6348_iudma
*iudma
, u32 val
, u32 off
)
146 spin_lock(&iudma
->dma_base_lock
);
147 __raw_writel(val
, iudma
->dma_base
+ off
);
148 spin_unlock(&iudma
->dma_base_lock
);
151 static inline u32
dmac_readl(struct bcm6348_iudma
*iudma
, u32 off
, int chan
)
153 return __raw_readl(iudma
->dma_chan
+ chan
* DMA_CHAN_WIDTH
+ off
);
156 static inline void dmac_writel(struct bcm6348_iudma
*iudma
, u32 val
, u32 off
,
159 __raw_writel(val
, iudma
->dma_chan
+ chan
* DMA_CHAN_WIDTH
+ off
);
162 static inline void dmas_writel(struct bcm6348_iudma
*iudma
, u32 val
, u32 off
,
165 __raw_writel(val
, iudma
->dma_sram
+ chan
* DMA_CHAN_WIDTH
+ off
);
168 static void bcm6348_iudma_chan_stop(struct bcm6348_iudma
*iudma
, int chan
)
172 dmac_writel(iudma
, 0, DMAC_CHANCFG_REG
, chan
);
177 val
= dmac_readl(iudma
, DMAC_CHANCFG_REG
, chan
);
178 if (!(val
& DMAC_CHANCFG_EN_MASK
))
185 static int bcm6348_iudma_probe(struct platform_device
*pdev
)
187 struct device
*dev
= &pdev
->dev
;
188 struct device_node
*node
= dev
->of_node
;
189 struct bcm6348_iudma
*iudma
;
194 iudma
= devm_kzalloc(dev
, sizeof(*iudma
), GFP_KERNEL
);
198 if (of_property_read_u32(node
, "dma-channels", &iudma
->dma_channels
))
201 iudma
->dma_base
= devm_platform_ioremap_resource_byname(pdev
, "dma");
202 if (IS_ERR_OR_NULL(iudma
->dma_base
))
203 return PTR_ERR(iudma
->dma_base
);
205 iudma
->dma_chan
= devm_platform_ioremap_resource_byname(pdev
,
207 if (IS_ERR_OR_NULL(iudma
->dma_chan
))
208 return PTR_ERR(iudma
->dma_chan
);
210 iudma
->dma_sram
= devm_platform_ioremap_resource_byname(pdev
,
212 if (IS_ERR_OR_NULL(iudma
->dma_sram
))
213 return PTR_ERR(iudma
->dma_sram
);
215 iudma
->num_clocks
= of_clk_get_parent_count(node
);
216 if (iudma
->num_clocks
) {
217 iudma
->clock
= devm_kcalloc(dev
, iudma
->num_clocks
,
218 sizeof(struct clk
*), GFP_KERNEL
);
219 if (IS_ERR_OR_NULL(iudma
->clock
))
220 return PTR_ERR(iudma
->clock
);
222 for (i
= 0; i
< iudma
->num_clocks
; i
++) {
223 iudma
->clock
[i
] = of_clk_get(node
, i
);
224 if (IS_ERR_OR_NULL(iudma
->clock
[i
])) {
225 dev_err(dev
, "error getting iudma clock %d\n", i
);
226 return PTR_ERR(iudma
->clock
[i
]);
229 ret
= clk_prepare_enable(iudma
->clock
[i
]);
231 dev_err(dev
, "error enabling iudma clock %d\n", i
);
236 num_resets
= of_count_phandle_with_args(node
, "resets",
239 iudma
->num_resets
= num_resets
;
241 iudma
->num_resets
= 0;
242 if (iudma
->num_resets
) {
243 iudma
->reset
= devm_kcalloc(dev
, iudma
->num_resets
,
244 sizeof(struct reset_control
*),
246 if (IS_ERR_OR_NULL(iudma
->reset
))
247 return PTR_ERR(iudma
->reset
);
249 for (i
= 0; i
< iudma
->num_resets
; i
++) {
250 iudma
->reset
[i
] = devm_reset_control_get_by_index(dev
, i
);
251 if (IS_ERR_OR_NULL(iudma
->reset
[i
])) {
252 dev_err(dev
, "error getting iudma reset %d\n", i
);
253 return PTR_ERR(iudma
->reset
[i
]);
256 ret
= reset_control_reset(iudma
->reset
[i
]);
258 dev_err(dev
, "error performing iudma reset %d\n", i
);
263 dma_writel(iudma
, 0, DMA_CFG_REG
);
264 for (i
= 0; i
< iudma
->dma_channels
; i
++)
265 bcm6348_iudma_chan_stop(iudma
, i
);
266 dma_writel(iudma
, DMA_CFG_EN_MASK
, DMA_CFG_REG
);
268 spin_lock_init(&iudma
->dma_base_lock
);
270 dev_info(dev
, "bcm6348-iudma @ 0x%px\n", iudma
->dma_base
);
272 platform_set_drvdata(pdev
, iudma
);
274 return bcm6348_iudma_drivers_register(pdev
);
277 static const struct of_device_id bcm6348_iudma_of_match
[] = {
278 { .compatible
= "brcm,bcm6338-iudma", },
279 { .compatible
= "brcm,bcm6348-iudma", },
280 { .compatible
= "brcm,bcm6358-iudma", },
284 static struct platform_driver bcm6348_iudma_driver
= {
286 .name
= "bcm6348-iudma",
287 .of_match_table
= of_match_ptr(bcm6348_iudma_of_match
),
289 .probe
= bcm6348_iudma_probe
,
291 builtin_platform_driver(bcm6348_iudma_driver
);
/*
 * BCM6348 Ethernet MACs
 */

/* maximum burst len for dma (4 bytes unit) */
#define ENET_MAX_MTU		2046

#define ENET_TAG_SIZE		6
#define ENET_MTU_OVERHEAD	(VLAN_ETH_HLEN + VLAN_HLEN + \
				 ENET_TAG_SIZE)

/* Default number of descriptor */
#define ENET_DEF_RX_DESC	64
#define ENET_DEF_TX_DESC	32
#define ENET_DEF_CPY_BREAK	128

/* Maximum burst len for dma (4 bytes unit) */
#define ENET_DMA_MAXBURST	8

/* Receiver Configuration register */
#define ENET_RXCFG_REG			0x0
#define ENET_RXCFG_ALLMCAST_SHIFT	1
#define ENET_RXCFG_ALLMCAST_MASK	(1 << ENET_RXCFG_ALLMCAST_SHIFT)
#define ENET_RXCFG_PROMISC_SHIFT	3
#define ENET_RXCFG_PROMISC_MASK		(1 << ENET_RXCFG_PROMISC_SHIFT)
#define ENET_RXCFG_LOOPBACK_SHIFT	4
#define ENET_RXCFG_LOOPBACK_MASK	(1 << ENET_RXCFG_LOOPBACK_SHIFT)
#define ENET_RXCFG_ENFLOW_SHIFT		5
#define ENET_RXCFG_ENFLOW_MASK		(1 << ENET_RXCFG_ENFLOW_SHIFT)

/* Receive Maximum Length register */
#define ENET_RXMAXLEN_REG	0x4
#define ENET_RXMAXLEN_SHIFT	0
#define ENET_RXMAXLEN_MASK	(0x7ff << ENET_RXMAXLEN_SHIFT)

/* Transmit Maximum Length register */
#define ENET_TXMAXLEN_REG	0x8
#define ENET_TXMAXLEN_SHIFT	0
#define ENET_TXMAXLEN_MASK	(0x7ff << ENET_TXMAXLEN_SHIFT)

/* MII Status/Control register */
#define ENET_MIISC_REG			0x10
#define ENET_MIISC_MDCFREQDIV_SHIFT	0
#define ENET_MIISC_MDCFREQDIV_MASK	(0x7f << ENET_MIISC_MDCFREQDIV_SHIFT)
#define ENET_MIISC_PREAMBLEEN_SHIFT	7
#define ENET_MIISC_PREAMBLEEN_MASK	(1 << ENET_MIISC_PREAMBLEEN_SHIFT)

/* MII Data register */
#define ENET_MIID_REG		0x14
#define ENET_MIID_DATA_SHIFT	0
#define ENET_MIID_DATA_MASK	(0xffff << ENET_MIID_DATA_SHIFT)
#define ENET_MIID_TA_SHIFT	16
#define ENET_MIID_TA_MASK	(0x3 << ENET_MIID_TA_SHIFT)
#define ENET_MIID_REG_SHIFT	18
#define ENET_MIID_REG_MASK	(0x1f << ENET_MIID_REG_SHIFT)
#define ENET_MIID_PHY_SHIFT	23
#define ENET_MIID_PHY_MASK	(0x1f << ENET_MIID_PHY_SHIFT)
#define ENET_MIID_OP_SHIFT	28
#define ENET_MIID_OP_WRITE	(0x5 << ENET_MIID_OP_SHIFT)
#define ENET_MIID_OP_READ	(0x6 << ENET_MIID_OP_SHIFT)

/* Ethernet Interrupt Mask register */
#define ENET_IRMASK_REG		0x18

/* Ethernet Interrupt register */
#define ENET_IR_REG		0x1c
#define ENET_IR_MII		BIT(0)
#define ENET_IR_MIB		BIT(1)
#define ENET_IR_FLOWC		BIT(2)

/* Ethernet Control register */
#define ENET_CTL_REG		0x2c
#define ENET_CTL_ENABLE_SHIFT	0
#define ENET_CTL_ENABLE_MASK	(1 << ENET_CTL_ENABLE_SHIFT)
#define ENET_CTL_DISABLE_SHIFT	1
#define ENET_CTL_DISABLE_MASK	(1 << ENET_CTL_DISABLE_SHIFT)
#define ENET_CTL_SRESET_SHIFT	2
#define ENET_CTL_SRESET_MASK	(1 << ENET_CTL_SRESET_SHIFT)
#define ENET_CTL_EPHYSEL_SHIFT	3
#define ENET_CTL_EPHYSEL_MASK	(1 << ENET_CTL_EPHYSEL_SHIFT)

/* Transmit Control register */
#define ENET_TXCTL_REG		0x30
#define ENET_TXCTL_FD_SHIFT	0
#define ENET_TXCTL_FD_MASK	(1 << ENET_TXCTL_FD_SHIFT)

/* Transmit Watermask register */
#define ENET_TXWMARK_REG	0x34
#define ENET_TXWMARK_WM_SHIFT	0
#define ENET_TXWMARK_WM_MASK	(0x3f << ENET_TXWMARK_WM_SHIFT)

/* MIB Control register */
#define ENET_MIBCTL_REG		0x38
#define ENET_MIBCTL_RDCLEAR_SHIFT	0
#define ENET_MIBCTL_RDCLEAR_MASK	(1 << ENET_MIBCTL_RDCLEAR_SHIFT)

/* Perfect Match Data Low register */
#define ENET_PML_REG(x)		(0x58 + (x) * 8)
#define ENET_PMH_REG(x)		(0x5c + (x) * 8)
#define ENET_PMH_DATAVALID_SHIFT	16
#define ENET_PMH_DATAVALID_MASK	(1 << ENET_PMH_DATAVALID_SHIFT)

/* MIB register */
#define ENET_MIB_REG(x)		(0x200 + (x) * 4)
#define ENET_MIB_REG_COUNT	55

/*
 * TX transmit threshold (4 bytes unit), FIFO is 256 bytes, the value
 * must be low enough so that a DMA transfer of above burst length can
 * not overflow the fifo
 */
#define ENET_TX_FIFO_TRESH	32
406 struct bcm6348_emac
{
407 struct bcm6348_iudma
*iudma
;
411 unsigned int num_clocks
;
413 struct reset_control
**reset
;
414 unsigned int num_resets
;
421 /* hw view of rx & tx dma ring */
422 dma_addr_t rx_desc_dma
;
423 dma_addr_t tx_desc_dma
;
425 /* allocated size (in bytes) for rx & tx dma ring */
426 unsigned int rx_desc_alloc_size
;
427 unsigned int tx_desc_alloc_size
;
429 struct napi_struct napi
;
431 /* dma channel id for rx */
434 /* number of dma desc in rx ring */
437 /* cpu view of rx dma ring */
438 struct bcm6348_iudma_desc
*rx_desc_cpu
;
440 /* current number of armed descriptor given to hardware for rx */
443 /* next rx descriptor to fetch from hardware */
446 /* next dirty rx descriptor to refill */
449 /* size of allocated rx skbs */
450 unsigned int rx_skb_size
;
452 /* list of skb given to hw for rx */
453 struct sk_buff
**rx_skb
;
455 /* used when rx skb allocation failed, so we defer rx queue
457 struct timer_list rx_timeout
;
459 /* lock rx_timeout against rx normal operation */
462 /* dma channel id for tx */
465 /* number of dma desc in tx ring */
468 /* cpu view of tx dma ring */
469 struct bcm6348_iudma_desc
*tx_desc_cpu
;
471 /* number of available descriptor for tx */
474 /* next tx descriptor avaiable */
477 /* next dirty tx descriptor to reclaim */
480 /* list of skb given to hw for tx */
481 struct sk_buff
**tx_skb
;
483 /* lock used by tx reclaim and xmit */
486 /* network device reference */
487 struct net_device
*net_dev
;
489 /* platform device reference */
490 struct platform_device
*pdev
;
492 /* external mii bus */
501 static inline void emac_writel(struct bcm6348_emac
*emac
, u32 val
, u32 off
)
503 __raw_writel(val
, emac
->base
+ off
);
506 static inline u32
emac_readl(struct bcm6348_emac
*emac
, u32 off
)
508 return __raw_readl(emac
->base
+ off
);
514 static int bcm6348_emac_refill_rx(struct net_device
*ndev
)
516 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
517 struct bcm6348_iudma
*iudma
= emac
->iudma
;
518 struct platform_device
*pdev
= emac
->pdev
;
519 struct device
*dev
= &pdev
->dev
;
521 while (emac
->rx_desc_count
< emac
->rx_ring_size
) {
522 struct bcm6348_iudma_desc
*desc
;
528 desc_idx
= emac
->rx_dirty_desc
;
529 desc
= &emac
->rx_desc_cpu
[desc_idx
];
531 if (!emac
->rx_skb
[desc_idx
]) {
532 skb
= netdev_alloc_skb(ndev
, emac
->rx_skb_size
);
535 emac
->rx_skb
[desc_idx
] = skb
;
536 p
= dma_map_single(dev
, skb
->data
, emac
->rx_skb_size
,
541 len_stat
= emac
->rx_skb_size
<< DMADESC_LENGTH_SHIFT
;
542 len_stat
|= DMADESC_OWNER_MASK
;
543 if (emac
->rx_dirty_desc
== emac
->rx_ring_size
- 1) {
544 len_stat
|= DMADESC_WRAP_MASK
;
545 emac
->rx_dirty_desc
= 0;
547 emac
->rx_dirty_desc
++;
550 desc
->len_stat
= len_stat
;
552 emac
->rx_desc_count
++;
554 /* tell dma engine we allocated one buffer */
555 dma_writel(iudma
, 1, DMA_BUFALLOC_REG(emac
->rx_chan
));
558 /* If rx ring is still empty, set a timer to try allocating
559 * again at a later time. */
560 if (emac
->rx_desc_count
== 0 && netif_running(ndev
)) {
561 dev_warn(dev
, "unable to refill rx ring\n");
562 emac
->rx_timeout
.expires
= jiffies
+ HZ
;
563 add_timer(&emac
->rx_timeout
);
570 * timer callback to defer refill rx queue in case we're OOM
572 static void bcm6348_emac_refill_rx_timer(struct timer_list
*t
)
574 struct bcm6348_emac
*emac
= from_timer(emac
, t
, rx_timeout
);
575 struct net_device
*ndev
= emac
->net_dev
;
577 spin_lock(&emac
->rx_lock
);
578 bcm6348_emac_refill_rx(ndev
);
579 spin_unlock(&emac
->rx_lock
);
583 * extract packet from rx queue
585 static int bcm6348_emac_receive_queue(struct net_device
*ndev
, int budget
)
587 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
588 struct bcm6348_iudma
*iudma
= emac
->iudma
;
589 struct platform_device
*pdev
= emac
->pdev
;
590 struct device
*dev
= &pdev
->dev
;
593 /* don't scan ring further than number of refilled
595 if (budget
> emac
->rx_desc_count
)
596 budget
= emac
->rx_desc_count
;
599 struct bcm6348_iudma_desc
*desc
;
605 desc_idx
= emac
->rx_curr_desc
;
606 desc
= &emac
->rx_desc_cpu
[desc_idx
];
608 /* make sure we actually read the descriptor status at
612 len_stat
= desc
->len_stat
;
614 /* break if dma ownership belongs to hw */
615 if (len_stat
& DMADESC_OWNER_MASK
)
619 emac
->rx_curr_desc
++;
620 if (emac
->rx_curr_desc
== emac
->rx_ring_size
)
621 emac
->rx_curr_desc
= 0;
622 emac
->rx_desc_count
--;
624 /* if the packet does not have start of packet _and_
625 * end of packet flag set, then just recycle it */
626 if ((len_stat
& DMADESC_ESOP_MASK
) != DMADESC_ESOP_MASK
) {
627 ndev
->stats
.rx_dropped
++;
632 skb
= emac
->rx_skb
[desc_idx
];
633 len
= (len_stat
& DMADESC_LENGTH_MASK
)
634 >> DMADESC_LENGTH_SHIFT
;
635 /* don't include FCS */
638 if (len
< emac
->copybreak
) {
639 struct sk_buff
*nskb
;
641 nskb
= napi_alloc_skb(&emac
->napi
, len
);
643 /* forget packet, just rearm desc */
644 ndev
->stats
.rx_dropped
++;
648 dma_sync_single_for_cpu(dev
, desc
->address
,
649 len
, DMA_FROM_DEVICE
);
650 memcpy(nskb
->data
, skb
->data
, len
);
651 dma_sync_single_for_device(dev
, desc
->address
,
652 len
, DMA_FROM_DEVICE
);
655 dma_unmap_single(dev
, desc
->address
,
656 emac
->rx_skb_size
, DMA_FROM_DEVICE
);
657 emac
->rx_skb
[desc_idx
] = NULL
;
661 skb
->protocol
= eth_type_trans(skb
, ndev
);
662 ndev
->stats
.rx_packets
++;
663 ndev
->stats
.rx_bytes
+= len
;
664 netif_receive_skb(skb
);
665 } while (--budget
> 0);
667 if (processed
|| !emac
->rx_desc_count
) {
668 bcm6348_emac_refill_rx(ndev
);
671 dmac_writel(iudma
, DMAC_CHANCFG_EN_MASK
, DMAC_CHANCFG_REG
,
679 * try to or force reclaim of transmitted buffers
681 static int bcm6348_emac_tx_reclaim(struct net_device
*ndev
, int force
)
683 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
684 struct platform_device
*pdev
= emac
->pdev
;
685 struct device
*dev
= &pdev
->dev
;
688 while (emac
->tx_desc_count
< emac
->tx_ring_size
) {
689 struct bcm6348_iudma_desc
*desc
;
692 /* We run in a bh and fight against start_xmit, which
693 * is called with bh disabled */
694 spin_lock(&emac
->tx_lock
);
696 desc
= &emac
->tx_desc_cpu
[emac
->tx_dirty_desc
];
698 if (!force
&& (desc
->len_stat
& DMADESC_OWNER_MASK
)) {
699 spin_unlock(&emac
->tx_lock
);
703 /* ensure other field of the descriptor were not read
704 * before we checked ownership */
707 skb
= emac
->tx_skb
[emac
->tx_dirty_desc
];
708 emac
->tx_skb
[emac
->tx_dirty_desc
] = NULL
;
709 dma_unmap_single(dev
, desc
->address
, skb
->len
, DMA_TO_DEVICE
);
711 emac
->tx_dirty_desc
++;
712 if (emac
->tx_dirty_desc
== emac
->tx_ring_size
)
713 emac
->tx_dirty_desc
= 0;
714 emac
->tx_desc_count
++;
716 spin_unlock(&emac
->tx_lock
);
718 if (desc
->len_stat
& DMADESC_UNDER_MASK
)
719 ndev
->stats
.tx_errors
++;
725 if (netif_queue_stopped(ndev
) && released
)
726 netif_wake_queue(ndev
);
731 static int bcm6348_emac_poll(struct napi_struct
*napi
, int budget
)
733 struct bcm6348_emac
*emac
= container_of(napi
, struct bcm6348_emac
,
735 struct bcm6348_iudma
*iudma
= emac
->iudma
;
736 struct net_device
*ndev
= emac
->net_dev
;
740 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
, DMAC_IR_REG
,
742 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
, DMAC_IR_REG
,
745 /* reclaim sent skb */
746 bcm6348_emac_tx_reclaim(ndev
, 0);
748 spin_lock(&emac
->rx_lock
);
749 rx_work_done
= bcm6348_emac_receive_queue(ndev
, budget
);
750 spin_unlock(&emac
->rx_lock
);
752 if (rx_work_done
>= budget
) {
753 /* rx queue is not yet empty/clean */
757 /* no more packet in rx/tx queue, remove device from poll
759 napi_complete_done(napi
, rx_work_done
);
761 /* restore rx/tx interrupt */
762 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
, DMAC_IRMASK_REG
,
764 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
, DMAC_IRMASK_REG
,
771 * emac interrupt handler
773 static irqreturn_t
bcm6348_emac_isr_mac(int irq
, void *dev_id
)
775 struct net_device
*ndev
= dev_id
;
776 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
779 stat
= emac_readl(emac
, ENET_IR_REG
);
780 if (!(stat
& ENET_IR_MIB
))
783 /* clear & mask interrupt */
784 emac_writel(emac
, ENET_IR_MIB
, ENET_IR_REG
);
785 emac_writel(emac
, 0, ENET_IRMASK_REG
);
791 * rx/tx dma interrupt handler
793 static irqreturn_t
bcm6348_emac_isr_dma(int irq
, void *dev_id
)
795 struct net_device
*ndev
= dev_id
;
796 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
797 struct bcm6348_iudma
*iudma
= emac
->iudma
;
799 /* mask rx/tx interrupts */
800 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->rx_chan
);
801 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->tx_chan
);
803 napi_schedule(&emac
->napi
);
809 * tx request callback
811 static netdev_tx_t
bcm6348_emac_start_xmit(struct sk_buff
*skb
,
812 struct net_device
*ndev
)
814 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
815 struct bcm6348_iudma
*iudma
= emac
->iudma
;
816 struct platform_device
*pdev
= emac
->pdev
;
817 struct device
*dev
= &pdev
->dev
;
818 struct bcm6348_iudma_desc
*desc
;
822 /* lock against tx reclaim */
823 spin_lock(&emac
->tx_lock
);
825 /* make sure the tx hw queue is not full, should not happen
826 * since we stop queue before it's the case */
827 if (unlikely(!emac
->tx_desc_count
)) {
828 netif_stop_queue(ndev
);
829 dev_err(dev
, "xmit called with no tx desc available?\n");
830 ret
= NETDEV_TX_BUSY
;
834 /* point to the next available desc */
835 desc
= &emac
->tx_desc_cpu
[emac
->tx_curr_desc
];
836 emac
->tx_skb
[emac
->tx_curr_desc
] = skb
;
838 /* fill descriptor */
839 desc
->address
= dma_map_single(dev
, skb
->data
, skb
->len
,
842 len_stat
= (skb
->len
<< DMADESC_LENGTH_SHIFT
) & DMADESC_LENGTH_MASK
;
843 len_stat
|= DMADESC_ESOP_MASK
| DMADESC_APPEND_CRC
|
846 emac
->tx_curr_desc
++;
847 if (emac
->tx_curr_desc
== emac
->tx_ring_size
) {
848 emac
->tx_curr_desc
= 0;
849 len_stat
|= DMADESC_WRAP_MASK
;
851 emac
->tx_desc_count
--;
853 /* dma might be already polling, make sure we update desc
854 * fields in correct order */
856 desc
->len_stat
= len_stat
;
860 dmac_writel(iudma
, DMAC_CHANCFG_EN_MASK
, DMAC_CHANCFG_REG
,
863 /* stop queue if no more desc available */
864 if (!emac
->tx_desc_count
)
865 netif_stop_queue(ndev
);
867 ndev
->stats
.tx_bytes
+= skb
->len
;
868 ndev
->stats
.tx_packets
++;
872 spin_unlock(&emac
->tx_lock
);
877 * Change the interface's emac address.
879 static int bcm6348_emac_set_mac_address(struct net_device
*ndev
, void *p
)
881 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
882 struct sockaddr
*addr
= p
;
885 eth_hw_addr_set(ndev
, addr
->sa_data
);
887 /* use perfect match register 0 to store my emac address */
888 val
= (ndev
->dev_addr
[2] << 24) | (ndev
->dev_addr
[3] << 16) |
889 (ndev
->dev_addr
[4] << 8) | ndev
->dev_addr
[5];
890 emac_writel(emac
, val
, ENET_PML_REG(0));
892 val
= (ndev
->dev_addr
[0] << 8 | ndev
->dev_addr
[1]);
893 val
|= ENET_PMH_DATAVALID_MASK
;
894 emac_writel(emac
, val
, ENET_PMH_REG(0));
900 * Change rx mode (promiscuous/allmulti) and update multicast list
902 static void bcm6348_emac_set_multicast_list(struct net_device
*ndev
)
904 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
905 struct netdev_hw_addr
*ha
;
909 val
= emac_readl(emac
, ENET_RXCFG_REG
);
911 if (ndev
->flags
& IFF_PROMISC
)
912 val
|= ENET_RXCFG_PROMISC_MASK
;
914 val
&= ~ENET_RXCFG_PROMISC_MASK
;
916 /* only 3 perfect match registers left, first one is used for
918 if ((ndev
->flags
& IFF_ALLMULTI
) || netdev_mc_count(ndev
) > 3)
919 val
|= ENET_RXCFG_ALLMCAST_MASK
;
921 val
&= ~ENET_RXCFG_ALLMCAST_MASK
;
923 /* no need to set perfect match registers if we catch all
925 if (val
& ENET_RXCFG_ALLMCAST_MASK
) {
926 emac_writel(emac
, val
, ENET_RXCFG_REG
);
931 netdev_for_each_mc_addr(ha
, ndev
) {
938 /* update perfect match registers */
940 tmp
= (dmi_addr
[2] << 24) | (dmi_addr
[3] << 16) |
941 (dmi_addr
[4] << 8) | dmi_addr
[5];
942 emac_writel(emac
, tmp
, ENET_PML_REG(i
+ 1));
944 tmp
= (dmi_addr
[0] << 8 | dmi_addr
[1]);
945 tmp
|= ENET_PMH_DATAVALID_MASK
;
946 emac_writel(emac
, tmp
, ENET_PMH_REG(i
++ + 1));
950 emac_writel(emac
, 0, ENET_PML_REG(i
+ 1));
951 emac_writel(emac
, 0, ENET_PMH_REG(i
+ 1));
954 emac_writel(emac
, val
, ENET_RXCFG_REG
);
960 static void bcm6348_emac_disable_mac(struct bcm6348_emac
*emac
)
965 val
= emac_readl(emac
, ENET_CTL_REG
);
966 val
|= ENET_CTL_DISABLE_MASK
;
967 emac_writel(emac
, val
, ENET_CTL_REG
);
971 val
= emac_readl(emac
, ENET_CTL_REG
);
972 if (!(val
& ENET_CTL_DISABLE_MASK
))
979 * set emac duplex parameters
981 static void bcm6348_emac_set_duplex(struct bcm6348_emac
*emac
, int fullduplex
)
985 val
= emac_readl(emac
, ENET_TXCTL_REG
);
987 val
|= ENET_TXCTL_FD_MASK
;
989 val
&= ~ENET_TXCTL_FD_MASK
;
990 emac_writel(emac
, val
, ENET_TXCTL_REG
);
994 * set emac flow control parameters
996 static void bcm6348_emac_set_flow(struct bcm6348_emac
*emac
, bool rx_en
, bool tx_en
)
998 struct bcm6348_iudma
*iudma
= emac
->iudma
;
1001 val
= emac_readl(emac
, ENET_RXCFG_REG
);
1003 val
|= ENET_RXCFG_ENFLOW_MASK
;
1005 val
&= ~ENET_RXCFG_ENFLOW_MASK
;
1006 emac_writel(emac
, val
, ENET_RXCFG_REG
);
1008 dmas_writel(iudma
, emac
->rx_desc_dma
, DMAS_RSTART_REG
, emac
->rx_chan
);
1009 dmas_writel(iudma
, emac
->tx_desc_dma
, DMAS_RSTART_REG
, emac
->tx_chan
);
1011 val
= dma_readl(iudma
, DMA_CFG_REG
);
1013 val
|= DMA_CFG_FLOWCH_MASK(emac
->rx_chan
);
1015 val
&= ~DMA_CFG_FLOWCH_MASK(emac
->rx_chan
);
1016 dma_writel(iudma
, val
, DMA_CFG_REG
);
1022 static void bcm6348_emac_adjust_phy(struct net_device
*ndev
)
1024 struct phy_device
*phydev
= ndev
->phydev
;
1025 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
1026 struct platform_device
*pdev
= emac
->pdev
;
1027 struct device
*dev
= &pdev
->dev
;
1028 bool status_changed
= false;
1030 if (emac
->old_link
!= phydev
->link
) {
1031 status_changed
= true;
1032 emac
->old_link
= phydev
->link
;
1035 if (phydev
->link
&& phydev
->duplex
!= emac
->old_duplex
) {
1036 bcm6348_emac_set_duplex(emac
, phydev
->duplex
== DUPLEX_FULL
);
1037 status_changed
= true;
1038 emac
->old_duplex
= phydev
->duplex
;
1041 if (phydev
->link
&& phydev
->pause
!= emac
->old_pause
) {
1042 bool rx_pause_en
, tx_pause_en
;
1044 if (phydev
->pause
) {
1048 rx_pause_en
= false;
1049 tx_pause_en
= false;
1052 bcm6348_emac_set_flow(emac
, rx_pause_en
, tx_pause_en
);
1053 status_changed
= true;
1054 emac
->old_pause
= phydev
->pause
;
1058 dev_info(dev
, "%s: phy link %s %s/%s/%s/%s\n",
1060 phydev
->link
? "UP" : "DOWN",
1061 phy_modes(phydev
->interface
),
1062 phy_speed_to_str(phydev
->speed
),
1063 phy_duplex_to_str(phydev
->duplex
),
1064 phydev
->pause
? "rx/tx" : "off");
1068 static int bcm6348_emac_open(struct net_device
*ndev
)
1070 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
1071 struct bcm6348_iudma
*iudma
= emac
->iudma
;
1072 struct platform_device
*pdev
= emac
->pdev
;
1073 struct device
*dev
= &pdev
->dev
;
1074 struct sockaddr addr
;
1075 unsigned int i
, size
;
1080 /* mask all interrupts and request them */
1081 emac_writel(emac
, 0, ENET_IRMASK_REG
);
1082 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->rx_chan
);
1083 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->tx_chan
);
1085 ret
= request_irq(ndev
->irq
, bcm6348_emac_isr_mac
, 0, ndev
->name
,
1090 ret
= request_irq(emac
->irq_rx
, bcm6348_emac_isr_dma
,
1091 0, ndev
->name
, ndev
);
1095 ret
= request_irq(emac
->irq_tx
, bcm6348_emac_isr_dma
,
1096 0, ndev
->name
, ndev
);
1098 goto out_freeirq_rx
;
1100 /* initialize perfect match registers */
1101 for (i
= 0; i
< 4; i
++) {
1102 emac_writel(emac
, 0, ENET_PML_REG(i
));
1103 emac_writel(emac
, 0, ENET_PMH_REG(i
));
1106 /* write device mac address */
1107 memcpy(addr
.sa_data
, ndev
->dev_addr
, ETH_ALEN
);
1108 bcm6348_emac_set_mac_address(ndev
, &addr
);
1110 /* allocate rx dma ring */
1111 size
= emac
->rx_ring_size
* sizeof(struct bcm6348_iudma_desc
);
1112 p
= dma_alloc_coherent(dev
, size
, &emac
->rx_desc_dma
, GFP_KERNEL
);
1114 dev_err(dev
, "cannot allocate rx ring %u\n", size
);
1116 goto out_freeirq_tx
;
1120 emac
->rx_desc_alloc_size
= size
;
1121 emac
->rx_desc_cpu
= p
;
1123 /* allocate tx dma ring */
1124 size
= emac
->tx_ring_size
* sizeof(struct bcm6348_iudma_desc
);
1125 p
= dma_alloc_coherent(dev
, size
, &emac
->tx_desc_dma
, GFP_KERNEL
);
1127 dev_err(dev
, "cannot allocate tx ring\n");
1129 goto out_free_rx_ring
;
1133 emac
->tx_desc_alloc_size
= size
;
1134 emac
->tx_desc_cpu
= p
;
1136 emac
->tx_skb
= kzalloc(sizeof(struct sk_buff
*) * emac
->tx_ring_size
,
1138 if (!emac
->tx_skb
) {
1139 dev_err(dev
, "cannot allocate rx skb queue\n");
1141 goto out_free_tx_ring
;
1144 emac
->tx_desc_count
= emac
->tx_ring_size
;
1145 emac
->tx_dirty_desc
= 0;
1146 emac
->tx_curr_desc
= 0;
1147 spin_lock_init(&emac
->tx_lock
);
1149 /* init & fill rx ring with skbs */
1150 emac
->rx_skb
= kzalloc(sizeof(struct sk_buff
*) * emac
->rx_ring_size
,
1152 if (!emac
->rx_skb
) {
1153 dev_err(dev
, "cannot allocate rx skb queue\n");
1155 goto out_free_tx_skb
;
1158 emac
->rx_desc_count
= 0;
1159 emac
->rx_dirty_desc
= 0;
1160 emac
->rx_curr_desc
= 0;
1162 /* initialize flow control buffer allocation */
1163 dma_writel(iudma
, DMA_BUFALLOC_FORCE_MASK
| 0,
1164 DMA_BUFALLOC_REG(emac
->rx_chan
));
1166 if (bcm6348_emac_refill_rx(ndev
)) {
1167 dev_err(dev
, "cannot allocate rx skb queue\n");
1172 /* write rx & tx ring addresses */
1173 dmas_writel(iudma
, emac
->rx_desc_dma
,
1174 DMAS_RSTART_REG
, emac
->rx_chan
);
1175 dmas_writel(iudma
, emac
->tx_desc_dma
,
1176 DMAS_RSTART_REG
, emac
->tx_chan
);
1178 /* clear remaining state ram for rx & tx channel */
1179 dmas_writel(iudma
, 0, DMAS_SRAM2_REG
, emac
->rx_chan
);
1180 dmas_writel(iudma
, 0, DMAS_SRAM2_REG
, emac
->tx_chan
);
1181 dmas_writel(iudma
, 0, DMAS_SRAM3_REG
, emac
->rx_chan
);
1182 dmas_writel(iudma
, 0, DMAS_SRAM3_REG
, emac
->tx_chan
);
1183 dmas_writel(iudma
, 0, DMAS_SRAM4_REG
, emac
->rx_chan
);
1184 dmas_writel(iudma
, 0, DMAS_SRAM4_REG
, emac
->tx_chan
);
1186 /* set max rx/tx length */
1187 emac_writel(emac
, ndev
->mtu
, ENET_RXMAXLEN_REG
);
1188 emac_writel(emac
, ndev
->mtu
, ENET_TXMAXLEN_REG
);
1190 /* set dma maximum burst len */
1191 dmac_writel(iudma
, ENET_DMA_MAXBURST
,
1192 DMAC_MAXBURST_REG
, emac
->rx_chan
);
1193 dmac_writel(iudma
, ENET_DMA_MAXBURST
,
1194 DMAC_MAXBURST_REG
, emac
->tx_chan
);
1196 /* set correct transmit fifo watermark */
1197 emac_writel(emac
, ENET_TX_FIFO_TRESH
, ENET_TXWMARK_REG
);
1199 /* set flow control low/high threshold to 1/3 / 2/3 */
1200 val
= emac
->rx_ring_size
/ 3;
1201 dma_writel(iudma
, val
, DMA_FLOWCL_REG(emac
->rx_chan
));
1202 val
= (emac
->rx_ring_size
* 2) / 3;
1203 dma_writel(iudma
, val
, DMA_FLOWCH_REG(emac
->rx_chan
));
1205 /* all set, enable emac and interrupts, start dma engine and
1206 * kick rx dma channel
1209 val
= emac_readl(emac
, ENET_CTL_REG
);
1210 val
|= ENET_CTL_ENABLE_MASK
;
1211 emac_writel(emac
, val
, ENET_CTL_REG
);
1212 dmac_writel(iudma
, DMAC_CHANCFG_EN_MASK
,
1213 DMAC_CHANCFG_REG
, emac
->rx_chan
);
1215 /* watch "mib counters about to overflow" interrupt */
1216 emac_writel(emac
, ENET_IR_MIB
, ENET_IR_REG
);
1217 emac_writel(emac
, ENET_IR_MIB
, ENET_IRMASK_REG
);
1219 /* watch "packet transferred" interrupt in rx and tx */
1220 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
,
1221 DMAC_IR_REG
, emac
->rx_chan
);
1222 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
,
1223 DMAC_IR_REG
, emac
->tx_chan
);
1225 /* make sure we enable napi before rx interrupt */
1226 napi_enable(&emac
->napi
);
1228 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
,
1229 DMAC_IRMASK_REG
, emac
->rx_chan
);
1230 dmac_writel(iudma
, DMAC_IR_PKTDONE_MASK
,
1231 DMAC_IRMASK_REG
, emac
->tx_chan
);
1234 phy_start(ndev
->phydev
);
1236 netif_carrier_on(ndev
);
1237 netif_start_queue(ndev
);
1242 for (i
= 0; i
< emac
->rx_ring_size
; i
++) {
1243 struct bcm6348_iudma_desc
*desc
;
1245 if (!emac
->rx_skb
[i
])
1248 desc
= &emac
->rx_desc_cpu
[i
];
1249 dma_unmap_single(dev
, desc
->address
, emac
->rx_skb_size
,
1251 kfree_skb(emac
->rx_skb
[i
]);
1253 kfree(emac
->rx_skb
);
1256 kfree(emac
->tx_skb
);
1259 dma_free_coherent(dev
, emac
->tx_desc_alloc_size
,
1260 emac
->tx_desc_cpu
, emac
->tx_desc_dma
);
1263 dma_free_coherent(dev
, emac
->rx_desc_alloc_size
,
1264 emac
->rx_desc_cpu
, emac
->rx_desc_dma
);
1267 if (emac
->irq_tx
!= -1)
1268 free_irq(emac
->irq_tx
, ndev
);
1271 free_irq(emac
->irq_rx
, ndev
);
1275 phy_disconnect(ndev
->phydev
);
1280 static int bcm6348_emac_stop(struct net_device
*ndev
)
1282 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
1283 struct bcm6348_iudma
*iudma
= emac
->iudma
;
1284 struct device
*dev
= &emac
->pdev
->dev
;
1287 netif_stop_queue(ndev
);
1288 napi_disable(&emac
->napi
);
1290 phy_stop(ndev
->phydev
);
1291 del_timer_sync(&emac
->rx_timeout
);
1293 /* mask all interrupts */
1294 emac_writel(emac
, 0, ENET_IRMASK_REG
);
1295 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->rx_chan
);
1296 dmac_writel(iudma
, 0, DMAC_IRMASK_REG
, emac
->tx_chan
);
1298 /* disable dma & emac */
1299 bcm6348_iudma_chan_stop(iudma
, emac
->tx_chan
);
1300 bcm6348_iudma_chan_stop(iudma
, emac
->rx_chan
);
1301 bcm6348_emac_disable_mac(emac
);
1303 /* force reclaim of all tx buffers */
1304 bcm6348_emac_tx_reclaim(ndev
, 1);
1306 /* free the rx skb ring */
1307 for (i
= 0; i
< emac
->rx_ring_size
; i
++) {
1308 struct bcm6348_iudma_desc
*desc
;
1310 if (!emac
->rx_skb
[i
])
1313 desc
= &emac
->rx_desc_cpu
[i
];
1314 dma_unmap_single_attrs(dev
, desc
->address
, emac
->rx_skb_size
,
1316 DMA_ATTR_SKIP_CPU_SYNC
);
1317 kfree_skb(emac
->rx_skb
[i
]);
1320 /* free remaining allocated memory */
1321 kfree(emac
->rx_skb
);
1322 kfree(emac
->tx_skb
);
1323 dma_free_coherent(dev
, emac
->rx_desc_alloc_size
, emac
->rx_desc_cpu
,
1325 dma_free_coherent(dev
, emac
->tx_desc_alloc_size
, emac
->tx_desc_cpu
,
1327 free_irq(emac
->irq_tx
, ndev
);
1328 free_irq(emac
->irq_rx
, ndev
);
1329 free_irq(ndev
->irq
, ndev
);
1331 netdev_reset_queue(ndev
);
1336 static const struct net_device_ops bcm6348_emac_ops
= {
1337 .ndo_open
= bcm6348_emac_open
,
1338 .ndo_stop
= bcm6348_emac_stop
,
1339 .ndo_start_xmit
= bcm6348_emac_start_xmit
,
1340 .ndo_set_mac_address
= bcm6348_emac_set_mac_address
,
1341 .ndo_set_rx_mode
= bcm6348_emac_set_multicast_list
,
1344 static int bcm6348_emac_mdio_op(struct bcm6348_emac
*emac
, uint32_t data
)
1348 /* Make sure mii interrupt status is cleared */
1349 emac_writel(emac
, ENET_IR_MII
, ENET_IR_REG
);
1352 emac_writel(emac
, data
, ENET_MIID_REG
);
1355 /* busy wait on mii interrupt bit, with timeout */
1358 if (emac_readl(emac
, ENET_IR_REG
) & ENET_IR_MII
)
1361 } while (limit
-- > 0);
1363 return (limit
< 0) ? 1 : 0;
1366 static int bcm6348_emac_mdio_read(struct mii_bus
*bus
, int phy_id
, int loc
)
1368 struct bcm6348_emac
*emac
= bus
->priv
;
1369 struct platform_device
*pdev
= emac
->pdev
;
1370 struct device
*dev
= &pdev
->dev
;
1373 reg
= 0x2 << ENET_MIID_TA_SHIFT
;
1374 reg
|= loc
<< ENET_MIID_REG_SHIFT
;
1375 reg
|= phy_id
<< ENET_MIID_PHY_SHIFT
;
1376 reg
|= ENET_MIID_OP_READ
;
1378 if (bcm6348_emac_mdio_op(emac
, reg
)) {
1379 dev_err(dev
, "mdio_read: phy=%d loc=%x timeout!\n",
1384 reg
= emac_readl(emac
, ENET_MIID_REG
);
1385 reg
= (reg
>> ENET_MIID_DATA_SHIFT
) & ENET_MIID_DATA_MASK
;
1390 static int bcm6348_emac_mdio_write(struct mii_bus
*bus
, int phy_id
,
1391 int loc
, uint16_t val
)
1393 struct bcm6348_emac
*emac
= bus
->priv
;
1394 struct platform_device
*pdev
= emac
->pdev
;
1395 struct device
*dev
= &pdev
->dev
;
1398 reg
= (val
<< ENET_MIID_DATA_SHIFT
) & ENET_MIID_DATA_MASK
;
1399 reg
|= 0x2 << ENET_MIID_TA_SHIFT
;
1400 reg
|= loc
<< ENET_MIID_REG_SHIFT
;
1401 reg
|= phy_id
<< ENET_MIID_PHY_SHIFT
;
1402 reg
|= ENET_MIID_OP_WRITE
;
1404 if (bcm6348_emac_mdio_op(emac
, reg
)) {
1405 dev_err(dev
, "mdio_write: phy=%d loc=%x timeout!\n",
1410 bcm6348_emac_mdio_op(emac
, reg
);
1415 static int bcm6348_emac_mdio_init(struct bcm6348_emac
*emac
,
1416 struct device_node
*np
)
1418 struct platform_device
*pdev
= emac
->pdev
;
1419 struct device
*dev
= &pdev
->dev
;
1420 struct device_node
*mnp
;
1421 struct mii_bus
*mii_bus
;
1424 mnp
= of_get_child_by_name(np
, "mdio");
1428 mii_bus
= devm_mdiobus_alloc(dev
);
1434 mii_bus
->priv
= emac
;
1435 mii_bus
->name
= np
->full_name
;
1436 snprintf(mii_bus
->id
, MII_BUS_ID_SIZE
, "%s-mii", dev_name(dev
));
1437 mii_bus
->parent
= dev
;
1438 mii_bus
->read
= bcm6348_emac_mdio_read
;
1439 mii_bus
->write
= bcm6348_emac_mdio_write
;
1440 mii_bus
->phy_mask
= 0x3f;
1442 ret
= devm_of_mdiobus_register(dev
, mii_bus
, mnp
);
1445 dev_err(dev
, "MDIO bus registration failed\n");
1449 dev_info(dev
, "MDIO bus init\n");
1455 * preinit hardware to allow mii operation while device is down
1457 static void bcm6348_emac_hw_preinit(struct bcm6348_emac
*emac
)
1462 /* make sure emac is disabled */
1463 bcm6348_emac_disable_mac(emac
);
1465 /* soft reset emac */
1466 val
= ENET_CTL_SRESET_MASK
;
1467 emac_writel(emac
, val
, ENET_CTL_REG
);
1472 val
= emac_readl(emac
, ENET_CTL_REG
);
1473 if (!(val
& ENET_CTL_SRESET_MASK
))
1478 /* select correct mii interface */
1479 val
= emac_readl(emac
, ENET_CTL_REG
);
1481 val
|= ENET_CTL_EPHYSEL_MASK
;
1483 val
&= ~ENET_CTL_EPHYSEL_MASK
;
1484 emac_writel(emac
, val
, ENET_CTL_REG
);
1486 /* turn on mdc clock */
1487 emac_writel(emac
, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT
) |
1488 ENET_MIISC_PREAMBLEEN_MASK
, ENET_MIISC_REG
);
1490 /* set mib counters to self-clear when read */
1491 val
= emac_readl(emac
, ENET_MIBCTL_REG
);
1492 val
|= ENET_MIBCTL_RDCLEAR_MASK
;
1493 emac_writel(emac
, val
, ENET_MIBCTL_REG
);
1496 static int bcm6348_emac_probe(struct platform_device
*pdev
)
1498 struct device
*dev
= &pdev
->dev
;
1499 struct device_node
*node
= dev
->of_node
;
1500 struct device_node
*dma_node
;
1501 struct platform_device
*dma_pdev
;
1502 struct bcm6348_emac
*emac
;
1503 struct bcm6348_iudma
*iudma
;
1504 struct net_device
*ndev
;
1509 dma_node
= of_parse_phandle(node
, "brcm,iudma", 0);
1513 dma_pdev
= of_find_device_by_node(dma_node
);
1514 of_node_put(dma_node
);
1518 iudma
= platform_get_drvdata(dma_pdev
);
1520 return -EPROBE_DEFER
;
1522 ndev
= devm_alloc_etherdev(dev
, sizeof(*emac
));
1526 platform_set_drvdata(pdev
, ndev
);
1527 SET_NETDEV_DEV(ndev
, dev
);
1529 emac
= netdev_priv(ndev
);
1530 emac
->iudma
= iudma
;
1532 emac
->net_dev
= ndev
;
1534 emac
->base
= devm_platform_ioremap_resource(pdev
, 0);
1535 if (IS_ERR_OR_NULL(emac
->base
))
1536 return PTR_ERR(emac
->base
);
1538 ndev
->irq
= of_irq_get_byname(node
, "emac");
1542 emac
->irq_rx
= of_irq_get_byname(node
, "rx");
1546 emac
->irq_tx
= of_irq_get_byname(node
, "tx");
1550 if (of_property_read_u32(node
, "dma-rx", &emac
->rx_chan
))
1553 if (of_property_read_u32(node
, "dma-tx", &emac
->tx_chan
))
1556 emac
->ext_mii
= of_property_read_bool(node
, "brcm,external-mii");
1558 emac
->rx_ring_size
= ENET_DEF_RX_DESC
;
1559 emac
->tx_ring_size
= ENET_DEF_TX_DESC
;
1560 emac
->copybreak
= ENET_DEF_CPY_BREAK
;
1563 emac
->old_duplex
= -1;
1564 emac
->old_pause
= -1;
1566 of_get_mac_address(node
, ndev
->dev_addr
);
1567 if (is_valid_ether_addr(ndev
->dev_addr
)) {
1568 dev_info(dev
, "mtd mac %pM\n", ndev
->dev_addr
);
1570 random_ether_addr(ndev
->dev_addr
);
1571 dev_info(dev
, "random mac %pM\n", ndev
->dev_addr
);
1574 emac
->rx_skb_size
= ALIGN(ndev
->mtu
+ ENET_MTU_OVERHEAD
,
1575 ENET_DMA_MAXBURST
* 4);
1577 emac
->num_clocks
= of_clk_get_parent_count(node
);
1578 if (emac
->num_clocks
) {
1579 emac
->clock
= devm_kcalloc(dev
, emac
->num_clocks
,
1580 sizeof(struct clk
*), GFP_KERNEL
);
1581 if (IS_ERR_OR_NULL(emac
->clock
))
1582 return PTR_ERR(emac
->clock
);
1584 for (i
= 0; i
< emac
->num_clocks
; i
++) {
1585 emac
->clock
[i
] = of_clk_get(node
, i
);
1586 if (IS_ERR_OR_NULL(emac
->clock
[i
])) {
1587 dev_err(dev
, "error getting emac clock %d\n", i
);
1588 return PTR_ERR(emac
->clock
[i
]);
1591 ret
= clk_prepare_enable(emac
->clock
[i
]);
1593 dev_err(dev
, "error enabling emac clock %d\n", i
);
1598 num_resets
= of_count_phandle_with_args(node
, "resets",
1601 emac
->num_resets
= num_resets
;
1603 emac
->num_resets
= 0;
1604 if (emac
->num_resets
) {
1605 emac
->reset
= devm_kcalloc(dev
, emac
->num_resets
,
1606 sizeof(struct reset_control
*),
1608 if (IS_ERR_OR_NULL(emac
->reset
))
1609 return PTR_ERR(emac
->reset
);
1612 for (i
= 0; i
< emac
->num_resets
; i
++) {
1613 emac
->reset
[i
] = devm_reset_control_get_by_index(dev
, i
);
1614 if (IS_ERR_OR_NULL(emac
->reset
[i
])) {
1615 dev_err(dev
, "error getting emac reset %d\n", i
);
1616 return PTR_ERR(emac
->reset
[i
]);
1619 ret
= reset_control_reset(emac
->reset
[i
]);
1621 dev_err(dev
, "error performing emac reset %d\n", i
);
1626 /* do minimal hardware init to be able to probe mii bus */
1627 bcm6348_emac_hw_preinit(emac
);
1629 ret
= bcm6348_emac_mdio_init(emac
, node
);
1633 spin_lock_init(&emac
->rx_lock
);
1635 timer_setup(&emac
->rx_timeout
, bcm6348_emac_refill_rx_timer
, 0);
1637 /* zero mib counters */
1638 for (i
= 0; i
< ENET_MIB_REG_COUNT
; i
++)
1639 emac_writel(emac
, 0, ENET_MIB_REG(i
));
1641 /* register netdevice */
1642 ndev
->netdev_ops
= &bcm6348_emac_ops
;
1643 ndev
->min_mtu
= ETH_ZLEN
- ETH_HLEN
;
1644 ndev
->mtu
= ETH_DATA_LEN
- VLAN_ETH_HLEN
;
1645 ndev
->max_mtu
= ENET_MAX_MTU
- VLAN_ETH_HLEN
;
1646 netif_napi_add(ndev
, &emac
->napi
, bcm6348_emac_poll
, 16);
1647 SET_NETDEV_DEV(ndev
, dev
);
1649 ret
= devm_register_netdev(dev
, ndev
);
1651 goto out_disable_clk
;
1653 netif_carrier_off(ndev
);
1655 ndev
->phydev
= of_phy_get_and_connect(ndev
, node
,
1656 bcm6348_emac_adjust_phy
);
1657 if (IS_ERR_OR_NULL(ndev
->phydev
))
1658 dev_warn(dev
, "PHY not found!\n");
1660 dev_info(dev
, "%s at 0x%px, IRQ %d\n", ndev
->name
, emac
->base
,
1666 for (i
= 0; i
< emac
->num_resets
; i
++)
1667 reset_control_assert(emac
->reset
[i
]);
1669 for (i
= 0; i
< emac
->num_clocks
; i
++)
1670 clk_disable_unprepare(emac
->clock
[i
]);
1675 static int bcm6348_emac_remove(struct platform_device
*pdev
)
1677 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1678 struct bcm6348_emac
*emac
= netdev_priv(ndev
);
1681 emac_writel(emac
, 0, ENET_MIISC_REG
);
1683 for (i
= 0; i
< emac
->num_resets
; i
++)
1684 reset_control_assert(emac
->reset
[i
]);
1686 for (i
= 0; i
< emac
->num_clocks
; i
++)
1687 clk_disable_unprepare(emac
->clock
[i
]);
1692 static const struct of_device_id bcm6348_emac_of_match
[] = {
1693 { .compatible
= "brcm,bcm6338-emac", },
1694 { .compatible
= "brcm,bcm6348-emac", },
1695 { .compatible
= "brcm,bcm6358-emac", },
1698 MODULE_DEVICE_TABLE(of
, bcm6348_emac_of_match
);
1700 static struct platform_driver bcm6348_emac_driver
= {
1702 .name
= "bcm6348-emac",
1703 .of_match_table
= of_match_ptr(bcm6348_emac_of_match
),
1705 .probe
= bcm6348_emac_probe
,
1706 .remove
= bcm6348_emac_remove
,
1709 int bcm6348_iudma_drivers_register(struct platform_device
*pdev
)
1711 struct device
*dev
= &pdev
->dev
;
1714 ret
= platform_driver_register(&bcm6348_emac_driver
);
1716 dev_err(dev
, "error registering emac driver!\n");