bmips: bcm6348-enet: add PHY support
target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6348-enet.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * BCM6348 Ethernet Controller Driver
4 *
5 * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
7 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
8 */
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/of_address.h>
20 #include <linux/of_clk.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_mdio.h>
23 #include <linux/of_net.h>
24 #include <linux/of_platform.h>
25 #include <linux/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/reset.h>
28
29 /* DMA channels */
30 #define DMA_CHAN_WIDTH 0x10
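
/*
 * Each DMA channel owns a 0x10-byte register window in both the
 * channel-config ("dma-channels") and state-ram ("dma-sram") blocks;
 * dmac_readl()/dmac_writel() and dmas_writel() below index those
 * windows with chan * DMA_CHAN_WIDTH.
 */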
31
32 /* Controller Configuration Register */
33 #define DMA_CFG_REG 0x0
34 #define DMA_CFG_EN_SHIFT 0
35 #define DMA_CFG_EN_MASK (1 << DMA_CFG_EN_SHIFT)
36 #define DMA_CFG_FLOWCH_MASK(x) (1 << ((x >> 1) + 1))
37
38 /* Flow Control Descriptor Low Threshold register */
39 #define DMA_FLOWCL_REG(x) (0x4 + (x) * 6)
40
41 /* Flow Control Descriptor High Threshold register */
42 #define DMA_FLOWCH_REG(x) (0x8 + (x) * 6)
43
44 /* Flow Control Descriptor Buffer Allocation Threshold register */
45 #define DMA_BUFALLOC_REG(x) (0xc + (x) * 6)
46 #define DMA_BUFALLOC_FORCE_SHIFT 31
47 #define DMA_BUFALLOC_FORCE_MASK (1 << DMA_BUFALLOC_FORCE_SHIFT)
48
49 /* Channel Configuration register */
50 #define DMAC_CHANCFG_REG 0x0
51 #define DMAC_CHANCFG_EN_SHIFT 0
52 #define DMAC_CHANCFG_EN_MASK (1 << DMAC_CHANCFG_EN_SHIFT)
53 #define DMAC_CHANCFG_PKTHALT_SHIFT 1
54 #define DMAC_CHANCFG_PKTHALT_MASK (1 << DMAC_CHANCFG_PKTHALT_SHIFT)
55 #define DMAC_CHANCFG_BUFHALT_SHIFT 2
56 #define DMAC_CHANCFG_BUFHALT_MASK (1 << DMAC_CHANCFG_BUFHALT_SHIFT)
57 #define DMAC_CHANCFG_CHAINING_SHIFT 2
58 #define DMAC_CHANCFG_CHAINING_MASK (1 << DMAC_CHANCFG_CHAINING_SHIFT)
59 #define DMAC_CHANCFG_WRAP_EN_SHIFT 3
60 #define DMAC_CHANCFG_WRAP_EN_MASK (1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
61 #define DMAC_CHANCFG_FLOWC_EN_SHIFT 4
62 #define DMAC_CHANCFG_FLOWC_EN_MASK (1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)
63
64 /* Interrupt Control/Status register */
65 #define DMAC_IR_REG 0x4
66 #define DMAC_IR_BUFDONE_MASK (1 << 0)
67 #define DMAC_IR_PKTDONE_MASK (1 << 1)
68 #define DMAC_IR_NOTOWNER_MASK (1 << 2)
69
70 /* Interrupt Mask register */
71 #define DMAC_IRMASK_REG 0x8
72
73 /* Maximum Burst Length */
74 #define DMAC_MAXBURST_REG 0xc
75
76 /* Ring Start Address register */
77 #define DMAS_RSTART_REG 0x0
78
79 /* State Ram Word 2 */
80 #define DMAS_SRAM2_REG 0x4
81
82 /* State Ram Word 3 */
83 #define DMAS_SRAM3_REG 0x8
84
85 /* State Ram Word 4 */
86 #define DMAS_SRAM4_REG 0xc
87
88 struct bcm6348_iudma_desc {
89 u32 len_stat;
90 u32 address;
91 };
92
93 /* control */
94 #define DMADESC_LENGTH_SHIFT 16
95 #define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
96 #define DMADESC_OWNER_MASK (1 << 15)
97 #define DMADESC_EOP_MASK (1 << 14)
98 #define DMADESC_SOP_MASK (1 << 13)
99 #define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
100 #define DMADESC_WRAP_MASK (1 << 12)
101
102 /* status */
103 #define DMADESC_UNDER_MASK (1 << 9)
104 #define DMADESC_APPEND_CRC (1 << 8)
105 #define DMADESC_OVSIZE_MASK (1 << 4)
106 #define DMADESC_RXER_MASK (1 << 2)
107 #define DMADESC_CRC_MASK (1 << 1)
108 #define DMADESC_OV_MASK (1 << 0)
109 #define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
110 DMADESC_OVSIZE_MASK | \
111 DMADESC_RXER_MASK | \
112 DMADESC_CRC_MASK | \
113 DMADESC_OV_MASK)
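
/*
 * A descriptor is handed to the DMA engine by setting DMADESC_OWNER_MASK
 * in len_stat (buffer length in the upper half, control/status flags in
 * the lower half); the engine clears the bit again once it is done with
 * the buffer, which is what the rx/tx ring code below polls for.
 */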
114
115 struct bcm6348_iudma {
116 void __iomem *dma_base;
117 void __iomem *dma_chan;
118 void __iomem *dma_sram;
119
120 spinlock_t dma_base_lock;
121
122 struct clk **clock;
123 unsigned int num_clocks;
124
125 struct reset_control **reset;
126 unsigned int num_resets;
127
128 unsigned int dma_channels;
129 };
130
131 static inline u32 dma_readl(struct bcm6348_iudma *iudma, u32 off)
132 {
133 u32 val;
134
135 spin_lock(&iudma->dma_base_lock);
136 val = __raw_readl(iudma->dma_base + off);
137 spin_unlock(&iudma->dma_base_lock);
138
139 return val;
140 }
141
142 static inline void dma_writel(struct bcm6348_iudma *iudma, u32 val, u32 off)
143 {
144 spin_lock(&iudma->dma_base_lock);
145 __raw_writel(val, iudma->dma_base + off);
146 spin_unlock(&iudma->dma_base_lock);
147 }
148
149 static inline u32 dmac_readl(struct bcm6348_iudma *iudma, u32 off, int chan)
150 {
151 return __raw_readl(iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
152 }
153
154 static inline void dmac_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
155 int chan)
156 {
157 __raw_writel(val, iudma->dma_chan + chan * DMA_CHAN_WIDTH + off);
158 }
159
160 static inline void dmas_writel(struct bcm6348_iudma *iudma, u32 val, u32 off,
161 int chan)
162 {
163 __raw_writel(val, iudma->dma_sram + chan * DMA_CHAN_WIDTH + off);
164 }
165
166 static void bcm6348_iudma_chan_stop(struct bcm6348_iudma *iudma, int chan)
167 {
168 int limit = 1000;
169
170 dmac_writel(iudma, 0, DMAC_CHANCFG_REG, chan);
171
172 do {
173 u32 val;
174
175 val = dmac_readl(iudma, DMAC_CHANCFG_REG, chan);
176 if (!(val & DMAC_CHANCFG_EN_MASK))
177 break;
178
179 udelay(1);
180 } while (limit--);
181 }
182
183 static int bcm6348_iudma_probe(struct platform_device *pdev)
184 {
185 struct device *dev = &pdev->dev;
186 struct device_node *node = dev->of_node;
187 struct bcm6348_iudma *iudma;
188 unsigned i;
189 int num_resets;
190 int ret;
191
192 iudma = devm_kzalloc(dev, sizeof(*iudma), GFP_KERNEL);
193 if (!iudma)
194 return -ENOMEM;
195
196 if (of_property_read_u32(node, "dma-channels", &iudma->dma_channels))
197 return -ENODEV;
198
199 iudma->dma_base = devm_platform_ioremap_resource_byname(pdev, "dma");
200 if (IS_ERR_OR_NULL(iudma->dma_base))
201 return PTR_ERR(iudma->dma_base);
202
203 iudma->dma_chan = devm_platform_ioremap_resource_byname(pdev,
204 "dma-channels");
205 if (IS_ERR_OR_NULL(iudma->dma_chan))
206 return PTR_ERR(iudma->dma_chan);
207
208 iudma->dma_sram = devm_platform_ioremap_resource_byname(pdev,
209 "dma-sram");
210 if (IS_ERR_OR_NULL(iudma->dma_sram))
211 return PTR_ERR(iudma->dma_sram);
212
213 iudma->num_clocks = of_clk_get_parent_count(node);
214 if (iudma->num_clocks) {
215 iudma->clock = devm_kcalloc(dev, iudma->num_clocks,
216 sizeof(struct clk *), GFP_KERNEL);
217 if (!iudma->clock)
218 return -ENOMEM;
219 }
220 for (i = 0; i < iudma->num_clocks; i++) {
221 iudma->clock[i] = of_clk_get(node, i);
222 if (IS_ERR_OR_NULL(iudma->clock[i])) {
223 dev_err(dev, "error getting iudma clock %d\n", i);
224 return PTR_ERR(iudma->clock[i]);
225 }
226
227 ret = clk_prepare_enable(iudma->clock[i]);
228 if (ret) {
229 dev_err(dev, "error enabling iudma clock %d\n", i);
230 return ret;
231 }
232 }
233
234 num_resets = of_count_phandle_with_args(node, "resets",
235 "#reset-cells");
236 if (num_resets > 0)
237 iudma->num_resets = num_resets;
238 else
239 iudma->num_resets = 0;
240 if (iudma->num_resets) {
241 iudma->reset = devm_kcalloc(dev, iudma->num_resets,
242 sizeof(struct reset_control *),
243 GFP_KERNEL);
244 if (!iudma->reset)
245 return -ENOMEM;
246 }
247 for (i = 0; i < iudma->num_resets; i++) {
248 iudma->reset[i] = devm_reset_control_get_by_index(dev, i);
249 if (IS_ERR_OR_NULL(iudma->reset[i])) {
250 dev_err(dev, "error getting iudma reset %d\n", i);
251 return PTR_ERR(iudma->reset[i]);
252 }
253
254 ret = reset_control_reset(iudma->reset[i]);
255 if (ret) {
256 dev_err(dev, "error performing iudma reset %d\n", i);
257 return ret;
258 }
259 }
260
261 spin_lock_init(&iudma->dma_base_lock);
262
263 dma_writel(iudma, 0, DMA_CFG_REG);
264 for (i = 0; i < iudma->dma_channels; i++)
265 bcm6348_iudma_chan_stop(iudma, i);
266 dma_writel(iudma, DMA_CFG_EN_MASK, DMA_CFG_REG);
267
268 dev_info(dev, "bcm6348-iudma @ 0x%px\n", iudma->dma_base);
269
270 platform_set_drvdata(pdev, iudma);
271
272 return 0;
273 }
274
275 static const struct of_device_id bcm6348_iudma_of_match[] = {
276 { .compatible = "brcm,bcm6338-iudma", },
277 { .compatible = "brcm,bcm6348-iudma", },
278 { .compatible = "brcm,bcm6358-iudma", },
279 { /* sentinel */ },
280 };
281
282 static struct platform_driver bcm6348_iudma_driver = {
283 .driver = {
284 .name = "bcm6348-iudma",
285 .of_match_table = of_match_ptr(bcm6348_iudma_of_match),
286 },
287 .probe = bcm6348_iudma_probe,
288 };
289 builtin_platform_driver(bcm6348_iudma_driver);
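
/*
 * Illustrative devicetree node for this DMA controller. This is only a
 * sketch: the node name, unit address and register sizes are made-up
 * examples, while the compatible string, reg-names and the
 * "dma-channels" property are what bcm6348_iudma_probe() above actually
 * parses (optional "clocks" and "resets" phandles are handled as well).
 *
 *	iudma: dma-controller@fffe2400 {
 *		compatible = "brcm,bcm6348-iudma";
 *		reg = <0xfffe2400 0x30>,
 *		      <0xfffe2500 0x80>,
 *		      <0xfffe2600 0x80>;
 *		reg-names = "dma", "dma-channels", "dma-sram";
 *		dma-channels = <8>;
 *	};
 */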
290
291 /*
292 * BCM6348 Ethernet MACs
293 */
294
295 /* MTU */
296 #define ENET_MAX_MTU 2046
297
298 #define ENET_TAG_SIZE 6
299 #define ENET_MTU_OVERHEAD (VLAN_ETH_HLEN + VLAN_HLEN + \
300 ENET_TAG_SIZE)
301
302 /* Default number of descriptors */
303 #define ENET_DEF_RX_DESC 64
304 #define ENET_DEF_TX_DESC 32
305 #define ENET_DEF_CPY_BREAK 128
306
307 /* Maximum burst length for DMA (in 4-byte units) */
308 #define ENET_DMA_MAXBURST 8
309
310 /* Receiver Configuration register */
311 #define ENET_RXCFG_REG 0x0
312 #define ENET_RXCFG_ALLMCAST_SHIFT 1
313 #define ENET_RXCFG_ALLMCAST_MASK (1 << ENET_RXCFG_ALLMCAST_SHIFT)
314 #define ENET_RXCFG_PROMISC_SHIFT 3
315 #define ENET_RXCFG_PROMISC_MASK (1 << ENET_RXCFG_PROMISC_SHIFT)
316 #define ENET_RXCFG_LOOPBACK_SHIFT 4
317 #define ENET_RXCFG_LOOPBACK_MASK (1 << ENET_RXCFG_LOOPBACK_SHIFT)
318 #define ENET_RXCFG_ENFLOW_SHIFT 5
319 #define ENET_RXCFG_ENFLOW_MASK (1 << ENET_RXCFG_ENFLOW_SHIFT)
320
321 /* Receive Maximum Length register */
322 #define ENET_RXMAXLEN_REG 0x4
323 #define ENET_RXMAXLEN_SHIFT 0
324 #define ENET_RXMAXLEN_MASK (0x7ff << ENET_RXMAXLEN_SHIFT)
325
326 /* Transmit Maximum Length register */
327 #define ENET_TXMAXLEN_REG 0x8
328 #define ENET_TXMAXLEN_SHIFT 0
329 #define ENET_TXMAXLEN_MASK (0x7ff << ENET_TXMAXLEN_SHIFT)
330
331 /* MII Status/Control register */
332 #define ENET_MIISC_REG 0x10
333 #define ENET_MIISC_MDCFREQDIV_SHIFT 0
334 #define ENET_MIISC_MDCFREQDIV_MASK (0x7f << ENET_MIISC_MDCFREQDIV_SHIFT)
335 #define ENET_MIISC_PREAMBLEEN_SHIFT 7
336 #define ENET_MIISC_PREAMBLEEN_MASK (1 << ENET_MIISC_PREAMBLEEN_SHIFT)
337
338 /* MII Data register */
339 #define ENET_MIID_REG 0x14
340 #define ENET_MIID_DATA_SHIFT 0
341 #define ENET_MIID_DATA_MASK (0xffff << ENET_MIID_DATA_SHIFT)
342 #define ENET_MIID_TA_SHIFT 16
343 #define ENET_MIID_TA_MASK (0x3 << ENET_MIID_TA_SHIFT)
344 #define ENET_MIID_REG_SHIFT 18
345 #define ENET_MIID_REG_MASK (0x1f << ENET_MIID_REG_SHIFT)
346 #define ENET_MIID_PHY_SHIFT 23
347 #define ENET_MIID_PHY_MASK (0x1f << ENET_MIID_PHY_SHIFT)
348 #define ENET_MIID_OP_SHIFT 28
349 #define ENET_MIID_OP_WRITE (0x5 << ENET_MIID_OP_SHIFT)
350 #define ENET_MIID_OP_READ (0x6 << ENET_MIID_OP_SHIFT)
351
352 /* Ethernet Interrupt Mask register */
353 #define ENET_IRMASK_REG 0x18
354
355 /* Ethernet Interrupt register */
356 #define ENET_IR_REG 0x1c
357 #define ENET_IR_MII BIT(0)
358 #define ENET_IR_MIB BIT(1)
359 #define ENET_IR_FLOWC BIT(2)
360
361 /* Ethernet Control register */
362 #define ENET_CTL_REG 0x2c
363 #define ENET_CTL_ENABLE_SHIFT 0
364 #define ENET_CTL_ENABLE_MASK (1 << ENET_CTL_ENABLE_SHIFT)
365 #define ENET_CTL_DISABLE_SHIFT 1
366 #define ENET_CTL_DISABLE_MASK (1 << ENET_CTL_DISABLE_SHIFT)
367 #define ENET_CTL_SRESET_SHIFT 2
368 #define ENET_CTL_SRESET_MASK (1 << ENET_CTL_SRESET_SHIFT)
369 #define ENET_CTL_EPHYSEL_SHIFT 3
370 #define ENET_CTL_EPHYSEL_MASK (1 << ENET_CTL_EPHYSEL_SHIFT)
371
372 /* Transmit Control register */
373 #define ENET_TXCTL_REG 0x30
374 #define ENET_TXCTL_FD_SHIFT 0
375 #define ENET_TXCTL_FD_MASK (1 << ENET_TXCTL_FD_SHIFT)
376
377 /* Transmit Watermark register */
378 #define ENET_TXWMARK_REG 0x34
379 #define ENET_TXWMARK_WM_SHIFT 0
380 #define ENET_TXWMARK_WM_MASK (0x3f << ENET_TXWMARK_WM_SHIFT)
381
382 /* MIB Control register */
383 #define ENET_MIBCTL_REG 0x38
384 #define ENET_MIBCTL_RDCLEAR_SHIFT 0
385 #define ENET_MIBCTL_RDCLEAR_MASK (1 << ENET_MIBCTL_RDCLEAR_SHIFT)
386
387 /* Perfect Match Data Low register */
388 #define ENET_PML_REG(x) (0x58 + (x) * 8)
389 #define ENET_PMH_REG(x) (0x5c + (x) * 8)
390 #define ENET_PMH_DATAVALID_SHIFT 16
391 #define ENET_PMH_DATAVALID_MASK (1 << ENET_PMH_DATAVALID_SHIFT)
392
393 /* MIB register */
394 #define ENET_MIB_REG(x) (0x200 + (x) * 4)
395 #define ENET_MIB_REG_COUNT 55
396
397 /*
398 * TX FIFO transmit threshold (in 4-byte units). The FIFO is 256 bytes,
399 * so this value must be low enough that a DMA transfer of the above
400 * burst length cannot overflow the FIFO.
401 */
402 #define ENET_TX_FIFO_TRESH 32
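
/*
 * For instance, reading the constraint above with the defaults in this
 * file: a threshold of 32 * 4 = 128 bytes plus one ENET_DMA_MAXBURST
 * transfer of 8 * 4 = 32 bytes still leaves headroom in the 256-byte
 * FIFO.
 */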
403
404 struct bcm6348_emac {
405 struct bcm6348_iudma *iudma;
406 void __iomem *base;
407
408 struct clk **clock;
409 unsigned int num_clocks;
410
411 struct reset_control **reset;
412 unsigned int num_resets;
413
414 int copybreak;
415
416 int irq_rx;
417 int irq_tx;
418
419 /* hw view of rx & tx dma ring */
420 dma_addr_t rx_desc_dma;
421 dma_addr_t tx_desc_dma;
422
423 /* allocated size (in bytes) for rx & tx dma ring */
424 unsigned int rx_desc_alloc_size;
425 unsigned int tx_desc_alloc_size;
426
427 struct napi_struct napi;
428
429 /* dma channel id for rx */
430 int rx_chan;
431
432 /* number of dma desc in rx ring */
433 int rx_ring_size;
434
435 /* cpu view of rx dma ring */
436 struct bcm6348_iudma_desc *rx_desc_cpu;
437
438 /* current number of armed descriptors given to hardware for rx */
439 int rx_desc_count;
440
441 /* next rx descriptor to fetch from hardware */
442 int rx_curr_desc;
443
444 /* next dirty rx descriptor to refill */
445 int rx_dirty_desc;
446
447 /* size of allocated rx skbs */
448 unsigned int rx_skb_size;
449
450 /* list of skb given to hw for rx */
451 struct sk_buff **rx_skb;
452
453 /* used when rx skb allocation failed, so we defer rx queue
454 * refill */
455 struct timer_list rx_timeout;
456
457 /* lock rx_timeout against rx normal operation */
458 spinlock_t rx_lock;
459
460 /* dma channel id for tx */
461 int tx_chan;
462
463 /* number of dma desc in tx ring */
464 int tx_ring_size;
465
466 /* cpu view of tx dma ring */
467 struct bcm6348_iudma_desc *tx_desc_cpu;
468
469 /* number of available descriptor for tx */
470 int tx_desc_count;
471
472 /* next available tx descriptor */
473 int tx_curr_desc;
474
475 /* next dirty tx descriptor to reclaim */
476 int tx_dirty_desc;
477
478 /* list of skb given to hw for tx */
479 struct sk_buff **tx_skb;
480
481 /* lock used by tx reclaim and xmit */
482 spinlock_t tx_lock;
483
484 /* network device reference */
485 struct net_device *net_dev;
486
487 /* platform device reference */
488 struct platform_device *pdev;
489
490 /* external mii bus */
491 bool ext_mii;
492
493 /* phy */
494 int old_link;
495 int old_duplex;
496 int old_pause;
497 };
498
499 static inline void emac_writel(struct bcm6348_emac *emac, u32 val, u32 off)
500 {
501 __raw_writel(val, emac->base + off);
502 }
503
504 static inline u32 emac_readl(struct bcm6348_emac *emac, u32 off)
505 {
506 return __raw_readl(emac->base + off);
507 }
508
509 /*
510 * refill rx queue
511 */
512 static int bcm6348_emac_refill_rx(struct net_device *ndev)
513 {
514 struct bcm6348_emac *emac = netdev_priv(ndev);
515 struct bcm6348_iudma *iudma = emac->iudma;
516 struct platform_device *pdev = emac->pdev;
517 struct device *dev = &pdev->dev;
518
519 while (emac->rx_desc_count < emac->rx_ring_size) {
520 struct bcm6348_iudma_desc *desc;
521 struct sk_buff *skb;
522 dma_addr_t p;
523 int desc_idx;
524 u32 len_stat;
525
526 desc_idx = emac->rx_dirty_desc;
527 desc = &emac->rx_desc_cpu[desc_idx];
528
529 if (!emac->rx_skb[desc_idx]) {
530 skb = netdev_alloc_skb(ndev, emac->rx_skb_size);
531 if (!skb)
532 break;
533 emac->rx_skb[desc_idx] = skb;
534 p = dma_map_single(dev, skb->data, emac->rx_skb_size,
535 DMA_FROM_DEVICE);
536 desc->address = p;
537 }
538
539 len_stat = emac->rx_skb_size << DMADESC_LENGTH_SHIFT;
540 len_stat |= DMADESC_OWNER_MASK;
541 if (emac->rx_dirty_desc == emac->rx_ring_size - 1) {
542 len_stat |= DMADESC_WRAP_MASK;
543 emac->rx_dirty_desc = 0;
544 } else {
545 emac->rx_dirty_desc++;
546 }
547 wmb();
548 desc->len_stat = len_stat;
549
550 emac->rx_desc_count++;
551
552 /* tell dma engine we allocated one buffer */
553 dma_writel(iudma, 1, DMA_BUFALLOC_REG(emac->rx_chan));
554 }
555
556 /* If rx ring is still empty, set a timer to try allocating
557 * again at a later time. */
558 if (emac->rx_desc_count == 0 && netif_running(ndev)) {
559 dev_warn(dev, "unable to refill rx ring\n");
560 emac->rx_timeout.expires = jiffies + HZ;
561 add_timer(&emac->rx_timeout);
562 }
563
564 return 0;
565 }
566
567 /*
568 * timer callback to defer refill rx queue in case we're OOM
569 */
570 static void bcm6348_emac_refill_rx_timer(struct timer_list *t)
571 {
572 struct bcm6348_emac *emac = from_timer(emac, t, rx_timeout);
573 struct net_device *ndev = emac->net_dev;
574
575 spin_lock(&emac->rx_lock);
576 bcm6348_emac_refill_rx(ndev);
577 spin_unlock(&emac->rx_lock);
578 }
579
580 /*
581 * extract packet from rx queue
582 */
583 static int bcm6348_emac_receive_queue(struct net_device *ndev, int budget)
584 {
585 struct bcm6348_emac *emac = netdev_priv(ndev);
586 struct bcm6348_iudma *iudma = emac->iudma;
587 struct platform_device *pdev = emac->pdev;
588 struct device *dev = &pdev->dev;
589 int processed = 0;
590
591 /* don't scan the ring further than the number of refilled
592 * descriptors */
593 if (budget > emac->rx_desc_count)
594 budget = emac->rx_desc_count;
595
596 do {
597 struct bcm6348_iudma_desc *desc;
598 struct sk_buff *skb;
599 int desc_idx;
600 u32 len_stat;
601 unsigned int len;
602
603 desc_idx = emac->rx_curr_desc;
604 desc = &emac->rx_desc_cpu[desc_idx];
605
606 /* make sure we actually read the descriptor status at
607 * each loop */
608 rmb();
609
610 len_stat = desc->len_stat;
611
612 /* break if dma ownership belongs to hw */
613 if (len_stat & DMADESC_OWNER_MASK)
614 break;
615
616 processed++;
617 emac->rx_curr_desc++;
618 if (emac->rx_curr_desc == emac->rx_ring_size)
619 emac->rx_curr_desc = 0;
620 emac->rx_desc_count--;
621
622 /* if the packet does not have start of packet _and_
623 * end of packet flag set, then just recycle it */
624 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
625 ndev->stats.rx_dropped++;
626 continue;
627 }
628
629 /* valid packet */
630 skb = emac->rx_skb[desc_idx];
631 len = (len_stat & DMADESC_LENGTH_MASK)
632 >> DMADESC_LENGTH_SHIFT;
633 /* don't include FCS */
634 len -= 4;
635
636 if (len < emac->copybreak) {
637 struct sk_buff *nskb;
638
639 nskb = napi_alloc_skb(&emac->napi, len);
640 if (!nskb) {
641 /* forget packet, just rearm desc */
642 ndev->stats.rx_dropped++;
643 continue;
644 }
645
646 dma_sync_single_for_cpu(dev, desc->address,
647 len, DMA_FROM_DEVICE);
648 memcpy(nskb->data, skb->data, len);
649 dma_sync_single_for_device(dev, desc->address,
650 len, DMA_FROM_DEVICE);
651 skb = nskb;
652 } else {
653 dma_unmap_single(dev, desc->address,
654 emac->rx_skb_size, DMA_FROM_DEVICE);
655 emac->rx_skb[desc_idx] = NULL;
656 }
657
658 skb_put(skb, len);
659 skb->protocol = eth_type_trans(skb, ndev);
660 ndev->stats.rx_packets++;
661 ndev->stats.rx_bytes += len;
662 netif_receive_skb(skb);
663 } while (--budget > 0);
664
665 if (processed || !emac->rx_desc_count) {
666 bcm6348_emac_refill_rx(ndev);
667
668 /* kick rx dma */
669 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
670 emac->rx_chan);
671 }
672
673 return processed;
674 }
675
676 /*
677 * try to or force reclaim of transmitted buffers
678 */
679 static int bcm6348_emac_tx_reclaim(struct net_device *ndev, int force)
680 {
681 struct bcm6348_emac *emac = netdev_priv(ndev);
682 struct platform_device *pdev = emac->pdev;
683 struct device *dev = &pdev->dev;
684 int released = 0;
685
686 while (emac->tx_desc_count < emac->tx_ring_size) {
687 struct bcm6348_iudma_desc *desc;
688 struct sk_buff *skb;
689
690 /* We run in a bh and fight against start_xmit, which
691 * is called with bh disabled */
692 spin_lock(&emac->tx_lock);
693
694 desc = &emac->tx_desc_cpu[emac->tx_dirty_desc];
695
696 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
697 spin_unlock(&emac->tx_lock);
698 break;
699 }
700
701 /* ensure the other fields of the descriptor were not read
702 * before we checked ownership */
703 rmb();
704
705 skb = emac->tx_skb[emac->tx_dirty_desc];
706 emac->tx_skb[emac->tx_dirty_desc] = NULL;
707 dma_unmap_single(dev, desc->address, skb->len, DMA_TO_DEVICE);
708
709 emac->tx_dirty_desc++;
710 if (emac->tx_dirty_desc == emac->tx_ring_size)
711 emac->tx_dirty_desc = 0;
712 emac->tx_desc_count++;
713
714 spin_unlock(&emac->tx_lock);
715
716 if (desc->len_stat & DMADESC_UNDER_MASK)
717 ndev->stats.tx_errors++;
718
719 dev_kfree_skb(skb);
720 released++;
721 }
722
723 if (netif_queue_stopped(ndev) && released)
724 netif_wake_queue(ndev);
725
726 return released;
727 }
728
729 static int bcm6348_emac_poll(struct napi_struct *napi, int budget)
730 {
731 struct bcm6348_emac *emac = container_of(napi, struct bcm6348_emac,
732 napi);
733 struct bcm6348_iudma *iudma = emac->iudma;
734 struct net_device *ndev = emac->net_dev;
735 int rx_work_done;
736
737 /* ack interrupts */
738 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
739 emac->rx_chan);
740 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG,
741 emac->tx_chan);
742
743 /* reclaim sent skb */
744 bcm6348_emac_tx_reclaim(ndev, 0);
745
746 spin_lock(&emac->rx_lock);
747 rx_work_done = bcm6348_emac_receive_queue(ndev, budget);
748 spin_unlock(&emac->rx_lock);
749
750 if (rx_work_done >= budget) {
751 /* rx queue is not yet empty/clean */
752 return rx_work_done;
753 }
754
755 /* no more packet in rx/tx queue, remove device from poll
756 * queue */
757 napi_complete_done(napi, rx_work_done);
758
759 /* restore rx/tx interrupt */
760 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
761 emac->rx_chan);
762 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG,
763 emac->tx_chan);
764
765 return rx_work_done;
766 }
767
768 /*
769 * emac interrupt handler
770 */
771 static irqreturn_t bcm6348_emac_isr_mac(int irq, void *dev_id)
772 {
773 struct net_device *ndev = dev_id;
774 struct bcm6348_emac *emac = netdev_priv(ndev);
775 u32 stat;
776
777 stat = emac_readl(emac, ENET_IR_REG);
778 if (!(stat & ENET_IR_MIB))
779 return IRQ_NONE;
780
781 /* clear & mask interrupt */
782 emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
783 emac_writel(emac, 0, ENET_IRMASK_REG);
784
785 return IRQ_HANDLED;
786 }
787
788 /*
789 * rx/tx dma interrupt handler
790 */
791 static irqreturn_t bcm6348_emac_isr_dma(int irq, void *dev_id)
792 {
793 struct net_device *ndev = dev_id;
794 struct bcm6348_emac *emac = netdev_priv(ndev);
795 struct bcm6348_iudma *iudma = emac->iudma;
796
797 /* mask rx/tx interrupts */
798 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
799 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
800
801 napi_schedule(&emac->napi);
802
803 return IRQ_HANDLED;
804 }
805
806 /*
807 * tx request callback
808 */
809 static netdev_tx_t bcm6348_emac_start_xmit(struct sk_buff *skb,
810 struct net_device *ndev)
811 {
812 struct bcm6348_emac *emac = netdev_priv(ndev);
813 struct bcm6348_iudma *iudma = emac->iudma;
814 struct platform_device *pdev = emac->pdev;
815 struct device *dev = &pdev->dev;
816 struct bcm6348_iudma_desc *desc;
817 u32 len_stat;
818 netdev_tx_t ret;
819
820 /* lock against tx reclaim */
821 spin_lock(&emac->tx_lock);
822
823 /* make sure the tx hw queue is not full; this should not happen
824 * since we stop the queue before that is the case */
825 if (unlikely(!emac->tx_desc_count)) {
826 netif_stop_queue(ndev);
827 dev_err(dev, "xmit called with no tx desc available?\n");
828 ret = NETDEV_TX_BUSY;
829 goto out_unlock;
830 }
831
832 /* point to the next available desc */
833 desc = &emac->tx_desc_cpu[emac->tx_curr_desc];
834 emac->tx_skb[emac->tx_curr_desc] = skb;
835
836 /* fill descriptor */
837 desc->address = dma_map_single(dev, skb->data, skb->len,
838 DMA_TO_DEVICE);
839
840 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
841 len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
842 DMADESC_OWNER_MASK;
843
844 emac->tx_curr_desc++;
845 if (emac->tx_curr_desc == emac->tx_ring_size) {
846 emac->tx_curr_desc = 0;
847 len_stat |= DMADESC_WRAP_MASK;
848 }
849 emac->tx_desc_count--;
850
851 /* dma might be already polling, make sure we update desc
852 * fields in correct order */
853 wmb();
854 desc->len_stat = len_stat;
855 wmb();
856
857 /* kick tx dma */
858 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
859 emac->tx_chan);
860
861 /* stop queue if no more desc available */
862 if (!emac->tx_desc_count)
863 netif_stop_queue(ndev);
864
865 ndev->stats.tx_bytes += skb->len;
866 ndev->stats.tx_packets++;
867 ret = NETDEV_TX_OK;
868
869 out_unlock:
870 spin_unlock(&emac->tx_lock);
871 return ret;
872 }
873
874 /*
875 * Change the interface's MAC address.
876 */
877 static int bcm6348_emac_set_mac_address(struct net_device *ndev, void *p)
878 {
879 struct bcm6348_emac *emac = netdev_priv(ndev);
880 struct sockaddr *addr = p;
881 u32 val;
882
883 eth_hw_addr_set(ndev, addr->sa_data);
884
885 /* use perfect match register 0 to store our MAC address */
886 val = (ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
887 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5];
888 emac_writel(emac, val, ENET_PML_REG(0));
889
890 val = (ndev->dev_addr[0] << 8 | ndev->dev_addr[1]);
891 val |= ENET_PMH_DATAVALID_MASK;
892 emac_writel(emac, val, ENET_PMH_REG(0));
893
894 return 0;
895 }
896
897 /*
898 * Change rx mode (promiscuous/allmulti) and update multicast list
899 */
900 static void bcm6348_emac_set_multicast_list(struct net_device *ndev)
901 {
902 struct bcm6348_emac *emac = netdev_priv(ndev);
903 struct netdev_hw_addr *ha;
904 u32 val;
905 unsigned int i;
906
907 val = emac_readl(emac, ENET_RXCFG_REG);
908
909 if (ndev->flags & IFF_PROMISC)
910 val |= ENET_RXCFG_PROMISC_MASK;
911 else
912 val &= ~ENET_RXCFG_PROMISC_MASK;
913
914 /* only 3 perfect match registers left, first one is used for
915 * own mac address */
916 if ((ndev->flags & IFF_ALLMULTI) || netdev_mc_count(ndev) > 3)
917 val |= ENET_RXCFG_ALLMCAST_MASK;
918 else
919 val &= ~ENET_RXCFG_ALLMCAST_MASK;
920
921 /* no need to set perfect match registers if we catch all
922 * multicast */
923 if (val & ENET_RXCFG_ALLMCAST_MASK) {
924 emac_writel(emac, val, ENET_RXCFG_REG);
925 return;
926 }
927
928 i = 0;
929 netdev_for_each_mc_addr(ha, ndev) {
930 u8 *dmi_addr;
931 u32 tmp;
932
933 if (i == 3)
934 break;
935
936 /* update perfect match registers */
937 dmi_addr = ha->addr;
938 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
939 (dmi_addr[4] << 8) | dmi_addr[5];
940 emac_writel(emac, tmp, ENET_PML_REG(i + 1));
941
942 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
943 tmp |= ENET_PMH_DATAVALID_MASK;
944 emac_writel(emac, tmp, ENET_PMH_REG(i++ + 1));
945 }
946
947 for (; i < 3; i++) {
948 emac_writel(emac, 0, ENET_PML_REG(i + 1));
949 emac_writel(emac, 0, ENET_PMH_REG(i + 1));
950 }
951
952 emac_writel(emac, val, ENET_RXCFG_REG);
953 }
954
955 /*
956 * disable emac
957 */
958 static void bcm6348_emac_disable_mac(struct bcm6348_emac *emac)
959 {
960 int limit;
961 u32 val;
962
963 val = emac_readl(emac, ENET_CTL_REG);
964 val |= ENET_CTL_DISABLE_MASK;
965 emac_writel(emac, val, ENET_CTL_REG);
966
967 limit = 1000;
968 do {
969 val = emac_readl(emac, ENET_CTL_REG);
970 if (!(val & ENET_CTL_DISABLE_MASK))
971 break;
972 udelay(1);
973 } while (limit--);
974 }
975
976 /*
977 * set emac duplex parameters
978 */
979 static void bcm6348_emac_set_duplex(struct bcm6348_emac *emac, int fullduplex)
980 {
981 u32 val;
982
983 val = emac_readl(emac, ENET_TXCTL_REG);
984 if (fullduplex)
985 val |= ENET_TXCTL_FD_MASK;
986 else
987 val &= ~ENET_TXCTL_FD_MASK;
988 emac_writel(emac, val, ENET_TXCTL_REG);
989 }
990
991 /*
992 * set emac flow control parameters
993 */
994 static void bcm6348_emac_set_flow(struct bcm6348_emac *emac, bool rx_en, bool tx_en)
995 {
996 struct bcm6348_iudma *iudma = emac->iudma;
997 u32 val;
998
999 val = emac_readl(emac, ENET_RXCFG_REG);
1000 if (rx_en)
1001 val |= ENET_RXCFG_ENFLOW_MASK;
1002 else
1003 val &= ~ENET_RXCFG_ENFLOW_MASK;
1004 emac_writel(emac, val, ENET_RXCFG_REG);
1005
1006 dmas_writel(iudma, emac->rx_desc_dma, DMAS_RSTART_REG, emac->rx_chan);
1007 dmas_writel(iudma, emac->tx_desc_dma, DMAS_RSTART_REG, emac->tx_chan);
1008
1009 val = dma_readl(iudma, DMA_CFG_REG);
1010 if (tx_en)
1011 val |= DMA_CFG_FLOWCH_MASK(emac->rx_chan);
1012 else
1013 val &= ~DMA_CFG_FLOWCH_MASK(emac->rx_chan);
1014 dma_writel(iudma, val, DMA_CFG_REG);
1015 }
1016
1017 /*
1018 * adjust emac phy
1019 */
1020 static void bcm6348_emac_adjust_phy(struct net_device *ndev)
1021 {
1022 struct phy_device *phydev = ndev->phydev;
1023 struct bcm6348_emac *emac = netdev_priv(ndev);
1024 struct platform_device *pdev = emac->pdev;
1025 struct device *dev = &pdev->dev;
1026 bool status_changed = false;
1027
1028 if (emac->old_link != phydev->link) {
1029 status_changed = true;
1030 emac->old_link = phydev->link;
1031 }
1032
1033 if (phydev->link && phydev->duplex != emac->old_duplex) {
1034 bcm6348_emac_set_duplex(emac, phydev->duplex == DUPLEX_FULL);
1035 status_changed = true;
1036 emac->old_duplex = phydev->duplex;
1037 }
1038
1039 if (phydev->link && phydev->pause != emac->old_pause) {
1040 bool rx_pause_en, tx_pause_en;
1041
1042 if (phydev->pause) {
1043 rx_pause_en = true;
1044 tx_pause_en = true;
1045 } else {
1046 rx_pause_en = false;
1047 tx_pause_en = false;
1048 }
1049
1050 bcm6348_emac_set_flow(emac, rx_pause_en, tx_pause_en);
1051 status_changed = true;
1052 emac->old_pause = phydev->pause;
1053 }
1054
1055 if (status_changed)
1056 dev_info(dev, "%s: phy link %s %s/%s/%s/%s\n",
1057 ndev->name,
1058 phydev->link ? "UP" : "DOWN",
1059 phy_modes(phydev->interface),
1060 phy_speed_to_str(phydev->speed),
1061 phy_duplex_to_str(phydev->duplex),
1062 phydev->pause ? "rx/tx" : "off");
1063 }
1064
1065
1066 static int bcm6348_emac_open(struct net_device *ndev)
1067 {
1068 struct bcm6348_emac *emac = netdev_priv(ndev);
1069 struct bcm6348_iudma *iudma = emac->iudma;
1070 struct platform_device *pdev = emac->pdev;
1071 struct device *dev = &pdev->dev;
1072 struct sockaddr addr;
1073 unsigned int i, size;
1074 int ret;
1075 void *p;
1076 u32 val;
1077
1078 /* mask all interrupts and request them */
1079 emac_writel(emac, 0, ENET_IRMASK_REG);
1080 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
1081 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
1082
1083 ret = request_irq(ndev->irq, bcm6348_emac_isr_mac, 0, ndev->name,
1084 ndev);
1085 if (ret)
1086 return ret;
1087
1088 ret = request_irq(emac->irq_rx, bcm6348_emac_isr_dma,
1089 0, ndev->name, ndev);
1090 if (ret)
1091 goto out_freeirq;
1092
1093 ret = request_irq(emac->irq_tx, bcm6348_emac_isr_dma,
1094 0, ndev->name, ndev);
1095 if (ret)
1096 goto out_freeirq_rx;
1097
1098 /* initialize perfect match registers */
1099 for (i = 0; i < 4; i++) {
1100 emac_writel(emac, 0, ENET_PML_REG(i));
1101 emac_writel(emac, 0, ENET_PMH_REG(i));
1102 }
1103
1104 /* write device mac address */
1105 memcpy(addr.sa_data, ndev->dev_addr, ETH_ALEN);
1106 bcm6348_emac_set_mac_address(ndev, &addr);
1107
1108 /* allocate rx dma ring */
1109 size = emac->rx_ring_size * sizeof(struct bcm6348_iudma_desc);
1110 p = dma_alloc_coherent(dev, size, &emac->rx_desc_dma, GFP_KERNEL);
1111 if (!p) {
1112 dev_err(dev, "cannot allocate rx ring %u\n", size);
1113 ret = -ENOMEM;
1114 goto out_freeirq_tx;
1115 }
1116
1117 memset(p, 0, size);
1118 emac->rx_desc_alloc_size = size;
1119 emac->rx_desc_cpu = p;
1120
1121 /* allocate tx dma ring */
1122 size = emac->tx_ring_size * sizeof(struct bcm6348_iudma_desc);
1123 p = dma_alloc_coherent(dev, size, &emac->tx_desc_dma, GFP_KERNEL);
1124 if (!p) {
1125 dev_err(dev, "cannot allocate tx ring\n");
1126 ret = -ENOMEM;
1127 goto out_free_rx_ring;
1128 }
1129
1130 memset(p, 0, size);
1131 emac->tx_desc_alloc_size = size;
1132 emac->tx_desc_cpu = p;
1133
1134 emac->tx_skb = kcalloc(emac->tx_ring_size, sizeof(struct sk_buff *),
1135 GFP_KERNEL);
1136 if (!emac->tx_skb) {
1137 dev_err(dev, "cannot allocate rx skb queue\n");
1138 ret = -ENOMEM;
1139 goto out_free_tx_ring;
1140 }
1141
1142 emac->tx_desc_count = emac->tx_ring_size;
1143 emac->tx_dirty_desc = 0;
1144 emac->tx_curr_desc = 0;
1145 spin_lock_init(&emac->tx_lock);
1146
1147 /* init & fill rx ring with skbs */
1148 emac->rx_skb = kcalloc(emac->rx_ring_size, sizeof(struct sk_buff *),
1149 GFP_KERNEL);
1150 if (!emac->rx_skb) {
1151 dev_err(dev, "cannot allocate rx skb queue\n");
1152 ret = -ENOMEM;
1153 goto out_free_tx_skb;
1154 }
1155
1156 emac->rx_desc_count = 0;
1157 emac->rx_dirty_desc = 0;
1158 emac->rx_curr_desc = 0;
1159
1160 /* initialize flow control buffer allocation */
1161 dma_writel(iudma, DMA_BUFALLOC_FORCE_MASK | 0,
1162 DMA_BUFALLOC_REG(emac->rx_chan));
1163
1164 if (bcm6348_emac_refill_rx(ndev)) {
1165 dev_err(dev, "cannot allocate rx skb queue\n");
1166 ret = -ENOMEM;
1167 goto out;
1168 }
1169
1170 /* write rx & tx ring addresses */
1171 dmas_writel(iudma, emac->rx_desc_dma,
1172 DMAS_RSTART_REG, emac->rx_chan);
1173 dmas_writel(iudma, emac->tx_desc_dma,
1174 DMAS_RSTART_REG, emac->tx_chan);
1175
1176 /* clear remaining state ram for rx & tx channel */
1177 dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->rx_chan);
1178 dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->tx_chan);
1179 dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->rx_chan);
1180 dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->tx_chan);
1181 dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->rx_chan);
1182 dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->tx_chan);
1183
1184 /* set max rx/tx length */
1185 emac_writel(emac, ndev->mtu, ENET_RXMAXLEN_REG);
1186 emac_writel(emac, ndev->mtu, ENET_TXMAXLEN_REG);
1187
1188 /* set dma maximum burst len */
1189 dmac_writel(iudma, ENET_DMA_MAXBURST,
1190 DMAC_MAXBURST_REG, emac->rx_chan);
1191 dmac_writel(iudma, ENET_DMA_MAXBURST,
1192 DMAC_MAXBURST_REG, emac->tx_chan);
1193
1194 /* set correct transmit fifo watermark */
1195 emac_writel(emac, ENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1196
1197 /* set flow control low/high threshold to 1/3 / 2/3 */
1198 val = emac->rx_ring_size / 3;
1199 dma_writel(iudma, val, DMA_FLOWCL_REG(emac->rx_chan));
1200 val = (emac->rx_ring_size * 2) / 3;
1201 dma_writel(iudma, val, DMA_FLOWCH_REG(emac->rx_chan));
1202
1203 /* all set, enable emac and interrupts, start dma engine and
1204 * kick rx dma channel
1205 */
1206 wmb();
1207 val = emac_readl(emac, ENET_CTL_REG);
1208 val |= ENET_CTL_ENABLE_MASK;
1209 emac_writel(emac, val, ENET_CTL_REG);
1210 dmac_writel(iudma, DMAC_CHANCFG_EN_MASK,
1211 DMAC_CHANCFG_REG, emac->rx_chan);
1212
1213 /* watch "mib counters about to overflow" interrupt */
1214 emac_writel(emac, ENET_IR_MIB, ENET_IR_REG);
1215 emac_writel(emac, ENET_IR_MIB, ENET_IRMASK_REG);
1216
1217 /* watch "packet transferred" interrupt in rx and tx */
1218 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1219 DMAC_IR_REG, emac->rx_chan);
1220 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1221 DMAC_IR_REG, emac->tx_chan);
1222
1223 /* make sure we enable napi before rx interrupt */
1224 napi_enable(&emac->napi);
1225
1226 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1227 DMAC_IRMASK_REG, emac->rx_chan);
1228 dmac_writel(iudma, DMAC_IR_PKTDONE_MASK,
1229 DMAC_IRMASK_REG, emac->tx_chan);
1230
1231 if (ndev->phydev)
1232 phy_start(ndev->phydev);
1233
1234 netif_carrier_on(ndev);
1235 netif_start_queue(ndev);
1236
1237 return 0;
1238
1239 out:
1240 for (i = 0; i < emac->rx_ring_size; i++) {
1241 struct bcm6348_iudma_desc *desc;
1242
1243 if (!emac->rx_skb[i])
1244 continue;
1245
1246 desc = &emac->rx_desc_cpu[i];
1247 dma_unmap_single(dev, desc->address, emac->rx_skb_size,
1248 DMA_FROM_DEVICE);
1249 kfree_skb(emac->rx_skb[i]);
1250 }
1251 kfree(emac->rx_skb);
1252
1253 out_free_tx_skb:
1254 kfree(emac->tx_skb);
1255
1256 out_free_tx_ring:
1257 dma_free_coherent(dev, emac->tx_desc_alloc_size,
1258 emac->tx_desc_cpu, emac->tx_desc_dma);
1259
1260 out_free_rx_ring:
1261 dma_free_coherent(dev, emac->rx_desc_alloc_size,
1262 emac->rx_desc_cpu, emac->rx_desc_dma);
1263
1264 out_freeirq_tx:
1265 if (emac->irq_tx != -1)
1266 free_irq(emac->irq_tx, ndev);
1267
1268 out_freeirq_rx:
1269 free_irq(emac->irq_rx, ndev);
1270
1271 out_freeirq:
1272 free_irq(ndev->irq, ndev);
1274
1275 return ret;
1276 }
1277
1278 static int bcm6348_emac_stop(struct net_device *ndev)
1279 {
1280 struct bcm6348_emac *emac = netdev_priv(ndev);
1281 struct bcm6348_iudma *iudma = emac->iudma;
1282 struct device *dev = &emac->pdev->dev;
1283 unsigned int i;
1284
1285 netif_stop_queue(ndev);
1286 napi_disable(&emac->napi);
1287 if (ndev->phydev)
1288 phy_stop(ndev->phydev);
1289 del_timer_sync(&emac->rx_timeout);
1290
1291 /* mask all interrupts */
1292 emac_writel(emac, 0, ENET_IRMASK_REG);
1293 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan);
1294 dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan);
1295
1296 /* disable dma & emac */
1297 bcm6348_iudma_chan_stop(iudma, emac->tx_chan);
1298 bcm6348_iudma_chan_stop(iudma, emac->rx_chan);
1299 bcm6348_emac_disable_mac(emac);
1300
1301 /* force reclaim of all tx buffers */
1302 bcm6348_emac_tx_reclaim(ndev, 1);
1303
1304 /* free the rx skb ring */
1305 for (i = 0; i < emac->rx_ring_size; i++) {
1306 struct bcm6348_iudma_desc *desc;
1307
1308 if (!emac->rx_skb[i])
1309 continue;
1310
1311 desc = &emac->rx_desc_cpu[i];
1312 dma_unmap_single_attrs(dev, desc->address, emac->rx_skb_size,
1313 DMA_FROM_DEVICE,
1314 DMA_ATTR_SKIP_CPU_SYNC);
1315 kfree_skb(emac->rx_skb[i]);
1316 }
1317
1318 /* free remaining allocated memory */
1319 kfree(emac->rx_skb);
1320 kfree(emac->tx_skb);
1321 dma_free_coherent(dev, emac->rx_desc_alloc_size, emac->rx_desc_cpu,
1322 emac->rx_desc_dma);
1323 dma_free_coherent(dev, emac->tx_desc_alloc_size, emac->tx_desc_cpu,
1324 emac->tx_desc_dma);
1325 free_irq(emac->irq_tx, ndev);
1326 free_irq(emac->irq_rx, ndev);
1327 free_irq(ndev->irq, ndev);
1328
1329 netdev_reset_queue(ndev);
1330
1331 return 0;
1332 }
1333
1334 static const struct net_device_ops bcm6348_emac_ops = {
1335 .ndo_open = bcm6348_emac_open,
1336 .ndo_stop = bcm6348_emac_stop,
1337 .ndo_start_xmit = bcm6348_emac_start_xmit,
1338 .ndo_set_mac_address = bcm6348_emac_set_mac_address,
1339 .ndo_set_rx_mode = bcm6348_emac_set_multicast_list,
1340 };
1341
1342 static int bcm6348_emac_mdio_op(struct bcm6348_emac *emac, uint32_t data)
1343 {
1344 int limit;
1345
1346 /* Make sure mii interrupt status is cleared */
1347 emac_writel(emac, ENET_IR_MII, ENET_IR_REG);
1348
1349 /* Issue mii op */
1350 emac_writel(emac, data, ENET_MIID_REG);
1351 wmb();
1352
1353 /* busy wait on mii interrupt bit, with timeout */
1354 limit = 1000;
1355 do {
1356 if (emac_readl(emac, ENET_IR_REG) & ENET_IR_MII)
1357 break;
1358 udelay(1);
1359 } while (limit-- > 0);
1360
1361 return (limit < 0) ? 1 : 0;
1362 }
1363
1364 static int bcm6348_emac_mdio_read(struct mii_bus *bus, int phy_id, int loc)
1365 {
1366 struct bcm6348_emac *emac = bus->priv;
1367 struct platform_device *pdev = emac->pdev;
1368 struct device *dev = &pdev->dev;
1369 uint32_t reg;
1370
1371 reg = 0x2 << ENET_MIID_TA_SHIFT;
1372 reg |= loc << ENET_MIID_REG_SHIFT;
1373 reg |= phy_id << ENET_MIID_PHY_SHIFT;
1374 reg |= ENET_MIID_OP_READ;
1375
1376 if (bcm6348_emac_mdio_op(emac, reg)) {
1377 dev_err(dev, "mdio_read: phy=%d loc=%x timeout!\n",
1378 phy_id, loc);
1379 return -EINVAL;
1380 }
1381
1382 reg = emac_readl(emac, ENET_MIID_REG);
1383 reg = (reg >> ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;
1384
1385 return (int) reg;
1386 }
1387
1388 static int bcm6348_emac_mdio_write(struct mii_bus *bus, int phy_id,
1389 int loc, uint16_t val)
1390 {
1391 struct bcm6348_emac *emac = bus->priv;
1392 struct platform_device *pdev = emac->pdev;
1393 struct device *dev = &pdev->dev;
1394 uint32_t reg;
1395
1396 reg = (val << ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK;
1397 reg |= 0x2 << ENET_MIID_TA_SHIFT;
1398 reg |= loc << ENET_MIID_REG_SHIFT;
1399 reg |= phy_id << ENET_MIID_PHY_SHIFT;
1400 reg |= ENET_MIID_OP_WRITE;
1401
1402 if (bcm6348_emac_mdio_op(emac, reg)) {
1403 dev_err(dev, "mdio_write: phy=%d loc=%x timeout!\n",
1404 phy_id, loc);
1405 return -EINVAL;
1406 }
1407
1410 return 0;
1411 }
1412
1413 static int bcm6348_emac_mdio_init(struct bcm6348_emac *emac,
1414 struct device_node *np)
1415 {
1416 struct platform_device *pdev = emac->pdev;
1417 struct device *dev = &pdev->dev;
1418 struct device_node *mnp;
1419 struct mii_bus *mii_bus;
1420 int ret;
1421
1422 mnp = of_get_child_by_name(np, "mdio");
1423 if (!mnp)
1424 return -ENODEV;
1425
1426 mii_bus = devm_mdiobus_alloc(dev);
1427 if (!mii_bus) {
1428 of_node_put(mnp);
1429 return -ENOMEM;
1430 }
1431
1432 mii_bus->priv = emac;
1433 mii_bus->name = np->full_name;
1434 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
1435 mii_bus->parent = dev;
1436 mii_bus->read = bcm6348_emac_mdio_read;
1437 mii_bus->write = bcm6348_emac_mdio_write;
1438 mii_bus->phy_mask = 0x3f;
1439
1440 ret = devm_of_mdiobus_register(dev, mii_bus, mnp);
1441 of_node_put(mnp);
1442 if (ret) {
1443 dev_err(dev, "MDIO bus registration failed\n");
1444 return ret;
1445 }
1446
1447 dev_info(dev, "MDIO bus init\n");
1448
1449 return 0;
1450 }
1451
1452 /*
1453 * preinit hardware to allow mii operation while device is down
1454 */
1455 static void bcm6348_emac_hw_preinit(struct bcm6348_emac *emac)
1456 {
1457 u32 val;
1458 int limit;
1459
1460 /* make sure emac is disabled */
1461 bcm6348_emac_disable_mac(emac);
1462
1463 /* soft reset emac */
1464 val = ENET_CTL_SRESET_MASK;
1465 emac_writel(emac, val, ENET_CTL_REG);
1466 wmb();
1467
1468 limit = 1000;
1469 do {
1470 val = emac_readl(emac, ENET_CTL_REG);
1471 if (!(val & ENET_CTL_SRESET_MASK))
1472 break;
1473 udelay(1);
1474 } while (limit--);
1475
1476 /* select correct mii interface */
1477 val = emac_readl(emac, ENET_CTL_REG);
1478 if (emac->ext_mii)
1479 val |= ENET_CTL_EPHYSEL_MASK;
1480 else
1481 val &= ~ENET_CTL_EPHYSEL_MASK;
1482 emac_writel(emac, val, ENET_CTL_REG);
1483
1484 /* turn on mdc clock */
1485 emac_writel(emac, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1486 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1487
1488 /* set mib counters to self-clear when read */
1489 val = emac_readl(emac, ENET_MIBCTL_REG);
1490 val |= ENET_MIBCTL_RDCLEAR_MASK;
1491 emac_writel(emac, val, ENET_MIBCTL_REG);
1492 }
1493
1494 static int bcm6348_emac_probe(struct platform_device *pdev)
1495 {
1496 struct device *dev = &pdev->dev;
1497 struct device_node *node = dev->of_node;
1498 struct device_node *dma_node;
1499 struct platform_device *dma_pdev;
1500 struct bcm6348_emac *emac;
1501 struct bcm6348_iudma *iudma;
1502 struct net_device *ndev;
1503 unsigned i;
1504 int num_resets;
1505 int ret;
1506
1507 dma_node = of_parse_phandle(node, "brcm,iudma", 0);
1508 if (!dma_node)
1509 return -EINVAL;
1510
1511 dma_pdev = of_find_device_by_node(dma_node);
1512 of_node_put(dma_node);
1513 if (!dma_pdev)
1514 return -EINVAL;
1515
1516 iudma = platform_get_drvdata(dma_pdev);
1517 if (!iudma)
1518 return -EPROBE_DEFER;
1519
1520 ndev = devm_alloc_etherdev(dev, sizeof(*emac));
1521 if (!ndev)
1522 return -ENOMEM;
1523
1524 platform_set_drvdata(pdev, ndev);
1525 SET_NETDEV_DEV(ndev, dev);
1526
1527 emac = netdev_priv(ndev);
1528 emac->iudma = iudma;
1529 emac->pdev = pdev;
1530 emac->net_dev = ndev;
1531
1532 emac->base = devm_platform_ioremap_resource(pdev, 0);
1533 if (IS_ERR_OR_NULL(emac->base))
1534 return PTR_ERR(emac->base);
1535
1536 ndev->irq = of_irq_get_byname(node, "emac");
1537 if (!ndev->irq)
1538 return -ENODEV;
1539
1540 emac->irq_rx = of_irq_get_byname(node, "rx");
1541 if (!emac->irq_rx)
1542 return -ENODEV;
1543
1544 emac->irq_tx = of_irq_get_byname(node, "tx");
1545 if (!emac->irq_tx)
1546 return -ENODEV;
1547
1548 if (of_property_read_u32(node, "dma-rx", &emac->rx_chan))
1549 return -ENODEV;
1550
1551 if (of_property_read_u32(node, "dma-tx", &emac->tx_chan))
1552 return -ENODEV;
1553
1554 emac->ext_mii = of_property_read_bool(node, "brcm,external-mii");
1555
1556 emac->rx_ring_size = ENET_DEF_RX_DESC;
1557 emac->tx_ring_size = ENET_DEF_TX_DESC;
1558 emac->copybreak = ENET_DEF_CPY_BREAK;
1559
1560 emac->old_link = 0;
1561 emac->old_duplex = -1;
1562 emac->old_pause = -1;
1563
1564 of_get_mac_address(node, ndev->dev_addr);
1565 if (is_valid_ether_addr(ndev->dev_addr)) {
1566 dev_info(dev, "mtd mac %pM\n", ndev->dev_addr);
1567 } else {
1568 random_ether_addr(ndev->dev_addr);
1569 dev_info(dev, "random mac %pM\n", ndev->dev_addr);
1570 }
1571
1572 emac->rx_skb_size = ALIGN(ndev->mtu + ENET_MTU_OVERHEAD,
1573 ENET_DMA_MAXBURST * 4);
1574
1575 emac->num_clocks = of_clk_get_parent_count(node);
1576 if (emac->num_clocks) {
1577 emac->clock = devm_kcalloc(dev, emac->num_clocks,
1578 sizeof(struct clk *), GFP_KERNEL);
1579 if (!emac->clock)
1580 return -ENOMEM;
1581 }
1582 for (i = 0; i < emac->num_clocks; i++) {
1583 emac->clock[i] = of_clk_get(node, i);
1584 if (IS_ERR_OR_NULL(emac->clock[i])) {
1585 dev_err(dev, "error getting emac clock %d\n", i);
1586 return PTR_ERR(emac->clock[i]);
1587 }
1588
1589 ret = clk_prepare_enable(emac->clock[i]);
1590 if (ret) {
1591 dev_err(dev, "error enabling emac clock %d\n", i);
1592 return ret;
1593 }
1594 }
1595
1596 num_resets = of_count_phandle_with_args(node, "resets",
1597 "#reset-cells");
1598 if (num_resets > 0)
1599 emac->num_resets = num_resets;
1600 else
1601 emac->num_resets = 0;
1602 if (emac->num_resets) {
1603 emac->reset = devm_kcalloc(dev, emac->num_resets,
1604 sizeof(struct reset_control *),
1605 GFP_KERNEL);
1606 if (!emac->reset)
1607 return -ENOMEM;
1609 }
1610 for (i = 0; i < emac->num_resets; i++) {
1611 emac->reset[i] = devm_reset_control_get_by_index(dev, i);
1612 if (IS_ERR_OR_NULL(emac->reset[i])) {
1613 dev_err(dev, "error getting emac reset %d\n", i);
1614 return PTR_ERR(emac->reset[i]);
1615 }
1616
1617 ret = reset_control_reset(emac->reset[i]);
1618 if (ret) {
1619 dev_err(dev, "error performing emac reset %d\n", i);
1620 return ret;
1621 }
1622 }
1623
1624 /* do minimal hardware init to be able to probe mii bus */
1625 bcm6348_emac_hw_preinit(emac);
1626
1627 ret = bcm6348_emac_mdio_init(emac, node);
1628 if (ret)
1629 return ret;
1630
1631 spin_lock_init(&emac->rx_lock);
1632
1633 timer_setup(&emac->rx_timeout, bcm6348_emac_refill_rx_timer, 0);
1634
1635 /* zero mib counters */
1636 for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1637 emac_writel(emac, 0, ENET_MIB_REG(i));
1638
1639 /* register netdevice */
1640 ndev->netdev_ops = &bcm6348_emac_ops;
1641 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
1642 ndev->mtu = ETH_DATA_LEN - VLAN_ETH_HLEN;
1643 ndev->max_mtu = ENET_MAX_MTU - VLAN_ETH_HLEN;
1644 netif_napi_add(ndev, &emac->napi, bcm6348_emac_poll, 16);
1646
1647 ret = devm_register_netdev(dev, ndev);
1648 if (ret)
1649 goto out_disable_clk;
1650
1651 netif_carrier_off(ndev);
1652
1653 ndev->phydev = of_phy_get_and_connect(ndev, node,
1654 bcm6348_emac_adjust_phy);
1655 if (IS_ERR_OR_NULL(ndev->phydev))
1656 dev_warn(dev, "PHY not found!\n");
1657
1658 dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, emac->base,
1659 ndev->irq);
1660
1661 return 0;
1662
1663 out_disable_clk:
1664 for (i = 0; i < emac->num_resets; i++)
1665 reset_control_assert(emac->reset[i]);
1666
1667 for (i = 0; i < emac->num_clocks; i++)
1668 clk_disable_unprepare(emac->clock[i]);
1669
1670 return ret;
1671 }
1672
1673 static int bcm6348_emac_remove(struct platform_device *pdev)
1674 {
1675 struct net_device *ndev = platform_get_drvdata(pdev);
1676 struct bcm6348_emac *emac = netdev_priv(ndev);
1677 unsigned int i;
1678
1679 emac_writel(emac, 0, ENET_MIISC_REG);
1680
1681 for (i = 0; i < emac->num_resets; i++)
1682 reset_control_assert(emac->reset[i]);
1683
1684 for (i = 0; i < emac->num_clocks; i++)
1685 clk_disable_unprepare(emac->clock[i]);
1686
1687 return 0;
1688 }
1689
1690 static const struct of_device_id bcm6348_emac_of_match[] = {
1691 { .compatible = "brcm,bcm6338-emac", },
1692 { .compatible = "brcm,bcm6348-emac", },
1693 { .compatible = "brcm,bcm6358-emac", },
1694 { /* sentinel */ },
1695 };
1696 MODULE_DEVICE_TABLE(of, bcm6348_emac_of_match);
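
/*
 * Illustrative devicetree node for one EMAC. Again only a sketch: the
 * node names, unit address, register size and interrupt numbers are
 * invented, while the property names shown ("brcm,iudma", "dma-rx",
 * "dma-tx", the "emac"/"rx"/"tx" interrupt names, the optional
 * "brcm,external-mii" flag and the "mdio" child node) are the ones
 * bcm6348_emac_probe() and bcm6348_emac_mdio_init() above look for.
 *
 *	emac0: ethernet@fffe6000 {
 *		compatible = "brcm,bcm6348-emac";
 *		reg = <0xfffe6000 0x2dc>;
 *		interrupt-parent = <&periph_intc>;
 *		interrupts = <8>, <20>, <22>;
 *		interrupt-names = "emac", "rx", "tx";
 *		brcm,iudma = <&iudma>;
 *		dma-rx = <0>;
 *		dma-tx = <1>;
 *
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *		};
 *	};
 */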
1697
1698 static struct platform_driver bcm6348_emac_driver = {
1699 .driver = {
1700 .name = "bcm6348-emac",
1701 .of_match_table = of_match_ptr(bcm6348_emac_of_match),
1702 },
1703 .probe = bcm6348_emac_probe,
1704 .remove = bcm6348_emac_remove,
1705 };
1706 module_platform_driver(bcm6348_emac_driver);