target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6368-enetsw.c (openwrt/openwrt.git)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * BCM6368 Ethernet Switch Controller Driver
4 *
5 * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 Jonas Gorski <jonas.gorski@gmail.com>
7 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
8 */
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/of_clk.h>
18 #include <linux/of_net.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/reset.h>
23
24 /* MTU */
25 #define ENETSW_TAG_SIZE 6
26 #define ENETSW_MTU_OVERHEAD (VLAN_ETH_HLEN + VLAN_HLEN + \
27 ENETSW_TAG_SIZE)
28
29 /* default number of descriptors */
30 #define ENETSW_DEF_RX_DESC 64
31 #define ENETSW_DEF_TX_DESC 32
32 #define ENETSW_DEF_CPY_BREAK 128
33
34 /* maximum burst len for dma (in 4-byte units) */
35 #define ENETSW_DMA_MAXBURST 8
36
37 /* DMA channel register width (stride between per-channel register blocks) */
38 #define DMA_CHAN_WIDTH 0x10
39
40 /* Controller Configuration Register */
41 #define DMA_CFG_REG 0x0
42 #define DMA_CFG_EN_SHIFT 0
43 #define DMA_CFG_EN_MASK (1 << DMA_CFG_EN_SHIFT)
44 #define DMA_CFG_FLOWCH_MASK(x) (1 << ((x >> 1) + 1))
45
46 /* Flow Control Descriptor Low Threshold register */
47 #define DMA_FLOWCL_REG(x) (0x4 + (x) * 6)
48
49 /* Flow Control Descriptor High Threshold register */
50 #define DMA_FLOWCH_REG(x) (0x8 + (x) * 6)
51
52 /* Flow Control Descriptor Buffer Alloc Threshold register */
53 #define DMA_BUFALLOC_REG(x) (0xc + (x) * 6)
54 #define DMA_BUFALLOC_FORCE_SHIFT 31
55 #define DMA_BUFALLOC_FORCE_MASK (1 << DMA_BUFALLOC_FORCE_SHIFT)
56
57 /* Channel Configuration register */
58 #define DMAC_CHANCFG_REG 0x0
59 #define DMAC_CHANCFG_EN_SHIFT 0
60 #define DMAC_CHANCFG_EN_MASK (1 << DMAC_CHANCFG_EN_SHIFT)
61 #define DMAC_CHANCFG_PKTHALT_SHIFT 1
62 #define DMAC_CHANCFG_PKTHALT_MASK (1 << DMAC_CHANCFG_PKTHALT_SHIFT)
63 #define DMAC_CHANCFG_BUFHALT_SHIFT 2
64 #define DMAC_CHANCFG_BUFHALT_MASK (1 << DMAC_CHANCFG_BUFHALT_SHIFT)
65 #define DMAC_CHANCFG_CHAINING_SHIFT 2
66 #define DMAC_CHANCFG_CHAINING_MASK (1 << DMAC_CHANCFG_CHAINING_SHIFT)
67 #define DMAC_CHANCFG_WRAP_EN_SHIFT 3
68 #define DMAC_CHANCFG_WRAP_EN_MASK (1 << DMAC_CHANCFG_WRAP_EN_SHIFT)
69 #define DMAC_CHANCFG_FLOWC_EN_SHIFT 4
70 #define DMAC_CHANCFG_FLOWC_EN_MASK (1 << DMAC_CHANCFG_FLOWC_EN_SHIFT)
71
72 /* Interrupt Control/Status register */
73 #define DMAC_IR_REG 0x4
74 #define DMAC_IR_BUFDONE_MASK (1 << 0)
75 #define DMAC_IR_PKTDONE_MASK (1 << 1)
76 #define DMAC_IR_NOTOWNER_MASK (1 << 2)
77
78 /* Interrupt Mask register */
79 #define DMAC_IRMASK_REG 0x8
80
81 /* Maximum Burst Length */
82 #define DMAC_MAXBURST_REG 0xc
83
84 /* Ring Start Address register */
85 #define DMAS_RSTART_REG 0x0
86
87 /* State Ram Word 2 */
88 #define DMAS_SRAM2_REG 0x4
89
90 /* State Ram Word 3 */
91 #define DMAS_SRAM3_REG 0x8
92
93 /* State Ram Word 4 */
94 #define DMAS_SRAM4_REG 0xc
95
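/* hardware dma descriptor: len_stat packs the buffer length and the
 * control/status bits defined below, address holds the dma address of
 * the packet buffer */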
96 struct bcm6368_enetsw_desc {
97 u32 len_stat;
98 u32 address;
99 };
100
101 /* len_stat control bits */
102 #define DMADESC_LENGTH_SHIFT 16
103 #define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
104 #define DMADESC_OWNER_MASK (1 << 15)
105 #define DMADESC_EOP_MASK (1 << 14)
106 #define DMADESC_SOP_MASK (1 << 13)
107 #define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
108 #define DMADESC_WRAP_MASK (1 << 12)
109 #define DMADESC_USB_NOZERO_MASK (1 << 1)
110 #define DMADESC_USB_ZERO_MASK (1 << 0)
111
112 /* len_stat status bits */
113 #define DMADESC_UNDER_MASK (1 << 9)
114 #define DMADESC_APPEND_CRC (1 << 8)
115 #define DMADESC_OVSIZE_MASK (1 << 4)
116 #define DMADESC_RXER_MASK (1 << 2)
117 #define DMADESC_CRC_MASK (1 << 1)
118 #define DMADESC_OV_MASK (1 << 0)
119 #define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
120 DMADESC_OVSIZE_MASK | \
121 DMADESC_RXER_MASK | \
122 DMADESC_CRC_MASK | \
123 DMADESC_OV_MASK)
124
125 struct bcm6368_enetsw {
126 void __iomem *dma_base;
127 void __iomem *dma_chan;
128 void __iomem *dma_sram;
129
130 struct device **pm;
131 struct device_link **link_pm;
132 int num_pms;
133
134 struct clk **clock;
135 unsigned int num_clocks;
136
137 struct reset_control **reset;
138 unsigned int num_resets;
139
140 int copybreak;
141
142 int irq_rx;
143 int irq_tx;
144
145 /* hw view of rx & tx dma ring */
146 dma_addr_t rx_desc_dma;
147 dma_addr_t tx_desc_dma;
148
149 /* allocated size (in bytes) for rx & tx dma ring */
150 unsigned int rx_desc_alloc_size;
151 unsigned int tx_desc_alloc_size;
152
153 struct napi_struct napi;
154
155 /* dma channel id for rx */
156 int rx_chan;
157
158 /* number of dma desc in rx ring */
159 int rx_ring_size;
160
161 /* cpu view of rx dma ring */
162 struct bcm6368_enetsw_desc *rx_desc_cpu;
163
164 /* current number of armed descriptors given to hardware for rx */
165 int rx_desc_count;
166
167 /* next rx descriptor to fetch from hardware */
168 int rx_curr_desc;
169
170 /* next dirty rx descriptor to refill */
171 int rx_dirty_desc;
172
173 /* size of allocated rx skbs */
174 unsigned int rx_skb_size;
175
176 /* list of skb given to hw for rx */
177 struct sk_buff **rx_skb;
178
179 /* used when rx skb allocation failed, so we defer rx queue
180 * refill */
181 struct timer_list rx_timeout;
182
183 /* lock rx_timeout against rx normal operation */
184 spinlock_t rx_lock;
185
186 /* dma channel id for tx */
187 int tx_chan;
188
189 /* number of dma desc in tx ring */
190 int tx_ring_size;
191
192 /* maximum dma burst size */
193 int dma_maxburst;
194
195 /* cpu view of tx dma ring */
196 struct bcm6368_enetsw_desc *tx_desc_cpu;
197
198 /* number of available descriptors for tx */
199 int tx_desc_count;
200
201 /* next tx descriptor available */
202 int tx_curr_desc;
203
204 /* next dirty tx descriptor to reclaim */
205 int tx_dirty_desc;
206
207 /* list of skb given to hw for tx */
208 struct sk_buff **tx_skb;
209
210 /* lock used by tx reclaim and xmit */
211 spinlock_t tx_lock;
212
213 /* network device reference */
214 struct net_device *net_dev;
215
216 /* platform device reference */
217 struct platform_device *pdev;
218
219 /* dma channel enable mask */
220 u32 dma_chan_en_mask;
221
222 /* dma channel interrupt mask */
223 u32 dma_chan_int_mask;
224
225 /* dma channel width */
226 unsigned int dma_chan_width;
227 };
228
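/* register accessors: dma_* touch the global dma controller block,
 * dmac_* the per-channel configuration registers and dmas_* the
 * per-channel state ram, both offset by the channel id times the
 * channel register stride */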
229 static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off)
230 {
231 __raw_writel(val, priv->dma_base + off);
232 }
233
234 static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan)
235 {
236 return __raw_readl(priv->dma_chan + off + chan * priv->dma_chan_width);
237 }
238
239 static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val,
240 u32 off, int chan)
241 {
242 __raw_writel(val, priv->dma_chan + off + chan * priv->dma_chan_width);
243 }
244
245 static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val,
246 u32 off, int chan)
247 {
248 __raw_writel(val, priv->dma_sram + off + chan * priv->dma_chan_width);
249 }
250
251 /*
252 * refill rx queue
253 */
254 static int bcm6368_enetsw_refill_rx(struct net_device *dev)
255 {
256 struct bcm6368_enetsw *priv = netdev_priv(dev);
257
258 while (priv->rx_desc_count < priv->rx_ring_size) {
259 struct bcm6368_enetsw_desc *desc;
260 struct sk_buff *skb;
261 dma_addr_t p;
262 int desc_idx;
263 u32 len_stat;
264
265 desc_idx = priv->rx_dirty_desc;
266 desc = &priv->rx_desc_cpu[desc_idx];
267
268 if (!priv->rx_skb[desc_idx]) {
269 skb = netdev_alloc_skb(dev, priv->rx_skb_size);
270 if (!skb)
271 break;
272 priv->rx_skb[desc_idx] = skb;
273 p = dma_map_single(&priv->pdev->dev, skb->data,
274 priv->rx_skb_size,
275 DMA_FROM_DEVICE);
276 desc->address = p;
277 }
278
279 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
280 len_stat |= DMADESC_OWNER_MASK;
281 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
282 len_stat |= DMADESC_WRAP_MASK;
283 priv->rx_dirty_desc = 0;
284 } else {
285 priv->rx_dirty_desc++;
286 }
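/* make sure the buffer address is written before len_stat hands
 * ownership of the descriptor back to the dma engine */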
287 wmb();
288 desc->len_stat = len_stat;
289
290 priv->rx_desc_count++;
291
292 /* tell dma engine we allocated one buffer */
293 dma_writel(priv, 1, DMA_BUFALLOC_REG(priv->rx_chan));
294 }
295
296 /* If rx ring is still empty, set a timer to try allocating
297 * again at a later time. */
298 if (priv->rx_desc_count == 0 && netif_running(dev)) {
299 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
300 priv->rx_timeout.expires = jiffies + HZ;
301 add_timer(&priv->rx_timeout);
302 }
303
304 return 0;
305 }
306
307 /*
308 * timer callback to defer refill rx queue in case we're OOM
309 */
310 static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t)
311 {
312 struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout);
313 struct net_device *dev = priv->net_dev;
314
315 spin_lock(&priv->rx_lock);
316 bcm6368_enetsw_refill_rx(dev);
317 spin_unlock(&priv->rx_lock);
318 }
319
320 /*
321 * extract packet from rx queue
322 */
323 static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget)
324 {
325 struct bcm6368_enetsw *priv = netdev_priv(dev);
326 struct device *kdev = &priv->pdev->dev;
327 int processed = 0;
328
329 /* don't scan the ring further than the number of refilled
330 * descriptors */
331 if (budget > priv->rx_desc_count)
332 budget = priv->rx_desc_count;
333
334 do {
335 struct bcm6368_enetsw_desc *desc;
336 struct sk_buff *skb;
337 int desc_idx;
338 u32 len_stat;
339 unsigned int len;
340
341 desc_idx = priv->rx_curr_desc;
342 desc = &priv->rx_desc_cpu[desc_idx];
343
344 /* make sure we actually read the descriptor status on
345 * each loop iteration */
346 rmb();
347
348 len_stat = desc->len_stat;
349
350 /* break if dma ownership belongs to hw */
351 if (len_stat & DMADESC_OWNER_MASK)
352 break;
353
354 processed++;
355 priv->rx_curr_desc++;
356 if (priv->rx_curr_desc == priv->rx_ring_size)
357 priv->rx_curr_desc = 0;
358 priv->rx_desc_count--;
359
360 /* if the packet does not have start of packet _and_
361 * end of packet flag set, then just recycle it */
362 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
363 dev->stats.rx_dropped++;
364 continue;
365 }
366
367 /* valid packet */
368 skb = priv->rx_skb[desc_idx];
369 len = (len_stat & DMADESC_LENGTH_MASK)
370 >> DMADESC_LENGTH_SHIFT;
371 /* don't include FCS */
372 len -= 4;
373
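/* for short packets, copy the data into a freshly allocated skb and
 * keep the original rx buffer mapped so it can be reused; larger
 * packets are passed up directly and their buffer is unmapped */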
374 if (len < priv->copybreak) {
375 struct sk_buff *nskb;
376
377 nskb = napi_alloc_skb(&priv->napi, len);
378 if (!nskb) {
379 /* forget packet, just rearm desc */
380 dev->stats.rx_dropped++;
381 continue;
382 }
383
384 dma_sync_single_for_cpu(kdev, desc->address,
385 len, DMA_FROM_DEVICE);
386 memcpy(nskb->data, skb->data, len);
387 dma_sync_single_for_device(kdev, desc->address,
388 len, DMA_FROM_DEVICE);
389 skb = nskb;
390 } else {
391 dma_unmap_single(&priv->pdev->dev, desc->address,
392 priv->rx_skb_size, DMA_FROM_DEVICE);
393 priv->rx_skb[desc_idx] = NULL;
394 }
395
396 skb_put(skb, len);
397 skb->protocol = eth_type_trans(skb, dev);
398 dev->stats.rx_packets++;
399 dev->stats.rx_bytes += len;
400 netif_receive_skb(skb);
401 } while (--budget > 0);
402
403 if (processed || !priv->rx_desc_count) {
404 bcm6368_enetsw_refill_rx(dev);
405
406 /* kick rx dma */
407 dmac_writel(priv, priv->dma_chan_en_mask,
408 DMAC_CHANCFG_REG, priv->rx_chan);
409 }
410
411 return processed;
412 }
413
414 /*
415 * try to or force reclaim of transmitted buffers
416 */
417 static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force)
418 {
419 struct bcm6368_enetsw *priv = netdev_priv(dev);
420 int released = 0;
421
422 while (priv->tx_desc_count < priv->tx_ring_size) {
423 struct bcm6368_enetsw_desc *desc;
424 struct sk_buff *skb;
425
426 /* We run in a bh and fight against start_xmit, which
427 * is called with bh disabled */
428 spin_lock(&priv->tx_lock);
429
430 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
431
432 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
433 spin_unlock(&priv->tx_lock);
434 break;
435 }
436
437 /* ensure other fields of the descriptor were not read
438 * before we checked ownership */
439 rmb();
440
441 skb = priv->tx_skb[priv->tx_dirty_desc];
442 priv->tx_skb[priv->tx_dirty_desc] = NULL;
443 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
444 DMA_TO_DEVICE);
445
446 priv->tx_dirty_desc++;
447 if (priv->tx_dirty_desc == priv->tx_ring_size)
448 priv->tx_dirty_desc = 0;
449 priv->tx_desc_count++;
450
451 spin_unlock(&priv->tx_lock);
452
453 if (desc->len_stat & DMADESC_UNDER_MASK)
454 dev->stats.tx_errors++;
455
456 dev_kfree_skb(skb);
457 released++;
458 }
459
460 if (netif_queue_stopped(dev) && released)
461 netif_wake_queue(dev);
462
463 return released;
464 }
465
466 /*
467 * poll func, called by network core
468 */
469 static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget)
470 {
471 struct bcm6368_enetsw *priv = container_of(napi, struct bcm6368_enetsw, napi);
472 struct net_device *dev = priv->net_dev;
473 int rx_work_done;
474
475 /* ack interrupts */
476 dmac_writel(priv, priv->dma_chan_int_mask,
477 DMAC_IR_REG, priv->rx_chan);
478 dmac_writel(priv, priv->dma_chan_int_mask,
479 DMAC_IR_REG, priv->tx_chan);
480
481 /* reclaim sent skb */
482 bcm6368_enetsw_tx_reclaim(dev, 0);
483
484 spin_lock(&priv->rx_lock);
485 rx_work_done = bcm6368_enetsw_receive_queue(dev, budget);
486 spin_unlock(&priv->rx_lock);
487
488 if (rx_work_done >= budget) {
489 /* rx queue is not yet empty/clean */
490 return rx_work_done;
491 }
492
493 /* no more packets in the rx/tx queues, remove device from the
494 * poll list */
495 napi_complete_done(napi, rx_work_done);
496
497 /* restore rx/tx interrupt */
498 dmac_writel(priv, priv->dma_chan_int_mask,
499 DMAC_IRMASK_REG, priv->rx_chan);
500 dmac_writel(priv, priv->dma_chan_int_mask,
501 DMAC_IRMASK_REG, priv->tx_chan);
502
503 return rx_work_done;
504 }
505
506 /*
507 * rx/tx dma interrupt handler
508 */
509 static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id)
510 {
511 struct net_device *dev = dev_id;
512 struct bcm6368_enetsw *priv = netdev_priv(dev);
513
514 /* mask rx/tx interrupts */
515 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
516 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);
517
518 napi_schedule(&priv->napi);
519
520 return IRQ_HANDLED;
521 }
522
523 /*
524 * tx request callback
525 */
526 static netdev_tx_t
527 bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
528 {
529 struct bcm6368_enetsw *priv = netdev_priv(dev);
530 struct bcm6368_enetsw_desc *desc;
531 u32 len_stat;
532 netdev_tx_t ret;
533
534 /* lock against tx reclaim */
535 spin_lock(&priv->tx_lock);
536
537 /* make sure the tx hw queue is not full; this should not happen
538 * since we stop the queue before it fills up */
539 if (unlikely(!priv->tx_desc_count)) {
540 netif_stop_queue(dev);
541 dev_err(&priv->pdev->dev, "xmit called with no tx desc "
542 "available?\n");
543 ret = NETDEV_TX_BUSY;
544 goto out_unlock;
545 }
546
547 /* pad small packets */
548 if (skb->len < (ETH_ZLEN + ETH_FCS_LEN)) {
549 int needed = (ETH_ZLEN + ETH_FCS_LEN) - skb->len;
550 char *data;
551
552 if (unlikely(skb_tailroom(skb) < needed)) {
553 struct sk_buff *nskb;
554
555 nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
556 if (!nskb) {
557 ret = NETDEV_TX_BUSY;
558 goto out_unlock;
559 }
560
561 dev_kfree_skb(skb);
562 skb = nskb;
563 }
564 data = skb_put_zero(skb, needed);
565 }
566
567 /* point to the next available desc */
568 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
569 priv->tx_skb[priv->tx_curr_desc] = skb;
570
571 /* fill descriptor */
572 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
573 DMA_TO_DEVICE);
574
575 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
576 len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
577 DMADESC_OWNER_MASK;
578
579 priv->tx_curr_desc++;
580 if (priv->tx_curr_desc == priv->tx_ring_size) {
581 priv->tx_curr_desc = 0;
582 len_stat |= DMADESC_WRAP_MASK;
583 }
584 priv->tx_desc_count--;
585
586 /* dma might already be polling, make sure we update desc
587 * fields in the correct order */
588 wmb();
589 desc->len_stat = len_stat;
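/* flush the descriptor update before kicking the dma channel */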
590 wmb();
591
592 /* kick tx dma */
593 dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG,
594 priv->tx_chan);
595
596 /* stop queue if no more desc available */
597 if (!priv->tx_desc_count)
598 netif_stop_queue(dev);
599
600 dev->stats.tx_bytes += skb->len;
601 dev->stats.tx_packets++;
602 ret = NETDEV_TX_OK;
603
604 out_unlock:
605 spin_unlock(&priv->tx_lock);
606 return ret;
607 }
608
609 /*
610 * disable dma in given channel
611 */
612 static void bcm6368_enetsw_disable_dma(struct bcm6368_enetsw *priv, int chan)
613 {
614 int limit = 1000;
615
616 dmac_writel(priv, 0, DMAC_CHANCFG_REG, chan);
617
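/* poll until the hardware clears the channel enable bit,
 * bounded to roughly 1 ms */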
618 do {
619 u32 val;
620
621 val = dma_readl(priv, DMAC_CHANCFG_REG, chan);
622 if (!(val & DMAC_CHANCFG_EN_MASK))
623 break;
624
625 udelay(1);
626 } while (limit--);
627 }
628
629 static int bcm6368_enetsw_open(struct net_device *dev)
630 {
631 struct bcm6368_enetsw *priv = netdev_priv(dev);
632 struct device *kdev = &priv->pdev->dev;
633 int i, ret;
634 unsigned int size;
635 void *p;
636 u32 val;
637
638 /* mask all interrupts and request the irq lines */
639 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
640 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);
641
642 ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma,
643 0, dev->name, dev);
644 if (ret)
645 goto out_freeirq;
646
647 if (priv->irq_tx != -1) {
648 ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma,
649 0, dev->name, dev);
650 if (ret)
651 goto out_freeirq_rx;
652 }
653
654 /* allocate rx dma ring */
655 size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc);
656 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
657 if (!p) {
658 dev_err(kdev, "cannot allocate rx ring %u\n", size);
659 ret = -ENOMEM;
660 goto out_freeirq_tx;
661 }
662
663 memset(p, 0, size);
664 priv->rx_desc_alloc_size = size;
665 priv->rx_desc_cpu = p;
666
667 /* allocate tx dma ring */
668 size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc);
669 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
670 if (!p) {
671 dev_err(kdev, "cannot allocate tx ring\n");
672 ret = -ENOMEM;
673 goto out_free_rx_ring;
674 }
675
676 memset(p, 0, size);
677 priv->tx_desc_alloc_size = size;
678 priv->tx_desc_cpu = p;
679
680 priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
681 GFP_KERNEL);
682 if (!priv->tx_skb) {
683 dev_err(kdev, "cannot allocate tx skb queue\n");
684 ret = -ENOMEM;
685 goto out_free_tx_ring;
686 }
687
688 priv->tx_desc_count = priv->tx_ring_size;
689 priv->tx_dirty_desc = 0;
690 priv->tx_curr_desc = 0;
691 spin_lock_init(&priv->tx_lock);
692
693 /* init & fill rx ring with skbs */
694 priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
695 GFP_KERNEL);
696 if (!priv->rx_skb) {
697 dev_err(kdev, "cannot allocate rx skb queue\n");
698 ret = -ENOMEM;
699 goto out_free_tx_skb;
700 }
701
702 priv->rx_desc_count = 0;
703 priv->rx_dirty_desc = 0;
704 priv->rx_curr_desc = 0;
705
706 /* initialize flow control buffer allocation */
707 dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0,
708 DMA_BUFALLOC_REG(priv->rx_chan));
709
710 if (bcm6368_enetsw_refill_rx(dev)) {
711 dev_err(kdev, "cannot allocate rx skb queue\n");
712 ret = -ENOMEM;
713 goto out;
714 }
715
716 /* write rx & tx ring addresses */
717 dmas_writel(priv, priv->rx_desc_dma,
718 DMAS_RSTART_REG, priv->rx_chan);
719 dmas_writel(priv, priv->tx_desc_dma,
720 DMAS_RSTART_REG, priv->tx_chan);
721
722 /* clear remaining state ram for rx & tx channel */
723 dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->rx_chan);
724 dmas_writel(priv, 0, DMAS_SRAM2_REG, priv->tx_chan);
725 dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->rx_chan);
726 dmas_writel(priv, 0, DMAS_SRAM3_REG, priv->tx_chan);
727 dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->rx_chan);
728 dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan);
729
730 /* set dma maximum burst len */
731 dmac_writel(priv, priv->dma_maxburst,
732 DMAC_MAXBURST_REG, priv->rx_chan);
733 dmac_writel(priv, priv->dma_maxburst,
734 DMAC_MAXBURST_REG, priv->tx_chan);
735
736 /* set flow control low/high threshold to 1/3 / 2/3 */
737 val = priv->rx_ring_size / 3;
738 dma_writel(priv, val, DMA_FLOWCL_REG(priv->rx_chan));
739 val = (priv->rx_ring_size * 2) / 3;
740 dma_writel(priv, val, DMA_FLOWCH_REG(priv->rx_chan));
741
742 /* all set, enable mac and interrupts, start dma engine and
743 * kick rx dma channel
744 */
745 wmb();
746 dma_writel(priv, DMA_CFG_EN_MASK, DMA_CFG_REG);
747 dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
748 DMAC_CHANCFG_REG, priv->rx_chan);
749
750 /* watch "packet transferred" interrupt in rx and tx */
751 dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
752 DMAC_IR_REG, priv->rx_chan);
753 dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
754 DMAC_IR_REG, priv->tx_chan);
755
756 /* make sure we enable napi before rx interrupt */
757 napi_enable(&priv->napi);
758
759 dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
760 DMAC_IRMASK_REG, priv->rx_chan);
761 dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
762 DMAC_IRMASK_REG, priv->tx_chan);
763
764 netif_carrier_on(dev);
765 netif_start_queue(dev);
766
767 return 0;
768
769 out:
770 for (i = 0; i < priv->rx_ring_size; i++) {
771 struct bcm6368_enetsw_desc *desc;
772
773 if (!priv->rx_skb[i])
774 continue;
775
776 desc = &priv->rx_desc_cpu[i];
777 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
778 DMA_FROM_DEVICE);
779 kfree_skb(priv->rx_skb[i]);
780 }
781 kfree(priv->rx_skb);
782
783 out_free_tx_skb:
784 kfree(priv->tx_skb);
785
786 out_free_tx_ring:
787 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
788 priv->tx_desc_cpu, priv->tx_desc_dma);
789
790 out_free_rx_ring:
791 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
792 priv->rx_desc_cpu, priv->rx_desc_dma);
793
794 out_freeirq_tx:
795 if (priv->irq_tx != -1)
796 free_irq(priv->irq_tx, dev);
797
798 out_freeirq_rx:
799 free_irq(priv->irq_rx, dev);
800
801 out_freeirq:
802 return ret;
803 }
804
805 static int bcm6368_enetsw_stop(struct net_device *dev)
806 {
807 struct bcm6368_enetsw *priv = netdev_priv(dev);
808 struct device *kdev = &priv->pdev->dev;
809 int i;
810
811 netif_stop_queue(dev);
812 napi_disable(&priv->napi);
813 del_timer_sync(&priv->rx_timeout);
814
815 /* mask all interrupts */
816 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
817 dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);
818
819 /* disable dma channels */
820 bcm6368_enetsw_disable_dma(priv, priv->tx_chan);
821 bcm6368_enetsw_disable_dma(priv, priv->rx_chan);
822
823 /* force reclaim of all tx buffers */
824 bcm6368_enetsw_tx_reclaim(dev, 1);
825
826 /* free the rx skb ring */
827 for (i = 0; i < priv->rx_ring_size; i++) {
828 struct bcm6368_enetsw_desc *desc;
829
830 if (!priv->rx_skb[i])
831 continue;
832
833 desc = &priv->rx_desc_cpu[i];
834 dma_unmap_single_attrs(kdev, desc->address, priv->rx_skb_size,
835 DMA_FROM_DEVICE,
836 DMA_ATTR_SKIP_CPU_SYNC);
837 kfree_skb(priv->rx_skb[i]);
838 }
839
840 /* free remaining allocated memory */
841 kfree(priv->rx_skb);
842 kfree(priv->tx_skb);
843 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
844 priv->rx_desc_cpu, priv->rx_desc_dma);
845 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
846 priv->tx_desc_cpu, priv->tx_desc_dma);
847 if (priv->irq_tx != -1)
848 free_irq(priv->irq_tx, dev);
849 free_irq(priv->irq_rx, dev);
850
851 return 0;
852 }
853
854 static const struct net_device_ops bcm6368_enetsw_ops = {
855 .ndo_open = bcm6368_enetsw_open,
856 .ndo_stop = bcm6368_enetsw_stop,
857 .ndo_start_xmit = bcm6368_enetsw_start_xmit,
858 };
859
860 static int bcm6368_enetsw_probe(struct platform_device *pdev)
861 {
862 struct bcm6368_enetsw *priv;
863 struct device *dev = &pdev->dev;
864 struct device_node *node = dev->of_node;
865 struct net_device *ndev;
866 struct resource *res;
867 const void *mac;
868 unsigned i;
869 int ret;
870
871 ndev = alloc_etherdev(sizeof(*priv));
872 if (!ndev)
873 return -ENOMEM;
874
875 priv = netdev_priv(ndev);
876
877 priv->num_pms = of_count_phandle_with_args(node, "power-domains",
878 "#power-domain-cells");
879 if (priv->num_pms > 1) {
880 priv->pm = devm_kcalloc(dev, priv->num_pms,
881 sizeof(struct device *), GFP_KERNEL);
882 if (!priv->pm)
883 return -ENOMEM;
884
885 priv->link_pm = devm_kcalloc(dev, priv->num_pms,
886 sizeof(struct device_link *),
887 GFP_KERNEL);
888 if (!priv->link_pm)
889 return -ENOMEM;
890
891 for (i = 0; i < priv->num_pms; i++) {
892 priv->pm[i] = genpd_dev_pm_attach_by_id(dev, i);
893 if (IS_ERR(priv->pm[i])) {
894 dev_err(dev, "error getting pm %d\n", i);
895 return -EINVAL;
896 }
897
898 priv->link_pm[i] = device_link_add(dev, priv->pm[i],
899 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
900 DL_FLAG_RPM_ACTIVE);
901 }
902 }
903
904 pm_runtime_enable(dev);
905 pm_runtime_no_callbacks(dev);
906 ret = pm_runtime_get_sync(dev);
907 if (ret < 0) {
908 pm_runtime_disable(dev);
909 dev_info(dev, "PM probe defer: ret=%d\n", ret);
910 return -EPROBE_DEFER;
911 }
912
913 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
914 priv->dma_base = devm_ioremap_resource(dev, res);
915 if (IS_ERR(priv->dma_base))
916 return PTR_ERR(priv->dma_base);
917
918 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
919 "dma-channels");
920 priv->dma_chan = devm_ioremap_resource(dev, res);
921 if (IS_ERR(priv->dma_chan))
922 return PTR_ERR(priv->dma_chan);
923
924 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram");
925 priv->dma_sram = devm_ioremap_resource(dev, res);
926 if (IS_ERR(priv->dma_sram))
927 return PTR_ERR(priv->dma_sram);
928
929 priv->irq_rx = platform_get_irq_byname(pdev, "rx");
930 if (!priv->irq_rx)
931 return -ENODEV;
932
933 priv->irq_tx = platform_get_irq_byname(pdev, "tx");
934 if (!priv->irq_tx)
935 return -ENODEV;
936 else if (priv->irq_tx < 0)
937 priv->irq_tx = -1;
938
939 if (device_property_read_u32(dev, "dma-rx", &priv->rx_chan))
940 return -ENODEV;
941
942 if (device_property_read_u32(dev, "dma-tx", &priv->tx_chan))
943 return -ENODEV;
944
945 priv->rx_ring_size = ENETSW_DEF_RX_DESC;
946 priv->tx_ring_size = ENETSW_DEF_TX_DESC;
947
948 priv->dma_maxburst = ENETSW_DMA_MAXBURST;
949
950 priv->copybreak = ENETSW_DEF_CPY_BREAK;
951
952 priv->dma_chan_en_mask = DMAC_CHANCFG_EN_MASK;
953 priv->dma_chan_int_mask = DMAC_IR_PKTDONE_MASK;
954 priv->dma_chan_width = DMA_CHAN_WIDTH;
955
956 mac = of_get_mac_address(node);
957 if (!IS_ERR_OR_NULL(mac)) {
958 memcpy(ndev->dev_addr, mac, ETH_ALEN);
959 dev_info(dev, "dt mac %pM\n", ndev->dev_addr);
960 } else {
961 random_ether_addr(ndev->dev_addr);
962 dev_info(dev, "random mac %pM\n", ndev->dev_addr);
963 }
964
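/* rx buffers must hold the mtu plus switch tag and vlan overhead,
 * rounded up to the dma burst size in bytes */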
965 priv->rx_skb_size = ALIGN(ndev->mtu + ENETSW_MTU_OVERHEAD,
966 priv->dma_maxburst * 4);
967
968 priv->num_clocks = of_clk_get_parent_count(node);
969 if (priv->num_clocks) {
970 priv->clock = devm_kcalloc(dev, priv->num_clocks,
971 sizeof(struct clk *), GFP_KERNEL);
972 if (!priv->clock)
973 return -ENOMEM;
974 }
975 for (i = 0; i < priv->num_clocks; i++) {
976 priv->clock[i] = of_clk_get(node, i);
977 if (IS_ERR(priv->clock[i])) {
978 dev_err(dev, "error getting clock %d\n", i);
979 return -EINVAL;
980 }
981
982 ret = clk_prepare_enable(priv->clock[i]);
983 if (ret) {
984 dev_err(dev, "error enabling clock %d\n", i);
985 return ret;
986 }
987 }
988
989 priv->num_resets = of_count_phandle_with_args(node, "resets",
990 "#reset-cells");
991 if (priv->num_resets) {
992 priv->reset = devm_kcalloc(dev, priv->num_resets,
993 sizeof(struct reset_control *),
994 GFP_KERNEL);
995 if (!priv->reset)
996 return -ENOMEM;
997 }
998 for (i = 0; i < priv->num_resets; i++) {
999 priv->reset[i] = devm_reset_control_get_by_index(dev, i);
1000 if (IS_ERR(priv->reset[i])) {
1001 dev_err(dev, "error getting reset %d\n", i);
1002 return -EINVAL;
1003 }
1004
1005 ret = reset_control_reset(priv->reset[i]);
1006 if (ret) {
1007 dev_err(dev, "error performing reset %d\n", i);
1008 return ret;
1009 }
1010 }
1011
1012 spin_lock_init(&priv->rx_lock);
1013
1014 timer_setup(&priv->rx_timeout, bcm6368_enetsw_refill_rx_timer, 0);
1015
1016 /* register netdevice */
1017 ndev->netdev_ops = &bcm6368_enetsw_ops;
1018 ndev->min_mtu = ETH_ZLEN;
1019 ndev->mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
1020 ndev->max_mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
1021 netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16);
1022 SET_NETDEV_DEV(ndev, dev);
1023
1024 ret = register_netdev(ndev);
1025 if (ret)
1026 goto out_disable_clk;
1027
1028 netif_carrier_off(ndev);
1029 platform_set_drvdata(pdev, ndev);
1030 priv->pdev = pdev;
1031 priv->net_dev = ndev;
1032
1033 return 0;
1034
1035 out_disable_clk:
1036 for (i = 0; i < priv->num_resets; i++)
1037 reset_control_assert(priv->reset[i]);
1038
1039 for (i = 0; i < priv->num_clocks; i++)
1040 clk_disable_unprepare(priv->clock[i]);
1041
1042 return ret;
1043 }
1044
1045 static int bcm6368_enetsw_remove(struct platform_device *pdev)
1046 {
1047 struct device *dev = &pdev->dev;
1048 struct net_device *ndev = platform_get_drvdata(pdev);
1049 struct bcm6368_enetsw *priv = netdev_priv(ndev);
1050 unsigned int i;
1051
1052 unregister_netdev(ndev);
1053
1054 pm_runtime_put_sync(dev);
1055 for (i = 0; priv->pm && i < priv->num_pms; i++) {
1056 dev_pm_domain_detach(priv->pm[i], true);
1057 device_link_del(priv->link_pm[i]);
1058 }
1059
1060 for (i = 0; i < priv->num_resets; i++)
1061 reset_control_assert(priv->reset[i]);
1062
1063 for (i = 0; i < priv->num_clocks; i++)
1064 clk_disable_unprepare(priv->clock[i]);
1065
1066 free_netdev(ndev);
1067
1068 return 0;
1069 }
1070
1071 static const struct of_device_id bcm6368_enetsw_of_match[] = {
1072 { .compatible = "brcm,bcm6318-enetsw", },
1073 { .compatible = "brcm,bcm6328-enetsw", },
1074 { .compatible = "brcm,bcm6362-enetsw", },
1075 { .compatible = "brcm,bcm6368-enetsw", },
1076 { .compatible = "brcm,bcm63268-enetsw", },
1077 { /* sentinel */ }
1078 };
1079 MODULE_DEVICE_TABLE(of, bcm6368_enetsw_of_match);
1080
1081 static struct platform_driver bcm6368_enetsw_driver = {
1082 .driver = {
1083 .name = "bcm6368-enetsw",
1084 .of_match_table = of_match_ptr(bcm6368_enetsw_of_match),
1085 },
1086 .probe = bcm6368_enetsw_probe,
1087 .remove = bcm6368_enetsw_remove,
1088 };
1089 module_platform_driver(bcm6368_enetsw_driver);