ath79: ensure QCA956x gmac0 mux selects sgmii
openwrt/openwrt.git: target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE \
	(NETIF_MSG_DRV \
	| NETIF_MSG_PROBE \
	| NETIF_MSG_LINK \
	| NETIF_MSG_TIMER \
	| NETIF_MSG_IFDOWN \
	| NETIF_MSG_IFUP \
	| NETIF_MSG_RX_ERR \
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN 2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
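
/*
 * Worst-case frame size on the wire for a given MTU: the 2-byte header
 * inserted by the built-in switch on some SoCs, the ethernet header, one
 * VLAN tag and the FCS all sit on top of the MTU-sized payload.
 */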
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
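
/*
 * Drop everything still queued in the TX ring. Non-empty descriptors are
 * counted as TX errors; the completed-queue accounting is updated so the
 * stack's BQL state stays consistent after an interface stop or reset.
 */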
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
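
/*
 * Attach a fresh receive buffer to one ring slot. The allocator is passed
 * in as a callback so the same helper can use netdev_alloc_frag() when the
 * ring is first filled and napi_alloc_frag() when refilling from softirq
 * context.
 */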
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag->rx_buf_offset;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
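
/*
 * The TX and RX rings share a single buf array and a single coherent DMA
 * allocation; the RX pointers below are simply offsets past the TX portion,
 * so there is only one allocation to unwind on the error path.
 */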
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
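
/*
 * The MAC address is programmed as two 32-bit words: ADDR1 packs bytes
 * 5..2 of the address (byte 5 in the top bits), ADDR2 packs bytes 1..0
 * in its upper half.
 */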
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* clear the max frame length; the real value is set in ag71xx_open() */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_deassert(ag->mdio_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
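
/*
 * Lightweight reset used on link changes: the MII clock configuration and
 * the current RX descriptor pointer are saved and restored across a MAC
 * reset so traffic can resume without a full ring re-initialisation.
 */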
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
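
/*
 * Write the speed-specific PLL value (plldata[0]/[1]/[2] for
 * 1000/100/10 Mbit/s, taken from the DT "pll-data" property) through the
 * syscon regmap referenced by "pll-handle".
 */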
static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}
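
/*
 * PLL update sequence for the older (AR71xx/AR913x style) MACs: the 2-bit
 * field at pll_shift in the config register gates the PLL while the new
 * value is latched. The exact bit semantics are SoC-specific (assumption
 * based on the old ar71xx platform code this sequence mirrors).
 */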
static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}

static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) | bit;
	__raw_writel(val, reg);
	__raw_readl(reg);
}

static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) & ~bit;
	__raw_writel(val, reg);
	__raw_readl(reg);
}

static void ag71xx_sgmii_init_qca955x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 mr_an_status;
	u32 sgmii_status;
	u8 tries = 0;
	int err = 0;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		err = -ENOMEM;
		goto err_iomap;
	}

	mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
	if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
		goto sgmii_out;

	/* SGMII reset sequence */
	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
		     gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	__raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_HW_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_CLK_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_CLK_N);
	udelay(10);

	/*
	 * The following is what QCA has to say about what happens here:
	 *
	 * Across resets SGMII link status goes to weird state.
	 * If SGMII_DEBUG register reads other than 0x1f or 0x10,
	 * we are for sure in a bad state.
	 *
	 * Issue a PHY reset in MR_AN_CONTROL to keep going.
	 */
	do {
		ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
			       QCA955X_MR_AN_CONTROL_PHY_RESET |
			       QCA955X_MR_AN_CONTROL_AN_ENABLE);
		udelay(200);
		ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
				 QCA955X_MR_AN_CONTROL_PHY_RESET);
		mdelay(300);
		sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) &
					   QCA955X_SGMII_DEBUG_TX_STATE_MASK;

		if (tries++ >= 20) {
			pr_err("ag71xx: max retries for SGMII fixup exceeded\n");
			break;
		}
	} while (!(sgmii_status == 0xf || sgmii_status == 0x10));

sgmii_out:
	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}

static void ag71xx_mux_select_sgmii_qca956x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 t;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		goto err_iomap;
	}

	t = __raw_readl(gmac_base + QCA956X_GMAC_REG_ETH_CFG);
	t |= QCA956X_ETH_CFG_GE0_SGMII;
	__raw_writel(t, gmac_base + QCA956X_GMAC_REG_ETH_CFG);

	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}
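
/*
 * The MII_CTRL register lives in a separate resource (ag->mii_base) and
 * selects which interface type (MII/GMII/RGMII/RMII) and clock speed the
 * MAC uses on the older SoCs.
 */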
static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if)
{
	u32 t;

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_if & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}

static void ath79_mii0_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_MII:
		mii_if = AR71XX_MII0_CTRL_IF_MII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		mii_if = AR71XX_MII0_CTRL_IF_GMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII0_CTRL_IF_RGMII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII0_CTRL_IF_RMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii1_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII1_CTRL_IF_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII1_CTRL_IF_RGMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, ag->mii_base);
}
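
/*
 * Reconfigure the MAC for the speed/duplex the PHY negotiated: MAC_CFG2
 * selects the interface width, IFCTL the 10/100 speed bit and FIFO_CFG5
 * the gigabit FIFO mode. On everything except AR7100/AR913x the driver
 * performs a fast reset before touching these registers.
 */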
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (update) {
		if (of_device_is_compatible(np, "qca,ar7100-eth") ||
		    of_device_is_compatible(np, "qca,ar9130-eth")) {
			ath79_set_pll(ag);
			ath79_mii_ctrl_set_speed(ag);
		} else if (of_device_is_compatible(np, "qca,ar7242-eth") ||
			   of_device_is_compatible(np, "qca,ar9340-eth") ||
			   of_device_is_compatible(np, "qca,qca9550-eth") ||
			   of_device_is_compatible(np, "qca,qca9560-eth")) {
			ath79_set_pllval(ag);
			if (of_property_read_bool(np, "qca955x-sgmii-fixup"))
				ag71xx_sgmii_init_qca955x(np);
		}
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	unsigned long flags;
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_hw_disable(ag);

	return 0;
}
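
/*
 * Map one TX buffer onto one or more descriptors. When ring->desc_split
 * is set (AR7100), the buffer is chopped into chunks of at most
 * desc_split bytes chained with DESC_MORE, and a chunk is never cut so
 * that 4 bytes or fewer remain, since such a transfer hangs the DMA
 * engine. Returns the number of descriptors used, or -1 if the ring is
 * full.
 */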
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes;
			 * make sure the next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);

	switch (cmd) {
	case SIOCSIFHWADDR:
		if (copy_from_user
		    (dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
		    (ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
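
/*
 * Heuristic for the TX hang erratum: once no packet has left for ~100ms
 * while the carrier is up, compare the RX/TX DMA state machine registers
 * against the values the workaround treats as a wedged engine (the magic
 * numbers are undocumented).
 */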
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
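
/*
 * Receive path: completed buffers are turned into skbs with build_skb()
 * (no copy, the page fragment becomes the skb head) and handed to the
 * stack in one batch via netif_receive_skb_list() after the ring has been
 * refilled.
 */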
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct list_head rx_list;
	struct sk_buff *next;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);
	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_list(&rx_list);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
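
/*
 * NAPI poll: reap TX completions first, then receive up to the budget.
 * If the RX refill ran out of memory, polling is parked and the OOM timer
 * reschedules it later; if an RX overflow was latched, the RX engine is
 * kicked again before deciding whether to leave polling mode.
 */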
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
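
/*
 * Probe: all board knobs come from DT. "fifo-data", "pll-data" and
 * "pll-reg" are optional u32 triples with per-SoC defaults below,
 * "pll-handle" points at the PLL syscon, and the compatible string picks
 * the various hardware quirks (descriptor length mask, TX hang
 * workaround, RX offset, AR7100 descriptor split).
 */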
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	const void *mac_addr;
	u32 max_frame_len;
	int tx_size, err;

	if (!np)
		return -ENODEV;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get_exclusive(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mdio_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "mdio");

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start,
						    res->end - res->start + 1);
		if (!ag->mii_base)
			return -ENOMEM;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->min_mtu = 68;
	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth") ||
	    of_device_is_compatible(np, "qca,ar7241-eth") ||
	    of_device_is_compatible(np, "qca,ar7242-eth") ||
	    of_device_is_compatible(np, "qca,ar9330-eth") ||
	    of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (IS_ERR_OR_NULL(mac_addr) || !is_valid_ether_addr(mac_addr)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_random_addr(dev->dev_addr);
	} else {
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		return ag->phy_if_mode;
	}

	if (of_device_is_compatible(np, "qca,qca9560-eth") &&
	    ag->phy_if_mode == PHY_INTERFACE_MODE_SGMII)
		ag71xx_mux_select_sgmii_qca956x(np);

	if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx))
		ag->mac_idx = -1;
	if (ag->mii_base)
		switch (ag->mac_idx) {
		case 0:
			ath79_mii0_ctrl_set_if(ag);
			break;
		case 1:
			ath79_mii1_ctrl_set_if(ag);
			break;
		default:
			break;
		}

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/*
	 * Populate the current node to register the mdio-bus as a subdevice.
	 * The mdio bus works independently on AR7241 and later chips, and we
	 * need to load mdio1 before gmac0, which can be done by adding a
	 * "simple-mfd" compatible to the gmac node. Check the OF_POPULATED_BUS
	 * flag before populating to avoid duplicate population.
	 */
	if (!of_node_check_flag(np, OF_POPULATED_BUS)) {
		err = of_platform_populate(np, NULL, NULL, &pdev->dev);
		if (err)
			return err;
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);