ath79: ag71xx: Remove ndo_poll_controller
target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);

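/* Worst-case length of a frame on the wire for a given MTU: optional
 * switch header, Ethernet header, one VLAN tag, payload and FCS. Used
 * both for programming AG71XX_REG_MAC_MFL and for sizing RX buffers. */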
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

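/* Drop everything still queued in the TX ring and free the associated
 * skbs; descriptors the hardware never picked up are counted as
 * tx_errors. Called while the ring is being torn down. */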
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++) {
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
	}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

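/* Allocate and DMA-map a single RX buffer and point the corresponding
 * descriptor at it. The alloc callback lets callers pick the proper
 * allocator: netdev_alloc_frag() at init time, napi_alloc_frag() from
 * the poll loop. */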
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

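/* Re-arm all descriptors the poll loop has consumed. Returns the number
 * of descriptors handed back to the hardware. */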
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag->rx_buf_offset;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static const char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

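/* Bring the DMA engine into a known idle state: stop both engines,
 * park the descriptor pointers on the dummy stop descriptor and clear
 * any latched status bits. */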
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* clear the max frame length; the real value is programmed when
	 * the interface is brought up */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_deassert(ag->mdio_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

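/* Reset the MAC without a full reinit: the MII configuration and the
 * current RX descriptor pointer are saved and restored across the
 * reset so traffic can resume immediately. Used on link changes on
 * SoCs other than ar7100/ar9130. */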
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}

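/* Program the ethernet PLL for the current link speed, toggling the
 * PLL config bits around the value update as the hardware requires. */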
static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}

static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if)
{
	u32 t;

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_if & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}

static void ath79_mii0_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_MII:
		mii_if = AR71XX_MII0_CTRL_IF_MII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		mii_if = AR71XX_MII0_CTRL_IF_GMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		mii_if = AR71XX_MII0_CTRL_IF_RGMII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII0_CTRL_IF_RMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii1_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII1_CTRL_IF_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		mii_if = AR71XX_MII1_CTRL_IF_RGMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}

static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, ag->mii_base);
}

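/* Apply the current link state to the MAC: reconfigure MAC_CFG2, IFCTL
 * and the FIFOs for the negotiated speed/duplex, update the PLL on SoCs
 * that need it, and restart the RX engine. With update == false only
 * the MAC is reprogrammed, e.g. after a TX-timeout restart. */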
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (update) {
		if (of_device_is_compatible(np, "qca,ar7100-eth") ||
		    of_device_is_compatible(np, "qca,ar9130-eth")) {
			ath79_set_pll(ag);
			ath79_mii_ctrl_set_speed(ag);
		} else if (of_device_is_compatible(np, "qca,ar7242-eth") ||
			   of_device_is_compatible(np, "qca,ar9340-eth") ||
			   of_device_is_compatible(np, "qca,qca9550-eth") ||
			   of_device_is_compatible(np, "qca,qca9560-eth")) {
			ath79_set_pllval(ag);
		}
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	unsigned long flags;
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_hw_disable(ag);

	return 0;
}

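/* Split one TX buffer across as many descriptors as needed, honouring
 * the ring's desc_split limit. Returns the number of descriptors used,
 * or -1 if the ring has no room. The first descriptor stays marked
 * empty until the chain is complete, so the hardware cannot start on a
 * half-built packet. */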
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		/* dev->dev_addr is a pointer, so sizeof(dev->dev_addr) would
		 * give the pointer size instead of the 6-byte MAC address;
		 * copy ETH_ALEN bytes explicitly */
		if (copy_from_user(dev->dev_addr, ifr->ifr_data, ETH_ALEN))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user(ifr->ifr_data, dev->dev_addr, ETH_ALEN))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

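/* Heuristic for the ar724x TX hang workaround: if nothing has been
 * transmitted for a while, inspect the MAC state machine and FIFO depth
 * registers to decide whether the DMA engine is wedged and the MAC
 * needs a restart. */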
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

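/* Reclaim completed TX descriptors, free their skbs and update the
 * byte/packet queue accounting. With flush set, all descriptors are
 * force-completed (used while the MAC is being reset). */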
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

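/* RX path, called from NAPI poll: pull up to 'limit' frames out of the
 * ring, hand them to the stack via build_skb() and refill the ring. */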
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}

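/* NAPI poll handler: reclaim TX, receive up to 'limit' frames, handle
 * RX overflow and out-of-memory conditions, and re-enable interrupts
 * once both rings are idle. */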
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

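/* Hard interrupt handler: acknowledge bus errors directly and defer all
 * RX/TX work to NAPI by masking the poll interrupts. */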
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

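/* Probe: map the MAC (and optional MII control) registers, parse the DT
 * properties (FIFO/PLL data, MAC address, PHY mode), allocate the stop
 * descriptor, set up the rings and register the net_device. */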
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	const void *mac_addr;
	u32 max_frame_len;
	int tx_size, err;

	if (!np)
		return -ENODEV;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get_exclusive(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mdio_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "mdio");

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start,
						    res->end - res->start + 1);
		if (!ag->mii_base)
			return -ENOMEM;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->min_mtu = 68;
	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	if (!mac_addr || !is_valid_ether_addr(dev->dev_addr)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_random_addr(dev->dev_addr);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		return ag->phy_if_mode;
	}

	if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx))
		ag->mac_idx = -1;
	if (ag->mii_base) {
		switch (ag->mac_idx) {
		case 0:
			ath79_mii0_ctrl_set_if(ag);
			break;
		case 1:
			ath79_mii1_ctrl_set_if(ag);
			break;
		default:
			break;
		}
	}

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/*
	 * Populate the current node to register the mdio-bus as a
	 * subdevice. The mdio bus works independently on ar7241 and
	 * later chips, and mdio1 must be loaded before gmac0, which can
	 * be done by adding a "simple-mfd" compatible to the gmac node.
	 * Check the OF_POPULATED_BUS flag before populating to avoid
	 * doing it twice.
	 */
	if (!of_node_check_flag(np, OF_POPULATED_BUS)) {
		err = of_platform_populate(np, NULL, NULL, &pdev->dev);
		if (err)
			return err;
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);