ar71xx: allocate rx/tx descriptor/buffers in one chunk
[openwrt/openwrt.git] target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE \
	(NETIF_MSG_DRV \
	| NETIF_MSG_PROBE \
	| NETIF_MSG_LINK \
	| NETIF_MSG_TIMER \
	| NETIF_MSG_IFDOWN \
	| NETIF_MSG_IFUP \
	| NETIF_MSG_RX_ERR \
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
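
/*
 * Worked example: for the default MTU of 1500 this evaluates to
 * 2 (switch header) + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (FCS)
 * = 1524 bytes.
 */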

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_TX_DESC),
		ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_RX_DESC),
		ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
		ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
		ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
		ag->dev->name, label, intr,
		(intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
		(intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
		(intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
		(intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
		(intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
		(intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

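/*
 * Ring bookkeeping note: curr and dirty are free-running counters and
 * an entry index is always derived by masking with BIT(ring->order) - 1,
 * which is why ring sizes are kept at powers of two. curr - dirty is
 * the number of descriptors currently in flight.
 */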
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}
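
/*
 * Example (typical values, not guaranteed for every configuration):
 * with NET_SKB_PAD == 64 and NET_IP_ALIGN == 2, the RX payload starts
 * at offset 64, or at 66 on AR724x without the AR8216 header, so the
 * IP header lands on a 4-byte boundary after the 14-byte Ethernet
 * header.
 */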

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
			desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
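
/*
 * Note: the descriptor ring is fully linked before any buffer is
 * allocated, so a partial -ENOMEM failure still leaves a consistent
 * ring; ag71xx_ring_rx_clean() can then free whatever buffers were
 * actually filled.
 */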

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
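
/*
 * Layout note (the point of the "one chunk" change): a single
 * dma_alloc_coherent() holds BIT(tx->order) TX descriptors immediately
 * followed by BIT(rx->order) RX descriptors, and a single kzalloc()
 * holds both buf[] arrays; the rx ring simply points into the tail of
 * the tx allocation.
 */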

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
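
/*
 * Example: for the MAC address aa:bb:cc:dd:ee:ff the writes above are
 *   MAC_ADDR1 = 0xffeeddcc   (mac[5]..mac[2])
 *   MAC_ADDR2 = 0xbbaa0000   (mac[1]..mac[0] in the upper half)
 */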

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * Give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption.
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
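
/*
 * Reset sequence summary: optional PHY reset (AR724x only), MAC soft
 * reset via MAC_CFG1_SR, device reset lines toggled through
 * ath79_device_reset_set/clear, then MAC/FIFO setup and a DMA reset.
 */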

static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = pdata->fifo_cfg3;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (pdata->disable_inline_checksum_engine) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}

static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if a DMA transfer is <= 4 bytes;
			 * make sure the next segment is more than 4 bytes
			 * long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
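
/*
 * Worked example (assuming desc_split == 512): a 516-byte frame would
 * naively split into 512 + 4 bytes, but a DMA segment of 4 bytes or
 * less hangs the TX engine, so the first descriptor is trimmed to 508
 * and the remainder becomes a safe 8-byte segment.
 */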

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;
	ring->buf[i].timestamp = jiffies;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	if (!sent)
		return 0;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
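
/*
 * Note: the netdev_completed_queue() call above pairs with
 * netdev_sent_queue() in ag71xx_hard_start_xmit() for byte queue
 * limit (BQL) accounting; ag71xx_ring_tx_clean() reports the same
 * totals when the ring is torn down.
 */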

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
		dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
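
/*
 * NAPI flow: ag71xx_interrupt() masks AG71XX_INT_POLL and schedules
 * ag71xx_poll(); once fewer than `limit` packets remain and no RX/TX
 * status bits are pending, the poll handler completes NAPI and
 * unmasks the interrupt. On allocation failure it backs off to the
 * oom_timer instead, which reschedules NAPI later.
 */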

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}

static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int tx_size, err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask) {
		err = -EINVAL;
		goto err_free_dev;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  0x0,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dma_alloc_coherent(NULL,
		sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);

	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}
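
/*
 * Init ordering: debugfs root first, then the MDIO bus driver, then
 * the platform driver; the error path above unwinds in exactly the
 * reverse order, mirroring ag71xx_module_exit().
 */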

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);