target/linux/ixp4xx/patches-2.6.25/200-npe_driver.patch
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -47,3 +47,11 @@
 	help
 	  This is a driver for the ethernet hardware included in EP93xx CPUs.
 	  Say Y if you are building a kernel for EP93xx based devices.
+
+config IXP4XX_ETH
+	tristate "Intel IXP4xx Ethernet support"
+	depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+	select MII
+	help
+	  Say Y here if you want to use built-in Ethernet ports
+	  on IXP4xx processors.
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_ARM_ETHER1)	+= ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER)	+= at91_ether.o
 obj-$(CONFIG_EP93XX_ETH)	+= ep93xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH)	+= ixp4xx_eth.o
--- /dev/null
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -0,0 +1,1281 @@
+/*
+ * Intel IXP4xx Ethernet driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Ethernet port config (0x00 is not present on IXP42X):
+ *
+ * logical port	0x00		0x10		0x20
+ * NPE		0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
+ * physical PortId	2		0		1
+ * TX queue		23		24		25
+ * RX-free queue	26		27		28
+ * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
+ *
+ *
+ * Queue entries:
+ * bits 0 -> 1	- NPE ID (RX and TX-done)
+ * bits 0 -> 2	- priority (TX, per 802.1D)
+ * bits 3 -> 4	- port ID (user-set?)
+ * bits 5 -> 31	- physical descriptor address
+ */
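+/*
+ * Example (illustrative value, not from NPE documentation): a TX-done
+ * entry of 0x01A84062 carries NPE ID 2 (bits 0-1) and descriptor
+ * address 0x01A84060 (entry & ~0x1F); see eth_txdone_irq() below.
+ */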
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <asm/arch/npe.h>
+#include <asm/arch/qmgr.h>
+
+#define DEBUG_QUEUES		0
+#define DEBUG_DESC		0
+#define DEBUG_RX		0
+#define DEBUG_TX		0
+#define DEBUG_PKT_BYTES		0
+#define DEBUG_MDIO		0
+#define DEBUG_CLOSE		0
+
+#define DRV_NAME		"ixp4xx_eth"
+
+#define MAX_NPES		3
+
+#define RX_DESCS		64 /* also length of all RX queues */
+#define TX_DESCS		16 /* also length of all TX queues */
+#define TXDONE_QUEUE_LEN	64 /* dwords */
+
+#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
+#define REGS_SIZE		0x1000
+#define MAX_MRU			1536 /* 0x600 */
+#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
+
+#define NAPI_WEIGHT		16
+#define MDIO_INTERVAL		(3 * HZ)
+#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
+#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
+#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */
+
+#define NPE_ID(port_id)		((port_id) >> 4)
+#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
+#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
+#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
+#define TXDONE_QUEUE		31
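+/*
+ * Example (follows from the port table above): logical port 0x10 is
+ * NPE-B, so NPE_ID() = 1, PHYSICAL_ID() = 0, TX_QUEUE() = 24 and
+ * RXFREE_QUEUE() = 27.
+ */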
+
+/* TX Control Registers */
+#define TX_CNTRL0_TX_EN		0x01
+#define TX_CNTRL0_HALFDUPLEX	0x02
+#define TX_CNTRL0_RETRY		0x04
+#define TX_CNTRL0_PAD_EN	0x08
+#define TX_CNTRL0_APPEND_FCS	0x10
+#define TX_CNTRL0_2DEFER	0x20
+#define TX_CNTRL0_RMII		0x40 /* reduced MII */
+#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */
+
+/* RX Control Registers */
+#define RX_CNTRL0_RX_EN		0x01
+#define RX_CNTRL0_PADSTRIP_EN	0x02
+#define RX_CNTRL0_SEND_FCS	0x04
+#define RX_CNTRL0_PAUSE_EN	0x08
+#define RX_CNTRL0_LOOP_EN	0x10
+#define RX_CNTRL0_ADDR_FLTR_EN	0x20
+#define RX_CNTRL0_RX_RUNT_EN	0x40
+#define RX_CNTRL0_BCAST_DIS	0x80
+#define RX_CNTRL1_DEFER_EN	0x01
+
+/* Core Control Register */
+#define CORE_RESET		0x01
+#define CORE_RX_FIFO_FLUSH	0x02
+#define CORE_TX_FIFO_FLUSH	0x04
+#define CORE_SEND_JAM		0x08
+#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */
+
+#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
+				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
+				 TX_CNTRL0_2DEFER)
+#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
+#define DEFAULT_CORE_CNTRL	CORE_MDC_EN
+
+
+/* NPE message codes */
+#define NPE_GETSTATUS			0x00
+#define NPE_EDB_SETPORTADDRESS		0x01
+#define NPE_EDB_GETMACADDRESSDATABASE	0x02
+#define NPE_EDB_SETMACADDRESSDATABASE	0x03
+#define NPE_GETSTATS			0x04
+#define NPE_RESETSTATS			0x05
+#define NPE_SETMAXFRAMELENGTHS		0x06
+#define NPE_VLAN_SETRXTAGMODE		0x07
+#define NPE_VLAN_SETDEFAULTRXVID	0x08
+#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
+#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
+#define NPE_VLAN_SETRXQOSENTRY		0x0B
+#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
+#define NPE_STP_SETBLOCKINGSTATE	0x0D
+#define NPE_FW_SETFIREWALLMODE		0x0E
+#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
+#define NPE_PC_SETAPMACTABLE		0x11
+#define NPE_SETLOOPBACK_MODE		0x12
+#define NPE_PC_SETBSSIDTABLE		0x13
+#define NPE_ADDRESS_FILTER_CONFIG	0x14
+#define NPE_APPENDFCSCONFIG		0x15
+#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
+#define NPE_MAC_RECOVERY_START		0x17
+
+
+#ifdef __ARMEB__
+typedef struct sk_buff buffer_t;
+#define free_buffer dev_kfree_skb
+#define free_buffer_irq dev_kfree_skb_irq
+#else
+typedef void buffer_t;
+#define free_buffer kfree
+#define free_buffer_irq kfree
+#endif
+
+struct eth_regs {
+	u32 tx_control[2], __res1[2];		/* 000 */
+	u32 rx_control[2], __res2[2];		/* 010 */
+	u32 random_seed, __res3[3];		/* 020 */
+	u32 partial_empty_threshold, __res4;	/* 030 */
+	u32 partial_full_threshold, __res5;	/* 038 */
+	u32 tx_start_bytes, __res6[3];		/* 040 */
+	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
+	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
+	u32 slot_time, __res9[3];		/* 070 */
+	u32 mdio_command[4];			/* 080 */
+	u32 mdio_status[4];			/* 090 */
+	u32 mcast_mask[6], __res10[2];		/* 0A0 */
+	u32 mcast_addr[6], __res11[2];		/* 0C0 */
+	u32 int_clock_threshold, __res12[3];	/* 0E0 */
+	u32 hw_addr[6], __res13[61];		/* 0F0 */
+	u32 core_control;			/* 1FC */
+};
+
+struct port {
+	struct resource *mem_res;
+	struct eth_regs __iomem *regs;
+	struct npe *npe;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	struct net_device_stats stat;
+	struct mii_if_info mii;
+	struct delayed_work mdio_thread;
+	struct eth_plat_info *plat;
+	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+	struct desc *desc_tab;	/* coherent */
+	u32 desc_tab_phys;
+	int id;			/* logical port ID */
+	u16 mii_bmcr;
+};
+
+/* NPE message structure */
+struct msg {
+#ifdef __ARMEB__
+	u8 cmd, eth_id, byte2, byte3;
+	u8 byte4, byte5, byte6, byte7;
+#else
+	u8 byte3, byte2, eth_id, cmd;
+	u8 byte7, byte6, byte5, byte4;
+#endif
+};
+
+/* Ethernet packet descriptor */
+struct desc {
+	u32 next;		/* pointer to next buffer, unused */
+
+#ifdef __ARMEB__
+	u16 buf_len;		/* buffer length */
+	u16 pkt_len;		/* packet length */
+	u32 data;		/* pointer to data buffer in RAM */
+	u8 dest_id;
+	u8 src_id;
+	u16 flags;
+	u8 qos;
+	u8 padlen;
+	u16 vlan_tci;
+#else
+	u16 pkt_len;		/* packet length */
+	u16 buf_len;		/* buffer length */
+	u32 data;		/* pointer to data buffer in RAM */
+	u16 flags;
+	u8 src_id;
+	u8 dest_id;
+	u16 vlan_tci;
+	u8 padlen;
+	u8 qos;
+#endif
+
+#ifdef __ARMEB__
+	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
+	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
+	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
+#else
+	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
+	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
+	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
+#endif
+};
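+/*
+ * Note: both layouts above describe the same big-endian NPE descriptor;
+ * on a little-endian CPU the fields within each 32-bit word sit at
+ * swapped offsets, which is also why packet data goes through
+ * memcpy_swab32() on that configuration.
+ */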
+
+
+#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
+				 (n) * sizeof(struct desc))
+#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])
+
+#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
+				 ((n) + RX_DESCS) * sizeof(struct desc))
+#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
+
+#ifndef __ARMEB__
+static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
+{
+	int i;
+	for (i = 0; i < cnt; i++)
+		dest[i] = swab32(src[i]);
+}
+#endif
+
+static spinlock_t mdio_lock;
+static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+static int ports_open;
+static struct port *npe_port_tab[MAX_NPES];
+static struct dma_pool *dma_pool;
+
+
+static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
+		    int write, u16 cmd)
+{
+	int cycles = 0;
+
+	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
+		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
+		return 0;
+	}
+
+	if (write) {
+		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
+		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
+	}
+	__raw_writel(((phy_id << 5) | location) & 0xFF,
+		     &mdio_regs->mdio_command[2]);
+	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
+		     &mdio_regs->mdio_command[3]);
+
+	while ((cycles < MAX_MDIO_RETRIES) &&
+	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
+		udelay(1);
+		cycles++;
+	}
+
+	if (cycles == MAX_MDIO_RETRIES) {
+		printk(KERN_ERR "%s: MII command failed\n", dev->name);
+		return 0;
+	}
+
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
+	       cycles);
+#endif
+
+	if (write)
+		return 0;
+
+	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
+		printk(KERN_ERR "%s: MII read failed\n", dev->name);
+		return 0;
+	}
+
+	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
+	       (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	unsigned long flags;
+	u16 val;
+
+	spin_lock_irqsave(&mdio_lock, flags);
+	val = mdio_cmd(dev, phy_id, location, 0, 0);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+	return val;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+		       int val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdio_lock, flags);
+	mdio_cmd(dev, phy_id, location, 1, val);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+}
+
+static void phy_reset(struct net_device *dev, int phy_id)
+{
+	struct port *port = netdev_priv(dev);
+	int cycles = 0;
+
+	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+
+	while (cycles < MAX_MII_RESET_RETRIES) {
+		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
+#if DEBUG_MDIO
+			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
+			       dev->name, cycles);
+#endif
+			return;
+		}
+		udelay(1);
+		cycles++;
+	}
+
+	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+}
+
+static void eth_set_duplex(struct port *port)
+{
+	if (port->mii.full_duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+}
+
+
+static void phy_check_media(struct port *port, int init)
+{
+	if (mii_check_media(&port->mii, 1, init))
+		eth_set_duplex(port);
+	if (port->mii.force_media) { /* mii_check_media() doesn't work */
+		struct net_device *dev = port->netdev;
+		int cur_link = mii_link_ok(&port->mii);
+		int prev_link = netif_carrier_ok(dev);
+
+		if (!prev_link && cur_link) {
+			printk(KERN_INFO "%s: link up\n", dev->name);
+			netif_carrier_on(dev);
+		} else if (prev_link && !cur_link) {
+			printk(KERN_INFO "%s: link down\n", dev->name);
+			netif_carrier_off(dev);
+		}
+	}
+}
+
+
+static void mdio_thread(struct work_struct *work)
+{
+	struct port *port = container_of(work, struct port, mdio_thread.work);
+
+	phy_check_media(port, 0);
+	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+}
+
+
+static inline void debug_pkt(struct net_device *dev, const char *func,
+			     u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+	int i;
+
+	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+	for (i = 0; i < len; i++) {
+		if (i >= DEBUG_PKT_BYTES)
+			break;
+		printk("%s%02X",
+		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
+		       data[i]);
+	}
+	printk("\n");
+#endif
+}
+
+
+static inline void debug_desc(u32 phys, struct desc *desc)
+{
+#if DEBUG_DESC
+	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
+	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
+	       phys, desc->next, desc->buf_len, desc->pkt_len,
+	       desc->data, desc->dest_id, desc->src_id, desc->flags,
+	       desc->qos, desc->padlen, desc->vlan_tci,
+	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
+	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
+	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
+	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
+#endif
+}
+
+static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
+{
+#if DEBUG_QUEUES
+	static struct {
+		int queue;
+		char *name;
+	} names[] = {
+		{ TX_QUEUE(0x10), "TX#0 " },
+		{ TX_QUEUE(0x20), "TX#1 " },
+		{ TX_QUEUE(0x00), "TX#2 " },
+		{ RXFREE_QUEUE(0x10), "RX-free#0 " },
+		{ RXFREE_QUEUE(0x20), "RX-free#1 " },
+		{ RXFREE_QUEUE(0x00), "RX-free#2 " },
+		{ TXDONE_QUEUE, "TX-done " },
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(names); i++)
+		if (names[i].queue == queue)
+			break;
+
+	printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
+	       i < ARRAY_SIZE(names) ? names[i].name : "",
+	       is_get ? "->" : "<-", phys);
+#endif
+}
+
+static inline u32 queue_get_entry(unsigned int queue)
+{
+	u32 phys = qmgr_get_entry(queue);
+	debug_queue(queue, 1, phys);
+	return phys;
+}
+
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+				 int is_tx)
+{
+	u32 phys, tab_phys, n_desc;
+	struct desc *tab;
+
+	if (!(phys = queue_get_entry(queue)))
+		return -1;
+
+	phys &= ~0x1F; /* mask out non-address bits */
+	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
+	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
+	n_desc = (phys - tab_phys) / sizeof(struct desc);
+	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
+	debug_desc(phys, &tab[n_desc]);
+	BUG_ON(tab[n_desc].next);
+	return n_desc;
+}
+
+static inline void queue_put_desc(unsigned int queue, u32 phys,
+				  struct desc *desc)
+{
+	debug_queue(queue, 0, phys);
+	debug_desc(phys, desc);
+	BUG_ON(phys & 0x1F);
+	qmgr_put_entry(queue, phys);
+	BUG_ON(qmgr_stat_overflow(queue));
+}
+
+
+static inline void dma_unmap_tx(struct port *port, struct desc *desc)
+{
+#ifdef __ARMEB__
+	dma_unmap_single(&port->netdev->dev, desc->data,
+			 desc->buf_len, DMA_TO_DEVICE);
+#else
+	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+			 ALIGN((desc->data & 3) + desc->buf_len, 4),
+			 DMA_TO_DEVICE);
+#endif
+}
+
+
+static void eth_rx_irq(void *pdev)
+{
+	struct net_device *dev = pdev;
+	struct port *port = netdev_priv(dev);
+
+#if DEBUG_RX
+	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
+#endif
+	qmgr_disable_irq(port->plat->rxq);
+	netif_rx_schedule(dev, &port->napi);
+}
+
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+	struct port *port = container_of(napi, struct port, napi);
+	struct net_device *dev = port->netdev;
+	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
+	int received = 0;
+
+#if DEBUG_RX
+	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+#endif
+
+	while (received < budget) {
+		struct sk_buff *skb;
+		struct desc *desc;
+		int n;
+#ifdef __ARMEB__
+		struct sk_buff *temp;
+		u32 phys;
+#endif
+
+		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+			received = 0; /* No packet received */
+#if DEBUG_RX
+			printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
+			       dev->name);
+#endif
+			netif_rx_complete(dev, napi);
+			qmgr_enable_irq(rxq);
+			if (!qmgr_stat_empty(rxq) &&
+			    netif_rx_reschedule(dev, napi)) {
+#if DEBUG_RX
+				printk(KERN_DEBUG "%s: eth_poll"
+				       " netif_rx_reschedule succeeded\n",
+				       dev->name);
+#endif
+				qmgr_disable_irq(rxq);
+				continue;
+			}
+#if DEBUG_RX
+			printk(KERN_DEBUG "%s: eth_poll all done\n",
+			       dev->name);
+#endif
+			return 0; /* all work done */
+		}
+
+		desc = rx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
+			phys = dma_map_single(&dev->dev, skb->data,
+					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
+			if (dma_mapping_error(phys)) {
+				dev_kfree_skb(skb);
+				skb = NULL;
+			}
+		}
+#else
+		skb = netdev_alloc_skb(dev,
+				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
+#endif
+
+		if (!skb) {
+			port->stat.rx_dropped++;
+			/* put the desc back on RX-ready queue */
+			desc->buf_len = MAX_MRU;
+			desc->pkt_len = 0;
+			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+			continue;
+		}
+
+		/* process received frame */
+#ifdef __ARMEB__
+		temp = skb;
+		skb = port->rx_buff_tab[n];
+		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
+				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
+#else
+		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
+				RX_BUFF_SIZE, DMA_FROM_DEVICE);
+		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
+			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
+#endif
+		skb_reserve(skb, NET_IP_ALIGN);
+		skb_put(skb, desc->pkt_len);
+
+		debug_pkt(dev, "eth_poll", skb->data, skb->len);
+
+		skb->protocol = eth_type_trans(skb, dev);
+		dev->last_rx = jiffies;
+		port->stat.rx_packets++;
+		port->stat.rx_bytes += skb->len;
+		netif_receive_skb(skb);
+
+		/* put the new buffer on RX-free queue */
+#ifdef __ARMEB__
+		port->rx_buff_tab[n] = temp;
+		desc->data = phys + NET_IP_ALIGN;
+#endif
+		desc->buf_len = MAX_MRU;
+		desc->pkt_len = 0;
+		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+		received++;
+	}
+
+#if DEBUG_RX
+	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+#endif
+	return received; /* not all work done */
+}
+
+
+static void eth_txdone_irq(void *unused)
+{
+	u32 phys;
+
+#if DEBUG_TX
+	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
+#endif
+	while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+		u32 npe_id, n_desc;
+		struct port *port;
+		struct desc *desc;
+		int start;
+
+		npe_id = phys & 3;
+		BUG_ON(npe_id >= MAX_NPES);
+		port = npe_port_tab[npe_id];
+		BUG_ON(!port);
+		phys &= ~0x1F; /* mask out non-address bits */
+		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
+		BUG_ON(n_desc >= TX_DESCS);
+		desc = tx_desc_ptr(port, n_desc);
+		debug_desc(phys, desc);
+
+		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
+			port->stat.tx_packets++;
+			port->stat.tx_bytes += desc->pkt_len;
+
+			dma_unmap_tx(port, desc);
+#if DEBUG_TX
+			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
+			       port->netdev->name, port->tx_buff_tab[n_desc]);
+#endif
+			free_buffer_irq(port->tx_buff_tab[n_desc]);
+			port->tx_buff_tab[n_desc] = NULL;
+		}
+
+		start = qmgr_stat_empty(port->plat->txreadyq);
+		queue_put_desc(port->plat->txreadyq, phys, desc);
+		if (start) {
+#if DEBUG_TX
+			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
+			       port->netdev->name);
+#endif
+			netif_wake_queue(port->netdev);
+		}
+	}
+}
686 +
687 +static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
688 +{
689 + struct port *port = netdev_priv(dev);
690 + unsigned int txreadyq = port->plat->txreadyq;
691 + int len, offset, bytes, n;
692 + void *mem;
693 + u32 phys;
694 + struct desc *desc;
695 +
696 +#if DEBUG_TX
697 + printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
698 +#endif
699 +
700 + if (unlikely(skb->len > MAX_MRU)) {
701 + dev_kfree_skb(skb);
702 + port->stat.tx_errors++;
703 + return NETDEV_TX_OK;
704 + }
705 +
706 + debug_pkt(dev, "eth_xmit", skb->data, skb->len);
707 +
708 + len = skb->len;
709 +#ifdef __ARMEB__
710 + offset = 0; /* no need to keep alignment */
711 + bytes = len;
712 + mem = skb->data;
713 +#else
714 + offset = (int)skb->data & 3; /* keep 32-bit alignment */
715 + bytes = ALIGN(offset + len, 4);
716 + if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
717 + dev_kfree_skb(skb);
718 + port->stat.tx_dropped++;
719 + return NETDEV_TX_OK;
720 + }
721 + memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
722 + dev_kfree_skb(skb);
723 +#endif
724 +
725 + phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
726 + if (dma_mapping_error(phys)) {
727 +#ifdef __ARMEB__
728 + dev_kfree_skb(skb);
729 +#else
730 + kfree(mem);
731 +#endif
732 + port->stat.tx_dropped++;
733 + return NETDEV_TX_OK;
734 + }
735 +
736 + n = queue_get_desc(txreadyq, port, 1);
737 + BUG_ON(n < 0);
738 + desc = tx_desc_ptr(port, n);
739 +
740 +#ifdef __ARMEB__
741 + port->tx_buff_tab[n] = skb;
742 +#else
743 + port->tx_buff_tab[n] = mem;
744 +#endif
745 + desc->data = phys + offset;
746 + desc->buf_len = desc->pkt_len = len;
747 +
748 + /* NPE firmware pads short frames with zeros internally */
749 + wmb();
750 + queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
751 + dev->trans_start = jiffies;
752 +
753 + if (qmgr_stat_empty(txreadyq)) {
754 +#if DEBUG_TX
755 + printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
756 +#endif
757 + netif_stop_queue(dev);
758 + /* we could miss TX ready interrupt */
759 + if (!qmgr_stat_empty(txreadyq)) {
760 +#if DEBUG_TX
761 + printk(KERN_DEBUG "%s: eth_xmit ready again\n",
762 + dev->name);
763 +#endif
764 + netif_wake_queue(dev);
765 + }
766 + }
767 +
768 +#if DEBUG_TX
769 + printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
770 +#endif
771 + return NETDEV_TX_OK;
772 +}
773 +
774 +
775 +static struct net_device_stats *eth_stats(struct net_device *dev)
776 +{
777 + struct port *port = netdev_priv(dev);
778 + return &port->stat;
779 +}
+
+static void eth_set_mcast_list(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct dev_mc_list *mclist = dev->mc_list;
+	u8 diffs[ETH_ALEN], *addr;
+	int cnt = dev->mc_count, i;
+
+	if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
+		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
+			     &port->regs->rx_control[0]);
+		return;
+	}
+
+	memset(diffs, 0, ETH_ALEN);
+	addr = mclist->dmi_addr; /* first MAC address */
+
+	while (--cnt && (mclist = mclist->next))
+		for (i = 0; i < ETH_ALEN; i++)
+			diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
+		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
+	}
+
+	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+		     &port->regs->rx_control[0]);
+}
+
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct port *port = netdev_priv(dev);
+	unsigned int duplex_chg;
+	int err;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
+	if (duplex_chg)
+		eth_set_duplex(port);
+	return err;
+}
+
+
+static int request_queues(struct port *port)
+{
+	int err;
+
+	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+	if (err)
+		return err;
+
+	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+	if (err)
+		goto rel_rxfree;
+
+	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+	if (err)
+		goto rel_rx;
+
+	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+	if (err)
+		goto rel_tx;
+
+	/* TX-done queue handles skbs sent out by the NPEs */
+	if (!ports_open) {
+		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+		if (err)
+			goto rel_txready;
+	}
+	return 0;
+
+rel_txready:
+	qmgr_release_queue(port->plat->txreadyq);
+rel_tx:
+	qmgr_release_queue(TX_QUEUE(port->id));
+rel_rx:
+	qmgr_release_queue(port->plat->rxq);
+rel_rxfree:
+	qmgr_release_queue(RXFREE_QUEUE(port->id));
+	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
+	       port->netdev->name);
+	return err;
+}
+
+static void release_queues(struct port *port)
+{
+	qmgr_release_queue(RXFREE_QUEUE(port->id));
+	qmgr_release_queue(port->plat->rxq);
+	qmgr_release_queue(TX_QUEUE(port->id));
+	qmgr_release_queue(port->plat->txreadyq);
+
+	if (!ports_open)
+		qmgr_release_queue(TXDONE_QUEUE);
+}
+
+static int init_queues(struct port *port)
+{
+	int i;
+
+	if (!ports_open)
+		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+						 POOL_ALLOC_SIZE, 32, 0)))
+			return -ENOMEM;
+
+	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+					      &port->desc_tab_phys)))
+		return -ENOMEM;
+	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct desc *desc = rx_desc_ptr(port, i);
+		buffer_t *buff; /* skb or kmalloc()ated memory */
+		void *data;
+#ifdef __ARMEB__
+		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
+			return -ENOMEM;
+		data = buff->data;
+#else
+		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
+			return -ENOMEM;
+		data = buff;
+#endif
+		desc->buf_len = MAX_MRU;
+		desc->data = dma_map_single(&port->netdev->dev, data,
+					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(desc->data)) {
+			free_buffer(buff);
+			return -EIO;
+		}
+		desc->data += NET_IP_ALIGN;
+		port->rx_buff_tab[i] = buff;
+	}
+
+	return 0;
+}
+
+static void destroy_queues(struct port *port)
+{
+	int i;
+
+	if (port->desc_tab) {
+		for (i = 0; i < RX_DESCS; i++) {
+			struct desc *desc = rx_desc_ptr(port, i);
+			buffer_t *buff = port->rx_buff_tab[i];
+			if (buff) {
+				dma_unmap_single(&port->netdev->dev,
+						 desc->data - NET_IP_ALIGN,
+						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
+				free_buffer(buff);
+			}
+		}
+		for (i = 0; i < TX_DESCS; i++) {
+			struct desc *desc = tx_desc_ptr(port, i);
+			buffer_t *buff = port->tx_buff_tab[i];
+			if (buff) {
+				dma_unmap_tx(port, desc);
+				free_buffer(buff);
+			}
+		}
+		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
+		port->desc_tab = NULL;
+	}
+
+	if (!ports_open && dma_pool) {
+		dma_pool_destroy(dma_pool);
+		dma_pool = NULL;
+	}
+}
+
+static int eth_open(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct npe *npe = port->npe;
+	struct msg msg;
+	int i, err;
+
+	if (!npe_running(npe)) {
+		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
+		if (err)
+			return err;
+
+		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
+			printk(KERN_ERR "%s: %s not responding\n", dev->name,
+			       npe_name(npe));
+			return -EIO;
+		}
+	}
+
+	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
+	msg.eth_id = port->id;
+	msg.byte5 = port->plat->rxq | 0x80;
+	msg.byte7 = port->plat->rxq << 4;
+	for (i = 0; i < 8; i++) {
+		msg.byte3 = i;
+		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
+			return -EIO;
+	}
+
+	msg.cmd = NPE_EDB_SETPORTADDRESS;
+	msg.eth_id = PHYSICAL_ID(port->id);
+	msg.byte2 = dev->dev_addr[0];
+	msg.byte3 = dev->dev_addr[1];
+	msg.byte4 = dev->dev_addr[2];
+	msg.byte5 = dev->dev_addr[3];
+	msg.byte6 = dev->dev_addr[4];
+	msg.byte7 = dev->dev_addr[5];
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
+		return -EIO;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_FW_SETFIREWALLMODE;
+	msg.eth_id = port->id;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
+		return -EIO;
+
+	if ((err = request_queues(port)) != 0)
+		return err;
+
+	if ((err = init_queues(port)) != 0) {
+		destroy_queues(port);
+		release_queues(port);
+		return err;
+	}
+
+	for (i = 0; i < ETH_ALEN; i++)
+		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
+	__raw_writel(0x08, &port->regs->random_seed);
+	__raw_writel(0x12, &port->regs->partial_empty_threshold);
+	__raw_writel(0x30, &port->regs->partial_full_threshold);
+	__raw_writel(0x08, &port->regs->tx_start_bytes);
+	__raw_writel(0x15, &port->regs->tx_deferral);
+	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
+	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
+	__raw_writel(0x80, &port->regs->slot_time);
+	__raw_writel(0x01, &port->regs->int_clock_threshold);
+
+	/* Populate queues with buffers, no failure after this point */
+	for (i = 0; i < TX_DESCS; i++)
+		queue_put_desc(port->plat->txreadyq,
+			       tx_desc_phys(port, i), tx_desc_ptr(port, i));
+
+	for (i = 0; i < RX_DESCS; i++)
+		queue_put_desc(RXFREE_QUEUE(port->id),
+			       rx_desc_phys(port, i), rx_desc_ptr(port, i));
+
+	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
+	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
+	__raw_writel(0, &port->regs->rx_control[1]);
+	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
+
+	napi_enable(&port->napi);
+	phy_check_media(port, 1);
+	eth_set_mcast_list(dev);
+	netif_start_queue(dev);
+	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+
+	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
+		     eth_rx_irq, dev);
+	if (!ports_open) {
+		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
+			     eth_txdone_irq, NULL);
+		qmgr_enable_irq(TXDONE_QUEUE);
+	}
+	ports_open++;
+	/* we may already have RX data, enables IRQ */
+	netif_rx_schedule(dev, &port->napi);
+	return 0;
+}
+
+static int eth_close(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct msg msg;
+	int buffs = RX_DESCS; /* allocated RX buffers */
+	int i;
+
+	ports_open--;
+	qmgr_disable_irq(port->plat->rxq);
+	napi_disable(&port->napi);
+	netif_stop_queue(dev);
+
+	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
+		buffs--;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_SETLOOPBACK_MODE;
+	msg.eth_id = port->id;
+	msg.byte3 = 1;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
+		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+
+	i = 0;
+	do {			/* drain RX buffers */
+		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
+			/* we have to inject some packet */
+			struct desc *desc;
+			u32 phys;
+			int n = queue_get_desc(port->plat->txreadyq, port, 1);
+			BUG_ON(n < 0);
+			desc = tx_desc_ptr(port, n);
+			phys = tx_desc_phys(port, n);
+			desc->buf_len = desc->pkt_len = 1;
+			wmb();
+			queue_put_desc(TX_QUEUE(port->id), phys, desc);
+		}
+		udelay(1);
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
+		       " left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+	if (!buffs)
+		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+#endif
+
+	buffs = TX_DESCS;
+	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
+		buffs--; /* cancel TX */
+
+	i = 0;
+	do {
+		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
+		       "left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+	if (!buffs)
+		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+#endif
+
+	msg.byte3 = 0;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
+		printk(KERN_CRIT "%s: unable to disable loopback\n",
+		       dev->name);
+
+	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
+			 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
+	mdio_write(dev, port->plat->phy, MII_BMCR,
+		   port->mii_bmcr | BMCR_PDOWN);
+
+	if (!ports_open)
+		qmgr_disable_irq(TXDONE_QUEUE);
+	cancel_rearming_delayed_work(&port->mdio_thread);
+	destroy_queues(port);
+	release_queues(port);
+	return 0;
+}
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+	struct port *port;
+	struct net_device *dev;
+	struct eth_plat_info *plat = pdev->dev.platform_data;
+	u32 regs_phys;
+	int err;
+
+	if (!(dev = alloc_etherdev(sizeof(struct port))))
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	port = netdev_priv(dev);
+	port->netdev = dev;
+	port->id = pdev->id;
+
+	switch (port->id) {
+	case IXP4XX_ETH_NPEA:
+		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
+		regs_phys = IXP4XX_EthA_BASE_PHYS;
+		break;
+	case IXP4XX_ETH_NPEB:
+		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+		regs_phys = IXP4XX_EthB_BASE_PHYS;
+		break;
+	case IXP4XX_ETH_NPEC:
+		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+		regs_phys = IXP4XX_EthC_BASE_PHYS;
+		break;
+	default:
+		err = -ENOSYS;
+		goto err_free;
+	}
+
+	dev->open = eth_open;
+	dev->hard_start_xmit = eth_xmit;
+	dev->stop = eth_close;
+	dev->get_stats = eth_stats;
+	dev->do_ioctl = eth_ioctl;
+	dev->set_multicast_list = eth_set_mcast_list;
+	dev->tx_queue_len = 100;
+
+	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+
+	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
+		err = -EIO;
+		goto err_free;
+	}
+
+	if (register_netdev(dev)) {
+		err = -EIO;
+		goto err_npe_rel;
+	}
+
+	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+	if (!port->mem_res) {
+		err = -EBUSY;
+		goto err_unreg;
+	}
+
+	port->plat = plat;
+	npe_port_tab[NPE_ID(port->id)] = port;
+	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+
+	platform_set_drvdata(pdev, dev);
+
+	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
+		     &port->regs->core_control);
+	udelay(50);
+	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
+	udelay(50);
+
+	port->mii.dev = dev;
+	port->mii.mdio_read = mdio_read;
+	port->mii.mdio_write = mdio_write;
+	port->mii.phy_id = plat->phy;
+	port->mii.phy_id_mask = 0x1F;
+	port->mii.reg_num_mask = 0x1F;
+
+	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
+	       npe_name(port->npe));
+
+	phy_reset(dev, plat->phy);
+	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
+			 ~(BMCR_RESET | BMCR_PDOWN);
+	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
+
+	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
+	return 0;
+
+err_unreg:
+	unregister_netdev(dev);
+err_npe_rel:
+	npe_release(port->npe);
+err_free:
+	free_netdev(dev);
+	return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct port *port = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	npe_port_tab[NPE_ID(port->id)] = NULL;
+	platform_set_drvdata(pdev, NULL);
+	npe_release(port->npe);
+	release_resource(port->mem_res);
+	free_netdev(dev);
+	return 0;
+}
+
+static struct platform_driver drv = {
+	.driver.name	= DRV_NAME,
+	.probe		= eth_init_one,
+	.remove		= eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+		return -ENOSYS;
+
+	/* All MII PHY accesses use NPE-B Ethernet registers */
+	spin_lock_init(&mdio_lock);
+	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	return platform_driver_register(&drv);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+	platform_driver_unregister(&drv);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_eth");
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
--- a/arch/arm/mach-ixp4xx/ixp4xx_npe.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_npe.c
@@ -448,7 +448,9 @@
 		return -ETIMEDOUT;
 	}
 
+#if DEBUG_MSG > 1
 	debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
 	return 0;
 }
 
@@ -484,7 +486,9 @@
 		return -ETIMEDOUT;
 	}
 
+#if DEBUG_MSG > 1
 	debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
 	return 0;
 }
 
--- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -184,6 +184,8 @@
 	case 3: mask[0] = 0xFF; break;
 	}
 
+	mask[1] = mask[2] = mask[3] = 0;
+
 	while (addr--)
 		shift_mask(mask);
 