e1ffe06cc701a03091903529bca110fbda537ee8
[openwrt/openwrt.git] / target / linux / lantiq / patches-3.8 / 0025-NET-MIPS-lantiq-adds-xrx200-net.patch
1 From fbfdf78ba827a8f854ae3ed7b11ea6df4054ffb1 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Mon, 22 Oct 2012 12:22:23 +0200
4 Subject: [PATCH 25/40] NET: MIPS: lantiq: adds xrx200-net
5
6 ---
7 drivers/net/ethernet/Kconfig | 8 +-
8 drivers/net/ethernet/Makefile | 1 +
9 drivers/net/ethernet/lantiq_pce.h | 163 +++++
10 drivers/net/ethernet/lantiq_xrx200.c | 1203 ++++++++++++++++++++++++++++++++++
11 4 files changed, 1374 insertions(+), 1 deletion(-)
12 create mode 100644 drivers/net/ethernet/lantiq_pce.h
13 create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
14
15 --- a/drivers/net/ethernet/Kconfig
16 +++ b/drivers/net/ethernet/Kconfig
17 @@ -83,7 +83,13 @@ config LANTIQ_ETOP
18 tristate "Lantiq SoC ETOP driver"
19 depends on SOC_TYPE_XWAY
20 ---help---
21 - Support for the MII0 inside the Lantiq SoC
22 + Support for the MII0 inside the Lantiq ADSL SoC
23 +
24 +config LANTIQ_XRX200
25 + tristate "Lantiq SoC XRX200 driver"
26 + depends on SOC_TYPE_XWAY
27 + ---help---
28 + Support for the MII0 inside the Lantiq VDSL SoC
29
30 source "drivers/net/ethernet/marvell/Kconfig"
31 source "drivers/net/ethernet/mellanox/Kconfig"
32 --- a/drivers/net/ethernet/Makefile
33 +++ b/drivers/net/ethernet/Makefile
34 @@ -36,6 +36,7 @@ obj-$(CONFIG_IP1000) += icplus/
35 obj-$(CONFIG_JME) += jme.o
36 obj-$(CONFIG_KORINA) += korina.o
37 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
38 +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
39 obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
40 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
41 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
42 --- /dev/null
43 +++ b/drivers/net/ethernet/lantiq_pce.h
44 @@ -0,0 +1,163 @@
45 +/*
46 + * This program is free software; you can redistribute it and/or modify it
47 + * under the terms of the GNU General Public License version 2 as published
48 + * by the Free Software Foundation.
49 + *
50 + * This program is distributed in the hope that it will be useful,
51 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
52 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
53 + * GNU General Public License for more details.
54 + *
55 + * You should have received a copy of the GNU General Public License
56 + * along with this program; if not, write to the Free Software
57 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
58 + *
59 + * Copyright (C) 2010 Lantiq Deutschland GmbH
60 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
61 + *
62 + * PCE microcode extracted from UGW5.2 switch api
63 + */
64 +
65 +/* Switch API Micro Code V0.3 */
66 +enum {
67 + OUT_MAC0 = 0,
68 + OUT_MAC1,
69 + OUT_MAC2,
70 + OUT_MAC3,
71 + OUT_MAC4,
72 + OUT_MAC5,
73 + OUT_ETHTYP,
74 + OUT_VTAG0,
75 + OUT_VTAG1,
76 + OUT_ITAG0,
77 + OUT_ITAG1, /*10 */
78 + OUT_ITAG2,
79 + OUT_ITAG3,
80 + OUT_IP0,
81 + OUT_IP1,
82 + OUT_IP2,
83 + OUT_IP3,
84 + OUT_SIP0,
85 + OUT_SIP1,
86 + OUT_SIP2,
87 + OUT_SIP3, /*20*/
88 + OUT_SIP4,
89 + OUT_SIP5,
90 + OUT_SIP6,
91 + OUT_SIP7,
92 + OUT_DIP0,
93 + OUT_DIP1,
94 + OUT_DIP2,
95 + OUT_DIP3,
96 + OUT_DIP4,
97 + OUT_DIP5, /*30*/
98 + OUT_DIP6,
99 + OUT_DIP7,
100 + OUT_SESID,
101 + OUT_PROT,
102 + OUT_APP0,
103 + OUT_APP1,
104 + OUT_IGMP0,
105 + OUT_IGMP1,
106 + OUT_IPOFF, /*39*/
107 + OUT_NONE = 63
108 +};
109 +
110 +/* parser's microcode length type */
111 +#define INSTR 0
112 +#define IPV6 1
113 +#define LENACCU 2
114 +
115 +/* parser's microcode flag type */
116 +enum {
117 + FLAG_ITAG = 0,
118 + FLAG_VLAN,
119 + FLAG_SNAP,
120 + FLAG_PPPOE,
121 + FLAG_IPV6,
122 + FLAG_IPV6FL,
123 + FLAG_IPV4,
124 + FLAG_IGMP,
125 + FLAG_TU,
126 + FLAG_HOP,
127 + FLAG_NN1, /*10 */
128 + FLAG_NN2,
129 + FLAG_END,
130 + FLAG_NO, /*13*/
131 +};
132 +
133 +/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
134 +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
135 + { {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
136 +struct pce_microcode {
137 + unsigned short val[4];
138 +/* unsigned short val_2;
139 + unsigned short val_1;
140 + unsigned short val_0;*/
141 +} pce_microcode[] = {
142 + /* value mask ns fields L type flags ipv4_len */
143 + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
144 + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
145 + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
146 + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
147 + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
148 + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
149 + MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
150 + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
151 + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
152 + MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
153 + MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
154 + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
155 + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
156 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
157 + MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
158 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
159 + MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
160 + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
161 + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
162 + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
163 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
164 + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
165 + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
166 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
167 + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
168 + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
169 + MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
170 + MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
171 + MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
172 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
173 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
174 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
175 + MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
176 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
177 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
178 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
179 + MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT, 1, IPV6, FLAG_NO, 0),
180 + MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
181 + MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
182 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
183 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
184 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
185 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
186 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
187 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
188 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
189 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
190 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
191 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
192 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
193 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
194 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
195 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
196 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
197 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
198 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
199 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
200 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
201 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
202 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
203 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
204 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
205 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
206 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
207 +};
208 --- /dev/null
209 +++ b/drivers/net/ethernet/lantiq_xrx200.c
210 @@ -0,0 +1,1203 @@
211 +/*
212 + * This program is free software; you can redistribute it and/or modify it
213 + * under the terms of the GNU General Public License version 2 as published
214 + * by the Free Software Foundation.
215 + *
216 + * This program is distributed in the hope that it will be useful,
217 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
218 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
219 + * GNU General Public License for more details.
220 + *
221 + * You should have received a copy of the GNU General Public License
222 + * along with this program; if not, write to the Free Software
223 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
224 + *
225 + * Copyright (C) 2010 Lantiq Deutschland
226 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
227 + */
228 +
229 +#include <linux/etherdevice.h>
230 +#include <linux/module.h>
231 +#include <linux/platform_device.h>
232 +#include <linux/interrupt.h>
233 +#include <linux/clk.h>
234 +#include <asm/delay.h>
235 +
236 +#include <linux/of_net.h>
237 +#include <linux/of_mdio.h>
238 +#include <linux/of_gpio.h>
239 +
240 +#include <xway_dma.h>
241 +#include <lantiq_soc.h>
242 +
243 +#include "lantiq_pce.h"
244 +
245 +#define SW_POLLING
246 +#define SW_ROUTING
247 +#define SW_PORTMAP
248 +
249 +#ifdef SW_ROUTING
250 + #ifdef SW_PORTMAP
251 +#define XRX200_MAX_DEV 2
252 + #else
253 +#define XRX200_MAX_DEV 2
254 + #endif
255 +#else
256 +#define XRX200_MAX_DEV 1
257 +#endif
258 +
259 +#define XRX200_MAX_PORT 7
260 +#define XRX200_MAX_DMA 8
261 +
262 +#define XRX200_HEADROOM 4
263 +
264 +#define XRX200_TX_TIMEOUT (10 * HZ)
265 +
266 +/* port type */
267 +#define XRX200_PORT_TYPE_PHY 1
268 +#define XRX200_PORT_TYPE_MAC 2
269 +
270 +/* DMA */
271 +#define XRX200_DMA_CRC_LEN 0x4
272 +#define XRX200_DMA_DATA_LEN 0x600
273 +#define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
274 +#define XRX200_DMA_RX 0
275 +#define XRX200_DMA_TX 1
276 +
277 +/* fetch / store dma */
278 +#define FDMA_PCTRL0 0x2A00
279 +#define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
280 +#define SDMA_PCTRL0 0x2F00
281 +#define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
282 +
283 +/* buffer management */
284 +#define BM_PCFG0 0x200
285 +#define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
286 +
287 +/* MDIO */
288 +#define MDIO_GLOB 0x0000
289 +#define MDIO_CTRL 0x0020
290 +#define MDIO_READ 0x0024
291 +#define MDIO_WRITE 0x0028
292 +#define MDIO_PHY0 0x0054
293 +#define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
294 +#define MDIO_CLK_CFG0 0x002C
295 +#define MDIO_CLK_CFG1 0x0030
296 +
297 +#define MDIO_GLOB_ENABLE 0x8000
298 +#define MDIO_BUSY BIT(12)
299 +#define MDIO_RD BIT(11)
300 +#define MDIO_WR BIT(10)
301 +#define MDIO_MASK 0x1f
302 +#define MDIO_ADDRSHIFT 5
303 +#define MDIO1_25MHZ 9
304 +
305 +#define MDIO_PHY_LINK_DOWN 0x4000
306 +#define MDIO_PHY_LINK_UP 0x2000
307 +
308 +#define MDIO_PHY_SPEED_M10 0x0000
309 +#define MDIO_PHY_SPEED_M100 0x0800
310 +#define MDIO_PHY_SPEED_G1 0x1000
311 +
312 +#define MDIO_PHY_FDUP_EN 0x0600
313 +#define MDIO_PHY_FDUP_DIS 0x0200
314 +
315 +#define MDIO_PHY_LINK_MASK 0x6000
316 +#define MDIO_PHY_SPEED_MASK 0x1800
317 +#define MDIO_PHY_FDUP_MASK 0x0600
318 +#define MDIO_PHY_ADDR_MASK 0x001f
319 +#define MDIO_UPDATE_MASK (MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
320 +			  MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK) /* parenthesized: safe as a macro arg */
321 +
322 +/* MII */
323 +#define MII_CFG(p) (p * 8)
324 +
325 +#define MII_CFG_EN BIT(14)
326 +
327 +#define MII_CFG_MODE_MIIP 0x0
328 +#define MII_CFG_MODE_MIIM 0x1
329 +#define MII_CFG_MODE_RMIIP 0x2
330 +#define MII_CFG_MODE_RMIIM 0x3
331 +#define MII_CFG_MODE_RGMII 0x4
332 +#define MII_CFG_MODE_MASK 0xf
333 +
334 +#define MII_CFG_RATE_M2P5 0x00
335 +#define MII_CFG_RATE_M25 0x10
336 +#define MII_CFG_RATE_M125 0x20
337 +#define MII_CFG_RATE_M50 0x30
338 +#define MII_CFG_RATE_AUTO 0x40
339 +#define MII_CFG_RATE_MASK 0x70
340 +
341 +/* cpu port mac */
342 +#define PMAC_HD_CTL 0x0000
343 +#define PMAC_RX_IPG 0x0024
344 +#define PMAC_EWAN 0x002c
345 +
346 +#define PMAC_IPG_MASK 0xf
347 +#define PMAC_HD_CTL_AS 0x0008
348 +#define PMAC_HD_CTL_AC 0x0004
349 +#define PMAC_HD_CTL_RXSH 0x0040
350 +#define PMAC_HD_CTL_AST 0x0080
351 +#define PMAC_HD_CTL_RST 0x0100
352 +
353 +/* PCE */
354 +#define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
355 +#define PCE_TBL_MASK 0x1120
356 +#define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
357 +#define PCE_TBL_ADDR 0x1138
358 +#define PCE_TBL_CTRL 0x113c
359 +#define PCE_PMAP1 0x114c
360 +#define PCE_PMAP2 0x1150
361 +#define PCE_PMAP3 0x1154
362 +#define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
363 +#define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
364 +
365 +#define PCE_TBL_BUSY BIT(15)
366 +#define PCE_TBL_CFG_ADDR_MASK 0x1f
367 +#define PCE_TBL_CFG_ADWR 0x20
368 +#define PCE_TBL_CFG_ADWR_MASK 0x60
369 +#define PCE_INGRESS BIT(11)
370 +
371 +/* MAC */
372 +#define MAC_FLEN_REG (0x2314)
373 +#define MAC_CTRL_REG(p, x) (0x240c + (((p * 0xc) + x) * 4))
374 +
375 +/* buffer management */
376 +#define BM_PCFG(p) (0x200 + (p * 8))
377 +
378 +/* special tag in TX path header */
379 +#define SPID_SHIFT 24
380 +#define DPID_SHIFT 16
381 +#define DPID_ENABLE 1
382 +#define SPID_CPU_PORT 2
383 +#define PORT_MAP_SEL BIT(15)
384 +#define PORT_MAP_EN BIT(14)
385 +#define PORT_MAP_SHIFT 1
386 +#define PORT_MAP_MASK 0x3f
387 +
388 +#define SPPID_MASK 0x7
389 +#define SPPID_SHIFT 4
390 +
391 +/* MII regs not yet in linux */
392 +#define MDIO_DEVAD_NONE (-1)
393 +#define ADVERTIZE_MPD (1 << 10)
394 +
395 +struct xrx200_port {
396 + u8 num;
397 + u8 phy_addr;
398 + u16 flags;
399 + phy_interface_t phy_if;
400 +
401 + int link;
402 + int gpio;
403 + enum of_gpio_flags gpio_flags;
404 +
405 + struct phy_device *phydev;
406 + struct device_node *phy_node;
407 +};
408 +
409 +struct xrx200_chan {
410 + int idx;
411 + int refcount;
412 + int tx_free;
413 +
414 + struct net_device dummy_dev;
415 + struct net_device *devs[XRX200_MAX_DEV];
416 +
417 + struct napi_struct napi;
418 + struct ltq_dma_channel dma;
419 + struct sk_buff *skb[LTQ_DESC_NUM];
420 +};
421 +
422 +struct xrx200_hw {
423 + struct clk *clk;
424 + struct mii_bus *mii_bus;
425 +
426 + struct xrx200_chan chan[XRX200_MAX_DMA];
427 +
428 + struct net_device *devs[XRX200_MAX_DEV];
429 + int num_devs;
430 +
431 + int port_map[XRX200_MAX_PORT];
432 + unsigned short wan_map;
433 +
434 + spinlock_t lock;
435 +};
436 +
437 +struct xrx200_priv {
438 + struct net_device_stats stats;
439 + int id;
440 +
441 + struct xrx200_port port[XRX200_MAX_PORT];
442 + int num_port;
443 + int wan;
444 + unsigned short port_map;
445 + const void *mac;
446 +
447 + struct xrx200_hw *hw;
448 +};
449 +
450 +static __iomem void *xrx200_switch_membase;
451 +static __iomem void *xrx200_mii_membase;
452 +static __iomem void *xrx200_mdio_membase;
453 +static __iomem void *xrx200_pmac_membase;
454 +
455 +#define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
456 +#define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
457 +#define ltq_switch_w32_mask(x, y, z) \
458 + ltq_w32_mask(x, y, xrx200_switch_membase + (z))
459 +
460 +#define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
461 +#define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
462 +#define ltq_mdio_w32_mask(x, y, z) \
463 + ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
464 +
465 +#define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
466 +#define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
467 +#define ltq_mii_w32_mask(x, y, z) \
468 + ltq_w32_mask(x, y, xrx200_mii_membase + (z))
469 +
470 +#define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
471 +#define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
472 +#define ltq_pmac_w32_mask(x, y, z) \
473 + ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
474 +
475 +static int xrx200_open(struct net_device *dev)
476 +{
477 + struct xrx200_priv *priv = netdev_priv(dev);
478 + unsigned long flags;
479 + int i;
480 +
481 + for (i = 0; i < XRX200_MAX_DMA; i++) {
482 + if (!priv->hw->chan[i].dma.irq)
483 + continue;
484 + spin_lock_irqsave(&priv->hw->lock, flags);
485 + if (!priv->hw->chan[i].refcount) {
486 + napi_enable(&priv->hw->chan[i].napi);
487 + ltq_dma_open(&priv->hw->chan[i].dma);
488 + }
489 + priv->hw->chan[i].refcount++;
490 + spin_unlock_irqrestore(&priv->hw->lock, flags);
491 + }
492 + for (i = 0; i < priv->num_port; i++)
493 + if (priv->port[i].phydev)
494 + phy_start(priv->port[i].phydev);
495 + netif_start_queue(dev);
496 +
497 + return 0;
498 +}
499 +
500 +static int xrx200_close(struct net_device *dev)
501 +{
502 +	struct xrx200_priv *priv = netdev_priv(dev);
503 +	unsigned long flags;
504 +	int i;
505 +
506 +	netif_stop_queue(dev);
507 +
508 +	for (i = 0; i < priv->num_port; i++)
509 +		if (priv->port[i].phydev)
510 +			phy_stop(priv->port[i].phydev);
511 +
512 +	for (i = 0; i < XRX200_MAX_DMA; i++) {
513 +		if (!priv->hw->chan[i].dma.irq)
514 +			continue;
515 +		spin_lock_irqsave(&priv->hw->lock, flags);
516 +		priv->hw->chan[i].refcount--;
517 +		if (!priv->hw->chan[i].refcount) {
518 +			napi_disable(&priv->hw->chan[i].napi);
519 +			ltq_dma_close(&priv->hw->chan[i].dma); /* fix: close this channel, not always RX */
520 +		}
521 +		spin_unlock_irqrestore(&priv->hw->lock, flags);
522 +	}
523 +
524 +	return 0;
525 +}
526 +
527 +static int xrx200_alloc_skb(struct xrx200_chan *ch)
528 +{
529 +#define DMA_PAD (NET_IP_ALIGN + NET_SKB_PAD)
530 + ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
531 + if (!ch->skb[ch->dma.desc])
532 + return -ENOMEM;
533 +
534 + skb_reserve(ch->skb[ch->dma.desc], NET_SKB_PAD);
535 + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
536 + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
537 + DMA_FROM_DEVICE);
538 + ch->dma.desc_base[ch->dma.desc].addr =
539 + CPHYSADDR(ch->skb[ch->dma.desc]->data);
540 + ch->dma.desc_base[ch->dma.desc].ctl =
541 + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
542 + XRX200_DMA_DATA_LEN;
543 + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
544 +
545 + return 0;
546 +}
547 +
548 +static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
549 +{
550 + struct net_device *dev = ch->devs[id];
551 + struct xrx200_priv *priv = netdev_priv(dev);
552 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
553 + struct sk_buff *skb = ch->skb[ch->dma.desc];
554 + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - XRX200_DMA_CRC_LEN;
555 + unsigned long flags;
556 +
557 + spin_lock_irqsave(&priv->hw->lock, flags);
558 + if (xrx200_alloc_skb(ch)) {
559 + netdev_err(dev,
560 + "failed to allocate new rx buffer, stopping DMA\n");
561 + ltq_dma_close(&ch->dma);
562 + }
563 +
564 + ch->dma.desc++;
565 + ch->dma.desc %= LTQ_DESC_NUM;
566 + spin_unlock_irqrestore(&priv->hw->lock, flags);
567 +
568 + skb_put(skb, len);
569 +#ifdef SW_ROUTING
570 + skb_pull(skb, 8);
571 +#endif
572 + skb->dev = dev;
573 + skb->protocol = eth_type_trans(skb, dev);
574 + netif_receive_skb(skb);
575 + priv->stats.rx_packets++;
576 + priv->stats.rx_bytes+=len;
577 +}
578 +
579 +static int xrx200_poll_rx(struct napi_struct *napi, int budget)
580 +{
581 + struct xrx200_chan *ch = container_of(napi,
582 + struct xrx200_chan, napi);
583 + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
584 + int rx = 0;
585 + int complete = 0;
586 + unsigned long flags;
587 +
588 + while ((rx < budget) && !complete) {
589 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
590 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
591 +#ifdef SW_ROUTING
592 + struct sk_buff *skb = ch->skb[ch->dma.desc];
593 + u32 *special_tag = (u32*)skb->data;
594 + int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
595 + xrx200_hw_receive(ch, priv->hw->port_map[port]);
596 +#else
597 + xrx200_hw_receive(ch, 0);
598 +#endif
599 + rx++;
600 + } else {
601 + complete = 1;
602 + }
603 + }
604 + if (complete || !rx) {
605 + napi_complete(&ch->napi);
606 + spin_lock_irqsave(&priv->hw->lock, flags);
607 + ltq_dma_ack_irq(&ch->dma);
608 + spin_unlock_irqrestore(&priv->hw->lock, flags);
609 + }
610 + return rx;
611 +}
612 +
613 +static int xrx200_poll_tx(struct napi_struct *napi, int budget)
614 +{
615 + struct xrx200_chan *ch =
616 + container_of(napi, struct xrx200_chan, napi);
617 + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
618 + unsigned long flags;
619 + int i;
620 +
621 + spin_lock_irqsave(&priv->hw->lock, flags);
622 + while ((ch->dma.desc_base[ch->tx_free].ctl &
623 + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
624 + dev_kfree_skb_any(ch->skb[ch->tx_free]);
625 + ch->skb[ch->tx_free] = NULL;
626 + memset(&ch->dma.desc_base[ch->tx_free], 0,
627 + sizeof(struct ltq_dma_desc));
628 + ch->tx_free++;
629 + ch->tx_free %= LTQ_DESC_NUM;
630 + }
631 + spin_unlock_irqrestore(&priv->hw->lock, flags);
632 +
633 + for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++) {
634 + struct netdev_queue *txq =
635 + netdev_get_tx_queue(ch->devs[i], 0);
636 + if (netif_tx_queue_stopped(txq))
637 + netif_tx_start_queue(txq);
638 + }
639 + napi_complete(&ch->napi);
640 + spin_lock_irqsave(&priv->hw->lock, flags);
641 + ltq_dma_ack_irq(&ch->dma);
642 + spin_unlock_irqrestore(&priv->hw->lock, flags);
643 +
644 + return 1;
645 +}
646 +
647 +static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
648 +{
649 + struct xrx200_priv *priv = netdev_priv(dev);
650 +
651 + return &priv->stats;
652 +}
653 +
654 +static void xrx200_tx_timeout(struct net_device *dev)
655 +{
656 + struct xrx200_priv *priv = netdev_priv(dev);
657 +
658 + printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
659 +
660 + priv->stats.tx_errors++;
661 + netif_wake_queue(dev);
662 +}
663 +
664 +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 +{
666 +	int queue = skb_get_queue_mapping(skb);
667 +	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
668 +	struct xrx200_priv *priv = netdev_priv(dev);
669 +	struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
670 +	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
671 +	unsigned long flags;
672 +	u32 byte_offset;
673 +	int len;
674 +#ifdef SW_ROUTING
675 +  #ifdef SW_PORTMAP
676 +	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | PORT_MAP_SEL | PORT_MAP_EN | DPID_ENABLE;
677 +  #else
678 +	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
679 +  #endif
680 +#endif
681 +
682 +	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
683 +
684 +	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
685 +		netdev_err(dev, "tx ring full\n");
686 +		netif_tx_stop_queue(txq);
687 +		return NETDEV_TX_BUSY;
688 +	}
689 +#ifdef SW_ROUTING
690 +  #ifdef SW_PORTMAP
691 +	special_tag |= priv->port_map << PORT_MAP_SHIFT;
692 +  #else
693 +	if(priv->id)
694 +		special_tag |= (1 << DPID_SHIFT);
695 +  #endif
696 +	if (skb_headroom(skb) < 4) {
697 +		struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
698 +		if (!tmp) { dev_kfree_skb_any(skb); priv->stats.tx_dropped++; return NETDEV_TX_OK; } /* fix: skb_realloc_headroom may fail */
699 +		dev_kfree_skb_any(skb); skb = tmp;
700 +	}
701 +	skb_push(skb, 4);
702 +	memcpy(skb->data, &special_tag, sizeof(u32));
703 +	len += 4;
704 +#endif
705 +
706 +	/* dma needs to start on a 16 byte aligned address */
707 +	byte_offset = CPHYSADDR(skb->data) % 16;
708 +	ch->skb[ch->dma.desc] = skb;
709 +
710 +	dev->trans_start = jiffies;
711 +
712 +	spin_lock_irqsave(&priv->hw->lock, flags);
713 +	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
714 +						DMA_TO_DEVICE)) - byte_offset;
715 +	wmb();
716 +	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
717 +		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
718 +	ch->dma.desc++;
719 +	ch->dma.desc %= LTQ_DESC_NUM;
720 +	spin_unlock_irqrestore(&priv->hw->lock, flags);
721 +
722 +	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
723 +		netif_tx_stop_queue(txq);
724 +
725 +	priv->stats.tx_packets++;
726 +	priv->stats.tx_bytes+=len;
727 +
728 +	return NETDEV_TX_OK;
729 +}
730 +
731 +static irqreturn_t xrx200_dma_irq(int irq, void *priv)
732 +{
733 + struct xrx200_hw *hw = priv;
734 + int ch = irq - XRX200_DMA_IRQ;
735 +
736 + napi_schedule(&hw->chan[ch].napi);
737 +
738 + return IRQ_HANDLED;
739 +}
740 +
741 +static int xrx200_dma_init(struct xrx200_hw *hw)
742 +{
743 + int i, err = 0;
744 +
745 + ltq_dma_init_port(DMA_PORT_ETOP);
746 +
747 + for (i = 0; i < 8 && !err; i++) {
748 + int irq = XRX200_DMA_IRQ + i;
749 + struct xrx200_chan *ch = &hw->chan[i];
750 +
751 + ch->idx = ch->dma.nr = i;
752 +
753 + if (i == XRX200_DMA_TX) {
754 + ltq_dma_alloc_tx(&ch->dma);
755 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
756 + } else if (i == XRX200_DMA_RX) {
757 + ltq_dma_alloc_rx(&ch->dma);
758 + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
759 + ch->dma.desc++)
760 + if (xrx200_alloc_skb(ch))
761 + err = -ENOMEM;
762 + ch->dma.desc = 0;
763 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
764 + } else
765 + continue;
766 +
767 + if (!err)
768 + ch->dma.irq = irq;
769 + }
770 +
771 + return err;
772 +}
773 +
774 +#ifdef SW_POLLING
775 +static void xrx200_gmac_update(struct xrx200_port *port)
776 +{
777 + u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
778 + u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
779 + u16 miirate = 0;
780 +
781 + switch (port->phydev->speed) {
782 + case SPEED_1000:
783 + phyaddr |= MDIO_PHY_SPEED_G1;
784 + miirate = MII_CFG_RATE_M125;
785 + break;
786 +
787 + case SPEED_100:
788 + phyaddr |= MDIO_PHY_SPEED_M100;
789 + switch (miimode) {
790 + case MII_CFG_MODE_RMIIM:
791 + case MII_CFG_MODE_RMIIP:
792 + miirate = MII_CFG_RATE_M50;
793 + break;
794 + default:
795 + miirate = MII_CFG_RATE_M25;
796 + break;
797 + }
798 + break;
799 +
800 + default:
801 + phyaddr |= MDIO_PHY_SPEED_M10;
802 + miirate = MII_CFG_RATE_M2P5;
803 + break;
804 + }
805 +
806 + if (port->phydev->link)
807 + phyaddr |= MDIO_PHY_LINK_UP;
808 + else
809 + phyaddr |= MDIO_PHY_LINK_DOWN;
810 +
811 + if (port->phydev->duplex == DUPLEX_FULL)
812 + phyaddr |= MDIO_PHY_FDUP_EN;
813 + else
814 + phyaddr |= MDIO_PHY_FDUP_DIS;
815 +
816 + ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
817 + ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
818 + udelay(1);
819 +}
820 +#else
821 +static void xrx200_gmac_update(struct xrx200_port *port)
822 +{
823 +
824 +}
825 +#endif
826 +
827 +static void xrx200_mdio_link(struct net_device *dev)
828 +{
829 + struct xrx200_priv *priv = netdev_priv(dev);
830 + int i;
831 +
832 + for (i = 0; i < priv->num_port; i++) {
833 + if (!priv->port[i].phydev)
834 + continue;
835 +
836 + if (priv->port[i].link != priv->port[i].phydev->link) {
837 + xrx200_gmac_update(&priv->port[i]);
838 + priv->port[i].link = priv->port[i].phydev->link;
839 + netdev_info(dev, "port %d %s link\n",
840 + priv->port[i].num,
841 + (priv->port[i].link)?("got"):("lost"));
842 + }
843 + }
844 +}
845 +
846 +static inline int xrx200_mdio_poll(struct mii_bus *bus)
847 +{
848 + unsigned cnt = 10000;
849 +
850 + while (likely(cnt--)) {
851 + unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
852 + if ((ctrl & MDIO_BUSY) == 0)
853 + return 0;
854 + }
855 +
856 + return 1;
857 +}
858 +
859 +static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
860 +{
861 + if (xrx200_mdio_poll(bus))
862 + return 1;
863 +
864 + ltq_mdio_w32(val, MDIO_WRITE);
865 + ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
866 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
867 + (reg & MDIO_MASK),
868 + MDIO_CTRL);
869 +
870 + return 0;
871 +}
872 +
873 +static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
874 +{
875 + if (xrx200_mdio_poll(bus))
876 + return -1;
877 +
878 + ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
879 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
880 + (reg & MDIO_MASK),
881 + MDIO_CTRL);
882 +
883 + if (xrx200_mdio_poll(bus))
884 + return -1;
885 +
886 + return ltq_mdio_r32(MDIO_READ);
887 +}
888 +
889 +static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
890 +{
891 + struct xrx200_priv *priv = netdev_priv(dev);
892 + struct phy_device *phydev = NULL;
893 + unsigned val;
894 +
895 + phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
896 +
897 + if (!phydev) {
898 + netdev_err(dev, "no PHY found\n");
899 + return -ENODEV;
900 + }
901 +
902 + phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
903 + 0, port->phy_if);
904 +
905 + if (IS_ERR(phydev)) {
906 + netdev_err(dev, "Could not attach to PHY\n");
907 + return PTR_ERR(phydev);
908 + }
909 +
910 + phydev->supported &= (SUPPORTED_10baseT_Half
911 + | SUPPORTED_10baseT_Full
912 + | SUPPORTED_100baseT_Half
913 + | SUPPORTED_100baseT_Full
914 + | SUPPORTED_1000baseT_Half
915 + | SUPPORTED_1000baseT_Full
916 + | SUPPORTED_Autoneg
917 + | SUPPORTED_MII
918 + | SUPPORTED_TP);
919 + phydev->advertising = phydev->supported;
920 + port->phydev = phydev;
921 +
922 + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
923 + dev->name, phydev->drv->name,
924 + dev_name(&phydev->dev), phydev->irq);
925 +
926 +#ifdef SW_POLLING
927 + phy_read_status(phydev);
928 +
929 + val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
930 + val |= ADVERTIZE_MPD;
931 + xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
932 + xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
933 +
934 + phy_start_aneg(phydev);
935 +#endif
936 + return 0;
937 +}
938 +
939 +static void xrx200_port_config(struct xrx200_priv *priv,
940 + const struct xrx200_port *port)
941 +{
942 + u16 miimode = 0;
943 +
944 + switch (port->num) {
945 + case 0: /* xMII0 */
946 + case 1: /* xMII1 */
947 + switch (port->phy_if) {
948 + case PHY_INTERFACE_MODE_MII:
949 + if (port->flags & XRX200_PORT_TYPE_PHY)
950 + /* MII MAC mode, connected to external PHY */
951 + miimode = MII_CFG_MODE_MIIM;
952 + else
953 + /* MII PHY mode, connected to external MAC */
954 + miimode = MII_CFG_MODE_MIIP;
955 + break;
956 + case PHY_INTERFACE_MODE_RMII:
957 + if (port->flags & XRX200_PORT_TYPE_PHY)
958 + /* RMII MAC mode, connected to external PHY */
959 + miimode = MII_CFG_MODE_RMIIM;
960 + else
961 + /* RMII PHY mode, connected to external MAC */
962 + miimode = MII_CFG_MODE_RMIIP;
963 + break;
964 + case PHY_INTERFACE_MODE_RGMII:
965 + /* RGMII MAC mode, connected to external PHY */
966 + miimode = MII_CFG_MODE_RGMII;
967 + break;
968 + default:
969 + break;
970 + }
971 + break;
972 + case 2: /* internal GPHY0 */
973 + case 3: /* internal GPHY0 */
974 + case 4: /* internal GPHY1 */
975 + switch (port->phy_if) {
976 + case PHY_INTERFACE_MODE_MII:
977 + case PHY_INTERFACE_MODE_GMII:
978 + /* MII MAC mode, connected to internal GPHY */
979 + miimode = MII_CFG_MODE_MIIM;
980 + break;
981 + default:
982 + break;
983 + }
984 + break;
985 + case 5: /* internal GPHY1 or xMII2 */
986 + switch (port->phy_if) {
987 + case PHY_INTERFACE_MODE_MII:
988 + /* MII MAC mode, connected to internal GPHY */
989 + miimode = MII_CFG_MODE_MIIM;
990 + break;
991 + case PHY_INTERFACE_MODE_RGMII:
992 + /* RGMII MAC mode, connected to external PHY */
993 + miimode = MII_CFG_MODE_RGMII;
994 + break;
995 + default:
996 + break;
997 + }
998 + break;
999 + default:
1000 + break;
1001 + }
1002 +
1003 + ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
1004 + MII_CFG(port->num));
1005 +}
1006 +
1007 +static int xrx200_init(struct net_device *dev)
1008 +{
1009 + struct xrx200_priv *priv = netdev_priv(dev);
1010 + struct sockaddr mac;
1011 + int err, i;
1012 +
1013 +#ifndef SW_POLLING
1014 + unsigned int reg = 0;
1015 +
1016 + /* enable auto polling */
1017 + for (i = 0; i < priv->num_port; i++)
1018 + reg |= BIT(priv->port[i].num);
1019 + ltq_mdio_w32(reg, MDIO_CLK_CFG0);
1020 + ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
1021 +#endif
1022 +
1023 + /* setup each port */
1024 + for (i = 0; i < priv->num_port; i++)
1025 + xrx200_port_config(priv, &priv->port[i]);
1026 +
1027 + memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
1028 + if (!is_valid_ether_addr(mac.sa_data)) {
1029 + pr_warn("net-xrx200: invalid MAC, using random\n");
1030 + eth_random_addr(mac.sa_data);
1031 + dev->addr_assign_type |= NET_ADDR_RANDOM;
1032 + }
1033 +
1034 + err = eth_mac_addr(dev, &mac);
1035 + if (err)
1036 + goto err_netdev;
1037 +
1038 + for (i = 0; i < priv->num_port; i++)
1039 + if (xrx200_mdio_probe(dev, &priv->port[i]))
1040 + pr_warn("xrx200-mdio: probing phy of port %d failed\n",
1041 + priv->port[i].num);
1042 +
1043 + return 0;
1044 +
1045 +err_netdev:
1046 + unregister_netdev(dev);
1047 + free_netdev(dev);
1048 + return err;
1049 +}
1050 +
1051 +static void xrx200_pci_microcode(void)
1052 +{
1053 + int i;
1054 +
1055 + ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
1056 + PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
1057 + ltq_switch_w32(0, PCE_TBL_MASK);
1058 +
1059 + for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
1060 + ltq_switch_w32(i, PCE_TBL_ADDR);
1061 + ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
1062 + ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
1063 + ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
1064 + ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
1065 +
1066 + // start the table access:
1067 + ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
1068 + while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
1069 + }
1070 +
1071 + /* tell the switch that the microcode is loaded */
1072 + ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
1073 +}
1074 +
1075 +static void xrx200_hw_init(struct xrx200_hw *hw)
1076 +{
1077 + int i;
1078 +
1079 + /* enable clock gate */
1080 + clk_enable(hw->clk);
1081 +
1082 + ltq_switch_w32(1, 0);
1083 + mdelay(100);
1084 + ltq_switch_w32(0, 0);
1085 + /*
1086 +	 * TODO: we should really disable all phys/miis here and explicitly
1087 +	 * enable them in the device specific init function
1088 + */
1089 +
1090 + /* disable port fetch/store dma */
1091 + for (i = 0; i < 7; i++ ) {
1092 + ltq_switch_w32(0, FDMA_PCTRLx(i));
1093 + ltq_switch_w32(0, SDMA_PCTRLx(i));
1094 + }
1095 +
1096 + /* enable Switch */
1097 + ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
1098 +
1099 + /* load the pce microcode */
1100 + xrx200_pci_microcode();
1101 +
1102 +	/* Default unknown Broadcast/Multicast/Unicast port maps */
1103 + ltq_switch_w32(0x7f, PCE_PMAP1);
1104 + ltq_switch_w32(0x7f, PCE_PMAP2);
1105 + ltq_switch_w32(0x7f, PCE_PMAP3);
1106 +
1107 + /* RMON Counter Enable for all physical ports */
1108 + for (i = 0; i < 7; i++)
1109 + ltq_switch_w32(0x1, BM_PCFG(i));
1110 +
1111 + /* disable auto polling */
1112 + ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
1113 +
1114 + /* enable port statistic counters */
1115 + for (i = 0; i < 7; i++)
1116 + ltq_switch_w32(0x1, BM_PCFGx(i));
1117 +
1118 + /* set IPG to 12 */
1119 + ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
1120 +
1121 +#ifdef SW_ROUTING
1122 + /* enable status header, enable CRC */
1123 + ltq_pmac_w32_mask(0,
1124 + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC,
1125 + PMAC_HD_CTL);
1126 +#else
1127 + /* disable status header, enable CRC */
1128 + ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
1129 + PMAC_HD_CTL_AC,
1130 + PMAC_HD_CTL);
1131 +#endif
1132 +
1133 + /* enable port fetch/store dma */
1134 + for (i = 0; i < 7; i++ ) {
1135 + ltq_switch_w32_mask(0, 0x01, FDMA_PCTRLx(i));
1136 + ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
1137 + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
1138 + }
1139 +
1140 + /* enable special tag insertion on cpu port */
1141 + ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
1142 + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
1143 + ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
1144 + ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
1145 +}
1146 +
1147 +static void xrx200_hw_cleanup(struct xrx200_hw *hw)
1148 +{
1149 + int i;
1150 +
1151 + /* disable the switch */
1152 + ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
1153 +
1154 + /* free the channels and IRQs */
1155 + for (i = 0; i < 2; i++) {
1156 + ltq_dma_free(&hw->chan[i].dma);
1157 + if (hw->chan[i].dma.irq)
1158 + free_irq(hw->chan[i].dma.irq, hw);
1159 + }
1160 +
1161 + /* free the allocated RX ring */
1162 + for (i = 0; i < LTQ_DESC_NUM; i++)
1163 + dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
1164 +
1165 + /* clear the mdio bus */
1166 + mdiobus_unregister(hw->mii_bus);
1167 + mdiobus_free(hw->mii_bus);
1168 +
1169 + /* release the clock */
1170 + clk_disable(hw->clk);
1171 + clk_put(hw->clk);
1172 +}
1173 +
1174 +static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
1175 +{
1176 + int i;
1177 + hw->mii_bus = mdiobus_alloc();
1178 + if (!hw->mii_bus)
1179 + return -ENOMEM;
1180 +
1181 + hw->mii_bus->read = xrx200_mdio_rd;
1182 + hw->mii_bus->write = xrx200_mdio_wr;
1183 + hw->mii_bus->name = "lantiq,xrx200-mdio";
1184 + snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1185 +
1186 + if (of_mdiobus_register(hw->mii_bus, np)) {
1187 + mdiobus_free(hw->mii_bus);
1188 + return -ENXIO;
1189 + }
1190 +
1191 + return 0;
1192 +}
1193 +
1194 +static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
1195 +{
1196 + const __be32 *addr, *id = of_get_property(port, "reg", NULL);
1197 + struct xrx200_port *p = &priv->port[priv->num_port];
1198 +
1199 + if (!id)
1200 + return;
1201 +
1202 + memset(p, 0, sizeof(struct xrx200_port));
1203 + p->phy_node = of_parse_phandle(port, "phy-handle", 0);
1204 + addr = of_get_property(p->phy_node, "reg", NULL);
1205 + if (!addr)
1206 + return;
1207 +
1208 + p->num = *id;
1209 + p->phy_addr = *addr;
1210 + p->phy_if = of_get_phy_mode(port);
1211 + if (p->phy_addr > 0x10)
1212 + p->flags = XRX200_PORT_TYPE_MAC;
1213 + else
1214 + p->flags = XRX200_PORT_TYPE_PHY;
1215 + priv->num_port++;
1216 +
1217 + p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
1218 + if (gpio_is_valid(p->gpio))
1219 + if (!gpio_request(p->gpio, "phy-reset")) {
1220 + gpio_direction_output(p->gpio,
1221 + (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
1222 + udelay(100);
1223 + gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
1224 + }
1225 + /* is this port a wan port ? */
1226 + if (priv->wan)
1227 + priv->hw->wan_map |= BIT(p->num);
1228 +
1229 + priv->port_map |= BIT(p->num);
1230 +
1231 + /* store the port id in the hw struct so we can map ports -> devices */
1232 + priv->hw->port_map[p->num] = priv->hw->num_devs;
1233 +}
1234 +
1235 +static const struct net_device_ops xrx200_netdev_ops = {
1236 + .ndo_init = xrx200_init,
1237 + .ndo_open = xrx200_open,
1238 + .ndo_stop = xrx200_close,
1239 + .ndo_start_xmit = xrx200_start_xmit,
1240 + .ndo_set_mac_address = eth_mac_addr,
1241 + .ndo_validate_addr = eth_validate_addr,
1242 + .ndo_change_mtu = eth_change_mtu,
1243 + .ndo_get_stats = xrx200_get_stats,
1244 + .ndo_tx_timeout = xrx200_tx_timeout,
1245 +};
1246 +
1247 +static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
1248 +{
1249 + struct xrx200_priv *priv;
1250 + struct device_node *port;
1251 + const __be32 *wan;
1252 +
1253 + /* alloc the network device */
1254 + hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
1255 + if (!hw->devs[hw->num_devs])
1256 + return;
1257 +
1258 + /* setup the network device */
1259 + strcpy(hw->devs[hw->num_devs]->name, "eth%d");
1260 + hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
1261 + hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
1262 + hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
1263 +
1264 + /* setup our private data */
1265 + priv = netdev_priv(hw->devs[hw->num_devs]);
1266 + priv->hw = hw;
1267 + priv->mac = of_get_mac_address(iface);
1268 + priv->id = hw->num_devs;
1269 +
1270 + /* is this the wan interface ? */
1271 + wan = of_get_property(iface, "lantiq,wan", NULL);
1272 + if (wan && (*wan == 1))
1273 + priv->wan = 1;
1274 +
1275 + /* load the ports that are part of the interface */
1276 + for_each_child_of_node(iface, port)
1277 + if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
1278 + xrx200_of_port(priv, port);
1279 +
1280 + /* register the actual device */
1281 + if (!register_netdev(hw->devs[hw->num_devs]))
1282 + hw->num_devs++;
1283 +}
1284 +
1285 +static struct xrx200_hw xrx200_hw;
1286 +
1287 +static int xrx200_probe(struct platform_device *pdev)
1288 +{
1289 + struct resource *res[4];
1290 + struct device_node *mdio_np, *iface_np;
1291 + int i;
1292 +
1293 + /* load the memory ranges */
1294 + for (i = 0; i < 4; i++) {
1295 + res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
1296 + if (!res[i]) {
1297 + dev_err(&pdev->dev, "failed to get resources\n");
1298 + return -ENOENT;
1299 + }
1300 + }
1301 + xrx200_switch_membase = devm_request_and_ioremap(&pdev->dev, res[0]);
1302 + xrx200_mdio_membase = devm_request_and_ioremap(&pdev->dev, res[1]);
1303 + xrx200_mii_membase = devm_request_and_ioremap(&pdev->dev, res[2]);
1304 + xrx200_pmac_membase = devm_request_and_ioremap(&pdev->dev, res[3]);
1305 + if (!xrx200_switch_membase || !xrx200_mdio_membase ||
1306 + !xrx200_mii_membase || !xrx200_pmac_membase) {
1307 + dev_err(&pdev->dev, "failed to request and remap io ranges \n");
1308 + return -ENOMEM;
1309 + }
1310 +
1311 + /* get the clock */
1312 + xrx200_hw.clk = clk_get(&pdev->dev, NULL);
1313 + if (IS_ERR(xrx200_hw.clk)) {
1314 + dev_err(&pdev->dev, "failed to get clock\n");
1315 + return PTR_ERR(xrx200_hw.clk);
1316 + }
1317 +
1318 + /* bring up the dma engine and IP core */
1319 + spin_lock_init(&xrx200_hw.lock);
1320 + xrx200_dma_init(&xrx200_hw);
1321 + xrx200_hw_init(&xrx200_hw);
1322 +
1323 + /* bring up the mdio bus */
1324 + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1325 + "lantiq,xrx200-mdio");
1326 + if (mdio_np)
1327 + if (xrx200_of_mdio(&xrx200_hw, mdio_np))
1328 + dev_err(&pdev->dev, "mdio probe failed\n");
1329 +
1330 + /* load the interfaces */
1331 + for_each_child_of_node(pdev->dev.of_node, iface_np)
1332 + if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
1333 + if (xrx200_hw.num_devs < XRX200_MAX_DEV)
1334 + xrx200_of_iface(&xrx200_hw, iface_np);
1335 + else
1336 + dev_err(&pdev->dev,
1337 + "only %d interfaces allowed\n",
1338 + XRX200_MAX_DEV);
1339 + }
1340 +
1341 + if (!xrx200_hw.num_devs) {
1342 + xrx200_hw_cleanup(&xrx200_hw);
1343 + dev_err(&pdev->dev, "failed to load interfaces\n");
1344 + return -ENOENT;
1345 + }
1346 +
1347 + /* set wan port mask */
1348 + ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
1349 +
1350 + for (i = 0; i < xrx200_hw.num_devs; i++) {
1351 + xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
1352 + xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
1353 + }
1354 +
1355 + /* setup NAPI */
1356 + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
1357 + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev);
1358 + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
1359 + &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
1360 + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev,
1361 + &xrx200_hw.chan[XRX200_DMA_TX].napi, xrx200_poll_tx, 8);
1362 +
1363 + platform_set_drvdata(pdev, &xrx200_hw);
1364 +
1365 + return 0;
1366 +}
1367 +
1368 +static int xrx200_remove(struct platform_device *pdev)
1369 +{
1370 + struct net_device *dev = platform_get_drvdata(pdev);
1371 + struct xrx200_priv *priv;
1372 +
1373 + if (!dev)
1374 + return 0;
1375 +
1376 + priv = netdev_priv(dev);
1377 +
1378 + /* free stack related instances */
1379 + netif_stop_queue(dev);
1380 + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
1381 + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_TX].napi);
1382 +
1383 + /* shut down hardware */
1384 + xrx200_hw_cleanup(&xrx200_hw);
1385 +
1386 + /* remove the actual device */
1387 + unregister_netdev(dev);
1388 + free_netdev(dev);
1389 +
1390 + return 0;
1391 +}
1392 +
1393 +static const struct of_device_id xrx200_match[] = {
1394 + { .compatible = "lantiq,xrx200-net" },
1395 + {},
1396 +};
1397 +MODULE_DEVICE_TABLE(of, xrx200_match);
1398 +
1399 +static struct platform_driver xrx200_driver = {
1400 + .probe = xrx200_probe,
1401 + .remove = xrx200_remove,
1402 + .driver = {
1403 + .name = "lantiq,xrx200-net",
1404 + .of_match_table = xrx200_match,
1405 + .owner = THIS_MODULE,
1406 + },
1407 +};
1408 +
1409 +module_platform_driver(xrx200_driver);
1410 +
1411 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1412 +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
1413 +MODULE_LICENSE("GPL");