lantiq: make xrx200 network driver use a tasklet for rx housekeeping
[openwrt/openwrt.git] / target / linux / lantiq / patches-3.8 / 0025-NET-MIPS-lantiq-adds-xrx200-net.patch
1 From fbfdf78ba827a8f854ae3ed7b11ea6df4054ffb1 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Mon, 22 Oct 2012 12:22:23 +0200
4 Subject: [PATCH 25/40] NET: MIPS: lantiq: adds xrx200-net
5
6 ---
7 drivers/net/ethernet/Kconfig | 8 +-
8 drivers/net/ethernet/Makefile | 1 +
9 drivers/net/ethernet/lantiq_pce.h | 163 +++++
10 drivers/net/ethernet/lantiq_xrx200.c | 1203 ++++++++++++++++++++++++++++++++++
11 4 files changed, 1374 insertions(+), 1 deletion(-)
12 create mode 100644 drivers/net/ethernet/lantiq_pce.h
13 create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
14
15 Index: linux-3.8.13/drivers/net/ethernet/Kconfig
16 ===================================================================
17 --- linux-3.8.13.orig/drivers/net/ethernet/Kconfig 2013-05-11 22:57:46.000000000 +0200
18 +++ linux-3.8.13/drivers/net/ethernet/Kconfig 2013-06-28 17:46:34.521054618 +0200
19 @@ -83,7 +83,13 @@
20 tristate "Lantiq SoC ETOP driver"
21 depends on SOC_TYPE_XWAY
22 ---help---
23 - Support for the MII0 inside the Lantiq SoC
24 + Support for the MII0 inside the Lantiq ADSL SoC
25 +
26 +config LANTIQ_XRX200
27 + tristate "Lantiq SoC XRX200 driver"
28 + depends on SOC_TYPE_XWAY
29 + ---help---
30 + Support for the MII0 inside the Lantiq VDSL SoC
31
32 source "drivers/net/ethernet/marvell/Kconfig"
33 source "drivers/net/ethernet/mellanox/Kconfig"
34 Index: linux-3.8.13/drivers/net/ethernet/Makefile
35 ===================================================================
36 --- linux-3.8.13.orig/drivers/net/ethernet/Makefile 2013-05-11 22:57:46.000000000 +0200
37 +++ linux-3.8.13/drivers/net/ethernet/Makefile 2013-06-28 17:46:34.521054618 +0200
38 @@ -36,6 +36,7 @@
39 obj-$(CONFIG_JME) += jme.o
40 obj-$(CONFIG_KORINA) += korina.o
41 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
42 +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
43 obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
44 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
45 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
46 Index: linux-3.8.13/drivers/net/ethernet/lantiq_pce.h
47 ===================================================================
48 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
49 +++ linux-3.8.13/drivers/net/ethernet/lantiq_pce.h 2013-06-28 17:46:34.525054619 +0200
50 @@ -0,0 +1,163 @@
51 +/*
52 + * This program is free software; you can redistribute it and/or modify it
53 + * under the terms of the GNU General Public License version 2 as published
54 + * by the Free Software Foundation.
55 + *
56 + * This program is distributed in the hope that it will be useful,
57 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
58 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
59 + * GNU General Public License for more details.
60 + *
61 + * You should have received a copy of the GNU General Public License
62 + * along with this program; if not, write to the Free Software
63 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
64 + *
65 + * Copyright (C) 2010 Lantiq Deutschland GmbH
66 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
67 + *
68 + * PCE microcode extracted from UGW5.2 switch api
69 + */
70 +
71 +/* Switch API Micro Code V0.3 */
72 +enum {
73 + OUT_MAC0 = 0,
74 + OUT_MAC1,
75 + OUT_MAC2,
76 + OUT_MAC3,
77 + OUT_MAC4,
78 + OUT_MAC5,
79 + OUT_ETHTYP,
80 + OUT_VTAG0,
81 + OUT_VTAG1,
82 + OUT_ITAG0,
83 + OUT_ITAG1, /*10 */
84 + OUT_ITAG2,
85 + OUT_ITAG3,
86 + OUT_IP0,
87 + OUT_IP1,
88 + OUT_IP2,
89 + OUT_IP3,
90 + OUT_SIP0,
91 + OUT_SIP1,
92 + OUT_SIP2,
93 + OUT_SIP3, /*20*/
94 + OUT_SIP4,
95 + OUT_SIP5,
96 + OUT_SIP6,
97 + OUT_SIP7,
98 + OUT_DIP0,
99 + OUT_DIP1,
100 + OUT_DIP2,
101 + OUT_DIP3,
102 + OUT_DIP4,
103 + OUT_DIP5, /*30*/
104 + OUT_DIP6,
105 + OUT_DIP7,
106 + OUT_SESID,
107 + OUT_PROT,
108 + OUT_APP0,
109 + OUT_APP1,
110 + OUT_IGMP0,
111 + OUT_IGMP1,
112 + OUT_IPOFF, /*39*/
113 + OUT_NONE = 63
114 +};
115 +
116 +/* parser's microcode length type */
117 +#define INSTR 0
118 +#define IPV6 1
119 +#define LENACCU 2
120 +
121 +/* parser's microcode flag type */
122 +enum {
123 + FLAG_ITAG = 0,
124 + FLAG_VLAN,
125 + FLAG_SNAP,
126 + FLAG_PPPOE,
127 + FLAG_IPV6,
128 + FLAG_IPV6FL,
129 + FLAG_IPV4,
130 + FLAG_IGMP,
131 + FLAG_TU,
132 + FLAG_HOP,
133 + FLAG_NN1, /*10 */
134 + FLAG_NN2,
135 + FLAG_END,
136 + FLAG_NO, /*13*/
137 +};
138 +
139 +/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
140 +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
141 + { {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
142 +struct pce_microcode {
143 + unsigned short val[4];
144 +/* unsigned short val_2;
145 + unsigned short val_1;
146 + unsigned short val_0;*/
147 +} pce_microcode[] = {
148 + /* value mask ns fields L type flags ipv4_len */
149 + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
150 + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
151 + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
152 + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
153 + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
154 + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
155 + MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
156 + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
157 + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
158 + MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
159 + MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
160 + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
161 + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
162 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
163 + MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
164 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
165 + MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
166 + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
167 + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
168 + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
169 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
170 + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
171 + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
172 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
173 + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
174 + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
175 + MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
176 + MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
177 + MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
178 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
179 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
180 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
181 + MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
182 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
183 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
184 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
185 + MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT, 1, IPV6, FLAG_NO, 0),
186 + MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
187 + MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
188 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
189 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
190 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
191 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
192 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
193 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
194 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
195 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
196 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
197 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
198 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
199 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
200 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
201 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
202 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
203 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
204 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
205 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
206 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
207 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
208 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
209 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
210 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
211 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
212 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
213 +};
214 Index: linux-3.8.13/drivers/net/ethernet/lantiq_xrx200.c
215 ===================================================================
216 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
217 +++ linux-3.8.13/drivers/net/ethernet/lantiq_xrx200.c 2013-06-29 18:09:53.932157475 +0200
218 @@ -0,0 +1,1203 @@
219 +/*
220 + * This program is free software; you can redistribute it and/or modify it
221 + * under the terms of the GNU General Public License version 2 as published
222 + * by the Free Software Foundation.
223 + *
224 + * This program is distributed in the hope that it will be useful,
225 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
226 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
227 + * GNU General Public License for more details.
228 + *
229 + * You should have received a copy of the GNU General Public License
230 + * along with this program; if not, write to the Free Software
231 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
232 + *
233 + * Copyright (C) 2010 Lantiq Deutschland
234 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
235 + */
236 +
237 +#include <linux/etherdevice.h>
238 +#include <linux/module.h>
239 +#include <linux/platform_device.h>
240 +#include <linux/interrupt.h>
241 +#include <linux/clk.h>
242 +#include <asm/delay.h>
243 +
244 +#include <linux/of_net.h>
245 +#include <linux/of_mdio.h>
246 +#include <linux/of_gpio.h>
247 +
248 +#include <xway_dma.h>
249 +#include <lantiq_soc.h>
250 +
251 +#include "lantiq_pce.h"
252 +
253 +#define SW_POLLING
254 +#define SW_ROUTING
255 +#define SW_PORTMAP
256 +
257 +#ifdef SW_ROUTING
258 + #ifdef SW_PORTMAP
259 +#define XRX200_MAX_DEV 2
260 + #else
261 +#define XRX200_MAX_DEV 2
262 + #endif
263 +#else
264 +#define XRX200_MAX_DEV 1
265 +#endif
266 +
267 +#define XRX200_MAX_PORT 7
268 +#define XRX200_MAX_DMA 8
269 +
270 +#define XRX200_HEADROOM 4
271 +
272 +#define XRX200_TX_TIMEOUT (10 * HZ)
273 +
274 +/* port type */
275 +#define XRX200_PORT_TYPE_PHY 1
276 +#define XRX200_PORT_TYPE_MAC 2
277 +
278 +/* DMA */
279 +#define XRX200_DMA_CRC_LEN 0x4
280 +#define XRX200_DMA_DATA_LEN 0x600
281 +#define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
282 +#define XRX200_DMA_RX 0
283 +#define XRX200_DMA_TX 1
284 +#define XRX200_DMA_IS_TX(x)	(((x) % 2) != 0)	/* odd channels are TX; arg parenthesized for macro hygiene */
285 +#define XRX200_DMA_IS_RX(x)	(!XRX200_DMA_IS_TX(x))
286 +
287 +/* fetch / store dma */
288 +#define FDMA_PCTRL0 0x2A00
289 +#define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
290 +#define SDMA_PCTRL0 0x2F00
291 +#define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
292 +
293 +/* buffer management */
294 +#define BM_PCFG0 0x200
295 +#define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
296 +
297 +/* MDIO */
298 +#define MDIO_GLOB 0x0000
299 +#define MDIO_CTRL 0x0020
300 +#define MDIO_READ 0x0024
301 +#define MDIO_WRITE 0x0028
302 +#define MDIO_PHY0 0x0054
303 +#define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
304 +#define MDIO_CLK_CFG0 0x002C
305 +#define MDIO_CLK_CFG1 0x0030
306 +
307 +#define MDIO_GLOB_ENABLE 0x8000
308 +#define MDIO_BUSY BIT(12)
309 +#define MDIO_RD BIT(11)
310 +#define MDIO_WR BIT(10)
311 +#define MDIO_MASK 0x1f
312 +#define MDIO_ADDRSHIFT 5
313 +#define MDIO1_25MHZ 9
314 +
315 +#define MDIO_PHY_LINK_DOWN 0x4000
316 +#define MDIO_PHY_LINK_UP 0x2000
317 +
318 +#define MDIO_PHY_SPEED_M10 0x0000
319 +#define MDIO_PHY_SPEED_M100 0x0800
320 +#define MDIO_PHY_SPEED_G1 0x1000
321 +
322 +#define MDIO_PHY_FDUP_EN 0x0600
323 +#define MDIO_PHY_FDUP_DIS 0x0200
324 +
325 +#define MDIO_PHY_LINK_MASK 0x6000
326 +#define MDIO_PHY_SPEED_MASK 0x1800
327 +#define MDIO_PHY_FDUP_MASK 0x0600
328 +#define MDIO_PHY_ADDR_MASK 0x001f
329 +#define MDIO_UPDATE_MASK	(MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
330 +				 MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK)
331 +
332 +/* MII */
333 +#define MII_CFG(p) (p * 8)
334 +
335 +#define MII_CFG_EN BIT(14)
336 +
337 +#define MII_CFG_MODE_MIIP 0x0
338 +#define MII_CFG_MODE_MIIM 0x1
339 +#define MII_CFG_MODE_RMIIP 0x2
340 +#define MII_CFG_MODE_RMIIM 0x3
341 +#define MII_CFG_MODE_RGMII 0x4
342 +#define MII_CFG_MODE_MASK 0xf
343 +
344 +#define MII_CFG_RATE_M2P5 0x00
345 +#define MII_CFG_RATE_M25 0x10
346 +#define MII_CFG_RATE_M125 0x20
347 +#define MII_CFG_RATE_M50 0x30
348 +#define MII_CFG_RATE_AUTO 0x40
349 +#define MII_CFG_RATE_MASK 0x70
350 +
351 +/* cpu port mac */
352 +#define PMAC_HD_CTL 0x0000
353 +#define PMAC_RX_IPG 0x0024
354 +#define PMAC_EWAN 0x002c
355 +
356 +#define PMAC_IPG_MASK 0xf
357 +#define PMAC_HD_CTL_AS 0x0008
358 +#define PMAC_HD_CTL_AC 0x0004
359 +#define PMAC_HD_CTL_RXSH 0x0040
360 +#define PMAC_HD_CTL_AST 0x0080
361 +#define PMAC_HD_CTL_RST 0x0100
362 +
363 +/* PCE */
364 +#define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
365 +#define PCE_TBL_MASK 0x1120
366 +#define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
367 +#define PCE_TBL_ADDR 0x1138
368 +#define PCE_TBL_CTRL 0x113c
369 +#define PCE_PMAP1 0x114c
370 +#define PCE_PMAP2 0x1150
371 +#define PCE_PMAP3 0x1154
372 +#define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
373 +#define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
374 +
375 +#define PCE_TBL_BUSY BIT(15)
376 +#define PCE_TBL_CFG_ADDR_MASK 0x1f
377 +#define PCE_TBL_CFG_ADWR 0x20
378 +#define PCE_TBL_CFG_ADWR_MASK 0x60
379 +#define PCE_INGRESS BIT(11)
380 +
381 +/* MAC */
382 +#define MAC_FLEN_REG (0x2314)
383 +#define MAC_CTRL_REG(p, x) (0x240c + (((p * 0xc) + x) * 4))
384 +
385 +/* buffer management */
386 +#define BM_PCFG(p) (0x200 + (p * 8))
387 +
388 +/* special tag in TX path header */
389 +#define SPID_SHIFT 24
390 +#define DPID_SHIFT 16
391 +#define DPID_ENABLE 1
392 +#define SPID_CPU_PORT 2
393 +#define PORT_MAP_SEL BIT(15)
394 +#define PORT_MAP_EN BIT(14)
395 +#define PORT_MAP_SHIFT 1
396 +#define PORT_MAP_MASK 0x3f
397 +
398 +#define SPPID_MASK 0x7
399 +#define SPPID_SHIFT 4
400 +
401 +/* MII regs not yet in linux */
402 +#define MDIO_DEVAD_NONE (-1)
403 +#define ADVERTIZE_MPD (1 << 10)
404 +
405 +struct xrx200_port {
406 + u8 num;
407 + u8 phy_addr;
408 + u16 flags;
409 + phy_interface_t phy_if;
410 +
411 + int link;
412 + int gpio;
413 + enum of_gpio_flags gpio_flags;
414 +
415 + struct phy_device *phydev;
416 + struct device_node *phy_node;
417 +};
418 +
419 +struct xrx200_chan {
420 + int idx;
421 + int refcount;
422 + int tx_free;
423 +
424 + struct net_device dummy_dev;
425 + struct net_device *devs[XRX200_MAX_DEV];
426 +
427 + struct tasklet_struct tasklet;
428 + struct napi_struct napi;
429 + struct ltq_dma_channel dma;
430 + struct sk_buff *skb[LTQ_DESC_NUM];
431 +};
432 +
433 +struct xrx200_hw {
434 + struct clk *clk;
435 + struct mii_bus *mii_bus;
436 +
437 + struct xrx200_chan chan[XRX200_MAX_DMA];
438 +
439 + struct net_device *devs[XRX200_MAX_DEV];
440 + int num_devs;
441 +
442 + int port_map[XRX200_MAX_PORT];
443 + unsigned short wan_map;
444 +
445 + spinlock_t lock;
446 +};
447 +
448 +struct xrx200_priv {
449 + struct net_device_stats stats;
450 + int id;
451 +
452 + struct xrx200_port port[XRX200_MAX_PORT];
453 + int num_port;
454 + int wan;
455 + unsigned short port_map;
456 + const void *mac;
457 +
458 + struct xrx200_hw *hw;
459 +};
460 +
461 +static __iomem void *xrx200_switch_membase;
462 +static __iomem void *xrx200_mii_membase;
463 +static __iomem void *xrx200_mdio_membase;
464 +static __iomem void *xrx200_pmac_membase;
465 +
466 +#define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
467 +#define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
468 +#define ltq_switch_w32_mask(x, y, z) \
469 + ltq_w32_mask(x, y, xrx200_switch_membase + (z))
470 +
471 +#define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
472 +#define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
473 +#define ltq_mdio_w32_mask(x, y, z) \
474 + ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
475 +
476 +#define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
477 +#define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
478 +#define ltq_mii_w32_mask(x, y, z) \
479 + ltq_w32_mask(x, y, xrx200_mii_membase + (z))
480 +
481 +#define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
482 +#define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
483 +#define ltq_pmac_w32_mask(x, y, z) \
484 + ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
485 +
486 +static int xrx200_open(struct net_device *dev)
487 +{
488 + struct xrx200_priv *priv = netdev_priv(dev);
489 + unsigned long flags;
490 + int i;
491 +
492 + for (i = 0; i < XRX200_MAX_DMA; i++) {
493 + if (!priv->hw->chan[i].dma.irq)
494 + continue;
495 + spin_lock_irqsave(&priv->hw->lock, flags);
496 + if (!priv->hw->chan[i].refcount) {
497 + if (XRX200_DMA_IS_RX(i))
498 + napi_enable(&priv->hw->chan[i].napi);
499 + ltq_dma_open(&priv->hw->chan[i].dma);
500 + }
501 + priv->hw->chan[i].refcount++;
502 + spin_unlock_irqrestore(&priv->hw->lock, flags);
503 + }
504 + for (i = 0; i < priv->num_port; i++)
505 + if (priv->port[i].phydev)
506 + phy_start(priv->port[i].phydev);
507 + netif_start_queue(dev);
508 +
509 + return 0;
510 +}
511 +
512 +static int xrx200_close(struct net_device *dev)
513 +{
514 +	struct xrx200_priv *priv = netdev_priv(dev);
515 +	unsigned long flags;
516 +	int i;
517 +
518 +	netif_stop_queue(dev);
519 +
520 +	for (i = 0; i < priv->num_port; i++)
521 +		if (priv->port[i].phydev)
522 +			phy_stop(priv->port[i].phydev);
523 +
524 +	for (i = 0; i < XRX200_MAX_DMA; i++) {
525 +		if (!priv->hw->chan[i].dma.irq)
526 +			continue;
527 +		spin_lock_irqsave(&priv->hw->lock, flags);
528 +		priv->hw->chan[i].refcount--;
529 +		if (!priv->hw->chan[i].refcount) {
530 +			if (XRX200_DMA_IS_RX(i))
531 +				napi_disable(&priv->hw->chan[i].napi);
532 +			ltq_dma_close(&priv->hw->chan[i].dma); /* fix: was chan[XRX200_DMA_RX] — always closed RX, leaving TX open */
533 +		}
534 +		spin_unlock_irqrestore(&priv->hw->lock, flags);
535 +	}
536 +
537 +	return 0;
538 +}
539 +
540 +static int xrx200_alloc_skb(struct xrx200_chan *ch) /* refill the channel's current rx descriptor with a fresh skb */
541 +{
542 +#define DMA_PAD	(NET_IP_ALIGN + NET_SKB_PAD)
543 +	ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
544 +	if (!ch->skb[ch->dma.desc])
545 +		return -ENOMEM;
546 +
547 +	skb_reserve(ch->skb[ch->dma.desc], NET_SKB_PAD);
548 +	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
549 +		ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
550 +		DMA_FROM_DEVICE); /* mapping result unchecked — no dma_mapping_error() */
551 +	ch->dma.desc_base[ch->dma.desc].addr =
552 +		CPHYSADDR(ch->skb[ch->dma.desc]->data); /* NOTE(review): overwrites the dma_map_single() result just stored; the mapping is never unmapped — confirm this is intentional on lantiq */
553 +	ch->dma.desc_base[ch->dma.desc].ctl =
554 +		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
555 +		XRX200_DMA_DATA_LEN;
556 +	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); /* keep skb->data in sync with LTQ_DMA_RX_OFFSET above */
557 +
558 +	return 0;
559 +}
560 +
561 +static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
562 +{
563 + struct net_device *dev = ch->devs[id];
564 + struct xrx200_priv *priv = netdev_priv(dev);
565 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
566 + struct sk_buff *skb = ch->skb[ch->dma.desc];
567 + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - XRX200_DMA_CRC_LEN;
568 + unsigned long flags;
569 +
570 + spin_lock_irqsave(&priv->hw->lock, flags);
571 + if (xrx200_alloc_skb(ch)) {
572 + netdev_err(dev,
573 + "failed to allocate new rx buffer, stopping DMA\n");
574 + ltq_dma_close(&ch->dma);
575 + }
576 +
577 + ch->dma.desc++;
578 + ch->dma.desc %= LTQ_DESC_NUM;
579 + spin_unlock_irqrestore(&priv->hw->lock, flags);
580 +
581 + skb_put(skb, len);
582 +#ifdef SW_ROUTING
583 + skb_pull(skb, 8);
584 +#endif
585 + skb->dev = dev;
586 + skb->protocol = eth_type_trans(skb, dev);
587 + netif_receive_skb(skb);
588 + priv->stats.rx_packets++;
589 + priv->stats.rx_bytes+=len;
590 +}
591 +
592 +static int xrx200_poll_rx(struct napi_struct *napi, int budget)
593 +{
594 + struct xrx200_chan *ch = container_of(napi,
595 + struct xrx200_chan, napi);
596 + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
597 + int rx = 0;
598 + int complete = 0;
599 + unsigned long flags;
600 +
601 + while ((rx < budget) && !complete) {
602 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
603 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
604 +#ifdef SW_ROUTING
605 + struct sk_buff *skb = ch->skb[ch->dma.desc];
606 + u32 *special_tag = (u32*)skb->data;
607 + int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
608 + xrx200_hw_receive(ch, priv->hw->port_map[port]);
609 +#else
610 + xrx200_hw_receive(ch, 0);
611 +#endif
612 + rx++;
613 + } else {
614 + complete = 1;
615 + }
616 + }
617 + if (complete || !rx) {
618 + napi_complete(&ch->napi);
619 + spin_lock_irqsave(&priv->hw->lock, flags);
620 + ltq_dma_ack_irq(&ch->dma);
621 + spin_unlock_irqrestore(&priv->hw->lock, flags);
622 + }
623 + return rx;
624 +}
625 +
626 +static void xrx200_tx_housekeeping(unsigned long ptr)
627 +{
628 + struct xrx200_hw *hw = (struct xrx200_hw *) ptr;
629 + struct xrx200_chan *ch = &hw->chan[XRX200_DMA_TX];
630 + unsigned long flags;
631 + int i;
632 +
633 + spin_lock_irqsave(&hw->lock, flags);
634 + while ((ch->dma.desc_base[ch->tx_free].ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
635 + dev_kfree_skb_any(ch->skb[ch->tx_free]);
636 + ch->skb[ch->tx_free] = NULL;
637 + memset(&ch->dma.desc_base[ch->tx_free], 0,
638 + sizeof(struct ltq_dma_desc));
639 + ch->tx_free++;
640 + ch->tx_free %= LTQ_DESC_NUM;
641 + }
642 + spin_unlock_irqrestore(&hw->lock, flags);
643 +
644 + for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++) {
645 + struct netdev_queue *txq =
646 + netdev_get_tx_queue(ch->devs[i], 0);
647 + if (netif_tx_queue_stopped(txq))
648 + netif_tx_start_queue(txq);
649 + }
650 +
651 + spin_lock_irqsave(&hw->lock, flags);
652 + ltq_dma_ack_irq(&ch->dma);
653 + spin_unlock_irqrestore(&hw->lock, flags);
654 +}
655 +
656 +static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
657 +{
658 + struct xrx200_priv *priv = netdev_priv(dev);
659 +
660 + return &priv->stats;
661 +}
662 +
663 +static void xrx200_tx_timeout(struct net_device *dev)
664 +{
665 +	struct xrx200_priv *priv = netdev_priv(dev);
666 +
667 +	netdev_err(dev, "transmit timed out, waking tx queue\n"); /* fix: old message claimed the dma irq gets disabled, which this handler never does */
668 +
669 +	priv->stats.tx_errors++;
670 +	netif_wake_queue(dev);
671 +}
672 +
673 +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
674 +{
675 +	int queue = skb_get_queue_mapping(skb);
676 +	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
677 +	struct xrx200_priv *priv = netdev_priv(dev);
678 +	struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
679 +	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
680 +	unsigned long flags;
681 +	u32 byte_offset;
682 +	int len;
683 +#ifdef SW_ROUTING
684 +  #ifdef SW_PORTMAP
685 +	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | PORT_MAP_SEL | PORT_MAP_EN | DPID_ENABLE;
686 +  #else
687 +	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
688 +  #endif
689 +#endif
690 +
691 +	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
692 +
693 +	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
694 +		netdev_err(dev, "tx ring full\n");
695 +		netif_tx_stop_queue(txq);
696 +		return NETDEV_TX_BUSY;
697 +	}
698 +#ifdef SW_ROUTING
699 +  #ifdef SW_PORTMAP
700 +	special_tag |= priv->port_map << PORT_MAP_SHIFT;
701 +  #else
702 +	if(priv->id)
703 +		special_tag |= (1 << DPID_SHIFT);
704 +  #endif
705 +	if(skb_headroom(skb) < 4) {
706 +		struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
707 +		dev_kfree_skb_any(skb);
708 +		if (!(skb = tmp)) { priv->stats.tx_dropped++; return NETDEV_TX_OK; } /* fix: realloc failure used to fall through and skb_push(NULL) — drop instead */
709 +	}
710 +	skb_push(skb, 4);
711 +	memcpy(skb->data, &special_tag, sizeof(u32));
712 +	len += 4;
713 +#endif
714 +
715 +	/* dma needs to start on a 16 byte aligned address */
716 +	byte_offset = CPHYSADDR(skb->data) % 16;
717 +	ch->skb[ch->dma.desc] = skb;
718 +
719 +	dev->trans_start = jiffies;
720 +
721 +	spin_lock_irqsave(&priv->hw->lock, flags);
722 +	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
723 +						DMA_TO_DEVICE)) - byte_offset;
724 +	wmb();
725 +	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
726 +		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
727 +	ch->dma.desc++;
728 +	ch->dma.desc %= LTQ_DESC_NUM;
729 +	spin_unlock_irqrestore(&priv->hw->lock, flags);
730 +
731 +	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
732 +		netif_tx_stop_queue(txq);
733 +
734 +	priv->stats.tx_packets++;
735 +	priv->stats.tx_bytes+=len;
736 +
737 +	return NETDEV_TX_OK;
738 +}
739 +
740 +static irqreturn_t xrx200_dma_irq(int irq, void *priv)
741 +{
742 + struct xrx200_hw *hw = priv;
743 + int ch = irq - XRX200_DMA_IRQ;
744 +
745 + if (ch % 2)
746 + tasklet_schedule(&hw->chan[ch].tasklet);
747 + else
748 + napi_schedule(&hw->chan[ch].napi);
749 +
750 + return IRQ_HANDLED;
751 +}
752 +
753 +static int xrx200_dma_init(struct xrx200_hw *hw)
754 +{
755 + int i, err = 0;
756 +
757 + ltq_dma_init_port(DMA_PORT_ETOP);
758 +
759 + for (i = 0; i < 8 && !err; i++) {
760 + int irq = XRX200_DMA_IRQ + i;
761 + struct xrx200_chan *ch = &hw->chan[i];
762 +
763 + ch->idx = ch->dma.nr = i;
764 +
765 + if (i == XRX200_DMA_TX) {
766 + ltq_dma_alloc_tx(&ch->dma);
767 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
768 + } else if (i == XRX200_DMA_RX) {
769 + ltq_dma_alloc_rx(&ch->dma);
770 + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
771 + ch->dma.desc++)
772 + if (xrx200_alloc_skb(ch))
773 + err = -ENOMEM;
774 + ch->dma.desc = 0;
775 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
776 + } else
777 + continue;
778 +
779 + if (!err)
780 + ch->dma.irq = irq;
781 + }
782 +
783 + return err;
784 +}
785 +
786 +#ifdef SW_POLLING
787 +static void xrx200_gmac_update(struct xrx200_port *port)
788 +{
789 + u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
790 + u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
791 + u16 miirate = 0;
792 +
793 + switch (port->phydev->speed) {
794 + case SPEED_1000:
795 + phyaddr |= MDIO_PHY_SPEED_G1;
796 + miirate = MII_CFG_RATE_M125;
797 + break;
798 +
799 + case SPEED_100:
800 + phyaddr |= MDIO_PHY_SPEED_M100;
801 + switch (miimode) {
802 + case MII_CFG_MODE_RMIIM:
803 + case MII_CFG_MODE_RMIIP:
804 + miirate = MII_CFG_RATE_M50;
805 + break;
806 + default:
807 + miirate = MII_CFG_RATE_M25;
808 + break;
809 + }
810 + break;
811 +
812 + default:
813 + phyaddr |= MDIO_PHY_SPEED_M10;
814 + miirate = MII_CFG_RATE_M2P5;
815 + break;
816 + }
817 +
818 + if (port->phydev->link)
819 + phyaddr |= MDIO_PHY_LINK_UP;
820 + else
821 + phyaddr |= MDIO_PHY_LINK_DOWN;
822 +
823 + if (port->phydev->duplex == DUPLEX_FULL)
824 + phyaddr |= MDIO_PHY_FDUP_EN;
825 + else
826 + phyaddr |= MDIO_PHY_FDUP_DIS;
827 +
828 + ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
829 + ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
830 + udelay(1);
831 +}
832 +#else
833 +static void xrx200_gmac_update(struct xrx200_port *port)
834 +{
835 +
836 +}
837 +#endif
838 +
839 +static void xrx200_mdio_link(struct net_device *dev)
840 +{
841 + struct xrx200_priv *priv = netdev_priv(dev);
842 + int i;
843 +
844 + for (i = 0; i < priv->num_port; i++) {
845 + if (!priv->port[i].phydev)
846 + continue;
847 +
848 + if (priv->port[i].link != priv->port[i].phydev->link) {
849 + xrx200_gmac_update(&priv->port[i]);
850 + priv->port[i].link = priv->port[i].phydev->link;
851 + netdev_info(dev, "port %d %s link\n",
852 + priv->port[i].num,
853 + (priv->port[i].link)?("got"):("lost"));
854 + }
855 + }
856 +}
857 +
858 +static inline int xrx200_mdio_poll(struct mii_bus *bus)
859 +{
860 + unsigned cnt = 10000;
861 +
862 + while (likely(cnt--)) {
863 + unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
864 + if ((ctrl & MDIO_BUSY) == 0)
865 + return 0;
866 + }
867 +
868 + return 1;
869 +}
870 +
871 +static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
872 +{
873 + if (xrx200_mdio_poll(bus))
874 + return 1;
875 +
876 + ltq_mdio_w32(val, MDIO_WRITE);
877 + ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
878 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
879 + (reg & MDIO_MASK),
880 + MDIO_CTRL);
881 +
882 + return 0;
883 +}
884 +
885 +static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
886 +{
887 + if (xrx200_mdio_poll(bus))
888 + return -1;
889 +
890 + ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
891 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
892 + (reg & MDIO_MASK),
893 + MDIO_CTRL);
894 +
895 + if (xrx200_mdio_poll(bus))
896 + return -1;
897 +
898 + return ltq_mdio_r32(MDIO_READ);
899 +}
900 +
901 +static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
902 +{
903 + struct xrx200_priv *priv = netdev_priv(dev);
904 + struct phy_device *phydev = NULL;
905 + unsigned val;
906 +
907 + phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
908 +
909 + if (!phydev) {
910 + netdev_err(dev, "no PHY found\n");
911 + return -ENODEV;
912 + }
913 +
914 + phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
915 + 0, port->phy_if);
916 +
917 + if (IS_ERR(phydev)) {
918 + netdev_err(dev, "Could not attach to PHY\n");
919 + return PTR_ERR(phydev);
920 + }
921 +
922 + phydev->supported &= (SUPPORTED_10baseT_Half
923 + | SUPPORTED_10baseT_Full
924 + | SUPPORTED_100baseT_Half
925 + | SUPPORTED_100baseT_Full
926 + | SUPPORTED_1000baseT_Half
927 + | SUPPORTED_1000baseT_Full
928 + | SUPPORTED_Autoneg
929 + | SUPPORTED_MII
930 + | SUPPORTED_TP);
931 + phydev->advertising = phydev->supported;
932 + port->phydev = phydev;
933 +
934 + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
935 + dev->name, phydev->drv->name,
936 + dev_name(&phydev->dev), phydev->irq);
937 +
938 +#ifdef SW_POLLING
939 + phy_read_status(phydev);
940 +
941 + val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
942 + val |= ADVERTIZE_MPD;
943 + xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
944 + xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
945 +
946 + phy_start_aneg(phydev);
947 +#endif
948 + return 0;
949 +}
950 +
951 +static void xrx200_port_config(struct xrx200_priv *priv,
952 + const struct xrx200_port *port)
953 +{
954 + u16 miimode = 0;
955 +
956 + switch (port->num) {
957 + case 0: /* xMII0 */
958 + case 1: /* xMII1 */
959 + switch (port->phy_if) {
960 + case PHY_INTERFACE_MODE_MII:
961 + if (port->flags & XRX200_PORT_TYPE_PHY)
962 + /* MII MAC mode, connected to external PHY */
963 + miimode = MII_CFG_MODE_MIIM;
964 + else
965 + /* MII PHY mode, connected to external MAC */
966 + miimode = MII_CFG_MODE_MIIP;
967 + break;
968 + case PHY_INTERFACE_MODE_RMII:
969 + if (port->flags & XRX200_PORT_TYPE_PHY)
970 + /* RMII MAC mode, connected to external PHY */
971 + miimode = MII_CFG_MODE_RMIIM;
972 + else
973 + /* RMII PHY mode, connected to external MAC */
974 + miimode = MII_CFG_MODE_RMIIP;
975 + break;
976 + case PHY_INTERFACE_MODE_RGMII:
977 + /* RGMII MAC mode, connected to external PHY */
978 + miimode = MII_CFG_MODE_RGMII;
979 + break;
980 + default:
981 + break;
982 + }
983 + break;
984 + case 2: /* internal GPHY0 */
985 + case 3: /* internal GPHY0 */
986 + case 4: /* internal GPHY1 */
987 + switch (port->phy_if) {
988 + case PHY_INTERFACE_MODE_MII:
989 + case PHY_INTERFACE_MODE_GMII:
990 + /* MII MAC mode, connected to internal GPHY */
991 + miimode = MII_CFG_MODE_MIIM;
992 + break;
993 + default:
994 + break;
995 + }
996 + break;
997 + case 5: /* internal GPHY1 or xMII2 */
998 + switch (port->phy_if) {
999 + case PHY_INTERFACE_MODE_MII:
1000 + /* MII MAC mode, connected to internal GPHY */
1001 + miimode = MII_CFG_MODE_MIIM;
1002 + break;
1003 + case PHY_INTERFACE_MODE_RGMII:
1004 + /* RGMII MAC mode, connected to external PHY */
1005 + miimode = MII_CFG_MODE_RGMII;
1006 + break;
1007 + default:
1008 + break;
1009 + }
1010 + break;
1011 + default:
1012 + break;
1013 + }
1014 +
1015 + ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
1016 + MII_CFG(port->num));
1017 +}
1018 +
1019 +static int xrx200_init(struct net_device *dev)
1020 +{
1021 + struct xrx200_priv *priv = netdev_priv(dev);
1022 + struct sockaddr mac;
1023 + int err, i;
1024 +
1025 +#ifndef SW_POLLING
1026 + unsigned int reg = 0;
1027 +
1028 + /* enable auto polling */
1029 + for (i = 0; i < priv->num_port; i++)
1030 + reg |= BIT(priv->port[i].num);
1031 + ltq_mdio_w32(reg, MDIO_CLK_CFG0);
1032 + ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
1033 +#endif
1034 +
1035 + /* setup each port */
1036 + for (i = 0; i < priv->num_port; i++)
1037 + xrx200_port_config(priv, &priv->port[i]);
1038 +
1039 + memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
1040 + if (!is_valid_ether_addr(mac.sa_data)) {
1041 + pr_warn("net-xrx200: invalid MAC, using random\n");
1042 + eth_random_addr(mac.sa_data);
1043 + dev->addr_assign_type |= NET_ADDR_RANDOM;
1044 + }
1045 +
1046 + err = eth_mac_addr(dev, &mac);
1047 + if (err)
1048 + goto err_netdev;
1049 +
1050 + for (i = 0; i < priv->num_port; i++)
1051 + if (xrx200_mdio_probe(dev, &priv->port[i]))
1052 + pr_warn("xrx200-mdio: probing phy of port %d failed\n",
1053 + priv->port[i].num);
1054 +
1055 + return 0;
1056 +
1057 +err_netdev:
1058 + unregister_netdev(dev);
1059 + free_netdev(dev);
1060 + return err;
1061 +}
1062 +
1063 +static void xrx200_pci_microcode(void)
1064 +{
1065 + int i;
1066 +
1067 + ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
1068 + PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
1069 + ltq_switch_w32(0, PCE_TBL_MASK);
1070 +
1071 + for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
1072 + ltq_switch_w32(i, PCE_TBL_ADDR);
1073 + ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
1074 + ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
1075 + ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
1076 + ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
1077 +
1078 + // start the table access:
1079 + ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
1080 + while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
1081 + }
1082 +
1083 + /* tell the switch that the microcode is loaded */
1084 + ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
1085 +}
1086 +
1087 +static void xrx200_hw_init(struct xrx200_hw *hw)
1088 +{
1089 + int i;
1090 +
1091 + /* enable clock gate */
1092 + clk_enable(hw->clk);
1093 +
1094 + ltq_switch_w32(1, 0);
1095 + mdelay(100);
1096 + ltq_switch_w32(0, 0);
1097 +	/*
1098 +	 * TODO: we should really disable all phys/miis here and explicitly
1099 +	 * enable them in the device specific init function
1100 +	 */
1101 +
1102 + /* disable port fetch/store dma */
1103 + for (i = 0; i < 7; i++ ) {
1104 + ltq_switch_w32(0, FDMA_PCTRLx(i));
1105 + ltq_switch_w32(0, SDMA_PCTRLx(i));
1106 + }
1107 +
1108 + /* enable Switch */
1109 + ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
1110 +
1111 + /* load the pce microcode */
1112 + xrx200_pci_microcode();
1113 +
1114 +	/* Default unknown Broadcast/Multicast/Unicast port maps */
1115 + ltq_switch_w32(0x7f, PCE_PMAP1);
1116 + ltq_switch_w32(0x7f, PCE_PMAP2);
1117 + ltq_switch_w32(0x7f, PCE_PMAP3);
1118 +
1119 + /* RMON Counter Enable for all physical ports */
1120 + for (i = 0; i < 7; i++)
1121 + ltq_switch_w32(0x1, BM_PCFG(i));
1122 +
1123 + /* disable auto polling */
1124 + ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
1125 +
1126 + /* enable port statistic counters */
1127 + for (i = 0; i < 7; i++)
1128 + ltq_switch_w32(0x1, BM_PCFGx(i));
1129 +
1130 + /* set IPG to 12 */
1131 + ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
1132 +
1133 +#ifdef SW_ROUTING
1134 + /* enable status header, enable CRC */
1135 + ltq_pmac_w32_mask(0,
1136 + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC,
1137 + PMAC_HD_CTL);
1138 +#else
1139 + /* disable status header, enable CRC */
1140 + ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
1141 + PMAC_HD_CTL_AC,
1142 + PMAC_HD_CTL);
1143 +#endif
1144 +
1145 + /* enable port fetch/store dma */
1146 + for (i = 0; i < 7; i++ ) {
1147 + ltq_switch_w32_mask(0, 0x01, FDMA_PCTRLx(i));
1148 + ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
1149 + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
1150 + }
1151 +
1152 + /* enable special tag insertion on cpu port */
1153 + ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
1154 + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
1155 + ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
1156 + ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
1157 +}
1158 +
1159 +static void xrx200_hw_cleanup(struct xrx200_hw *hw)
1160 +{
1161 + int i;
1162 +
1163 + /* disable the switch */
1164 + ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
1165 +
1166 + /* free the channels and IRQs */
1167 + for (i = 0; i < 2; i++) {
1168 + ltq_dma_free(&hw->chan[i].dma);
1169 + if (hw->chan[i].dma.irq)
1170 + free_irq(hw->chan[i].dma.irq, hw);
1171 + }
1172 +
1173 + /* free the allocated RX ring */
1174 + for (i = 0; i < LTQ_DESC_NUM; i++)
1175 + dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
1176 +
1177 + /* clear the mdio bus */
1178 + mdiobus_unregister(hw->mii_bus);
1179 + mdiobus_free(hw->mii_bus);
1180 +
1181 + /* release the clock */
1182 + clk_disable(hw->clk);
1183 + clk_put(hw->clk);
1184 +}
1185 +
1186 +static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
1187 +{
1188 + hw->mii_bus = mdiobus_alloc();
1189 + if (!hw->mii_bus)
1190 + return -ENOMEM;
1191 +
1192 + hw->mii_bus->read = xrx200_mdio_rd;
1193 + hw->mii_bus->write = xrx200_mdio_wr;
1194 + hw->mii_bus->name = "lantiq,xrx200-mdio";
1195 + snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1196 +
1197 + if (of_mdiobus_register(hw->mii_bus, np)) {
1198 + mdiobus_free(hw->mii_bus);
1199 + return -ENXIO;
1200 + }
1201 +
1202 + return 0;
1203 +}
1204 +
1205 +static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
1206 +{
1207 + const __be32 *addr, *id = of_get_property(port, "reg", NULL);
1208 + struct xrx200_port *p = &priv->port[priv->num_port];
1209 +
1210 + if (!id)
1211 + return;
1212 +
1213 + memset(p, 0, sizeof(struct xrx200_port));
1214 + p->phy_node = of_parse_phandle(port, "phy-handle", 0);
1215 + addr = of_get_property(p->phy_node, "reg", NULL);
1216 + if (!addr)
1217 + return;
1218 +
1219 + p->num = *id;
1220 + p->phy_addr = *addr;
1221 + p->phy_if = of_get_phy_mode(port);
1222 + if (p->phy_addr > 0x10)
1223 + p->flags = XRX200_PORT_TYPE_MAC;
1224 + else
1225 + p->flags = XRX200_PORT_TYPE_PHY;
1226 + priv->num_port++;
1227 +
1228 + p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
1229 + if (gpio_is_valid(p->gpio))
1230 + if (!gpio_request(p->gpio, "phy-reset")) {
1231 + gpio_direction_output(p->gpio,
1232 + (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
1233 + udelay(100);
1234 + gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
1235 + }
1236 + /* is this port a wan port ? */
1237 + if (priv->wan)
1238 + priv->hw->wan_map |= BIT(p->num);
1239 +
1240 + priv->port_map |= BIT(p->num);
1241 +
1242 + /* store the port id in the hw struct so we can map ports -> devices */
1243 + priv->hw->port_map[p->num] = priv->hw->num_devs;
1244 +}
1245 +
1246 +static const struct net_device_ops xrx200_netdev_ops = {
1247 + .ndo_init = xrx200_init,
1248 + .ndo_open = xrx200_open,
1249 + .ndo_stop = xrx200_close,
1250 + .ndo_start_xmit = xrx200_start_xmit,
1251 + .ndo_set_mac_address = eth_mac_addr,
1252 + .ndo_validate_addr = eth_validate_addr,
1253 + .ndo_change_mtu = eth_change_mtu,
1254 + .ndo_get_stats = xrx200_get_stats,
1255 + .ndo_tx_timeout = xrx200_tx_timeout,
1256 +};
1257 +
1258 +static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
1259 +{
1260 + struct xrx200_priv *priv;
1261 + struct device_node *port;
1262 + const __be32 *wan;
1263 +
1264 + /* alloc the network device */
1265 + hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
1266 + if (!hw->devs[hw->num_devs])
1267 + return;
1268 +
1269 + /* setup the network device */
1270 + strcpy(hw->devs[hw->num_devs]->name, "eth%d");
1271 + hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
1272 + hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
1273 + hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
1274 +
1275 + /* setup our private data */
1276 + priv = netdev_priv(hw->devs[hw->num_devs]);
1277 + priv->hw = hw;
1278 + priv->mac = of_get_mac_address(iface);
1279 + priv->id = hw->num_devs;
1280 +
1281 + /* is this the wan interface ? */
1282 + wan = of_get_property(iface, "lantiq,wan", NULL);
1283 + if (wan && (*wan == 1))
1284 + priv->wan = 1;
1285 +
1286 + /* load the ports that are part of the interface */
1287 + for_each_child_of_node(iface, port)
1288 + if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
1289 + xrx200_of_port(priv, port);
1290 +
1291 + /* register the actual device */
1292 + if (!register_netdev(hw->devs[hw->num_devs]))
1293 + hw->num_devs++;
1294 +}
1295 +
1296 +static struct xrx200_hw xrx200_hw;
1297 +
1298 +static int xrx200_probe(struct platform_device *pdev)
1299 +{
1300 + struct resource *res[4];
1301 + struct device_node *mdio_np, *iface_np;
1302 + int i;
1303 +
1304 + /* load the memory ranges */
1305 + for (i = 0; i < 4; i++) {
1306 + res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
1307 + if (!res[i]) {
1308 + dev_err(&pdev->dev, "failed to get resources\n");
1309 + return -ENOENT;
1310 + }
1311 + }
1312 + xrx200_switch_membase = devm_request_and_ioremap(&pdev->dev, res[0]);
1313 + xrx200_mdio_membase = devm_request_and_ioremap(&pdev->dev, res[1]);
1314 + xrx200_mii_membase = devm_request_and_ioremap(&pdev->dev, res[2]);
1315 + xrx200_pmac_membase = devm_request_and_ioremap(&pdev->dev, res[3]);
1316 + if (!xrx200_switch_membase || !xrx200_mdio_membase ||
1317 + !xrx200_mii_membase || !xrx200_pmac_membase) {
1318 + dev_err(&pdev->dev, "failed to request and remap io ranges \n");
1319 + return -ENOMEM;
1320 + }
1321 +
1322 + /* get the clock */
1323 + xrx200_hw.clk = clk_get(&pdev->dev, NULL);
1324 + if (IS_ERR(xrx200_hw.clk)) {
1325 + dev_err(&pdev->dev, "failed to get clock\n");
1326 + return PTR_ERR(xrx200_hw.clk);
1327 + }
1328 +
1329 + /* bring up the dma engine and IP core */
1330 + spin_lock_init(&xrx200_hw.lock);
1331 + xrx200_dma_init(&xrx200_hw);
1332 + xrx200_hw_init(&xrx200_hw);
1333 + tasklet_init(&xrx200_hw.chan[XRX200_DMA_TX].tasklet, xrx200_tx_housekeeping, (u32) &xrx200_hw);
1334 +
1335 + /* bring up the mdio bus */
1336 + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1337 + "lantiq,xrx200-mdio");
1338 + if (mdio_np)
1339 + if (xrx200_of_mdio(&xrx200_hw, mdio_np))
1340 + dev_err(&pdev->dev, "mdio probe failed\n");
1341 +
1342 + /* load the interfaces */
1343 + for_each_child_of_node(pdev->dev.of_node, iface_np)
1344 + if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
1345 + if (xrx200_hw.num_devs < XRX200_MAX_DEV)
1346 + xrx200_of_iface(&xrx200_hw, iface_np);
1347 + else
1348 + dev_err(&pdev->dev,
1349 + "only %d interfaces allowed\n",
1350 + XRX200_MAX_DEV);
1351 + }
1352 +
1353 + if (!xrx200_hw.num_devs) {
1354 + xrx200_hw_cleanup(&xrx200_hw);
1355 + dev_err(&pdev->dev, "failed to load interfaces\n");
1356 + return -ENOENT;
1357 + }
1358 +
1359 + /* set wan port mask */
1360 + ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
1361 +
1362 + for (i = 0; i < xrx200_hw.num_devs; i++) {
1363 + xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
1364 + xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
1365 + }
1366 +
1367 + /* setup NAPI */
1368 + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
1369 + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
1370 + &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
1371 +
1372 + platform_set_drvdata(pdev, &xrx200_hw);
1373 +
1374 + return 0;
1375 +}
1376 +
1377 +static int xrx200_remove(struct platform_device *pdev)
1378 +{
1379 + struct net_device *dev = platform_get_drvdata(pdev);
1380 + struct xrx200_priv *priv;
1381 +
1382 + if (!dev)
1383 + return 0;
1384 +
1385 + priv = netdev_priv(dev);
1386 +
1387 + /* free stack related instances */
1388 + netif_stop_queue(dev);
1389 + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
1390 +
1391 + /* shut down hardware */
1392 + xrx200_hw_cleanup(&xrx200_hw);
1393 +
1394 + /* remove the actual device */
1395 + unregister_netdev(dev);
1396 + free_netdev(dev);
1397 +
1398 + return 0;
1399 +}
1400 +
1401 +static const struct of_device_id xrx200_match[] = {
1402 + { .compatible = "lantiq,xrx200-net" },
1403 + {},
1404 +};
1405 +MODULE_DEVICE_TABLE(of, xrx200_match);
1406 +
1407 +static struct platform_driver xrx200_driver = {
1408 + .probe = xrx200_probe,
1409 + .remove = xrx200_remove,
1410 + .driver = {
1411 + .name = "lantiq,xrx200-net",
1412 + .of_match_table = xrx200_match,
1413 + .owner = THIS_MODULE,
1414 + },
1415 +};
1416 +
1417 +module_platform_driver(xrx200_driver);
1418 +
1419 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1420 +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
1421 +MODULE_LICENSE("GPL");