ea69c5c21e390771455af4c6cd6e6de47c7d3b66
[openwrt/svn-archive/archive.git] / target / linux / lantiq / patches-3.6 / 0112-NET-MIPS-lantiq-adds-xrx200-net.patch
1 From dd440736aa03cbe9fcf49e4bfdbb22c947f8ba67 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Mon, 22 Oct 2012 12:22:23 +0200
4 Subject: [PATCH 112/113] NET: MIPS: lantiq: adds xrx200-net
5
6 ---
7 drivers/net/ethernet/Kconfig | 8 +-
8 drivers/net/ethernet/Makefile | 1 +
9 drivers/net/ethernet/lantiq_pce.h | 163 +++++
10 drivers/net/ethernet/lantiq_xrx200.c | 1159 ++++++++++++++++++++++++++++++++++
11 4 files changed, 1330 insertions(+), 1 deletion(-)
12 create mode 100644 drivers/net/ethernet/lantiq_pce.h
13 create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
14
15 diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
16 index e4ff389..35cb7b0 100644
17 --- a/drivers/net/ethernet/Kconfig
18 +++ b/drivers/net/ethernet/Kconfig
19 @@ -83,7 +83,13 @@ config LANTIQ_ETOP
20 tristate "Lantiq SoC ETOP driver"
21 depends on SOC_TYPE_XWAY
22 ---help---
23 - Support for the MII0 inside the Lantiq SoC
24 + Support for the MII0 inside the Lantiq ADSL SoC
25 +
26 +config LANTIQ_XRX200
27 + tristate "Lantiq SoC XRX200 driver"
28 + depends on SOC_TYPE_XWAY
29 + ---help---
30 + Support for the MII0 inside the Lantiq VDSL SoC
31
32 source "drivers/net/ethernet/marvell/Kconfig"
33 source "drivers/net/ethernet/mellanox/Kconfig"
34 diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
35 index d447307..4f95100 100644
36 --- a/drivers/net/ethernet/Makefile
37 +++ b/drivers/net/ethernet/Makefile
38 @@ -36,6 +36,7 @@ obj-$(CONFIG_IP1000) += icplus/
39 obj-$(CONFIG_JME) += jme.o
40 obj-$(CONFIG_KORINA) += korina.o
41 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
42 +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
43 obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
44 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
45 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
46 diff --git a/drivers/net/ethernet/lantiq_pce.h b/drivers/net/ethernet/lantiq_pce.h
47 new file mode 100644
48 index 0000000..0c38efe
49 --- /dev/null
50 +++ b/drivers/net/ethernet/lantiq_pce.h
51 @@ -0,0 +1,163 @@
52 +/*
53 + * This program is free software; you can redistribute it and/or modify it
54 + * under the terms of the GNU General Public License version 2 as published
55 + * by the Free Software Foundation.
56 + *
57 + * This program is distributed in the hope that it will be useful,
58 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
59 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
60 + * GNU General Public License for more details.
61 + *
62 + * You should have received a copy of the GNU General Public License
63 + * along with this program; if not, write to the Free Software
64 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
65 + *
66 + * Copyright (C) 2010 Lantiq Deutschland GmbH
67 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
68 + *
69 + * PCE microcode extracted from UGW5.2 switch api
70 + */
71 +
72 +/* Switch API Micro Code V0.3 */
73 +enum {
74 + OUT_MAC0 = 0,
75 + OUT_MAC1,
76 + OUT_MAC2,
77 + OUT_MAC3,
78 + OUT_MAC4,
79 + OUT_MAC5,
80 + OUT_ETHTYP,
81 + OUT_VTAG0,
82 + OUT_VTAG1,
83 + OUT_ITAG0,
84 + OUT_ITAG1, /*10 */
85 + OUT_ITAG2,
86 + OUT_ITAG3,
87 + OUT_IP0,
88 + OUT_IP1,
89 + OUT_IP2,
90 + OUT_IP3,
91 + OUT_SIP0,
92 + OUT_SIP1,
93 + OUT_SIP2,
94 + OUT_SIP3, /*20*/
95 + OUT_SIP4,
96 + OUT_SIP5,
97 + OUT_SIP6,
98 + OUT_SIP7,
99 + OUT_DIP0,
100 + OUT_DIP1,
101 + OUT_DIP2,
102 + OUT_DIP3,
103 + OUT_DIP4,
104 + OUT_DIP5, /*30*/
105 + OUT_DIP6,
106 + OUT_DIP7,
107 + OUT_SESID,
108 + OUT_PROT,
109 + OUT_APP0,
110 + OUT_APP1,
111 + OUT_IGMP0,
112 + OUT_IGMP1,
113 + OUT_IPOFF, /*39*/
114 + OUT_NONE = 63
115 +};
116 +
117 +/* parser's microcode length type */
118 +#define INSTR 0
119 +#define IPV6 1
120 +#define LENACCU 2
121 +
122 +/* parser's microcode flag type */
123 +enum {
124 + FLAG_ITAG = 0,
125 + FLAG_VLAN,
126 + FLAG_SNAP,
127 + FLAG_PPPOE,
128 + FLAG_IPV6,
129 + FLAG_IPV6FL,
130 + FLAG_IPV4,
131 + FLAG_IGMP,
132 + FLAG_TU,
133 + FLAG_HOP,
134 + FLAG_NN1, /*10 */
135 + FLAG_NN2,
136 + FLAG_END,
137 + FLAG_NO, /*13*/
138 +};
139 +
140 +/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
141 +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
142 + { {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
143 +struct pce_microcode {
144 + unsigned short val[4];
145 +/* unsigned short val_2;
146 + unsigned short val_1;
147 + unsigned short val_0;*/
148 +} pce_microcode[] = {
149 + /* value mask ns fields L type flags ipv4_len */
150 + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
151 + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
152 + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
153 + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
154 + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
155 + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
156 + MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
157 + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
158 + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
159 + MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
160 + MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
161 + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
162 + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
163 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
164 + MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
165 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
166 + MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
167 + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
168 + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
169 + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
170 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
171 + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
172 + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
173 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
174 + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
175 + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
176 + MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
177 + MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
178 + MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
179 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
180 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
181 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
182 + MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
183 + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
184 + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
185 + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
186 + MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT, 1, IPV6, FLAG_NO, 0),
187 + MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
188 + MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
189 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
190 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
191 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
192 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
193 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
194 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
195 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
196 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
197 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
198 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
199 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
200 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
201 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
202 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
203 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
204 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
205 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
206 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
207 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
208 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
209 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
210 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
211 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
212 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
213 + MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
214 +};
215 diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
216 new file mode 100644
217 index 0000000..71abc7d
218 --- /dev/null
219 +++ b/drivers/net/ethernet/lantiq_xrx200.c
220 @@ -0,0 +1,1159 @@
221 +/*
222 + * This program is free software; you can redistribute it and/or modify it
223 + * under the terms of the GNU General Public License version 2 as published
224 + * by the Free Software Foundation.
225 + *
226 + * This program is distributed in the hope that it will be useful,
227 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
228 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
229 + * GNU General Public License for more details.
230 + *
231 + * You should have received a copy of the GNU General Public License
232 + * along with this program; if not, write to the Free Software
233 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
234 + *
235 + * Copyright (C) 2010 Lantiq Deutschland
236 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
237 + */
238 +
239 +#include <linux/etherdevice.h>
240 +#include <linux/module.h>
241 +#include <linux/platform_device.h>
242 +#include <linux/interrupt.h>
243 +#include <linux/clk.h>
244 +#include <asm/delay.h>
245 +
246 +#include <linux/of_net.h>
247 +#include <linux/of_mdio.h>
248 +
249 +#include <xway_dma.h>
250 +#include <lantiq_soc.h>
251 +
252 +#include "lantiq_pce.h"
253 +
254 +#define SW_POLLING
255 +#define SW_ROUTING
256 +
257 +#ifdef SW_ROUTING
258 +#define XRX200_MAX_DEV 2
259 +#else
260 +#define XRX200_MAX_DEV 1
261 +#endif
262 +
263 +#define XRX200_MAX_PORT 7
264 +#define XRX200_MAX_DMA 8
265 +
266 +#define XRX200_HEADROOM 4
267 +
268 +#define XRX200_TX_TIMEOUT (10 * HZ)
269 +
270 +/* port type */
271 +#define XRX200_PORT_TYPE_PHY 1
272 +#define XRX200_PORT_TYPE_MAC 2
273 +
274 +/* DMA */
275 +#define XRX200_DMA_CRC_LEN 0x4
276 +#define XRX200_DMA_DATA_LEN 0x600
277 +#define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
278 +#define XRX200_DMA_RX 0
279 +#define XRX200_DMA_TX 1
280 +
281 +/* fetch / store dma */
282 +#define FDMA_PCTRL0 0x2A00
283 +#define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
284 +#define SDMA_PCTRL0 0x2F00
285 +#define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
286 +
287 +/* buffer management */
288 +#define BM_PCFG0 0x200
289 +#define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
290 +
291 +/* MDIO */
292 +#define MDIO_GLOB 0x0000
293 +#define MDIO_CTRL 0x0020
294 +#define MDIO_READ 0x0024
295 +#define MDIO_WRITE 0x0028
296 +#define MDIO_PHY0 0x0054
297 +#define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
298 +#define MDIO_CLK_CFG0 0x002C
299 +#define MDIO_CLK_CFG1 0x0030
300 +
301 +#define MDIO_GLOB_ENABLE 0x8000
302 +#define MDIO_BUSY BIT(12)
303 +#define MDIO_RD BIT(11)
304 +#define MDIO_WR BIT(10)
305 +#define MDIO_MASK 0x1f
306 +#define MDIO_ADDRSHIFT 5
307 +#define MDIO1_25MHZ 9
308 +
309 +#define MDIO_PHY_LINK_DOWN 0x4000
310 +#define MDIO_PHY_LINK_UP 0x2000
311 +
312 +#define MDIO_PHY_SPEED_M10 0x0000
313 +#define MDIO_PHY_SPEED_M100 0x0800
314 +#define MDIO_PHY_SPEED_G1 0x1000
315 +
316 +#define MDIO_PHY_FDUP_EN 0x0600
317 +#define MDIO_PHY_FDUP_DIS 0x0200
318 +
319 +#define MDIO_PHY_LINK_MASK 0x6000
320 +#define MDIO_PHY_SPEED_MASK 0x1800
321 +#define MDIO_PHY_FDUP_MASK 0x0600
322 +#define MDIO_PHY_ADDR_MASK 0x001f
323 +#define MDIO_UPDATE_MASK	(MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
324 +				MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK)
325 +
326 +/* MII */
327 +#define MII_CFG(p) (p * 8)
328 +
329 +#define MII_CFG_EN BIT(14)
330 +
331 +#define MII_CFG_MODE_MIIP 0x0
332 +#define MII_CFG_MODE_MIIM 0x1
333 +#define MII_CFG_MODE_RMIIP 0x2
334 +#define MII_CFG_MODE_RMIIM 0x3
335 +#define MII_CFG_MODE_RGMII 0x4
336 +#define MII_CFG_MODE_MASK 0xf
337 +
338 +#define MII_CFG_RATE_M2P5 0x00
339 +#define MII_CFG_RATE_M25 0x10
340 +#define MII_CFG_RATE_M125 0x20
341 +#define MII_CFG_RATE_M50 0x30
342 +#define MII_CFG_RATE_AUTO 0x40
343 +#define MII_CFG_RATE_MASK 0x70
344 +
345 +/* cpu port mac */
346 +#define PMAC_HD_CTL 0x0000
347 +#define PMAC_RX_IPG 0x0024
348 +#define PMAC_EWAN 0x002c
349 +
350 +#define PMAC_IPG_MASK 0xf
351 +#define PMAC_HD_CTL_AS 0x0008
352 +#define PMAC_HD_CTL_AC 0x0004
353 +#define PMAC_HD_CTL_RXSH 0x0040
354 +#define PMAC_HD_CTL_AST 0x0080
355 +
356 +/* PCE */
357 +#define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
358 +#define PCE_TBL_MASK 0x1120
359 +#define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
360 +#define PCE_TBL_ADDR 0x1138
361 +#define PCE_TBL_CTRL 0x113c
362 +#define PCE_PMAP1 0x114c
363 +#define PCE_PMAP2 0x1150
364 +#define PCE_PMAP3 0x1154
365 +#define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
366 +#define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
367 +
368 +#define PCE_TBL_BUSY BIT(15)
369 +#define PCE_TBL_CFG_ADDR_MASK 0x1f
370 +#define PCE_TBL_CFG_ADWR 0x20
371 +#define PCE_TBL_CFG_ADWR_MASK 0x60
372 +#define PCE_INGRESS BIT(11)
373 +
374 +/* buffer management */
375 +#define BM_PCFG(p) (0x200 + (p * 8))
376 +
377 +/* special tag in TX path header */
378 +#define SPID_SHIFT 24
379 +#define DPID_SHIFT 16
380 +#define DPID_ENABLE 1
381 +#define SPID_CPU_PORT 2
382 +
383 +#define SPPID_MASK 0x7
384 +#define SPPID_SHIFT 4
385 +
386 +/* MII regs not yet in linux */
387 +#define MDIO_DEVAD_NONE (-1)
388 +#define ADVERTIZE_MPD (1 << 10)
389 +
390 +struct xrx200_port {
391 + u8 num;
392 + u8 phy_addr;
393 + u16 flags;
394 + phy_interface_t phy_if;
395 +
396 + int link;
397 +
398 + struct phy_device *phydev;
399 + struct device_node *phy_node;
400 +};
401 +
402 +struct xrx200_chan {
403 + int idx;
404 + int refcount;
405 + int tx_free;
406 +
407 + struct net_device dummy_dev;
408 + struct net_device *devs[XRX200_MAX_DEV];
409 +
410 + struct napi_struct napi;
411 + struct ltq_dma_channel dma;
412 + struct sk_buff *skb[LTQ_DESC_NUM];
413 +};
414 +
415 +struct xrx200_hw {
416 + struct clk *clk;
417 + struct mii_bus *mii_bus;
418 +
419 + struct xrx200_chan chan[XRX200_MAX_DMA];
420 +
421 + struct net_device *devs[XRX200_MAX_DEV];
422 + int num_devs;
423 +
424 + int port_map[XRX200_MAX_PORT];
425 + unsigned short wan_map;
426 +
427 + spinlock_t lock;
428 +};
429 +
430 +struct xrx200_priv {
431 + struct net_device_stats stats;
432 + int id;
433 +
434 + struct xrx200_port port[XRX200_MAX_PORT];
435 + int num_port;
436 + int wan;
437 + const void *mac;
438 +
439 + struct xrx200_hw *hw;
440 +};
441 +
442 +static __iomem void *xrx200_switch_membase;
443 +static __iomem void *xrx200_mii_membase;
444 +static __iomem void *xrx200_mdio_membase;
445 +static __iomem void *xrx200_pmac_membase;
446 +
447 +#define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
448 +#define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
449 +#define ltq_switch_w32_mask(x, y, z) \
450 + ltq_w32_mask(x, y, xrx200_switch_membase + (z))
451 +
452 +#define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
453 +#define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
454 +#define ltq_mdio_w32_mask(x, y, z) \
455 + ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
456 +
457 +#define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
458 +#define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
459 +#define ltq_mii_w32_mask(x, y, z) \
460 + ltq_w32_mask(x, y, xrx200_mii_membase + (z))
461 +
462 +#define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
463 +#define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
464 +#define ltq_pmac_w32_mask(x, y, z) \
465 + ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
466 +
467 +static int xrx200_open(struct net_device *dev)
468 +{
469 + struct xrx200_priv *priv = netdev_priv(dev);
470 + unsigned long flags;
471 + int i;
472 +
473 + for (i = 0; i < XRX200_MAX_DMA; i++) {
474 + if (!priv->hw->chan[i].dma.irq)
475 + continue;
476 + spin_lock_irqsave(&priv->hw->lock, flags);
477 + if (!priv->hw->chan[i].refcount) {
478 + napi_enable(&priv->hw->chan[i].napi);
479 + ltq_dma_open(&priv->hw->chan[i].dma);
480 + }
481 + priv->hw->chan[i].refcount++;
482 + spin_unlock_irqrestore(&priv->hw->lock, flags);
483 + }
484 + for (i = 0; i < priv->num_port; i++)
485 + if (priv->port[i].phydev)
486 + phy_start(priv->port[i].phydev);
487 + netif_start_queue(dev);
488 +
489 + return 0;
490 +}
491 +
492 +static int xrx200_close(struct net_device *dev)
493 +{
494 + struct xrx200_priv *priv = netdev_priv(dev);
495 + unsigned long flags;
496 + int i;
497 +
498 + netif_stop_queue(dev);
499 +
500 + for (i = 0; i < priv->num_port; i++)
501 + if (priv->port[i].phydev)
502 + phy_stop(priv->port[i].phydev);
503 +
504 + for (i = 0; i < XRX200_MAX_DMA; i++) {
505 + if (!priv->hw->chan[i].dma.irq)
506 + continue;
507 + spin_lock_irqsave(&priv->hw->lock, flags);
508 + priv->hw->chan[i].refcount--;
509 + if (!priv->hw->chan[i].refcount) {
510 + napi_disable(&priv->hw->chan[i].napi);
511 +			napi_disable(&priv->hw->chan[i].napi);
512 +			ltq_dma_close(&priv->hw->chan[i].dma);
513 + spin_unlock_irqrestore(&priv->hw->lock, flags);
514 + }
515 +
516 + return 0;
517 +}
518 +
519 +static int xrx200_alloc_skb(struct xrx200_chan *ch)
520 +{
521 +#define DMA_PAD (NET_IP_ALIGN) // + NET_SKB_PAD)
522 + ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
523 + if (!ch->skb[ch->dma.desc])
524 + return -ENOMEM;
525 +
526 + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
527 + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
528 + DMA_FROM_DEVICE);
529 + ch->dma.desc_base[ch->dma.desc].addr =
530 + CPHYSADDR(ch->skb[ch->dma.desc]->data);
531 + ch->dma.desc_base[ch->dma.desc].ctl =
532 + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
533 + XRX200_DMA_DATA_LEN;
534 + skb_reserve(ch->skb[ch->dma.desc], DMA_PAD);
535 +
536 + return 0;
537 +}
538 +
539 +static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
540 +{
541 + struct net_device *dev = ch->devs[id];
542 + struct xrx200_priv *priv = netdev_priv(dev);
543 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
544 + struct sk_buff *skb = ch->skb[ch->dma.desc];
545 + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - XRX200_DMA_CRC_LEN;
546 + unsigned long flags;
547 +
548 + spin_lock_irqsave(&priv->hw->lock, flags);
549 + if (xrx200_alloc_skb(ch)) {
550 + netdev_err(dev,
551 + "failed to allocate new rx buffer, stopping DMA\n");
552 + ltq_dma_close(&ch->dma);
553 + }
554 +
555 + ch->dma.desc++;
556 + ch->dma.desc %= LTQ_DESC_NUM;
557 + spin_unlock_irqrestore(&priv->hw->lock, flags);
558 +
559 + skb_put(skb, len);
560 +#ifdef SW_ROUTING
561 + skb_pull(skb, 8);
562 +#endif
563 + skb->dev = dev;
564 + skb->protocol = eth_type_trans(skb, dev);
565 + netif_receive_skb(skb);
566 + priv->stats.rx_packets++;
567 + priv->stats.rx_bytes+=len;
568 +}
569 +
570 +static int xrx200_poll_rx(struct napi_struct *napi, int budget)
571 +{
572 + struct xrx200_chan *ch = container_of(napi,
573 + struct xrx200_chan, napi);
574 + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
575 + int rx = 0;
576 + int complete = 0;
577 + unsigned long flags;
578 +
579 + while ((rx < budget) && !complete) {
580 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
581 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
582 +#ifdef SW_ROUTING
583 + struct sk_buff *skb = ch->skb[ch->dma.desc];
584 + u32 *special_tag = (u32*)skb->data;
585 + int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
586 + xrx200_hw_receive(ch, priv->hw->port_map[port]);
587 +#else
588 + xrx200_hw_receive(ch, 0);
589 +#endif
590 + rx++;
591 + } else {
592 + complete = 1;
593 + }
594 + }
595 + if (complete || !rx) {
596 + napi_complete(&ch->napi);
597 + spin_lock_irqsave(&priv->hw->lock, flags);
598 + ltq_dma_ack_irq(&ch->dma);
599 + spin_unlock_irqrestore(&priv->hw->lock, flags);
600 + }
601 + return rx;
602 +}
603 +
604 +static int xrx200_poll_tx(struct napi_struct *napi, int budget)
605 +{
606 + struct xrx200_chan *ch =
607 + container_of(napi, struct xrx200_chan, napi);
608 + struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
609 + unsigned long flags;
610 + int i;
611 +
612 + spin_lock_irqsave(&priv->hw->lock, flags);
613 + while ((ch->dma.desc_base[ch->tx_free].ctl &
614 + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
615 + dev_kfree_skb_any(ch->skb[ch->tx_free]);
616 + ch->skb[ch->tx_free] = NULL;
617 + memset(&ch->dma.desc_base[ch->tx_free], 0,
618 + sizeof(struct ltq_dma_desc));
619 + ch->tx_free++;
620 + ch->tx_free %= LTQ_DESC_NUM;
621 + }
622 + spin_unlock_irqrestore(&priv->hw->lock, flags);
623 +
624 + for (i = 0; i < XRX200_MAX_DEV; i++) {
625 + struct netdev_queue *txq =
626 + netdev_get_tx_queue(ch->devs[i], 0);
627 + if (netif_tx_queue_stopped(txq))
628 + netif_tx_start_queue(txq);
629 + }
630 + napi_complete(&ch->napi);
631 + spin_lock_irqsave(&priv->hw->lock, flags);
632 + ltq_dma_ack_irq(&ch->dma);
633 + spin_unlock_irqrestore(&priv->hw->lock, flags);
634 +
635 + return 1;
636 +}
637 +
638 +static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
639 +{
640 + struct xrx200_priv *priv = netdev_priv(dev);
641 +
642 + return &priv->stats;
643 +}
644 +
645 +static void xrx200_tx_timeout(struct net_device *dev)
646 +{
647 + struct xrx200_priv *priv = netdev_priv(dev);
648 +
649 + printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
650 +
651 + priv->stats.tx_errors++;
652 + netif_wake_queue(dev);
653 +}
654 +
655 +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
656 +{
657 + int queue = skb_get_queue_mapping(skb);
658 + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
659 + struct xrx200_priv *priv = netdev_priv(dev);
660 + struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
661 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
662 + unsigned long flags;
663 + u32 byte_offset;
664 + int len;
665 +#ifdef SW_ROUTING
666 + u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
667 +#endif
668 +
669 + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
670 +
671 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
672 + netdev_err(dev, "tx ring full\n");
673 + netif_tx_stop_queue(txq);
674 + return NETDEV_TX_BUSY;
675 + }
676 +#ifdef SW_ROUTING
677 + if(priv->id)
678 + special_tag |= (1 << DPID_SHIFT);
679 + if(skb_headroom(skb) < 4) {
680 + struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
681 + dev_kfree_skb_any(skb);
682 + skb = tmp;
683 + }
684 + skb_push(skb, 4);
685 + memcpy(skb->data, &special_tag, sizeof(u32));
686 + len += 4;
687 +#endif
688 +
689 + /* dma needs to start on a 16 byte aligned address */
690 + byte_offset = CPHYSADDR(skb->data) % 16;
691 + ch->skb[ch->dma.desc] = skb;
692 +
693 + dev->trans_start = jiffies;
694 +
695 + spin_lock_irqsave(&priv->hw->lock, flags);
696 + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
697 + DMA_TO_DEVICE)) - byte_offset;
698 + wmb();
699 + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
700 + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
701 + ch->dma.desc++;
702 + ch->dma.desc %= LTQ_DESC_NUM;
703 + spin_unlock_irqrestore(&priv->hw->lock, flags);
704 +
705 + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
706 + netif_tx_stop_queue(txq);
707 +
708 + priv->stats.tx_packets++;
709 + priv->stats.tx_bytes+=len;
710 +
711 + return NETDEV_TX_OK;
712 +}
713 +
714 +static irqreturn_t xrx200_dma_irq(int irq, void *priv)
715 +{
716 + struct xrx200_hw *hw = priv;
717 + int ch = irq - XRX200_DMA_IRQ;
718 +
719 + napi_schedule(&hw->chan[ch].napi);
720 +
721 + return IRQ_HANDLED;
722 +}
723 +
724 +static int xrx200_dma_init(struct xrx200_hw *hw)
725 +{
726 + int i, err = 0;
727 +
728 + ltq_dma_init_port(DMA_PORT_ETOP);
729 +
730 + for (i = 0; i < 8 && !err; i++) {
731 + int irq = XRX200_DMA_IRQ + i;
732 + struct xrx200_chan *ch = &hw->chan[i];
733 +
734 + ch->idx = ch->dma.nr = i;
735 +
736 + if (i == XRX200_DMA_TX) {
737 + ltq_dma_alloc_tx(&ch->dma);
738 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
739 + } else if (i == XRX200_DMA_RX) {
740 + ltq_dma_alloc_rx(&ch->dma);
741 + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
742 + ch->dma.desc++)
743 + if (xrx200_alloc_skb(ch))
744 + err = -ENOMEM;
745 + ch->dma.desc = 0;
746 + err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
747 + } else
748 + continue;
749 +
750 + if (!err)
751 + ch->dma.irq = irq;
752 + }
753 +
754 + return err;
755 +}
756 +
757 +#ifdef SW_POLLING
758 +static void xrx200_gmac_update(struct xrx200_port *port)
759 +{
760 + u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
761 + u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
762 + u16 miirate = 0;
763 +
764 + switch (port->phydev->speed) {
765 + case SPEED_1000:
766 + phyaddr |= MDIO_PHY_SPEED_G1;
767 + miirate = MII_CFG_RATE_M125;
768 + break;
769 +
770 + case SPEED_100:
771 + phyaddr |= MDIO_PHY_SPEED_M100;
772 + switch (miimode) {
773 + case MII_CFG_MODE_RMIIM:
774 + case MII_CFG_MODE_RMIIP:
775 + miirate = MII_CFG_RATE_M50;
776 + break;
777 + default:
778 + miirate = MII_CFG_RATE_M25;
779 + break;
780 + }
781 + break;
782 +
783 + default:
784 + phyaddr |= MDIO_PHY_SPEED_M10;
785 + miirate = MII_CFG_RATE_M2P5;
786 + break;
787 + }
788 +
789 + if (port->phydev->link)
790 + phyaddr |= MDIO_PHY_LINK_UP;
791 + else
792 + phyaddr |= MDIO_PHY_LINK_DOWN;
793 +
794 + if (port->phydev->duplex == DUPLEX_FULL)
795 + phyaddr |= MDIO_PHY_FDUP_EN;
796 + else
797 + phyaddr |= MDIO_PHY_FDUP_DIS;
798 +
799 + ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
800 + ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
801 + udelay(1);
802 +}
803 +#else
804 +static void xrx200_gmac_update(struct xrx200_port *port)
805 +{
806 +
807 +}
808 +#endif
809 +
810 +static void xrx200_mdio_link(struct net_device *dev)
811 +{
812 + struct xrx200_priv *priv = netdev_priv(dev);
813 + int i;
814 +
815 + for (i = 0; i < priv->num_port; i++) {
816 + if (!priv->port[i].phydev)
817 + continue;
818 +
819 + if (priv->port[i].link != priv->port[i].phydev->link) {
820 + xrx200_gmac_update(&priv->port[i]);
821 + priv->port[i].link = priv->port[i].phydev->link;
822 + netdev_info(dev, "port %d %s link\n",
823 + priv->port[i].num,
824 + (priv->port[i].link)?("got"):("lost"));
825 + }
826 + }
827 +}
828 +
829 +static inline int xrx200_mdio_poll(struct mii_bus *bus)
830 +{
831 + unsigned cnt = 10000;
832 +
833 + while (likely(cnt--)) {
834 + unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
835 + if ((ctrl & MDIO_BUSY) == 0)
836 + return 0;
837 + }
838 +
839 + return 1;
840 +}
841 +
842 +static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
843 +{
844 + if (xrx200_mdio_poll(bus))
845 + return 1;
846 +
847 + ltq_mdio_w32(val, MDIO_WRITE);
848 + ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
849 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
850 + (reg & MDIO_MASK),
851 + MDIO_CTRL);
852 +
853 + return 0;
854 +}
855 +
856 +static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
857 +{
858 + if (xrx200_mdio_poll(bus))
859 + return -1;
860 +
861 + ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
862 + ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
863 + (reg & MDIO_MASK),
864 + MDIO_CTRL);
865 +
866 + if (xrx200_mdio_poll(bus))
867 + return -1;
868 +
869 + return ltq_mdio_r32(MDIO_READ);
870 +}
871 +
872 +static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
873 +{
874 + struct xrx200_priv *priv = netdev_priv(dev);
875 + struct phy_device *phydev = NULL;
876 + unsigned val;
877 +
878 + phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
879 +
880 + if (!phydev) {
881 + netdev_err(dev, "no PHY found\n");
882 + return -ENODEV;
883 + }
884 +
885 + phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
886 + 0, port->phy_if);
887 +
888 + if (IS_ERR(phydev)) {
889 + netdev_err(dev, "Could not attach to PHY\n");
890 + return PTR_ERR(phydev);
891 + }
892 +
893 + phydev->supported &= (SUPPORTED_10baseT_Half
894 + | SUPPORTED_10baseT_Full
895 + | SUPPORTED_100baseT_Half
896 + | SUPPORTED_100baseT_Full
897 + | SUPPORTED_1000baseT_Half
898 + | SUPPORTED_1000baseT_Full
899 + | SUPPORTED_Autoneg
900 + | SUPPORTED_MII
901 + | SUPPORTED_TP);
902 + phydev->advertising = phydev->supported;
903 + port->phydev = phydev;
904 +
905 + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
906 + dev->name, phydev->drv->name,
907 + dev_name(&phydev->dev), phydev->irq);
908 +
909 +#ifdef SW_POLLING
910 + phy_read_status(phydev);
911 +
912 + val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
913 + val |= ADVERTIZE_MPD;
914 + xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
915 + xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
916 +
917 + phy_start_aneg(phydev);
918 +#endif
919 + return 0;
920 +}
921 +
922 +static void xrx200_port_config(struct xrx200_priv *priv,
923 + const struct xrx200_port *port)
924 +{
925 + u16 miimode = 0;
926 +
927 + switch (port->num) {
928 + case 0: /* xMII0 */
929 + case 1: /* xMII1 */
930 + switch (port->phy_if) {
931 + case PHY_INTERFACE_MODE_MII:
932 + if (port->flags & XRX200_PORT_TYPE_PHY)
933 + /* MII MAC mode, connected to external PHY */
934 + miimode = MII_CFG_MODE_MIIM;
935 + else
936 + /* MII PHY mode, connected to external MAC */
937 + miimode = MII_CFG_MODE_MIIP;
938 + break;
939 + case PHY_INTERFACE_MODE_RMII:
940 + if (port->flags & XRX200_PORT_TYPE_PHY)
941 + /* RMII MAC mode, connected to external PHY */
942 + miimode = MII_CFG_MODE_RMIIM;
943 + else
944 + /* RMII PHY mode, connected to external MAC */
945 + miimode = MII_CFG_MODE_RMIIP;
946 + break;
947 + case PHY_INTERFACE_MODE_RGMII:
948 + /* RGMII MAC mode, connected to external PHY */
949 + miimode = MII_CFG_MODE_RGMII;
950 + break;
951 + default:
952 + break;
953 + }
954 + break;
955 + case 2: /* internal GPHY0 */
956 + case 3: /* internal GPHY0 */
957 + case 4: /* internal GPHY1 */
958 + switch (port->phy_if) {
959 + case PHY_INTERFACE_MODE_MII:
960 + case PHY_INTERFACE_MODE_GMII:
961 + /* MII MAC mode, connected to internal GPHY */
962 + miimode = MII_CFG_MODE_MIIM;
963 + break;
964 + default:
965 + break;
966 + }
967 + break;
968 + case 5: /* internal GPHY1 or xMII2 */
969 + switch (port->phy_if) {
970 + case PHY_INTERFACE_MODE_MII:
971 + /* MII MAC mode, connected to internal GPHY */
972 + miimode = MII_CFG_MODE_MIIM;
973 + break;
974 + case PHY_INTERFACE_MODE_RGMII:
975 + /* RGMII MAC mode, connected to external PHY */
976 + miimode = MII_CFG_MODE_RGMII;
977 + break;
978 + default:
979 + break;
980 + }
981 + break;
982 + default:
983 + break;
984 + }
985 +
986 + ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
987 + MII_CFG(port->num));
988 +}
989 +
990 +static int xrx200_init(struct net_device *dev)
991 +{
992 + struct xrx200_priv *priv = netdev_priv(dev);
993 + struct sockaddr mac;
994 + int err, i;
995 +
996 +#ifndef SW_POLLING
997 + unsigned int reg = 0;
998 +
999 + /* enable auto polling */
1000 + for (i = 0; i < priv->num_port; i++)
1001 + reg |= BIT(priv->port[i].num);
1002 + ltq_mdio_w32(reg, MDIO_CLK_CFG0);
1003 + ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
1004 +#endif
1005 +
1006 + /* setup each port */
1007 + for (i = 0; i < priv->num_port; i++)
1008 + xrx200_port_config(priv, &priv->port[i]);
1009 +
1010 + memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
1011 + if (!is_valid_ether_addr(mac.sa_data)) {
1012 + pr_warn("net-xrx200: invalid MAC, using random\n");
1013 + eth_random_addr(mac.sa_data);
1014 + dev->addr_assign_type |= NET_ADDR_RANDOM;
1015 + }
1016 +
1017 + err = eth_mac_addr(dev, &mac);
1018 + if (err)
1019 + goto err_netdev;
1020 +
1021 + for (i = 0; i < priv->num_port; i++)
1022 + if (xrx200_mdio_probe(dev, &priv->port[i]))
1023 + pr_warn("xrx200-mdio: probing phy of port %d failed\n",
1024 + priv->port[i].num);
1025 +
1026 + return 0;
1027 +
1028 +err_netdev:
1029 + unregister_netdev(dev);
1030 + free_netdev(dev);
1031 + return err;
1032 +}
1033 +
1034 +static void xrx200_pci_microcode(void)
1035 +{
1036 + int i;
1037 +
1038 + ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
1039 + PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
1040 + ltq_switch_w32(0, PCE_TBL_MASK);
1041 +
1042 + for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
1043 + ltq_switch_w32(i, PCE_TBL_ADDR);
1044 + ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
1045 + ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
1046 + ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
1047 + ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
1048 +
1049 +		/* start the table access */
1050 + ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
1051 + while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
1052 + }
1053 +
1054 + /* tell the switch that the microcode is loaded */
1055 + ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
1056 +}
1057 +
1058 +static void xrx200_hw_init(struct xrx200_hw *hw)
1059 +{
1060 + int i;
1061 +
1062 + /* enable clock gate */
1063 + clk_enable(hw->clk);
1064 +
1065 + /*
1066 +	 * TODO: we should really disable all phys/miis here and explicitly
1067 +	 * enable them in the device specific init function
1068 + */
1069 +
1070 + /* disable port fetch/store dma */
1071 + for (i = 0; i < 7; i++ ) {
1072 + ltq_switch_w32(0, FDMA_PCTRLx(i));
1073 + ltq_switch_w32(0, SDMA_PCTRLx(i));
1074 + }
1075 +
1076 + /* enable Switch */
1077 + ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
1078 +
1079 + /* load the pce microcode */
1080 + xrx200_pci_microcode();
1081 +
1082 +	/* Default unknown Broadcast/Multicast/Unicast port maps */
1083 + ltq_switch_w32(0x7f, PCE_PMAP1);
1084 + ltq_switch_w32(0x7f, PCE_PMAP2);
1085 + ltq_switch_w32(0x7f, PCE_PMAP3);
1086 +
1087 + /* RMON Counter Enable for all physical ports */
1088 + for (i = 0; i < 7; i++)
1089 + ltq_switch_w32(0x1, BM_PCFG(i));
1090 +
1091 + /* disable auto polling */
1092 + ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
1093 +
1094 + /* enable port statistic counters */
1095 + for (i = 0; i < 7; i++)
1096 + ltq_switch_w32(0x1, BM_PCFGx(i));
1097 +
1098 + /* set IPG to 12 */
1099 + ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
1100 +
1101 +#ifdef SW_ROUTING
1102 + /* enable status header, enable CRC */
1103 + ltq_pmac_w32_mask(0,
1104 + PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC,
1105 + PMAC_HD_CTL);
1106 +#else
1107 + /* disable status header, enable CRC */
1108 + ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
1109 + PMAC_HD_CTL_AC,
1110 + PMAC_HD_CTL);
1111 +#endif
1112 +
1113 + /* enable port fetch/store dma */
1114 + for (i = 0; i < 7; i++ ) {
1115 + ltq_switch_w32_mask(0, 0x01, FDMA_PCTRLx(i));
1116 + ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
1117 + ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
1118 + }
1119 +
1120 + /* enable special tag insertion on cpu port */
1121 + ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
1122 +}
1123 +
1124 +static void xrx200_hw_cleanup(struct xrx200_hw *hw)
1125 +{
1126 + int i;
1127 +
1128 + /* disable the switch */
1129 + ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
1130 +
1131 + /* free the channels and IRQs */
1132 + for (i = 0; i < 2; i++) {
1133 + ltq_dma_free(&hw->chan[i].dma);
1134 + if (hw->chan[i].dma.irq)
1135 + free_irq(hw->chan[i].dma.irq, hw);
1136 + }
1137 +
1138 + /* free the allocated RX ring */
1139 + for (i = 0; i < LTQ_DESC_NUM; i++)
1140 + dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
1141 +
1142 + /* clear the mdio bus */
1143 + mdiobus_unregister(hw->mii_bus);
1144 + mdiobus_free(hw->mii_bus);
1145 +
1146 + /* release the clock */
1147 + clk_disable(hw->clk);
1148 + clk_put(hw->clk);
1149 +}
1150 +
1151 +static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
1152 +{
1153 + hw->mii_bus = mdiobus_alloc();
1154 + if (!hw->mii_bus)
1155 + return -ENOMEM;
1156 +
1157 + hw->mii_bus->read = xrx200_mdio_rd;
1158 + hw->mii_bus->write = xrx200_mdio_wr;
1159 + hw->mii_bus->name = "lantiq,xrx200-mdio";
1160 + snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1161 +
1162 + if (of_mdiobus_register(hw->mii_bus, np)) {
1163 + mdiobus_free(hw->mii_bus);
1164 + return -ENXIO;
1165 + }
1166 +
1167 + return 0;
1168 +}
1169 +
1170 +static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
1171 +{
1172 + const __be32 *addr, *id = of_get_property(port, "reg", NULL);
1173 + struct xrx200_port *p = &priv->port[priv->num_port];
1174 +
1175 + if (!id)
1176 + return;
1177 +
1178 + memset(p, 0, sizeof(struct xrx200_port));
1179 + p->phy_node = of_parse_phandle(port, "phy-handle", 0);
1180 + addr = of_get_property(p->phy_node, "reg", NULL);
1181 + if (!addr)
1182 + return;
1183 +
1184 + p->num = *id;
1185 + p->phy_addr = *addr;
1186 + p->phy_if = of_get_phy_mode(port);
1187 + if (p->phy_addr > 0x10)
1188 + p->flags = XRX200_PORT_TYPE_MAC;
1189 + else
1190 + p->flags = XRX200_PORT_TYPE_PHY;
1191 + priv->num_port++;
1192 +
1193 + /* is this port a wan port ? */
1194 + if (priv->wan)
1195 + priv->hw->wan_map |= BIT(p->num);
1196 +
1197 + /* store the port id in the hw struct so we can map ports -> devices */
1198 + priv->hw->port_map[p->num] = priv->hw->num_devs;
1199 +}
1200 +
1201 +static const struct net_device_ops xrx200_netdev_ops = {
1202 + .ndo_init = xrx200_init,
1203 + .ndo_open = xrx200_open,
1204 + .ndo_stop = xrx200_close,
1205 + .ndo_start_xmit = xrx200_start_xmit,
1206 + .ndo_set_mac_address = eth_mac_addr,
1207 + .ndo_validate_addr = eth_validate_addr,
1208 + .ndo_change_mtu = eth_change_mtu,
1209 + .ndo_get_stats = xrx200_get_stats,
1210 + .ndo_tx_timeout = xrx200_tx_timeout,
1211 +};
1212 +
1213 +static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
1214 +{
1215 + struct xrx200_priv *priv;
1216 + struct device_node *port;
1217 + const __be32 *wan;
1218 +
1219 + /* alloc the network device */
1220 + hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
1221 + if (!hw->devs[hw->num_devs])
1222 + return;
1223 +
1224 + /* setup the network device */
1225 + strcpy(hw->devs[hw->num_devs]->name, "eth%d");
1226 + hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
1227 + hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
1228 + hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
1229 +
1230 + /* setup our private data */
1231 + priv = netdev_priv(hw->devs[hw->num_devs]);
1232 + priv->hw = hw;
1233 + priv->mac = of_get_mac_address(iface);
1234 + priv->id = hw->num_devs;
1235 +
1236 + /* is this the wan interface ? */
1237 + wan = of_get_property(iface, "lantiq,wan", NULL);
1238 + if (wan && (*wan == 1))
1239 + priv->wan = 1;
1240 +
1241 + /* load the ports that are part of the interface */
1242 + for_each_child_of_node(iface, port)
1243 + if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
1244 + xrx200_of_port(priv, port);
1245 +
1246 + /* register the actual device */
1247 + if (!register_netdev(hw->devs[hw->num_devs]))
1248 + hw->num_devs++;
1249 +}
1250 +
1251 +static struct xrx200_hw xrx200_hw;
1252 +
1253 +static int __devinit xrx200_probe(struct platform_device *pdev)
1254 +{
1255 + struct resource *res[4];
1256 + struct device_node *mdio_np, *iface_np;
1257 + int i;
1258 +
1259 + /* load the memory ranges */
1260 + for (i = 0; i < 4; i++) {
1261 + res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
1262 + if (!res[i]) {
1263 + dev_err(&pdev->dev, "failed to get resources\n");
1264 + return -ENOENT;
1265 + }
1266 + }
1267 + xrx200_switch_membase = devm_request_and_ioremap(&pdev->dev, res[0]);
1268 + xrx200_mdio_membase = devm_request_and_ioremap(&pdev->dev, res[1]);
1269 + xrx200_mii_membase = devm_request_and_ioremap(&pdev->dev, res[2]);
1270 + xrx200_pmac_membase = devm_request_and_ioremap(&pdev->dev, res[3]);
1271 + if (!xrx200_switch_membase || !xrx200_mdio_membase ||
1272 + !xrx200_mii_membase || !xrx200_pmac_membase) {
1273 + dev_err(&pdev->dev, "failed to request and remap io ranges \n");
1274 + return -ENOMEM;
1275 + }
1276 +
1277 + /* get the clock */
1278 + xrx200_hw.clk = clk_get(&pdev->dev, NULL);
1279 + if (IS_ERR(xrx200_hw.clk)) {
1280 + dev_err(&pdev->dev, "failed to get clock\n");
1281 + return PTR_ERR(xrx200_hw.clk);
1282 + }
1283 +
1284 + /* bring up the dma engine and IP core */
1285 + spin_lock_init(&xrx200_hw.lock);
1286 + xrx200_dma_init(&xrx200_hw);
1287 + xrx200_hw_init(&xrx200_hw);
1288 +
1289 + /* bring up the mdio bus */
1290 + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1291 + "lantiq,xrx200-mdio");
1292 + if (mdio_np)
1293 + if (xrx200_of_mdio(&xrx200_hw, mdio_np))
1294 + dev_err(&pdev->dev, "mdio probe failed\n");
1295 +
1296 + /* load the interfaces */
1297 + for_each_child_of_node(pdev->dev.of_node, iface_np)
1298 + if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
1299 + if (xrx200_hw.num_devs < XRX200_MAX_DEV)
1300 + xrx200_of_iface(&xrx200_hw, iface_np);
1301 + else
1302 + dev_err(&pdev->dev,
1303 + "only %d interfaces allowed\n",
1304 + XRX200_MAX_DEV);
1305 + }
1306 +
1307 + if (!xrx200_hw.num_devs) {
1308 + xrx200_hw_cleanup(&xrx200_hw);
1309 + dev_err(&pdev->dev, "failed to load interfaces\n");
1310 + return -ENOENT;
1311 + }
1312 +
1313 + /* set wan port mask */
1314 + ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
1315 +
1316 + for (i = 0; i < xrx200_hw.num_devs; i++) {
1317 + xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
1318 + xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
1319 + }
1320 +
1321 + /* setup NAPI */
1322 + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
1323 + init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev);
1324 + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
1325 + &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
1326 + netif_napi_add(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev,
1327 + &xrx200_hw.chan[XRX200_DMA_TX].napi, xrx200_poll_tx, 8);
1328 +
1329 + platform_set_drvdata(pdev, &xrx200_hw);
1330 +
1331 + return 0;
1332 +}
1333 +
1334 +static int __devexit xrx200_remove(struct platform_device *pdev)
1335 +{
1336 + struct net_device *dev = platform_get_drvdata(pdev);
1337 + struct xrx200_priv *priv;
1338 +
1339 + if (!dev)
1340 + return 0;
1341 +
1342 + priv = netdev_priv(dev);
1343 +
1344 + /* free stack related instances */
1345 + netif_stop_queue(dev);
1346 + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
1347 + netif_napi_del(&xrx200_hw.chan[XRX200_DMA_TX].napi);
1348 +
1349 + /* shut down hardware */
1350 + xrx200_hw_cleanup(&xrx200_hw);
1351 +
1352 + /* remove the actual device */
1353 + unregister_netdev(dev);
1354 + free_netdev(dev);
1355 +
1356 + return 0;
1357 +}
1358 +
1359 +static const struct of_device_id xrx200_match[] = {
1360 + { .compatible = "lantiq,xrx200-net" },
1361 + {},
1362 +};
1363 +MODULE_DEVICE_TABLE(of, xrx200_match);
1364 +
1365 +static struct platform_driver xrx200_driver = {
1366 + .probe = xrx200_probe,
1367 + .remove = __devexit_p(xrx200_remove),
1368 + .driver = {
1369 + .name = "lantiq,xrx200-net",
1370 + .of_match_table = xrx200_match,
1371 + .owner = THIS_MODULE,
1372 + },
1373 +};
1374 +
1375 +module_platform_driver(xrx200_driver);
1376 +
1377 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1378 +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
1379 +MODULE_LICENSE("GPL");
1380 --
1381 1.7.10.4
1382