1 From 239505c96aa66b4280b7726850235dcb707d2d91 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Fri, 9 Mar 2012 19:03:40 +0100
4 Subject: [PATCH 39/70] NET: adds driver for lantiq vr9 ethernet
5
6 ---
7 .../mips/include/asm/mach-lantiq/xway/lantiq_soc.h | 2 +-
8 arch/mips/lantiq/xway/devices.c | 20 +
9 arch/mips/lantiq/xway/devices.h | 1 +
10 drivers/net/ethernet/Kconfig | 6 +
11 drivers/net/ethernet/Makefile | 1 +
12 drivers/net/ethernet/lantiq_vrx200.c | 1358 ++++++++++++++++++++
13 6 files changed, 1387 insertions(+), 1 deletions(-)
14 create mode 100644 drivers/net/ethernet/lantiq_vrx200.c
15
16 diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
17 index ab2d236..d1b8cc8 100644
18 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
19 +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
20 @@ -102,7 +102,7 @@
21
22 /* GBIT - gigabit switch */
23 #define LTQ_GBIT_BASE_ADDR 0x1E108000
24 -#define LTQ_GBIT_SIZE 0x200
25 +#define LTQ_GBIT_SIZE 0x4000
26
27 /* DMA */
28 #define LTQ_DMA_BASE_ADDR 0x1E104100
29 diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
30 index eab4644d..5efa4f3 100644
31 --- a/arch/mips/lantiq/xway/devices.c
32 +++ b/arch/mips/lantiq/xway/devices.c
33 @@ -83,6 +83,7 @@ static struct platform_device ltq_etop = {
34 .name = "ltq_etop",
35 .resource = ltq_etop_resources,
36 .num_resources = 1,
37 + .id = -1,
38 };
39
40 void __init
41 @@ -96,3 +97,22 @@ ltq_register_etop(struct ltq_eth_data *eth)
42 platform_device_register(&ltq_etop);
43 }
44 }
45 +
46 +/* ethernet */
47 +static struct resource ltq_vrx200_resources[] = {
48 + MEM_RES("gbit", LTQ_GBIT_BASE_ADDR, LTQ_GBIT_SIZE),
49 +};
50 +
51 +static struct platform_device ltq_vrx200 = {
52 + .name = "ltq_vrx200",
53 + .resource = ltq_vrx200_resources,
54 + .num_resources = 1,
55 + .id = -1,
56 +};
57 +
58 +void __init
59 +ltq_register_vrx200(struct ltq_eth_data *eth)
60 +{
61 + ltq_vrx200.dev.platform_data = eth;
62 + platform_device_register(&ltq_vrx200);
63 +}
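+
+/*
+ * Usage sketch (hypothetical board code): a machine setup file fills a
+ * static struct ltq_eth_data (MAC address, port wiring) and calls
+ *	ltq_register_vrx200(&board_eth_data);
+ * before the "ltq_vrx200" platform driver probes. The name board_eth_data
+ * is illustrative only.
+ */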
64 diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
65 index d825cbd..08befd9 100644
66 --- a/arch/mips/lantiq/xway/devices.h
67 +++ b/arch/mips/lantiq/xway/devices.h
68 @@ -17,5 +17,6 @@ extern void ltq_register_gpio_stp(void);
69 extern void ltq_register_ase_asc(void);
70 extern void ltq_register_etop(struct ltq_eth_data *eth);
71 extern void xway_register_nand(struct mtd_partition *parts, int count);
72 +extern void ltq_register_vrx200(struct ltq_eth_data *eth);
73
74 #endif
75 diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
76 index 3474a61..e1caa1b 100644
77 --- a/drivers/net/ethernet/Kconfig
78 +++ b/drivers/net/ethernet/Kconfig
79 @@ -85,6 +85,12 @@ config LANTIQ_ETOP
80 ---help---
81 Support for the MII0 inside the Lantiq SoC
82
83 +config LANTIQ_VRX200
84 + tristate "Lantiq SoC vrx200 driver"
85 + depends on SOC_TYPE_XWAY
86 + ---help---
87 +	  Support for the gigabit ethernet and switch core inside the Lantiq VRX200 (VR9) SoC
88 +
89 source "drivers/net/ethernet/marvell/Kconfig"
90 source "drivers/net/ethernet/mellanox/Kconfig"
91 source "drivers/net/ethernet/micrel/Kconfig"
92 diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
93 index 08d5f03..0c47dc5 100644
94 --- a/drivers/net/ethernet/Makefile
95 +++ b/drivers/net/ethernet/Makefile
96 @@ -36,6 +36,7 @@ obj-$(CONFIG_IP1000) += icplus/
97 obj-$(CONFIG_JME) += jme.o
98 obj-$(CONFIG_KORINA) += korina.o
99 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
100 +obj-$(CONFIG_LANTIQ_VRX200) += lantiq_vrx200.o
101 obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
102 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
103 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
104 diff --git a/drivers/net/ethernet/lantiq_vrx200.c b/drivers/net/ethernet/lantiq_vrx200.c
105 new file mode 100644
106 index 0000000..d79d380
107 --- /dev/null
108 +++ b/drivers/net/ethernet/lantiq_vrx200.c
109 @@ -0,0 +1,1358 @@
110 +/*
111 + * This program is free software; you can redistribute it and/or modify it
112 + * under the terms of the GNU General Public License version 2 as published
113 + * by the Free Software Foundation.
114 + *
115 + * This program is distributed in the hope that it will be useful,
116 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
117 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
118 + * GNU General Public License for more details.
119 + *
120 + * You should have received a copy of the GNU General Public License
121 + * along with this program; if not, write to the Free Software
122 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
123 + *
124 + * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
125 + */
126 +
127 +#include <linux/kernel.h>
128 +#include <linux/slab.h>
129 +#include <linux/errno.h>
130 +#include <linux/types.h>
131 +#include <linux/interrupt.h>
132 +#include <linux/uaccess.h>
133 +#include <linux/in.h>
134 +#include <linux/netdevice.h>
135 +#include <linux/etherdevice.h>
136 +#include <linux/phy.h>
137 +#include <linux/ip.h>
138 +#include <linux/tcp.h>
139 +#include <linux/skbuff.h>
140 +#include <linux/mm.h>
141 +#include <linux/platform_device.h>
142 +#include <linux/ethtool.h>
143 +#include <linux/init.h>
144 +#include <linux/delay.h>
145 +#include <linux/io.h>
146 +#include <linux/dma-mapping.h>
147 +#include <linux/module.h>
148 +#include <linux/clk.h>
149 +
150 +#include <asm/checksum.h>
151 +
152 +#include <lantiq_soc.h>
153 +#include <xway_dma.h>
154 +#include <lantiq_platform.h>
155 +
156 +#define LTQ_SWITCH_BASE 0x1E108000
157 +#define LTQ_SWITCH_CORE_BASE LTQ_SWITCH_BASE
158 +#define LTQ_SWITCH_TOP_PDI_BASE LTQ_SWITCH_CORE_BASE
159 +#define LTQ_SWITCH_BM_PDI_BASE (LTQ_SWITCH_CORE_BASE + 4 * 0x40)
160 +#define LTQ_SWITCH_MAC_PDI_0_BASE (LTQ_SWITCH_CORE_BASE + 4 * 0x900)
161 +#define LTQ_SWITCH_MAC_PDI_X_BASE(x) (LTQ_SWITCH_MAC_PDI_0_BASE + x * 0x30)
162 +#define LTQ_SWITCH_TOPLEVEL_BASE (LTQ_SWITCH_BASE + 4 * 0xC40)
163 +#define LTQ_SWITCH_MDIO_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE)
164 +#define LTQ_SWITCH_MII_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x36)
165 +#define LTQ_SWITCH_PMAC_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x82)
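+/*
+ * The PDI sub-block offsets above appear to be 32-bit word offsets from the
+ * switch base address, which is why each one is scaled by 4 into a byte
+ * address.
+ */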
166 +
167 +#define LTQ_ETHSW_MAC_CTRL0_PADEN (1 << 8)
168 +#define LTQ_ETHSW_MAC_CTRL0_FCS (1 << 7)
169 +#define LTQ_ETHSW_MAC_CTRL1_SHORTPRE (1 << 8)
170 +#define LTQ_ETHSW_MAC_CTRL2_MLEN (1 << 3)
171 +#define LTQ_ETHSW_MAC_CTRL2_LCHKL (1 << 2)
172 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_DIS 0
173 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG 1
174 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_TAG 2
175 +#define LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT 9
176 +#define LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS (1 << 6)
177 +#define LTQ_ETHSW_GLOB_CTRL_SE (1 << 15)
178 +#define LTQ_ETHSW_MDC_CFG1_MCEN (1 << 8)
179 +#define LTQ_ETHSW_PMAC_HD_CTL_FC (1 << 10)
180 +#define LTQ_ETHSW_PMAC_HD_CTL_RC (1 << 4)
181 +#define LTQ_ETHSW_PMAC_HD_CTL_AC (1 << 2)
182 +#define ADVERTIZE_MPD (1 << 10)
183 +
184 +#define MDIO_DEVAD_NONE (-1)
185 +
186 +#define LTQ_ETH_RX_BUFFER_CNT PKTBUFSRX
187 +
188 +#define LTQ_MDIO_DRV_NAME "ltq-mdio"
189 +#define LTQ_ETH_DRV_NAME "ltq-eth"
190 +
191 +#define LTQ_ETHSW_MAX_GMAC 1
192 +#define LTQ_ETHSW_PMAC 1
193 +
194 +#define ltq_setbits(a, set) \
195 + ltq_w32(ltq_r32(a) | (set), a)
196 +
197 +enum ltq_reset_modules {
198 + LTQ_RESET_CORE,
199 + LTQ_RESET_DMA,
200 + LTQ_RESET_ETH,
201 + LTQ_RESET_PHY,
202 + LTQ_RESET_HARD,
203 + LTQ_RESET_SOFT,
204 +};
205 +
206 +static inline void
207 +dbg_ltq_writel(void *a, unsigned int b)
208 +{
209 + ltq_w32(b, a);
210 +}
211 +
212 +int ltq_reset_once(enum ltq_reset_modules module, ulong usec);
213 +
214 +struct ltq_ethsw_mac_pdi_x_regs {
215 + u32 pstat; /* Port status */
216 + u32 pisr; /* Interrupt status */
217 + u32 pier; /* Interrupt enable */
218 + u32 ctrl_0; /* Control 0 */
219 + u32 ctrl_1; /* Control 1 */
220 + u32 ctrl_2; /* Control 2 */
221 + u32 ctrl_3; /* Control 3 */
222 + u32 ctrl_4; /* Control 4 */
223 + u32 ctrl_5; /* Control 5 */
224 + u32 ctrl_6; /* Control 6 */
225 + u32 bufst; /* TX/RX buffer control */
226 + u32 testen; /* Test enable */
227 +};
228 +
229 +struct ltq_ethsw_mac_pdi_regs {
230 + struct ltq_ethsw_mac_pdi_x_regs mac[12];
231 +};
232 +
233 +struct ltq_ethsw_mdio_pdi_regs {
234 + u32 glob_ctrl; /* Global control 0 */
235 + u32 rsvd0[7];
236 + u32 mdio_ctrl; /* MDIO control */
237 + u32 mdio_read; /* MDIO read data */
238 + u32 mdio_write; /* MDIO write data */
239 + u32 mdc_cfg_0; /* MDC clock configuration 0 */
240 + u32 mdc_cfg_1; /* MDC clock configuration 1 */
241 + u32 rsvd[3];
242 + u32 phy_addr_5; /* PHY address port 5 */
243 + u32 phy_addr_4; /* PHY address port 4 */
244 + u32 phy_addr_3; /* PHY address port 3 */
245 + u32 phy_addr_2; /* PHY address port 2 */
246 + u32 phy_addr_1; /* PHY address port 1 */
247 + u32 phy_addr_0; /* PHY address port 0 */
248 + u32 mdio_stat_0; /* MDIO PHY polling status port 0 */
249 + u32 mdio_stat_1; /* MDIO PHY polling status port 1 */
250 + u32 mdio_stat_2; /* MDIO PHY polling status port 2 */
251 + u32 mdio_stat_3; /* MDIO PHY polling status port 3 */
252 + u32 mdio_stat_4; /* MDIO PHY polling status port 4 */
253 + u32 mdio_stat_5; /* MDIO PHY polling status port 5 */
254 +};
255 +
256 +struct ltq_ethsw_mii_pdi_regs {
257 + u32 mii_cfg0; /* xMII port 0 configuration */
258 + u32 pcdu0; /* Port 0 clock delay configuration */
259 + u32 mii_cfg1; /* xMII port 1 configuration */
260 + u32 pcdu1; /* Port 1 clock delay configuration */
261 + u32 mii_cfg2; /* xMII port 2 configuration */
262 + u32 rsvd0;
263 + u32 mii_cfg3; /* xMII port 3 configuration */
264 + u32 rsvd1;
265 + u32 mii_cfg4; /* xMII port 4 configuration */
266 + u32 rsvd2;
267 + u32 mii_cfg5; /* xMII port 5 configuration */
268 + u32 pcdu5; /* Port 5 clock delay configuration */
269 +};
270 +
271 +struct ltq_ethsw_pmac_pdi_regs {
272 + u32 hd_ctl; /* PMAC header control */
273 + u32 tl; /* PMAC type/length */
274 + u32 sa1; /* PMAC source address 1 */
275 + u32 sa2; /* PMAC source address 2 */
276 + u32 sa3; /* PMAC source address 3 */
277 + u32 da1; /* PMAC destination address 1 */
278 + u32 da2; /* PMAC destination address 2 */
279 + u32 da3; /* PMAC destination address 3 */
280 + u32 vlan; /* PMAC VLAN */
281 + u32 rx_ipg; /* PMAC interpacket gap in RX direction */
282 + u32 st_etype; /* PMAC special tag ethertype */
283 + u32 ewan; /* PMAC ethernet WAN group */
284 +};
285 +
286 +struct ltq_mdio_phy_addr_reg {
287 + union {
288 + struct {
289 + unsigned rsvd:1;
290 + unsigned lnkst:2; /* Link status control */
291 + unsigned speed:2; /* Speed control */
292 + unsigned fdup:2; /* Full duplex control */
293 + unsigned fcontx:2; /* Flow control mode TX */
294 + unsigned fconrx:2; /* Flow control mode RX */
295 + unsigned addr:5; /* PHY address */
296 + } bits;
297 + u16 val;
298 + };
299 +};
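+/*
+ * This union (and the similar ones below) mirrors the hardware register
+ * layout, so individual fields can be updated through .bits while the whole
+ * 16-bit value is read and written back through .val.
+ */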
300 +
301 +enum ltq_mdio_phy_addr_lnkst {
302 + LTQ_MDIO_PHY_ADDR_LNKST_AUTO = 0,
303 + LTQ_MDIO_PHY_ADDR_LNKST_UP = 1,
304 + LTQ_MDIO_PHY_ADDR_LNKST_DOWN = 2,
305 +};
306 +
307 +enum ltq_mdio_phy_addr_speed {
308 + LTQ_MDIO_PHY_ADDR_SPEED_M10 = 0,
309 + LTQ_MDIO_PHY_ADDR_SPEED_M100 = 1,
310 + LTQ_MDIO_PHY_ADDR_SPEED_G1 = 2,
311 + LTQ_MDIO_PHY_ADDR_SPEED_AUTO = 3,
312 +};
313 +
314 +enum ltq_mdio_phy_addr_fdup {
315 + LTQ_MDIO_PHY_ADDR_FDUP_AUTO = 0,
316 + LTQ_MDIO_PHY_ADDR_FDUP_ENABLE = 1,
317 + LTQ_MDIO_PHY_ADDR_FDUP_DISABLE = 3,
318 +};
319 +
320 +enum ltq_mdio_phy_addr_fcon {
321 + LTQ_MDIO_PHY_ADDR_FCON_AUTO = 0,
322 + LTQ_MDIO_PHY_ADDR_FCON_ENABLE = 1,
323 + LTQ_MDIO_PHY_ADDR_FCON_DISABLE = 3,
324 +};
325 +
326 +struct ltq_mii_mii_cfg_reg {
327 + union {
328 + struct {
329 + unsigned res:1; /* Hardware reset */
330 + unsigned en:1; /* xMII interface enable */
331 + unsigned isol:1; /* xMII interface isolate */
332 + unsigned ldclkdis:1; /* Link down clock disable */
333 + unsigned rsvd:1;
334 + unsigned crs:2; /* CRS sensitivity config */
335 + unsigned rgmii_ibs:1; /* RGMII In Band status */
336 + unsigned rmii:1; /* RMII ref clock direction */
337 + unsigned miirate:3; /* xMII interface clock rate */
338 + unsigned miimode:4; /* xMII interface mode */
339 + } bits;
340 + u16 val;
341 + };
342 +};
343 +
344 +enum ltq_mii_mii_cfg_miirate {
345 + LTQ_MII_MII_CFG_MIIRATE_M2P5 = 0,
346 + LTQ_MII_MII_CFG_MIIRATE_M25 = 1,
347 + LTQ_MII_MII_CFG_MIIRATE_M125 = 2,
348 + LTQ_MII_MII_CFG_MIIRATE_M50 = 3,
349 + LTQ_MII_MII_CFG_MIIRATE_AUTO = 4,
350 +};
351 +
352 +enum ltq_mii_mii_cfg_miimode {
353 + LTQ_MII_MII_CFG_MIIMODE_MIIP = 0,
354 + LTQ_MII_MII_CFG_MIIMODE_MIIM = 1,
355 + LTQ_MII_MII_CFG_MIIMODE_RMIIP = 2,
356 + LTQ_MII_MII_CFG_MIIMODE_RMIIM = 3,
357 + LTQ_MII_MII_CFG_MIIMODE_RGMII = 4,
358 +};
359 +
360 +struct ltq_eth_priv {
361 + struct ltq_dma_device *dma_dev;
362 + struct mii_dev *bus;
363 + struct eth_device *dev;
364 + struct phy_device *phymap[LTQ_ETHSW_MAX_GMAC];
365 + int rx_num;
366 +};
367 +
368 +enum ltq_mdio_mbusy {
369 + LTQ_MDIO_MBUSY_IDLE = 0,
370 + LTQ_MDIO_MBUSY_BUSY = 1,
371 +};
372 +
373 +enum ltq_mdio_op {
374 + LTQ_MDIO_OP_WRITE = 1,
375 + LTQ_MDIO_OP_READ = 2,
376 +};
377 +
378 +struct ltq_mdio_access {
379 + union {
380 + struct {
381 + unsigned rsvd:3;
382 + unsigned mbusy:1;
383 + unsigned op:2;
384 + unsigned phyad:5;
385 + unsigned regad:5;
386 + } bits;
387 + u16 val;
388 + };
389 +};
390 +
391 +enum LTQ_ETH_PORT_FLAGS {
392 + LTQ_ETH_PORT_NONE = 0,
393 + LTQ_ETH_PORT_PHY = 1,
394 + LTQ_ETH_PORT_SWITCH = (1 << 1),
395 + LTQ_ETH_PORT_MAC = (1 << 2),
396 +};
397 +
398 +struct ltq_eth_port_config {
399 + u8 num;
400 + u8 phy_addr;
401 + u16 flags;
402 + phy_interface_t phy_if;
403 +};
404 +
405 +struct ltq_eth_board_config {
406 + const struct ltq_eth_port_config *ports;
407 + int num_ports;
408 +};
409 +
410 +static const struct ltq_eth_port_config eth_port_config[] = {
411 + /* GMAC0: external Lantiq PEF7071 10/100/1000 PHY for LAN port 0 */
412 + { 0, 0x0, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
413 + /* GMAC1: external Lantiq PEF7071 10/100/1000 PHY for LAN port 1 */
414 + { 1, 0x1, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
415 +};
416 +
417 +static const struct ltq_eth_board_config board_config = {
418 + .ports = eth_port_config,
419 + .num_ports = ARRAY_SIZE(eth_port_config),
420 +};
421 +
422 +static struct ltq_ethsw_mac_pdi_regs *ltq_ethsw_mac_pdi_regs =
423 + (struct ltq_ethsw_mac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MAC_PDI_0_BASE);
424 +
425 +static struct ltq_ethsw_mdio_pdi_regs *ltq_ethsw_mdio_pdi_regs =
426 + (struct ltq_ethsw_mdio_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MDIO_PDI_BASE);
427 +
428 +static struct ltq_ethsw_mii_pdi_regs *ltq_ethsw_mii_pdi_regs =
429 + (struct ltq_ethsw_mii_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MII_PDI_BASE);
430 +
431 +static struct ltq_ethsw_pmac_pdi_regs *ltq_ethsw_pmac_pdi_regs =
432 + (struct ltq_ethsw_pmac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_PMAC_PDI_BASE);
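+/*
+ * The switch PDI register blocks are accessed through fixed uncached KSEG1
+ * mappings of their physical addresses (CKSEG1ADDR); the ltq_vrx200_membase
+ * mapping created in the probe routine is not referenced elsewhere in this
+ * file.
+ */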
433 +
434 +
435 +#define MAX_DMA_CHAN 0x8
436 +#define MAX_DMA_CRC_LEN 0x4
437 +#define MAX_DMA_DATA_LEN 0x600
438 +
439 +/* use 2 static channels for TX/RX
440 + depending on the SoC we need to use different DMA channels for ethernet */
441 +#define LTQ_ETOP_TX_CHANNEL 1
442 +#define LTQ_ETOP_RX_CHANNEL 0
443 +
444 +#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
445 +#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
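+/*
+ * Channel 0 carries RX and channel 1 TX; each channel uses the DMA interrupt
+ * LTQ_DMA_ETOP + channel number, which is how ltq_vrx200_dma_irq() maps an
+ * irq back to its channel.
+ */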
446 +
447 +#define DRV_VERSION "1.0"
448 +
449 +static void __iomem *ltq_vrx200_membase;
450 +
451 +struct ltq_vrx200_chan {
452 + int idx;
453 + int tx_free;
454 + struct net_device *netdev;
455 + struct napi_struct napi;
456 + struct ltq_dma_channel dma;
457 + struct sk_buff *skb[LTQ_DESC_NUM];
458 +};
459 +
460 +struct ltq_vrx200_priv {
461 + struct net_device *netdev;
462 + struct ltq_eth_data *pldata;
463 + struct resource *res;
464 +
465 + struct mii_bus *mii_bus;
466 + struct phy_device *phydev;
467 +
468 + struct ltq_vrx200_chan ch[MAX_DMA_CHAN];
469 + int tx_free[MAX_DMA_CHAN >> 1];
470 +
471 + spinlock_t lock;
472 +
473 + struct clk *clk_ppe;
474 +};
475 +
476 +static int ltq_vrx200_mdio_wr(struct mii_bus *bus, int phy_addr,
477 + int phy_reg, u16 phy_data);
478 +
479 +static int
480 +ltq_vrx200_alloc_skb(struct ltq_vrx200_chan *ch)
481 +{
482 + ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
483 + if (!ch->skb[ch->dma.desc])
484 + return -ENOMEM;
485 + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
486 + ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
487 + DMA_FROM_DEVICE);
488 + ch->dma.desc_base[ch->dma.desc].addr =
489 + CPHYSADDR(ch->skb[ch->dma.desc]->data);
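+	/*
+	 * The descriptor address is overwritten here with the raw physical
+	 * address of the buffer; the dma_map_single() call above presumably
+	 * remains for its cache maintenance on the freshly allocated buffer.
+	 */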
490 + ch->dma.desc_base[ch->dma.desc].ctl =
491 + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
492 + MAX_DMA_DATA_LEN;
493 + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
494 + return 0;
495 +}
496 +
497 +static void
498 +ltq_vrx200_hw_receive(struct ltq_vrx200_chan *ch)
499 +{
500 + struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
501 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
502 + struct sk_buff *skb = ch->skb[ch->dma.desc];
503 + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
504 + unsigned long flags;
505 +
506 + spin_lock_irqsave(&priv->lock, flags);
507 + if (ltq_vrx200_alloc_skb(ch)) {
508 + netdev_err(ch->netdev,
509 + "failed to allocate new rx buffer, stopping DMA\n");
510 + ltq_dma_close(&ch->dma);
511 + }
512 + ch->dma.desc++;
513 + ch->dma.desc %= LTQ_DESC_NUM;
514 + spin_unlock_irqrestore(&priv->lock, flags);
515 +
516 + skb_put(skb, len);
517 + skb->dev = ch->netdev;
518 + skb->protocol = eth_type_trans(skb, ch->netdev);
519 + netif_receive_skb(skb);
520 +}
521 +
522 +static int
523 +ltq_vrx200_poll_rx(struct napi_struct *napi, int budget)
524 +{
525 + struct ltq_vrx200_chan *ch = container_of(napi,
526 + struct ltq_vrx200_chan, napi);
527 + struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
528 + int rx = 0;
529 + int complete = 0;
530 + unsigned long flags;
531 +
532 + while ((rx < budget) && !complete) {
533 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
534 +
535 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
536 + ltq_vrx200_hw_receive(ch);
537 + rx++;
538 + } else {
539 + complete = 1;
540 + }
541 + }
542 + if (complete || !rx) {
543 + napi_complete(&ch->napi);
544 + spin_lock_irqsave(&priv->lock, flags);
545 + ltq_dma_ack_irq(&ch->dma);
546 + spin_unlock_irqrestore(&priv->lock, flags);
547 + }
548 + return rx;
549 +}
550 +
551 +static int
552 +ltq_vrx200_poll_tx(struct napi_struct *napi, int budget)
553 +{
554 + struct ltq_vrx200_chan *ch =
555 + container_of(napi, struct ltq_vrx200_chan, napi);
556 + struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
557 + struct netdev_queue *txq =
558 + netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
559 + unsigned long flags;
560 +
561 + spin_lock_irqsave(&priv->lock, flags);
562 + while ((ch->dma.desc_base[ch->tx_free].ctl &
563 + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
564 + dev_kfree_skb_any(ch->skb[ch->tx_free]);
565 + ch->skb[ch->tx_free] = NULL;
566 + memset(&ch->dma.desc_base[ch->tx_free], 0,
567 + sizeof(struct ltq_dma_desc));
568 + ch->tx_free++;
569 + ch->tx_free %= LTQ_DESC_NUM;
570 + }
571 + spin_unlock_irqrestore(&priv->lock, flags);
572 +
573 + if (netif_tx_queue_stopped(txq))
574 + netif_tx_start_queue(txq);
575 + napi_complete(&ch->napi);
576 + spin_lock_irqsave(&priv->lock, flags);
577 + ltq_dma_ack_irq(&ch->dma);
578 + spin_unlock_irqrestore(&priv->lock, flags);
579 + return 1;
580 +}
581 +
582 +static irqreturn_t
583 +ltq_vrx200_dma_irq(int irq, void *_priv)
584 +{
585 + struct ltq_vrx200_priv *priv = _priv;
586 + int ch = irq - LTQ_DMA_ETOP;
587 +
588 + napi_schedule(&priv->ch[ch].napi);
589 + return IRQ_HANDLED;
590 +}
591 +
592 +static void
593 +ltq_vrx200_free_channel(struct net_device *dev, struct ltq_vrx200_chan *ch)
594 +{
595 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
596 +
597 + ltq_dma_free(&ch->dma);
598 + if (ch->dma.irq)
599 + free_irq(ch->dma.irq, priv);
600 + if (IS_RX(ch->idx)) {
601 + int desc;
602 + for (desc = 0; desc < LTQ_DESC_NUM; desc++)
603 +			dev_kfree_skb_any(ch->skb[desc]);
604 + }
605 +}
606 +
607 +static void
608 +ltq_vrx200_hw_exit(struct net_device *dev)
609 +{
610 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
611 + int i;
612 +
613 + clk_disable(priv->clk_ppe);
614 +
615 + for (i = 0; i < MAX_DMA_CHAN; i++)
616 + if (IS_TX(i) || IS_RX(i))
617 + ltq_vrx200_free_channel(dev, &priv->ch[i]);
618 +}
619 +
620 +static void *ltq_eth_phy_addr_reg(int num)
621 +{
622 + switch (num) {
623 + case 0:
624 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_0;
625 + case 1:
626 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_1;
627 + case 2:
628 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_2;
629 + case 3:
630 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_3;
631 + case 4:
632 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_4;
633 + case 5:
634 + return &ltq_ethsw_mdio_pdi_regs->phy_addr_5;
635 + }
636 +
637 + return NULL;
638 +}
639 +
640 +static void *ltq_eth_mii_cfg_reg(int num)
641 +{
642 + switch (num) {
643 + case 0:
644 + return &ltq_ethsw_mii_pdi_regs->mii_cfg0;
645 + case 1:
646 + return &ltq_ethsw_mii_pdi_regs->mii_cfg1;
647 + case 2:
648 + return &ltq_ethsw_mii_pdi_regs->mii_cfg2;
649 + case 3:
650 + return &ltq_ethsw_mii_pdi_regs->mii_cfg3;
651 + case 4:
652 + return &ltq_ethsw_mii_pdi_regs->mii_cfg4;
653 + case 5:
654 + return &ltq_ethsw_mii_pdi_regs->mii_cfg5;
655 + }
656 +
657 + return NULL;
658 +}
659 +
660 +static void ltq_eth_gmac_update(struct phy_device *phydev, int num)
661 +{
662 + struct ltq_mdio_phy_addr_reg phy_addr_reg;
663 + struct ltq_mii_mii_cfg_reg mii_cfg_reg;
664 + void *phy_addr = ltq_eth_phy_addr_reg(num);
665 + void *mii_cfg = ltq_eth_mii_cfg_reg(num);
666 +
667 + phy_addr_reg.val = ltq_r32(phy_addr);
668 + mii_cfg_reg.val = ltq_r32(mii_cfg);
669 +
670 + phy_addr_reg.bits.addr = phydev->addr;
671 +
672 + if (phydev->link)
673 + phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_UP;
674 + else
675 + phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
676 +
677 + switch (phydev->speed) {
678 + case SPEED_1000:
679 + phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_G1;
680 + mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M125;
681 + break;
682 + case SPEED_100:
683 + phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M100;
684 + switch (mii_cfg_reg.bits.miimode) {
685 + case LTQ_MII_MII_CFG_MIIMODE_RMIIM:
686 + case LTQ_MII_MII_CFG_MIIMODE_RMIIP:
687 + mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M50;
688 + break;
689 + default:
690 + mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M25;
691 + break;
692 + }
693 + break;
694 + default:
695 + phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
696 + mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
697 + break;
698 + }
699 +
700 + if (phydev->duplex == DUPLEX_FULL)
701 + phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_ENABLE;
702 + else
703 + phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
704 +
705 + dbg_ltq_writel(phy_addr, phy_addr_reg.val);
706 + dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
707 + udelay(1);
708 +}
709 +
710 +
711 +static void ltq_eth_port_config(struct ltq_vrx200_priv *priv,
712 + const struct ltq_eth_port_config *port)
713 +{
714 + struct ltq_mii_mii_cfg_reg mii_cfg_reg;
715 + void *mii_cfg = ltq_eth_mii_cfg_reg(port->num);
716 + int setup_gpio = 0;
717 +
718 + mii_cfg_reg.val = ltq_r32(mii_cfg);
719 +
720 +
721 + switch (port->num) {
722 + case 0: /* xMII0 */
723 + case 1: /* xMII1 */
724 + switch (port->phy_if) {
725 + case PHY_INTERFACE_MODE_MII:
726 + if (port->flags & LTQ_ETH_PORT_PHY)
727 + /* MII MAC mode, connected to external PHY */
728 + mii_cfg_reg.bits.miimode =
729 + LTQ_MII_MII_CFG_MIIMODE_MIIM;
730 + else
731 + /* MII PHY mode, connected to external MAC */
732 + mii_cfg_reg.bits.miimode =
733 + LTQ_MII_MII_CFG_MIIMODE_MIIP;
734 + setup_gpio = 1;
735 + break;
736 + case PHY_INTERFACE_MODE_RMII:
737 + if (port->flags & LTQ_ETH_PORT_PHY)
738 + /* RMII MAC mode, connected to external PHY */
739 + mii_cfg_reg.bits.miimode =
740 + LTQ_MII_MII_CFG_MIIMODE_RMIIM;
741 + else
742 + /* RMII PHY mode, connected to external MAC */
743 + mii_cfg_reg.bits.miimode =
744 + LTQ_MII_MII_CFG_MIIMODE_RMIIP;
745 + setup_gpio = 1;
746 + break;
747 + case PHY_INTERFACE_MODE_RGMII:
748 + /* RGMII MAC mode, connected to external PHY */
749 + mii_cfg_reg.bits.miimode =
750 + LTQ_MII_MII_CFG_MIIMODE_RGMII;
751 + setup_gpio = 1;
752 + break;
753 + default:
754 + break;
755 + }
756 + break;
757 + case 2: /* internal GPHY0 */
758 + case 3: /* internal GPHY0 */
759 + case 4: /* internal GPHY1 */
760 + switch (port->phy_if) {
761 + case PHY_INTERFACE_MODE_MII:
762 + case PHY_INTERFACE_MODE_GMII:
763 + /* MII MAC mode, connected to internal GPHY */
764 + mii_cfg_reg.bits.miimode =
765 + LTQ_MII_MII_CFG_MIIMODE_MIIM;
766 + setup_gpio = 1;
767 + break;
768 + default:
769 + break;
770 + }
771 + break;
772 + case 5: /* internal GPHY1 or xMII2 */
773 + switch (port->phy_if) {
774 + case PHY_INTERFACE_MODE_MII:
775 + /* MII MAC mode, connected to internal GPHY */
776 + mii_cfg_reg.bits.miimode =
777 + LTQ_MII_MII_CFG_MIIMODE_MIIM;
778 + setup_gpio = 1;
779 + break;
780 + case PHY_INTERFACE_MODE_RGMII:
781 + /* RGMII MAC mode, connected to external PHY */
782 + mii_cfg_reg.bits.miimode =
783 + LTQ_MII_MII_CFG_MIIMODE_RGMII;
784 + setup_gpio = 1;
785 + break;
786 + default:
787 + break;
788 + }
789 + break;
790 + default:
791 + break;
792 + }
793 +
794 + /* Enable MII interface */
795 + mii_cfg_reg.bits.en = port->flags ? 1 : 0;
796 + dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
797 +
798 +}
799 +
800 +static void ltq_eth_gmac_init(int num)
801 +{
802 + struct ltq_mdio_phy_addr_reg phy_addr_reg;
803 + struct ltq_mii_mii_cfg_reg mii_cfg_reg;
804 + void *phy_addr = ltq_eth_phy_addr_reg(num);
805 + void *mii_cfg = ltq_eth_mii_cfg_reg(num);
806 + struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
807 +
808 + mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[num];
809 +
810 + /* Reset PHY status to link down */
811 + phy_addr_reg.val = ltq_r32(phy_addr);
812 + phy_addr_reg.bits.addr = num;
813 + phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
814 + phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
815 + phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
816 + dbg_ltq_writel(phy_addr, phy_addr_reg.val);
817 +
818 + /* Reset and disable MII interface */
819 + mii_cfg_reg.val = ltq_r32(mii_cfg);
820 + mii_cfg_reg.bits.en = 0;
821 + mii_cfg_reg.bits.res = 1;
822 + mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
823 + dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
824 +
825 + /*
826 + * Enable padding of short frames, enable frame checksum generation
827 + * in transmit direction
828 + */
829 + dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
830 + LTQ_ETHSW_MAC_CTRL0_FCS);
831 +
832 + /* Set inter packet gap size to 12 bytes */
833 + dbg_ltq_writel(&mac_pdi_regs->ctrl_1, 12);
834 +
835 + /*
836 + * Configure frame length checks:
837 + * - allow jumbo frames
838 + * - enable long length check
839 + * - enable short length without VLAN tags
840 + */
841 + dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
842 + LTQ_ETHSW_MAC_CTRL2_LCHKL |
843 + LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
844 +}
845 +
846 +
847 +static void ltq_eth_pmac_init(void)
848 +{
849 + struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
850 +
851 + mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[LTQ_ETHSW_PMAC];
852 +
853 + /*
854 + * Enable padding of short frames, enable frame checksum generation
855 + * in transmit direction
856 + */
857 + dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
858 + LTQ_ETHSW_MAC_CTRL0_FCS);
859 +
860 + /*
861 + * Configure frame length checks:
862 + * - allow jumbo frames
863 + * - enable long length check
864 + * - enable short length without VLAN tags
865 + */
866 + dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
867 + LTQ_ETHSW_MAC_CTRL2_LCHKL |
868 + LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
869 +
870 + /*
871 + * Apply workaround for buffer congestion:
872 + *  - shorten preamble to 1 byte
873 + * - set minimum inter packet gap size to 7 bytes
874 + * - enable receive buffer bypass mode
875 + */
876 + dbg_ltq_writel(&mac_pdi_regs->ctrl_1, LTQ_ETHSW_MAC_CTRL1_SHORTPRE | 7);
877 + dbg_ltq_writel(&mac_pdi_regs->ctrl_6,
878 + (6 << LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT) |
879 + LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS);
880 +
881 + /* Set request assertion threshold to 8, IPG counter to 11 */
882 + dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->rx_ipg, 0x8B);
883 +
884 + /*
885 + * Configure frame header control:
886 + * - enable reaction on pause frames (flow control)
887 + * - remove CRC for packets from PMAC to DMA
888 + * - add CRC for packets from DMA to PMAC
889 + */
890 + dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->hd_ctl, LTQ_ETHSW_PMAC_HD_CTL_FC |
891 + /*LTQ_ETHSW_PMAC_HD_CTL_RC | */LTQ_ETHSW_PMAC_HD_CTL_AC);
892 +}
893 +
894 +static int
895 +ltq_vrx200_hw_init(struct net_device *dev)
896 +{
897 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
898 + int err = 0;
899 + int i;
900 +
901 + netdev_info(dev, "setting up dma\n");
902 + ltq_dma_init_port(DMA_PORT_ETOP);
903 +
904 + netdev_info(dev, "setting up pmu\n");
905 + clk_enable(priv->clk_ppe);
906 +
907 + /* Reset ethernet and switch subsystems */
908 + netdev_info(dev, "reset core\n");
909 + ltq_reset_once(BIT(8), 10);
910 +
911 + /* Enable switch macro */
912 + ltq_setbits(&ltq_ethsw_mdio_pdi_regs->glob_ctrl,
913 + LTQ_ETHSW_GLOB_CTRL_SE);
914 +
915 + /* Disable MDIO auto-polling for all ports */
916 + dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_0, 0);
917 +
918 + /*
919 + * Enable and set MDIO management clock to 2.5 MHz. This is the
920 + * maximum clock for FE PHYs.
921 + * Formula for clock is:
922 + *
923 + * 50 MHz
924 + * x = ----------- - 1
925 + * 2 * f_MDC
926 + */
927 + dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_1,
928 + LTQ_ETHSW_MDC_CFG1_MCEN | 9);
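+	/* With f_MDC = 2.5 MHz this gives x = 50 / (2 * 2.5) - 1 = 9, the
+	 * value written above. */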
929 +
930 + /* Init MAC connected to CPU */
931 + ltq_eth_pmac_init();
932 +
933 + /* Init MACs connected to external MII interfaces */
934 + for (i = 0; i < LTQ_ETHSW_MAX_GMAC; i++)
935 + ltq_eth_gmac_init(i);
936 +
937 + for (i = 0; i < MAX_DMA_CHAN && !err; i++) {
938 + int irq = LTQ_DMA_ETOP + i;
939 + struct ltq_vrx200_chan *ch = &priv->ch[i];
940 +
941 + ch->idx = ch->dma.nr = i;
942 +
943 + if (IS_TX(i)) {
944 + ltq_dma_alloc_tx(&ch->dma);
945 + err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
946 + "vrx200_tx", priv);
947 + } else if (IS_RX(i)) {
948 + ltq_dma_alloc_rx(&ch->dma);
949 + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
950 + ch->dma.desc++)
951 + if (ltq_vrx200_alloc_skb(ch))
952 + err = -ENOMEM;
953 + ch->dma.desc = 0;
954 + err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
955 + "vrx200_rx", priv);
956 + }
957 + if (!err)
958 + ch->dma.irq = irq;
959 + }
960 + for (i = 0; i < board_config.num_ports; i++)
961 + ltq_eth_port_config(priv, &board_config.ports[i]);
962 + return err;
963 +}
964 +
965 +static void
966 +ltq_vrx200_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
967 +{
968 +	strcpy(info->driver, "Lantiq VRX200");
969 + strcpy(info->bus_info, "internal");
970 + strcpy(info->version, DRV_VERSION);
971 +}
972 +
973 +static int
974 +ltq_vrx200_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
975 +{
976 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
977 +
978 + return phy_ethtool_gset(priv->phydev, cmd);
979 +}
980 +
981 +static int
982 +ltq_vrx200_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
983 +{
984 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
985 +
986 + return phy_ethtool_sset(priv->phydev, cmd);
987 +}
988 +
989 +static int
990 +ltq_vrx200_nway_reset(struct net_device *dev)
991 +{
992 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
993 +
994 + return phy_start_aneg(priv->phydev);
995 +}
996 +
997 +static const struct ethtool_ops ltq_vrx200_ethtool_ops = {
998 + .get_drvinfo = ltq_vrx200_get_drvinfo,
999 + .get_settings = ltq_vrx200_get_settings,
1000 + .set_settings = ltq_vrx200_set_settings,
1001 + .nway_reset = ltq_vrx200_nway_reset,
1002 +};
1003 +
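+/*
+ * Busy-waits for the MDIO controller to finish the previous transaction;
+ * returns 0 once idle, or 1 if it is still busy after 10000 register reads.
+ */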
1004 +static inline int ltq_mdio_poll(struct mii_bus *bus)
1005 +{
1006 + struct ltq_mdio_access acc;
1007 + unsigned cnt = 10000;
1008 +
1009 + while (likely(cnt--)) {
1010 + acc.val = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl);
1011 + if (!acc.bits.mbusy)
1012 + return 0;
1013 + }
1014 +
1015 + return 1;
1016 +}
1017 +
1018 +static int
1019 +ltq_vrx200_mdio_wr(struct mii_bus *bus, int addr, int regnum, u16 val)
1020 +{
1021 + struct ltq_mdio_access acc;
1022 + int ret;
1023 +
1024 + acc.val = 0;
1025 + acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
1026 + acc.bits.op = LTQ_MDIO_OP_WRITE;
1027 + acc.bits.phyad = addr;
1028 + acc.bits.regad = regnum;
1029 +
1030 + ret = ltq_mdio_poll(bus);
1031 + if (ret)
1032 + return ret;
1033 +
1034 + dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_write, val);
1035 + dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
1036 +
1037 + return 0;
1038 +}
1039 +
1040 +static int
1041 +ltq_vrx200_mdio_rd(struct mii_bus *bus, int addr, int regnum)
1042 +{
1043 + struct ltq_mdio_access acc;
1044 + int ret;
1045 +
1046 + acc.val = 0;
1047 + acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
1048 + acc.bits.op = LTQ_MDIO_OP_READ;
1049 + acc.bits.phyad = addr;
1050 + acc.bits.regad = regnum;
1051 +
1052 + ret = ltq_mdio_poll(bus);
1053 + if (ret)
1054 + goto timeout;
1055 +
1056 + dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
1057 +
1058 + ret = ltq_mdio_poll(bus);
1059 + if (ret)
1060 + goto timeout;
1061 +
1062 + ret = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_read);
1063 +
1064 + return ret;
1065 +timeout:
1066 + return -1;
1067 +}
1068 +
1069 +static void
1070 +ltq_vrx200_mdio_link(struct net_device *dev)
1071 +{
1072 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1073 + ltq_eth_gmac_update(priv->phydev, 0);
1074 +}
1075 +
1076 +static int
1077 +ltq_vrx200_mdio_probe(struct net_device *dev)
1078 +{
1079 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1080 + struct phy_device *phydev = NULL;
1081 + int val;
1082 +
1083 + phydev = priv->mii_bus->phy_map[0];
1084 +
1085 + if (!phydev) {
1086 + netdev_err(dev, "no PHY found\n");
1087 + return -ENODEV;
1088 + }
1089 +
1090 + phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_vrx200_mdio_link,
1091 + 0, 0);
1092 +
1093 + if (IS_ERR(phydev)) {
1094 + netdev_err(dev, "Could not attach to PHY\n");
1095 + return PTR_ERR(phydev);
1096 + }
1097 +
1098 + phydev->supported &= (SUPPORTED_10baseT_Half
1099 + | SUPPORTED_10baseT_Full
1100 + | SUPPORTED_100baseT_Half
1101 + | SUPPORTED_100baseT_Full
1102 + | SUPPORTED_1000baseT_Half
1103 + | SUPPORTED_1000baseT_Full
1104 + | SUPPORTED_Autoneg
1105 + | SUPPORTED_MII
1106 + | SUPPORTED_TP);
1107 + phydev->advertising = phydev->supported;
1108 + priv->phydev = phydev;
1109 +
1110 + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
1111 + dev->name, phydev->drv->name,
1112 + dev_name(&phydev->dev), phydev->irq);
1113 +
1114 + val = ltq_vrx200_mdio_rd(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
1115 + val |= ADVERTIZE_MPD;
1116 + ltq_vrx200_mdio_wr(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
1117 + ltq_vrx200_mdio_wr(priv->mii_bus, 0, 0, 0x1040);
1118 +
1119 + phy_start_aneg(phydev);
1120 +
1121 + return 0;
1122 +}
1123 +
1124 +static int
1125 +ltq_vrx200_mdio_init(struct net_device *dev)
1126 +{
1127 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1128 + int i;
1129 + int err;
1130 +
1131 + priv->mii_bus = mdiobus_alloc();
1132 + if (!priv->mii_bus) {
1133 + netdev_err(dev, "failed to allocate mii bus\n");
1134 + err = -ENOMEM;
1135 + goto err_out;
1136 + }
1137 +
1138 + priv->mii_bus->priv = dev;
1139 + priv->mii_bus->read = ltq_vrx200_mdio_rd;
1140 + priv->mii_bus->write = ltq_vrx200_mdio_wr;
1141 + priv->mii_bus->name = "ltq_mii";
1142 + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1143 + priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1144 + if (!priv->mii_bus->irq) {
1145 + err = -ENOMEM;
1146 + goto err_out_free_mdiobus;
1147 + }
1148 +
1149 + for (i = 0; i < PHY_MAX_ADDR; ++i)
1150 + priv->mii_bus->irq[i] = PHY_POLL;
1151 +
1152 + if (mdiobus_register(priv->mii_bus)) {
1153 + err = -ENXIO;
1154 + goto err_out_free_mdio_irq;
1155 + }
1156 +
1157 + if (ltq_vrx200_mdio_probe(dev)) {
1158 + err = -ENXIO;
1159 + goto err_out_unregister_bus;
1160 + }
1161 + return 0;
1162 +
1163 +err_out_unregister_bus:
1164 + mdiobus_unregister(priv->mii_bus);
1165 +err_out_free_mdio_irq:
1166 + kfree(priv->mii_bus->irq);
1167 +err_out_free_mdiobus:
1168 + mdiobus_free(priv->mii_bus);
1169 +err_out:
1170 + return err;
1171 +}
1172 +
1173 +static void
1174 +ltq_vrx200_mdio_cleanup(struct net_device *dev)
1175 +{
1176 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1177 +
1178 + phy_disconnect(priv->phydev);
1179 + mdiobus_unregister(priv->mii_bus);
1180 + kfree(priv->mii_bus->irq);
1181 + mdiobus_free(priv->mii_bus);
1182 +}
1183 +
1184 +void phy_dump(struct net_device *dev)
1185 +{
1186 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1187 + int i;
1188 + for (i = 0; i < 0x1F; i++) {
1189 + unsigned int val = ltq_vrx200_mdio_rd(priv->mii_bus, 0, i);
1190 + printk("%d %4X\n", i, val);
1191 + }
1192 +}
1193 +
1194 +static int
1195 +ltq_vrx200_open(struct net_device *dev)
1196 +{
1197 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1198 + int i;
1199 + unsigned long flags;
1200 +
1201 + for (i = 0; i < MAX_DMA_CHAN; i++) {
1202 + struct ltq_vrx200_chan *ch = &priv->ch[i];
1203 +
1204 + if (!IS_TX(i) && (!IS_RX(i)))
1205 + continue;
1206 + napi_enable(&ch->napi);
1207 + spin_lock_irqsave(&priv->lock, flags);
1208 + ltq_dma_open(&ch->dma);
1209 + spin_unlock_irqrestore(&priv->lock, flags);
1210 + }
1211 + if (priv->phydev) {
1212 + phy_start(priv->phydev);
1213 + phy_dump(dev);
1214 + }
1215 + netif_tx_start_all_queues(dev);
1216 + return 0;
1217 +}
1218 +
1219 +static int
1220 +ltq_vrx200_stop(struct net_device *dev)
1221 +{
1222 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1223 + int i;
1224 + unsigned long flags;
1225 +
1226 + netif_tx_stop_all_queues(dev);
1227 + if (priv->phydev)
1228 + phy_stop(priv->phydev);
1229 + for (i = 0; i < MAX_DMA_CHAN; i++) {
1230 + struct ltq_vrx200_chan *ch = &priv->ch[i];
1231 +
1232 + if (!IS_RX(i) && !IS_TX(i))
1233 + continue;
1234 + napi_disable(&ch->napi);
1235 + spin_lock_irqsave(&priv->lock, flags);
1236 + ltq_dma_close(&ch->dma);
1237 + spin_unlock_irqrestore(&priv->lock, flags);
1238 + }
1239 + return 0;
1240 +}
1241 +
1242 +static int
1243 +ltq_vrx200_tx(struct sk_buff *skb, struct net_device *dev)
1244 +{
1245 + int queue = skb_get_queue_mapping(skb);
1246 + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
1247 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1248 + struct ltq_vrx200_chan *ch = &priv->ch[(queue << 1) | 1];
1249 + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
1250 + unsigned long flags;
1251 + u32 byte_offset;
1252 + int len;
1253 +
1254 + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
1255 +
1256 + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
1257 + netdev_err(dev, "tx ring full\n");
1258 + netif_tx_stop_queue(txq);
1259 + return NETDEV_TX_BUSY;
1260 + }
1261 +
1262 + /* dma needs to start on a 16 byte aligned address */
1263 + byte_offset = CPHYSADDR(skb->data) % 16;
1264 + ch->skb[ch->dma.desc] = skb;
1265 +
1266 + dev->trans_start = jiffies;
1267 +
1268 + spin_lock_irqsave(&priv->lock, flags);
1269 + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
1270 + DMA_TO_DEVICE)) - byte_offset;
1271 + wmb();
1272 + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
1273 + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
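+	/*
+	 * Example: a payload at a physical address ending in 0xa yields
+	 * byte_offset = 10, so desc->addr points at the 16-byte boundary
+	 * just below the data and LTQ_DMA_TX_OFFSET(10) can tell the DMA
+	 * engine where the frame data actually begins.
+	 */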
1274 + ch->dma.desc++;
1275 + ch->dma.desc %= LTQ_DESC_NUM;
1276 + spin_unlock_irqrestore(&priv->lock, flags);
1277 +
1278 + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
1279 + netif_tx_stop_queue(txq);
1280 +
1281 + return NETDEV_TX_OK;
1282 +}
1283 +
1284 +static int
1285 +ltq_vrx200_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1286 +{
1287 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1288 +
1289 +	/* TODO: mii-tool reports "No MII transceiver present!" ?! */
1290 + return phy_mii_ioctl(priv->phydev, rq, cmd);
1291 +}
1292 +
1293 +static u16
1294 +ltq_vrx200_select_queue(struct net_device *dev, struct sk_buff *skb)
1295 +{
1296 + /* we are currently only using the first queue */
1297 + return 0;
1298 +}
1299 +
1300 +static int
1301 +ltq_vrx200_init(struct net_device *dev)
1302 +{
1303 + struct ltq_vrx200_priv *priv = netdev_priv(dev);
1304 + struct sockaddr mac;
1305 + int err;
1306 +
1307 + ether_setup(dev);
1308 + dev->watchdog_timeo = 10 * HZ;
1309 +
1310 + err = ltq_vrx200_hw_init(dev);
1311 + if (err)
1312 + goto err_hw;
1313 +
1314 + memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
1315 + if (!is_valid_ether_addr(mac.sa_data)) {
1316 + pr_warn("vrx200: invalid MAC, using random\n");
1317 + random_ether_addr(mac.sa_data);
1318 + }
1319 + eth_mac_addr(dev, &mac);
1320 +
1321 + if (!ltq_vrx200_mdio_init(dev))
1322 + dev->ethtool_ops = &ltq_vrx200_ethtool_ops;
1323 + else
1324 +		pr_warn("vrx200: mdio probe failed\n");
1325 + return 0;
1326 +
1327 +err_hw:
1328 + ltq_vrx200_hw_exit(dev);
1329 + return err;
1330 +}
1331 +
1332 +static void
1333 +ltq_vrx200_tx_timeout(struct net_device *dev)
1334 +{
1335 + int err;
1336 +
1337 + ltq_vrx200_hw_exit(dev);
1338 + err = ltq_vrx200_hw_init(dev);
1339 + if (err)
1340 + goto err_hw;
1341 + dev->trans_start = jiffies;
1342 + netif_wake_queue(dev);
1343 + return;
1344 +
1345 +err_hw:
1346 + ltq_vrx200_hw_exit(dev);
1347 + netdev_err(dev, "failed to restart vrx200 after TX timeout\n");
1348 +}
1349 +
1350 +static const struct net_device_ops ltq_eth_netdev_ops = {
1351 + .ndo_open = ltq_vrx200_open,
1352 + .ndo_stop = ltq_vrx200_stop,
1353 + .ndo_start_xmit = ltq_vrx200_tx,
1354 + .ndo_change_mtu = eth_change_mtu,
1355 + .ndo_do_ioctl = ltq_vrx200_ioctl,
1356 + .ndo_set_mac_address = eth_mac_addr,
1357 + .ndo_validate_addr = eth_validate_addr,
1358 + .ndo_select_queue = ltq_vrx200_select_queue,
1359 + .ndo_init = ltq_vrx200_init,
1360 + .ndo_tx_timeout = ltq_vrx200_tx_timeout,
1361 +};
1362 +
1363 +static int __devinit
1364 +ltq_vrx200_probe(struct platform_device *pdev)
1365 +{
1366 + struct net_device *dev;
1367 + struct ltq_vrx200_priv *priv;
1368 + struct resource *res;
1369 + int err;
1370 + int i;
1371 +
1372 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1373 + if (!res) {
1374 + dev_err(&pdev->dev, "failed to get vrx200 resource\n");
1375 + err = -ENOENT;
1376 + goto err_out;
1377 + }
1378 +
1379 + res = devm_request_mem_region(&pdev->dev, res->start,
1380 + resource_size(res), dev_name(&pdev->dev));
1381 + if (!res) {
1382 + dev_err(&pdev->dev, "failed to request vrx200 resource\n");
1383 + err = -EBUSY;
1384 + goto err_out;
1385 + }
1386 +
1387 + ltq_vrx200_membase = devm_ioremap_nocache(&pdev->dev,
1388 + res->start, resource_size(res));
1389 + if (!ltq_vrx200_membase) {
1390 + dev_err(&pdev->dev, "failed to remap vrx200 engine %d\n",
1391 + pdev->id);
1392 + err = -ENOMEM;
1393 + goto err_out;
1394 + }
1395 +
1396 + if (ltq_gpio_request(&pdev->dev, 42, 2, 1, "MDIO") ||
1397 + ltq_gpio_request(&pdev->dev, 43, 2, 1, "MDC")) {
1398 + dev_err(&pdev->dev, "failed to request MDIO gpios\n");
1399 + err = -EBUSY;
1400 + goto err_out;
1401 + }
1402 +
1403 + dev = alloc_etherdev_mq(sizeof(struct ltq_vrx200_priv), 4);
1404 + strcpy(dev->name, "eth%d");
1405 + dev->netdev_ops = &ltq_eth_netdev_ops;
1406 + priv = netdev_priv(dev);
1407 + priv->res = res;
1408 + priv->pldata = dev_get_platdata(&pdev->dev);
1409 + priv->netdev = dev;
1410 +
1411 + priv->clk_ppe = clk_get(&pdev->dev, NULL);
1412 + if (IS_ERR(priv->clk_ppe))
1413 + return PTR_ERR(priv->clk_ppe);
1414 +
1415 + spin_lock_init(&priv->lock);
1416 +
1417 + for (i = 0; i < MAX_DMA_CHAN; i++) {
1418 + if (IS_TX(i))
1419 + netif_napi_add(dev, &priv->ch[i].napi,
1420 + ltq_vrx200_poll_tx, 8);
1421 + else if (IS_RX(i))
1422 + netif_napi_add(dev, &priv->ch[i].napi,
1423 + ltq_vrx200_poll_rx, 32);
1424 + priv->ch[i].netdev = dev;
1425 + }
1426 +
1427 + err = register_netdev(dev);
1428 + if (err)
1429 + goto err_free;
1430 +
1431 + platform_set_drvdata(pdev, dev);
1432 + return 0;
1433 +
1434 +err_free:
1435 +	free_netdev(dev);
1436 +err_out:
1437 + return err;
1438 +}
1439 +
1440 +static int __devexit
1441 +ltq_vrx200_remove(struct platform_device *pdev)
1442 +{
1443 + struct net_device *dev = platform_get_drvdata(pdev);
1444 +
1445 + if (dev) {
1446 + netif_tx_stop_all_queues(dev);
1447 + ltq_vrx200_hw_exit(dev);
1448 + ltq_vrx200_mdio_cleanup(dev);
1449 + unregister_netdev(dev);
1450 + }
1451 + return 0;
1452 +}
1453 +
1454 +static struct platform_driver ltq_mii_driver = {
1455 + .probe = ltq_vrx200_probe,
1456 + .remove = __devexit_p(ltq_vrx200_remove),
1457 + .driver = {
1458 + .name = "ltq_vrx200",
1459 + .owner = THIS_MODULE,
1460 + },
1461 +};
1462 +
1463 +module_platform_driver(ltq_mii_driver);
1464 +
1465 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1466 +MODULE_DESCRIPTION("Lantiq SoC VRX200 ethernet");
1467 +MODULE_LICENSE("GPL");
1468 --
1469 1.7.9.1
1470