kernel: update linux 3.3 to 3.3.2
openwrt/svn-archive/archive.git: target/linux/lantiq/patches-3.3/0015-NET-MIPS-lantiq-make-etop-ethernet-work-on-ase-ar9.patch
From 204df03dd8524ab3ee8261feab44397dc890a840 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Wed, 10 Aug 2011 15:32:16 +0200
Subject: [PATCH 15/70] NET: MIPS: lantiq: make etop ethernet work on ase/ar9

Extend the driver to handle the different DMA channel layout for AR9 and
Amazon-SE SoCs. The patch also adds support for the integrated PHY found
on Amazon-SE and the gigabit switch found inside the AR9.
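
For background (not part of the diff below), a minimal board-side sketch of
how a machine file hands ethernet settings to this driver through
ltq_register_etop(); the symbol names and the chosen MII mode are
illustrative only:

	/* hypothetical board setup code; mii_mode is consumed by
	   ltq_etop_hw_init() in the driver patched below */
	static struct ltq_eth_data example_eth_data = {
		.mii_mode	= PHY_INTERFACE_MODE_RMII,
	};

	static void __init example_board_eth_init(void)
	{
		ltq_register_etop(&example_eth_data);
	}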

Signed-off-by: John Crispin <blogic@openwrt.org>
Cc: netdev@vger.kernel.org
---
.../mips/include/asm/mach-lantiq/xway/lantiq_irq.h | 22 +---
.../mips/include/asm/mach-lantiq/xway/lantiq_soc.h | 10 ++
arch/mips/lantiq/xway/devices.c | 11 +-
drivers/net/ethernet/lantiq_etop.c | 171 ++++++++++++++++++--
4 files changed, 174 insertions(+), 40 deletions(-)

--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -38,26 +38,8 @@

#define MIPS_CPU_TIMER_IRQ 7

-#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1)
-#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3)
-#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4)
-#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5)
-#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6)
-#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7)
-#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8)
-#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9)
-#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10)
-#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11)
-#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25)
-#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26)
-#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27)
-#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28)
-#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29)
-#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30)
-#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16)
-#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21)
+#define LTQ_DMA_ETOP ((ltq_is_ase()) ? \
+ (INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))

#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24)

--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -82,6 +82,7 @@
#define LTQ_PMU_SIZE 0x1000

#define PMU_DMA 0x0020
+#define PMU_EPHY 0x0080
#define PMU_USB 0x8041
#define PMU_LED 0x0800
#define PMU_GPT 0x1000
@@ -93,6 +94,10 @@
#define LTQ_ETOP_BASE_ADDR 0x1E180000
#define LTQ_ETOP_SIZE 0x40000

+/* GBIT - gigabit switch */
+#define LTQ_GBIT_BASE_ADDR 0x1E108000
+#define LTQ_GBIT_SIZE 0x200
+
/* DMA */
#define LTQ_DMA_BASE_ADDR 0x1E104100
#define LTQ_DMA_SIZE 0x800
@@ -147,6 +152,11 @@ extern void ltq_pmu_enable(unsigned int
extern void ltq_pmu_disable(unsigned int module);
extern void ltq_cgu_enable(unsigned int clk);

+static inline int ltq_is_ase(void)
+{
+ return (ltq_get_soc_type() == SOC_TYPE_AMAZON_SE);
+}
+
static inline int ltq_is_ar9(void)
{
return (ltq_get_soc_type() == SOC_TYPE_AR9);
--- a/arch/mips/lantiq/xway/devices.c
+++ b/arch/mips/lantiq/xway/devices.c
@@ -74,18 +74,23 @@ void __init ltq_register_ase_asc(void)
}

/* ethernet */
-static struct resource ltq_etop_resources =
- MEM_RES("etop", LTQ_ETOP_BASE_ADDR, LTQ_ETOP_SIZE);
+static struct resource ltq_etop_resources[] = {
+ MEM_RES("etop", LTQ_ETOP_BASE_ADDR, LTQ_ETOP_SIZE),
+ MEM_RES("gbit", LTQ_GBIT_BASE_ADDR, LTQ_GBIT_SIZE),
+};

static struct platform_device ltq_etop = {
.name = "ltq_etop",
- .resource = &ltq_etop_resources,
+ .resource = ltq_etop_resources,
.num_resources = 1,
};

void __init
ltq_register_etop(struct ltq_eth_data *eth)
{
+ /* only register the gbit resource on SoCs that have one */
+ if (ltq_is_ar9() || ltq_is_vr9())
+ ltq_etop.num_resources = 2;
if (eth) {
ltq_etop.dev.platform_data = eth;
platform_device_register(&ltq_etop);
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -71,10 +71,43 @@
#define ETOP_MII_REVERSE 0xe
#define ETOP_PLEN_UNDER 0x40
#define ETOP_CGEN 0x800
+#define ETOP_CFG_MII0 0x01

-/* use 2 static channels for TX/RX */
+#define LTQ_GBIT_MDIO_CTL 0xCC
+#define LTQ_GBIT_MDIO_DATA 0xd0
+#define LTQ_GBIT_GCTL0 0x68
+#define LTQ_GBIT_PMAC_HD_CTL 0x8c
+#define LTQ_GBIT_P0_CTL 0x4
+#define LTQ_GBIT_PMAC_RX_IPG 0xa8
+
+#define PMAC_HD_CTL_AS (1 << 19)
+#define PMAC_HD_CTL_RXSH (1 << 22)
+
+/* Switch Enable (0=disable, 1=enable) */
+#define GCTL0_SE 0x80000000
+/* Disable MDIO auto polling (0=disable, 1=enable) */
+#define PX_CTL_DMDIO 0x00400000
+
+/* register information for the gbit's MDIO bus */
+#define MDIO_XR9_REQUEST 0x00008000
+#define MDIO_XR9_READ 0x00000800
+#define MDIO_XR9_WRITE 0x00000400
+#define MDIO_XR9_REG_MASK 0x1f
+#define MDIO_XR9_ADDR_MASK 0x1f
+#define MDIO_XR9_RD_MASK 0xffff
+#define MDIO_XR9_REG_OFFSET 0
+#define MDIO_XR9_ADDR_OFFSET 5
+#define MDIO_XR9_WR_OFFSET 16
+
+/* the newer xway SoCs have an embedded 3/7 port gbit multiplexer */
+#define ltq_has_gbit() (ltq_is_ar9() || ltq_is_vr9())
+
+/* use 2 static channels for TX/RX
+ depending on the SoC we need to use different DMA channels for ethernet */
#define LTQ_ETOP_TX_CHANNEL 1
-#define LTQ_ETOP_RX_CHANNEL 6
+#define LTQ_ETOP_RX_CHANNEL ((ltq_is_ase()) ? (5) : \
+ ((ltq_has_gbit()) ? (0) : (6)))
+
#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)

@@ -83,9 +116,15 @@
#define ltq_etop_w32_mask(x, y, z) \
ltq_w32_mask(x, y, ltq_etop_membase + (z))

+#define ltq_gbit_r32(x) ltq_r32(ltq_gbit_membase + (x))
+#define ltq_gbit_w32(x, y) ltq_w32(x, ltq_gbit_membase + (y))
+#define ltq_gbit_w32_mask(x, y, z) \
+ ltq_w32_mask(x, y, ltq_gbit_membase + (z))
+
#define DRV_VERSION "1.0"

static void __iomem *ltq_etop_membase;
+static void __iomem *ltq_gbit_membase;

struct ltq_etop_chan {
int idx;
@@ -111,6 +150,9 @@ struct ltq_etop_priv {
spinlock_t lock;
};

+static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 phy_data);
+
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
@@ -212,7 +254,7 @@ static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
struct ltq_etop_priv *priv = _priv;
- int ch = irq - LTQ_DMA_CH0_INT;
+ int ch = irq - LTQ_DMA_ETOP;

napi_schedule(&priv->ch[ch].napi);
return IRQ_HANDLED;
@@ -245,15 +287,43 @@ ltq_etop_hw_exit(struct net_device *dev)
ltq_etop_free_channel(dev, &priv->ch[i]);
}

+static void
+ltq_etop_gbit_init(void)
+{
+ ltq_pmu_enable(PMU_SWITCH);
+
+ ltq_gpio_request(42, 2, 1, "MDIO");
+ ltq_gpio_request(43, 2, 1, "MDC");
+
+ ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
+ /** Disable MDIO auto polling mode */
+ ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
+ /* set 1522 packet size */
+ ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
+ /* disable pmac & dmac headers */
+ ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
+ LTQ_GBIT_PMAC_HD_CTL);
+ /* Due to traffic halt when burst length is 8,
+ replace the default IPG value with 0x3B */
+ ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
+}
+
static int
ltq_etop_hw_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
+ unsigned int mii_mode = priv->pldata->mii_mode;
int i;

ltq_pmu_enable(PMU_PPE);

- switch (priv->pldata->mii_mode) {
+ if (ltq_has_gbit()) {
+ ltq_etop_gbit_init();
+ /* force the etops link to the gbit to MII */
+ mii_mode = PHY_INTERFACE_MODE_MII;
+ }
+
+ switch (mii_mode) {
case PHY_INTERFACE_MODE_RMII:
ltq_etop_w32_mask(ETOP_MII_MASK,
ETOP_MII_REVERSE, LTQ_ETOP_CFG);
@@ -265,6 +335,18 @@ ltq_etop_hw_init(struct net_device *dev)
break;

default:
+ if (ltq_is_ase()) {
+ ltq_pmu_enable(PMU_EPHY);
+ /* disable external MII */
+ ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
+ /* enable clock for internal PHY */
+ ltq_cgu_enable(CGU_EPHY);
+ /* we need to write this magic to the internal phy to
+ make it work */
+ ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
+ pr_info("Selected EPHY mode\n");
+ break;
+ }
netdev_err(dev, "unknown mii mode %d\n",
priv->pldata->mii_mode);
return -ENOTSUPP;
@@ -276,7 +358,7 @@ ltq_etop_hw_init(struct net_device *dev)
ltq_dma_init_port(DMA_PORT_ETOP);

for (i = 0; i < MAX_DMA_CHAN; i++) {
- int irq = LTQ_DMA_CH0_INT + i;
+ int irq = LTQ_DMA_ETOP + i;
struct ltq_etop_chan *ch = &priv->ch[i];

ch->idx = ch->dma.nr = i;
@@ -340,6 +422,39 @@ static const struct ethtool_ops ltq_etop
};

static int
+ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 phy_data)
+{
+ u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
+ (phy_data << MDIO_XR9_WR_OFFSET) |
+ ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+ ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+ while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+ ;
+ ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+ while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+ ;
+ return 0;
+}
+
+static int
+ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
+ ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+ ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+ while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+ ;
+ ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+ while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+ ;
+ val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
+ return val;
+}
+
+static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
u32 val = MDIO_REQUEST |
@@ -380,14 +495,11 @@ ltq_etop_mdio_probe(struct net_device *d
{
struct ltq_etop_priv *priv = netdev_priv(dev);
struct phy_device *phydev = NULL;
- int phy_addr;

- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (priv->mii_bus->phy_map[phy_addr]) {
- phydev = priv->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ if (ltq_is_ase())
+ phydev = priv->mii_bus->phy_map[8];
+ else
+ phydev = priv->mii_bus->phy_map[0];

if (!phydev) {
netdev_err(dev, "no PHY found\n");
@@ -409,6 +521,9 @@ ltq_etop_mdio_probe(struct net_device *d
| SUPPORTED_Autoneg
| SUPPORTED_MII
| SUPPORTED_TP);
+ if (ltq_has_gbit())
+ phydev->supported |= SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;

phydev->advertising = phydev->supported;
priv->phydev = phydev;
@@ -434,8 +549,13 @@ ltq_etop_mdio_init(struct net_device *de
}

priv->mii_bus->priv = dev;
- priv->mii_bus->read = ltq_etop_mdio_rd;
- priv->mii_bus->write = ltq_etop_mdio_wr;
+ if (ltq_has_gbit()) {
+ priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
+ priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
+ } else {
+ priv->mii_bus->read = ltq_etop_mdio_rd;
+ priv->mii_bus->write = ltq_etop_mdio_wr;
+ }
priv->mii_bus->name = "ltq_mii";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
priv->pdev->name, priv->pdev->id);
@@ -526,9 +646,9 @@ ltq_etop_tx(struct sk_buff *skb, struct
struct ltq_etop_priv *priv = netdev_priv(dev);
struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- int len;
unsigned long flags;
u32 byte_offset;
+ int len;

len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

@@ -702,7 +822,7 @@ ltq_etop_probe(struct platform_device *p
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
+ struct resource *res, *gbit_res;
int err;
int i;

@@ -730,6 +850,23 @@ ltq_etop_probe(struct platform_device *p
goto err_out;
}

+ if (ltq_has_gbit()) {
+ gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!gbit_res) {
+ dev_err(&pdev->dev, "failed to get gbit resource\n");
+ err = -ENOENT;
+ goto err_out;
+ }
+ ltq_gbit_membase = devm_ioremap_nocache(&pdev->dev,
+ gbit_res->start, resource_size(gbit_res));
+ if (!ltq_gbit_membase) {
+ dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
+ pdev->id);
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
strcpy(dev->name, "eth%d");
dev->netdev_ops = &ltq_eth_netdev_ops;