From 305579c1f946ed1aa6c125252ace21c53d47c11d Mon Sep 17 00:00:00 2001
From: Maxime Bizon <mbizon@freebox.fr>
Date: Thu, 21 Jan 2010 17:50:54 +0100
Subject: [PATCH 30/63] bcm63xx_enet: split dma registers access.

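Access to the ENET DMA block is split into three register ranges, each
with its own ioremapped base and accessor helpers: the global DMA
configuration space (RSET_ENETDMA), the per-channel configuration
registers (RSET_ENETDMAC) and the per-channel state ram (RSET_ENETDMAS).
As an illustration only (not part of the diff below), a channel kick
moves from the old single-base accessor to the new per-channel helper:

	/* before: every DMA register went through one shared base */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* after: per-channel registers use the ENETDMAC range */
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG_REG(priv->rx_chan));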
---
 arch/mips/bcm63xx/dev-enet.c                     |   23 +++-
 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h |    4 +-
 drivers/net/ethernet/broadcom/bcm63xx_enet.c     |  179 ++++++++++++++--------
 3 files changed, 138 insertions(+), 68 deletions(-)

--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -19,6 +19,16 @@ static struct resource shared_res[] = {
 		.end = -1, /* filled at runtime */
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = -1, /* filled at runtime */
+		.end = -1, /* filled at runtime */
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = -1, /* filled at runtime */
+		.end = -1, /* filled at runtime */
+		.flags = IORESOURCE_MEM,
+	},
 };
 
 static struct platform_device bcm63xx_enet_shared_device = {
@@ -110,10 +120,15 @@ int __init bcm63xx_enet_register(int uni
 	if (!shared_device_registered) {
 		shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
 		shared_res[0].end = shared_res[0].start;
-		if (BCMCPU_IS_6338())
-			shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1;
-		else
-			shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+		shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+
+		shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
+		shared_res[1].end = shared_res[1].start;
+		shared_res[1].end += RSET_ENETDMAC_SIZE(16) - 1;
+
+		shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
+		shared_res[2].end = shared_res[2].start;
+		shared_res[2].end += RSET_ENETDMAS_SIZE(16) - 1;
 
 		ret = platform_device_register(&bcm63xx_enet_shared_device);
 		if (ret)
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -139,7 +139,9 @@ enum bcm63xx_regs_set {
 #define BCM_6358_RSET_SPI_SIZE 1804
 #define BCM_6368_RSET_SPI_SIZE 1804
 #define RSET_ENET_SIZE 2048
-#define RSET_ENETDMA_SIZE 2048
+#define RSET_ENETDMA_SIZE 256
+#define RSET_ENETDMAC_SIZE(chans) (16 * (chans))
+#define RSET_ENETDMAS_SIZE(chans) (16 * (chans))
 #define RSET_ENETSW_SIZE 65536
 #define RSET_UART_SIZE 24
 #define RSET_UDC_SIZE 256
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-/* io memory shared between all devices */
-static void __iomem *bcm_enet_shared_base;
+/* io registers memory shared between all devices */
+static void __iomem *bcm_enet_shared_base[3];
 
 /*
  * io helpers to access mac registers
@@ -63,13 +63,35 @@ static inline void enet_writel(struct bc
  */
 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
 {
-	return bcm_readl(bcm_enet_shared_base + off);
+	return bcm_readl(bcm_enet_shared_base[0] + off);
 }
 
 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
 				   u32 val, u32 off)
 {
-	bcm_writel(val, bcm_enet_shared_base + off);
+	bcm_writel(val, bcm_enet_shared_base[0] + off);
+}
+
+static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base[1] + off);
+}
+
+static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
+				    u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base[1] + off);
+}
+
+static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base[2] + off);
+}
+
+static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
+				    u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base[2] + off);
 }
 
 /*
@@ -353,8 +375,8 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-				ENETDMA_CHANCFG_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
 	}
 
 	return processed;
@@ -429,10 +451,10 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->tx_chan));
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -451,10 +473,10 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	return rx_work_done;
 }
@@ -497,8 +519,8 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	napi_schedule(&priv->napi);
 
@@ -557,8 +579,8 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG_REG(priv->tx_chan));
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -801,8 +823,8 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -891,28 +913,28 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dma_writel(priv, priv->rx_desc_dma,
-			ENETDMA_RSTART_REG(priv->rx_chan));
-	enet_dma_writel(priv, priv->tx_desc_dma,
-			ENETDMA_RSTART_REG(priv->tx_chan));
+	enet_dmas_writel(priv, priv->rx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->rx_chan));
+	enet_dmas_writel(priv, priv->tx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->tx_chan));
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
 	/* set dma maximum burst len */
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->rx_chan));
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->tx_chan));
+	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
@@ -930,26 +952,26 @@ static int bcm_enet_open(struct net_devi
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->tx_chan));
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1026,14 +1048,14 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
-		if (!(val & ENETDMA_CHANCFG_EN_MASK))
+		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
+		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
 	} while (limit--);
@@ -1059,8 +1081,8 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1598,7 +1620,7 @@ static int __devinit bcm_enet_probe(stru
 
 	/* stop if shared driver failed, assume driver->probe will be
 	 * called in the same order we register devices (correct ?) */
-	if (!bcm_enet_shared_base)
+	if (!bcm_enet_shared_base[0])
 		return -ENODEV;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1904,30 +1926,61 @@ struct platform_driver bcm63xx_enet_driv
 static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
 {
 	struct resource *res;
+	int ret, i, requested[3];
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
+	memset(bcm_enet_shared_base, 0, sizeof (bcm_enet_shared_base));
+	memset(requested, 0, sizeof (requested));
 
-	if (!request_mem_region(res->start, resource_size(res),
-				"bcm63xx_enet_dma"))
-		return -EBUSY;
+	for (i = 0; i < 3; i++) {
+		void __iomem *p;
 
-	bcm_enet_shared_base = ioremap(res->start, resource_size(res));
-	if (!bcm_enet_shared_base) {
-		release_mem_region(res->start, resource_size(res));
-		return -ENOMEM;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!request_mem_region(res->start, resource_size(res),
+					"bcm63xx_enet_dma")) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		requested[i] = 1;
+
+		p = ioremap(res->start, resource_size(res));
+		if (!p) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		bcm_enet_shared_base[i] = p;
 	}
+
 	return 0;
+
+fail:
+	for (i = 0; i < 3; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			continue;
+		if (bcm_enet_shared_base[i])
+			iounmap(bcm_enet_shared_base[i]);
+		if (requested[i])
+			release_mem_region(res->start, resource_size(res));
+	}
+	return ret;
 }
 
 static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
 {
 	struct resource *res;
+	int i;
 
-	iounmap(bcm_enet_shared_base);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
+	for (i = 0; i < 3; i++) {
+		iounmap(bcm_enet_shared_base[i]);
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		release_mem_region(res->start, resource_size(res));
+	}
 	return 0;
 }
 