[PATCH] MIPS: BCM63XX: enable ethernet for BCM6345

BCM6345 has a slightly older DMA engine which is not supported by default
by the bcm63xx_enet driver. This patch adds the missing Ethernet DMA
definitions and patches all the places in the ethernet driver where the
DMA register access differs.

Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
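Note on the mechanism: the core of the change is a per-SoC register-offset
table, selected once at boot and indexed by an abstract enum, plus a
per-channel stride (dma_chan_width). Below is a minimal standalone sketch of
that indirection, not part of the patch itself; regs_init(), dmac_off() and
the is_bcm6345 flag are illustrative stand-ins for
bcm63xx_enetdmac_regs_init(), bcm63xx_enetdmacreg() and BCMCPU_IS_6345(),
while the offsets and strides mirror the tables added below.

#include <stdio.h>

enum dmac_reg { REG_CHANCFG, REG_IR, REG_IRMASK, REG_MAXBURST, REG_COUNT };

/* per-SoC offset tables, indexed by the abstract register enum */
static const unsigned long regs_generic[REG_COUNT] = {
	[REG_CHANCFG]	= 0x0,
	[REG_IR]	= 0x4,
	[REG_IRMASK]	= 0x8,
	[REG_MAXBURST]	= 0xc,
};

static const unsigned long regs_6345[REG_COUNT] = {
	[REG_CHANCFG]	= 0x00,
	[REG_IR]	= 0x14,
	[REG_IRMASK]	= 0x18,
	[REG_MAXBURST]	= 0x04,
};

static const unsigned long *dmac_regs;	/* table picked once at init */
static unsigned long dma_chan_width;	/* per-channel register stride */

static void regs_init(int is_bcm6345)
{
	dmac_regs = is_bcm6345 ? regs_6345 : regs_generic;
	dma_chan_width = is_bcm6345 ? 0x40 : 0x10;
}

/* final offset = abstract register looked up in the table,
 * plus the channel index times the SoC's channel stride */
static unsigned long dmac_off(enum dmac_reg reg, int chan)
{
	return dmac_regs[reg] + chan * dma_chan_width;
}

int main(void)
{
	regs_init(1);	/* pretend runtime detection found a BCM6345 */
	printf("chan 1 IRMASK at 0x%lx\n", dmac_off(REG_IRMASK, 1));
	return 0;
}

Keeping the enum indices separate from the raw _REG offsets is what lets a
single driver binary serve both register layouts at runtime.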
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -9,10 +9,44 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/export.h>
 #include <bcm63xx_dev_enet.h>
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 
+#ifdef BCMCPU_RUNTIME_DETECT
+static const unsigned long bcm6xxx_regs_enetdmac[] = {
+	[ENETDMAC_CHANCFG]	= ENETDMAC_CHANCFG_REG,
+	[ENETDMAC_IR]		= ENETDMAC_IR_REG,
+	[ENETDMAC_IRMASK]	= ENETDMAC_IRMASK_REG,
+	[ENETDMAC_MAXBURST]	= ENETDMAC_MAXBURST_REG,
+};
+
+static const unsigned long bcm6345_regs_enetdmac[] = {
+	[ENETDMAC_CHANCFG]	= ENETDMA_6345_CHANCFG_REG,
+	[ENETDMAC_IR]		= ENETDMA_6345_IR_REG,
+	[ENETDMAC_IRMASK]	= ENETDMA_6345_IRMASK_REG,
+	[ENETDMAC_MAXBURST]	= ENETDMA_6345_MAXBURST_REG,
+	[ENETDMAC_BUFALLOC]	= ENETDMA_6345_BUFALLOC_REG,
+	[ENETDMAC_RSTART]	= ENETDMA_6345_RSTART_REG,
+	[ENETDMAC_FC]		= ENETDMA_6345_FC_REG,
+	[ENETDMAC_LEN]		= ENETDMA_6345_LEN_REG,
+};
+
+const unsigned long *bcm63xx_regs_enetdmac;
+EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
+
+static __init void bcm63xx_enetdmac_regs_init(void)
+{
+	if (BCMCPU_IS_6345())
+		bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
+	else
+		bcm63xx_regs_enetdmac = bcm6xxx_regs_enetdmac;
+}
+#else
+static __init void bcm63xx_enetdmac_regs_init(void) { }
+#endif
+
 static struct resource shared_res[] = {
 	{
 		.start	= -1, /* filled at runtime */
@@ -137,12 +171,19 @@ static int __init register_shared(void)
 	if (shared_device_registered)
 		return 0;
 
+	bcm63xx_enetdmac_regs_init();
+
 	shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
 	shared_res[0].end = shared_res[0].start;
-	shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+	if (BCMCPU_IS_6345())
+		shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
+	else
+		shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
 
 	if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
 		chan_count = 32;
+	else if (BCMCPU_IS_6345())
+		chan_count = 8;
 	else
 		chan_count = 16;
 
@@ -172,7 +213,7 @@ int __init bcm63xx_enet_register(int uni
 	if (unit > 1)
 		return -ENODEV;
 
-	if (unit == 1 && BCMCPU_IS_6338())
+	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
 		return -ENODEV;
 
 	ret = register_shared();
@@ -213,6 +254,20 @@ int __init bcm63xx_enet_register(int uni
 		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
 	}
 
+	dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
+	dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
+	if (BCMCPU_IS_6345()) {
+		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
+		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
+		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
+		dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
+		dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
+		dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
+		dpd->dma_no_sram = 1;
+		dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
+	} else
+		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
+
 	ret = platform_device_register(pdev);
 	if (ret)
 		return ret;
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -709,6 +709,8 @@
 /*************************************************************************
  * _REG relative to RSET_ENETDMA
  *************************************************************************/
+#define ENETDMA_CHAN_WIDTH		0x10
+#define ENETDMA_6345_CHAN_WIDTH		0x40
 
 /* Controller Configuration Register */
 #define ENETDMA_CFG_REG			(0x0)
@@ -758,29 +760,54 @@
 /* State Ram Word 4 */
 #define ENETDMA_SRAM4_REG(x)		(0x20c + (x) * 0x10)
 
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG	(0x00)
+
+#define ENETDMA_6345_MAXBURST_REG	(0x04)
+
+#define ENETDMA_6345_RSTART_REG		(0x08)
+
+#define ENETDMA_6345_LEN_REG		(0x0C)
+
+#define ENETDMA_6345_IR_REG		(0x14)
+
+#define ENETDMA_6345_IRMASK_REG		(0x18)
+
+#define ENETDMA_6345_FC_REG		(0x1C)
+
+#define ENETDMA_6345_BUFALLOC_REG	(0x20)
+
+/* Shift down for EOP, SOP and WRAP bits */
+#define ENETDMA_6345_DESC_SHIFT		(3)
 
 /*************************************************************************
  * _REG relative to RSET_ENETDMAC
 *************************************************************************/
 
 /* Channel Configuration register */
-#define ENETDMAC_CHANCFG_REG(x)		((x) * 0x10)
+#define ENETDMAC_CHANCFG_REG		(0x0)
 #define ENETDMAC_CHANCFG_EN_SHIFT	0
 #define ENETDMAC_CHANCFG_EN_MASK	(1 << ENETDMA_CHANCFG_EN_SHIFT)
 #define ENETDMAC_CHANCFG_PKTHALT_SHIFT	1
 #define ENETDMAC_CHANCFG_PKTHALT_MASK	(1 << ENETDMA_CHANCFG_PKTHALT_SHIFT)
+#define ENETDMAC_CHANCFG_CHAINING_SHIFT	2
+#define ENETDMAC_CHANCFG_CHAINING_MASK	(1 << ENETDMAC_CHANCFG_CHAINING_SHIFT)
+#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT	3
+#define ENETDMAC_CHANCFG_WRAP_EN_MASK	(1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT)
+#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT	4
+#define ENETDMAC_CHANCFG_FLOWC_EN_MASK	(1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT)
 
 /* Interrupt Control/Status register */
-#define ENETDMAC_IR_REG(x)		(0x4 + (x) * 0x10)
+#define ENETDMAC_IR_REG			(0x4)
 #define ENETDMAC_IR_BUFDONE_MASK	(1 << 0)
 #define ENETDMAC_IR_PKTDONE_MASK	(1 << 1)
 #define ENETDMAC_IR_NOTOWNER_MASK	(1 << 2)
 
 /* Interrupt Mask register */
-#define ENETDMAC_IRMASK_REG(x)		(0x8 + (x) * 0x10)
+#define ENETDMAC_IRMASK_REG		(0x8)
 
 /* Maximum Burst Length */
-#define ENETDMAC_MAXBURST_REG(x)	(0xc + (x) * 0x10)
+#define ENETDMAC_MAXBURST_REG		(0xc)
 
 
 /*************************************************************************
@@ -788,16 +815,16 @@
 *************************************************************************/
 
 /* Ring Start Address register */
-#define ENETDMAS_RSTART_REG(x)		((x) * 0x10)
+#define ENETDMAS_RSTART_REG		(0x0)
 
 /* State Ram Word 2 */
-#define ENETDMAS_SRAM2_REG(x)		(0x4 + (x) * 0x10)
+#define ENETDMAS_SRAM2_REG		(0x4)
 
 /* State Ram Word 3 */
-#define ENETDMAS_SRAM3_REG(x)		(0x8 + (x) * 0x10)
+#define ENETDMAS_SRAM3_REG		(0x8)
 
 /* State Ram Word 4 */
-#define ENETDMAS_SRAM4_REG(x)		(0xc + (x) * 0x10)
+#define ENETDMAS_SRAM4_REG		(0xc)
 
 
 /*************************************************************************
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -115,26 +115,28 @@ static inline void enet_dma_writel(struc
 	bcm_writel(val, bcm_enet_shared_base[0] + off);
 }
 
-static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off)
+static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
 {
-	return bcm_readl(bcm_enet_shared_base[1] + off);
+	return bcm_readl(bcm_enet_shared_base[1] +
+			 (bcm63xx_enetdmacreg(off) + (chan * priv->dma_chan_width)));
 }
 
 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
-				    u32 val, u32 off)
+				    u32 val, u32 off, int chan)
 {
-	bcm_writel(val, bcm_enet_shared_base[1] + off);
+	bcm_writel(val, bcm_enet_shared_base[1] +
+		   (bcm63xx_enetdmacreg(off) + (chan * priv->dma_chan_width)));
 }
 
-static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off)
+static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
 {
-	return bcm_readl(bcm_enet_shared_base[2] + off);
+	return bcm_readl(bcm_enet_shared_base[2] + (off + (chan * priv->dma_chan_width)));
 }
 
 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
-				    u32 val, u32 off)
+				    u32 val, u32 off, int chan)
 {
-	bcm_writel(val, bcm_enet_shared_base[2] + off);
+	bcm_writel(val, bcm_enet_shared_base[2] + (off + (chan * priv->dma_chan_width)));
 }
 
 /*
@@ -270,7 +272,7 @@ static int bcm_enet_refill_rx(struct net
 		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 		len_stat |= DMADESC_OWNER_MASK;
 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
-			len_stat |= DMADESC_WRAP_MASK;
+			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 			priv->rx_dirty_desc = 0;
 		} else {
 			priv->rx_dirty_desc++;
@@ -281,7 +283,10 @@ static int bcm_enet_refill_rx(struct net
 		priv->rx_desc_count++;
 
 		/* tell dma engine we allocated one buffer */
-		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		if (!priv->dma_no_sram)
+			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		else
+			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
 	}
 
 	/* If rx ring is still empty, set a timer to try allocating
@@ -357,7 +362,8 @@ static int bcm_enet_receive_queue(struct
 
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
-		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
+		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
 			dev->stats.rx_dropped++;
 			continue;
 		}
@@ -418,8 +424,8 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+		enet_dmac_writel(priv, priv->dma_chan_en_mask,
+				 ENETDMAC_CHANCFG, priv->rx_chan);
 	}
 
 	return processed;
@@ -494,10 +500,10 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->tx_chan);
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -516,10 +522,10 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->tx_chan);
 
 	return rx_work_done;
 }
@@ -562,8 +568,8 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	napi_schedule(&priv->napi);
 
@@ -624,14 +630,14 @@ static int bcm_enet_start_xmit(struct sk
 			       DMA_TO_DEVICE);
 
 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
-	len_stat |= DMADESC_ESOP_MASK |
+	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
 		DMADESC_APPEND_CRC |
 		DMADESC_OWNER_MASK;
 
 	priv->tx_curr_desc++;
 	if (priv->tx_curr_desc == priv->tx_ring_size) {
 		priv->tx_curr_desc = 0;
-		len_stat |= DMADESC_WRAP_MASK;
+		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 	}
 	priv->tx_desc_count--;
 
@@ -642,8 +648,8 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-			 ENETDMAC_CHANCFG_REG(priv->tx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+			 ENETDMAC_CHANCFG, priv->tx_chan);
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -771,6 +777,9 @@ static void bcm_enet_set_flow(struct bcm
 		val &= ~ENET_RXCFG_ENFLOW_MASK;
 	enet_writel(priv, val, ENET_RXCFG_REG);
 
+	if (priv->dma_no_sram)
+		return;
+
 	/* tx flow control (pause frame generation) */
 	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 	if (tx_en)
@@ -886,8 +895,8 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -966,8 +975,12 @@ static int bcm_enet_open(struct net_devi
 	priv->rx_curr_desc = 0;
 
 	/* initialize flow control buffer allocation */
-	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
-			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	if (!priv->dma_no_sram)
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	else
+		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				 ENETDMAC_BUFALLOC, priv->rx_chan);
 
 	if (bcm_enet_refill_rx(dev)) {
 		dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -976,18 +989,30 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dmas_writel(priv, priv->rx_desc_dma,
-			 ENETDMAS_RSTART_REG(priv->rx_chan));
-	enet_dmas_writel(priv, priv->tx_desc_dma,
-			 ENETDMAS_RSTART_REG(priv->tx_chan));
+	if (!priv->dma_no_sram) {
+		enet_dmas_writel(priv, priv->rx_desc_dma,
+				 ENETDMAS_RSTART_REG, priv->rx_chan);
+		enet_dmas_writel(priv, priv->tx_desc_dma,
+				 ENETDMAS_RSTART_REG, priv->tx_chan);
+	} else {
+		enet_dmac_writel(priv, priv->rx_desc_dma,
+				 ENETDMAC_RSTART, priv->rx_chan);
+		enet_dmac_writel(priv, priv->tx_desc_dma,
+				 ENETDMAC_RSTART, priv->tx_chan);
+	}
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
+	if (!priv->dma_no_sram) {
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
+	} else {
+		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
+		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
+	}
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
@@ -995,18 +1020,24 @@ static int bcm_enet_open(struct net_devi
 
 	/* set dma maximum burst len */
 	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+			 ENETDMAC_MAXBURST, priv->rx_chan);
 	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
+			 ENETDMAC_MAXBURST, priv->tx_chan);
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
 
 	/* set flow control low/high threshold to 1/3 / 2/3 */
-	val = priv->rx_ring_size / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
-	val = (priv->rx_ring_size * 2) / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	if (!priv->dma_no_sram) {
+		val = priv->rx_ring_size / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+		val = (priv->rx_ring_size * 2) / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	} else {
+		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
+		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
+		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
+	}
 
 	/* all set, enable mac and interrupts, start dma engine and
 	 * kick rx dma channel */
@@ -1015,26 +1046,26 @@ static int bcm_enet_open(struct net_devi
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+			 ENETDMAC_CHANCFG, priv->rx_chan);
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->tx_chan);
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->tx_chan);
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1111,13 +1142,13 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
+		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
 		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
@@ -1144,8 +1175,8 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1757,6 +1788,11 @@ static int __devinit bcm_enet_probe(stru
 		priv->pause_tx = pd->pause_tx;
 		priv->force_duplex_full = pd->force_duplex_full;
 		priv->force_speed_100 = pd->force_speed_100;
+		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
+		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
+		priv->dma_chan_width = pd->dma_chan_width;
+		priv->dma_no_sram = pd->dma_no_sram;
+		priv->dma_desc_shift = pd->dma_desc_shift;
 	}
 
 	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
@@ -2144,8 +2180,8 @@ static int bcm_enetsw_open(struct net_de
 	kdev = &priv->pdev->dev;
 
 	/* mask all interrupts and request them */
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
 			  IRQF_DISABLED, dev->name, dev);
@@ -2269,23 +2305,23 @@ static int bcm_enetsw_open(struct net_de
 
 	/* write rx & tx ring addresses */
 	enet_dmas_writel(priv, priv->rx_desc_dma,
-			 ENETDMAS_RSTART_REG(priv->rx_chan));
+			 ENETDMAS_RSTART_REG, priv->rx_chan);
 	enet_dmas_writel(priv, priv->tx_desc_dma,
-			 ENETDMAS_RSTART_REG(priv->tx_chan));
+			 ENETDMAS_RSTART_REG, priv->tx_chan);
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
 
 	/* set dma maximum burst len */
 	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+			 ENETDMAC_MAXBURST, priv->rx_chan);
 	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
+			 ENETDMAC_MAXBURST, priv->tx_chan);
 
 	/* set flow control low/high threshold to 1/3 / 2/3 */
 	val = priv->rx_ring_size / 3;
@@ -2298,21 +2334,21 @@ static int bcm_enetsw_open(struct net_de
 	wmb();
 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+			 ENETDMAC_CHANCFG, priv->rx_chan);
 
 	/* watch "packet transferred" interrupt in rx and tx */
 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->rx_chan));
+			 ENETDMAC_IR, priv->rx_chan);
 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->tx_chan));
+			 ENETDMAC_IR, priv->tx_chan);
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+			 ENETDMAC_IRMASK, priv->rx_chan);
 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+			 ENETDMAC_IRMASK, priv->tx_chan);
 
 	netif_carrier_on(dev);
 	netif_start_queue(dev);
@@ -2419,8 +2455,8 @@ static int bcm_enetsw_stop(struct net_de
 	del_timer_sync(&priv->rx_timeout);
 
 	/* mask all interrupts */
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
 	/* disable dma & mac */
 	bcm_enet_disable_dma(priv, priv->tx_chan);
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -367,6 +367,21 @@ struct bcm_enet_priv {
 	/* used to poll switch port state */
 	struct timer_list swphy_poll;
 	spinlock_t enetsw_mdio_lock;
+
+	/* dma channel enable mask */
+	u32 dma_chan_en_mask;
+
+	/* dma channel interrupt mask */
+	u32 dma_chan_int_mask;
+
+	/* dma engine has *no* internal SRAM */
+	unsigned int dma_no_sram;
+
+	/* dma channel width */
+	unsigned int dma_chan_width;
+
+	/* dma descriptor shift value */
+	unsigned int dma_desc_shift;
 };
 
 static inline int bcm_enet_port_is_rgmii(int portid)
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -173,6 +173,7 @@ enum bcm63xx_regs_set {
 #define BCM_6368_RSET_SPI_SIZE		1804
 #define RSET_ENET_SIZE			2048
 #define RSET_ENETDMA_SIZE		256
+#define RSET_6345_ENETDMA_SIZE		64
 #define RSET_ENETDMAC_SIZE(chans)	(16 * (chans))
 #define RSET_ENETDMAS_SIZE(chans)	(16 * (chans))
 #define RSET_ENETSW_SIZE		65536
@@ -299,7 +300,7 @@ enum bcm63xx_regs_set {
 #define BCM_6345_USBDMA_BASE		(0xfffe2800)
 #define BCM_6345_ENET0_BASE		(0xfffe1800)
 #define BCM_6345_ENETDMA_BASE		(0xfffe2800)
-#define BCM_6345_ENETDMAC_BASE		(0xfffe2900)
+#define BCM_6345_ENETDMAC_BASE		(0xfffe2840)
 #define BCM_6345_ENETDMAS_BASE		(0xfffe2a00)
 #define BCM_6345_ENETSW_BASE		(0xdeadbeef)
 #define BCM_6345_PCMCIA_BASE		(0xfffe2028)
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -4,6 +4,8 @@
 #include <linux/if_ether.h>
 #include <linux/init.h>
 
+#include <bcm63xx_regs.h>
+
 /*
  * on board ethernet platform data
  */
@@ -37,6 +39,21 @@ struct bcm63xx_enet_platform_data {
 			  int phy_id, int reg),
 	void (*mii_write)(struct net_device *dev,
 			  int phy_id, int reg, int val));
+
+	/* DMA channel enable mask */
+	u32 dma_chan_en_mask;
+
+	/* DMA channel interrupt mask */
+	u32 dma_chan_int_mask;
+
+	/* Set to one if DMA engine has *no* SRAM */
+	unsigned int dma_no_sram;
+
+	/* DMA channel register width */
+	unsigned int dma_chan_width;
+
+	/* DMA descriptor shift */
+	unsigned int dma_desc_shift;
 };
 
 /*
@@ -72,4 +89,66 @@ int __init bcm63xx_enet_register(int uni
 int __init
 bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd);
 
+enum bcm63xx_regs_enetdmac {
+	ENETDMAC_CHANCFG,
+	ENETDMAC_IR,
+	ENETDMAC_IRMASK,
+	ENETDMAC_MAXBURST,
+	ENETDMAC_BUFALLOC,
+	ENETDMAC_RSTART,
+	ENETDMAC_FC,
+	ENETDMAC_LEN,
+};
+
+static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg)
+{
+#ifdef BCMCPU_RUNTIME_DETECT
+	extern const unsigned long *bcm63xx_regs_enetdmac;
+
+	return bcm63xx_regs_enetdmac[reg];
+#else
+#ifdef CONFIG_BCM63XX_CPU_6345
+	switch (reg) {
+	case ENETDMAC_CHANCFG:
+		return ENETDMA_6345_CHANCFG_REG;
+	case ENETDMAC_IR:
+		return ENETDMA_6345_IR_REG;
+	case ENETDMAC_IRMASK:
+		return ENETDMA_6345_IRMASK_REG;
+	case ENETDMAC_MAXBURST:
+		return ENETDMA_6345_MAXBURST_REG;
+	case ENETDMAC_BUFALLOC:
+		return ENETDMA_6345_BUFALLOC_REG;
+	case ENETDMAC_RSTART:
+		return ENETDMA_6345_RSTART_REG;
+	case ENETDMAC_FC:
+		return ENETDMA_6345_FC_REG;
+	case ENETDMAC_LEN:
+		return ENETDMA_6345_LEN_REG;
+	}
+#endif
+#if defined(CONFIG_BCM63XX_CPU_6328) || defined(CONFIG_BCM63XX_CPU_6338) || \
+    defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6358) || \
+    defined(CONFIG_BCM63XX_CPU_6362) || defined(CONFIG_BCM63XX_CPU_6368)
+	switch (reg) {
+	case ENETDMAC_CHANCFG:
+		return ENETDMAC_CHANCFG_REG;
+	case ENETDMAC_IR:
+		return ENETDMAC_IR_REG;
+	case ENETDMAC_IRMASK:
+		return ENETDMAC_IRMASK_REG;
+	case ENETDMAC_MAXBURST:
+		return ENETDMAC_MAXBURST_REG;
+	case ENETDMAC_BUFALLOC:
+	case ENETDMAC_RSTART:
+	case ENETDMAC_FC:
+	case ENETDMAC_LEN:
+		return 0;
+	}
+#endif
+#endif
+	return 0;
+}
+
+
 #endif /* ! BCM63XX_DEV_ENET_H_ */
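
One more note, on the dma_desc_shift handling: on BCM6345 the EOP, SOP and
WRAP flags sit three bits lower in a descriptor's len_stat word, so rather
than defining a second set of flag masks, the driver derives the 6345 bits by
shifting the common DMADESC_* masks right by ENETDMA_6345_DESC_SHIFT. A
standalone sketch of that derivation (plain C, not part of the patch; the
mask values match those in bcm63xx_enet.h):

#include <assert.h>
#include <stdint.h>

/* common descriptor flag masks, as in bcm63xx_enet.h */
#define DMADESC_WRAP_MASK	(1 << 12)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)

int main(void)
{
	unsigned int dma_desc_shift = 3;	/* ENETDMA_6345_DESC_SHIFT */
	uint32_t len_stat = 0;

	/* flag the last ring entry: on 6345 WRAP lands 3 bits lower */
	len_stat |= DMADESC_WRAP_MASK >> dma_desc_shift;
	assert(len_stat == (1 << 9));
	return 0;
}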