[brcm63xx] add back support for BCM6345 Ethernet DMA engine
openwrt/svn-archive/archive.git: target/linux/brcm63xx/patches-3.3/443-MIPS-BCM63XX-enable-enet-for-BCM6345.patch
[PATCH] MIPS: BCM63XX: enable ethernet for BCM6345

The BCM6345 has a slightly older DMA engine that the bcm63xx_enet driver
does not support by default. This patch adds the missing Ethernet DMA
register definitions and patches every place in the ethernet driver where
DMA register reads and writes differ.

Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
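Note on the recurring pattern below: the BCM6345 keeps its EOP/SOP/WRAP
descriptor flags three bit positions lower than the newer chips, so rather
than duplicating every mask the driver computes a runtime desc_shift and
shifts the generic masks down. A minimal standalone sketch of that pattern
follows (the EOP/SOP bit positions are assumed from bcm63xx_enet.h; only
WRAP at bit 12 is visible in this patch):

/*
 * Standalone sketch (not part of the patch) of the desc_shift technique.
 * DMADESC_EOP_MASK/DMADESC_SOP_MASK placements are assumptions here.
 */
#include <stdio.h>

#define DMADESC_EOP_MASK	(1 << 14)	/* assumed */
#define DMADESC_SOP_MASK	(1 << 13)	/* assumed */
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)	/* from the hunk below */
#define DMADESC_6345_SHIFT	3

static int bcmcpu_is_6345 = 1;	/* stand-in for BCMCPU_IS_6345() */

int main(void)
{
	unsigned int desc_shift = bcmcpu_is_6345 ? DMADESC_6345_SHIFT : 0;
	unsigned int len_stat = 0;

	/* same pattern as bcm_enet_start_xmit()/bcm_enet_refill_rx():
	 * shift the generic masks down onto the 6345 bit positions */
	len_stat |= (DMADESC_ESOP_MASK >> desc_shift);
	len_stat |= (DMADESC_WRAP_MASK >> desc_shift);

	printf("len_stat flags: 0x%08x\n", len_stat);	/* 0x00000e00 on 6345 */
	return 0;
}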
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -172,7 +172,7 @@ int __init bcm63xx_enet_register(int uni
 	if (unit > 1)
 		return -ENODEV;
 
-	if (unit == 1 && BCMCPU_IS_6338())
+	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
 		return -ENODEV;
 
 	ret = register_shared();
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -764,6 +764,37 @@
 /* State Ram Word 4 */
 #define ENETDMA_SRAM4_REG(x)		(0x20c + (x) * 0x10)
 
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG(x)	(0x00 + (x) * 0x40)
+#define ENETDMA_6345_CHANCFG_EN_SHIFT	0
+#define ENETDMA_6345_CHANCFG_EN_MASK	(1 << ENETDMA_6345_CHANCFG_EN_SHIFT)
+#define ENETDMA_6345_PKTHALT_SHIFT	1
+#define ENETDMA_6345_PKTHALT_MASK	(1 << ENETDMA_6345_PKTHALT_SHIFT)
+#define ENETDMA_6345_CHAINING_SHIFT	2
+#define ENETDMA_6345_CHAINING_MASK	(1 << ENETDMA_6345_CHAINING_SHIFT)
+#define ENETDMA_6345_WRAP_EN_SHIFT	3
+#define ENETDMA_6345_WRAP_EN_MASK	(1 << ENETDMA_6345_WRAP_EN_SHIFT)
+#define ENETDMA_6345_FLOWC_EN_SHIFT	4
+#define ENETDMA_6345_FLOWC_EN_MASK	(1 << ENETDMA_6345_FLOWC_EN_SHIFT)
+
+#define ENETDMA_6345_MAXBURST_REG(x)	(0x04 + (x) * 0x40)
+
+#define ENETDMA_6345_RSTART_REG(x)	(0x08 + (x) * 0x40)
+
+#define ENETDMA_6345_LEN_REG(x)		(0x0C + (x) * 0x40)
+
+#define ENETDMA_6345_BSTAT_REG(x)	(0x10 + (x) * 0x40)
+
+#define ENETDMA_6345_IR_REG(x)		(0x14 + (x) * 0x40)
+#define ENETDMA_6345_IR_BUFDONE_MASK	(1 << 0)
+#define ENETDMA_6345_IR_PKTDONE_MASK	(1 << 1)
+#define ENETDMA_6345_IR_NOTOWNER_MASK	(1 << 2)
+
+#define ENETDMA_6345_IRMASK_REG(x)	(0x18 + (x) * 0x40)
+
+#define ENETDMA_6345_FC_REG(x)		(0x1C + (x) * 0x40)
+
+#define ENETDMA_6345_BUFALLOC_REG(x)	(0x20 + (x) * 0x40)
 
 /*************************************************************************
  * _REG relative to RSET_ENETDMAC
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -32,6 +32,7 @@
 #include <linux/if_vlan.h>
 
 #include <bcm63xx_dev_enet.h>
+#include <bcm63xx_cpu.h>
 #include "bcm63xx_enet.h"
 
 static char bcm_enet_driver_name[] = "bcm63xx_enet";
@@ -243,6 +244,7 @@ static void bcm_enet_mdio_write_mii(stru
 static int bcm_enet_refill_rx(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -270,7 +272,7 @@ static int bcm_enet_refill_rx(struct net
 		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 		len_stat |= DMADESC_OWNER_MASK;
 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
-			len_stat |= DMADESC_WRAP_MASK;
+			len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 			priv->rx_dirty_desc = 0;
 		} else {
 			priv->rx_dirty_desc++;
@@ -281,7 +283,10 @@ static int bcm_enet_refill_rx(struct net
 		priv->rx_desc_count++;
 
 		/* tell dma engine we allocated one buffer */
-		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, 1, ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 	}
 
 	/* If rx ring is still empty, set a timer to try allocating
@@ -319,6 +324,7 @@ static int bcm_enet_receive_queue(struct
 	struct bcm_enet_priv *priv;
 	struct device *kdev;
 	int processed;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 	kdev = &priv->pdev->dev;
@@ -357,7 +363,7 @@ static int bcm_enet_receive_queue(struct
 
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
-		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+		if ((len_stat & (DMADESC_ESOP_MASK >> desc_shift)) != (DMADESC_ESOP_MASK >> desc_shift)) {
 			dev->stats.rx_dropped++;
 			continue;
 		}
@@ -418,8 +424,15 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+					 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+					ENETDMA_6345_CHAINING_MASK |
+					ENETDMA_6345_WRAP_EN_MASK |
+					ENETDMA_6345_FLOWC_EN_MASK,
+					ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
 	}
 
 	return processed;
@@ -494,10 +507,21 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IR_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -516,10 +540,21 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IRMASK_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	return rx_work_done;
 }
@@ -562,8 +597,13 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	napi_schedule(&priv->napi);
 
@@ -579,6 +619,7 @@ static int bcm_enet_start_xmit(struct sk
 	struct bcm_enet_desc *desc;
 	u32 len_stat;
 	int ret;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -624,14 +665,14 @@ static int bcm_enet_start_xmit(struct sk
 				       DMA_TO_DEVICE);
 
 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
-	len_stat |= DMADESC_ESOP_MASK |
+	len_stat |= (DMADESC_ESOP_MASK >> desc_shift) |
 		DMADESC_APPEND_CRC |
 		DMADESC_OWNER_MASK;
 
 	priv->tx_curr_desc++;
 	if (priv->tx_curr_desc == priv->tx_ring_size) {
 		priv->tx_curr_desc = 0;
-		len_stat |= DMADESC_WRAP_MASK;
+		len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 	}
 	priv->tx_desc_count--;
 
@@ -642,8 +683,15 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-			 ENETDMAC_CHANCFG_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+				 ENETDMAC_CHANCFG_REG(priv->tx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->tx_chan));
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -771,6 +819,9 @@ static void bcm_enet_set_flow(struct bcm
 		val &= ~ENET_RXCFG_ENFLOW_MASK;
 	enet_writel(priv, val, ENET_RXCFG_REG);
 
+	if (BCMCPU_IS_6345())
+		return;
+
 	/* tx flow control (pause frame generation) */
 	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 	if (tx_en)
@@ -886,8 +937,13 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -966,8 +1022,12 @@ static int bcm_enet_open(struct net_devi
 	priv->rx_curr_desc = 0;
 
 	/* initialize flow control buffer allocation */
-	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
-			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 
 	if (bcm_enet_refill_rx(dev)) {
 		dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -976,37 +1036,62 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dmas_writel(priv, priv->rx_desc_dma,
-			 ENETDMAS_RSTART_REG(priv->rx_chan));
-	enet_dmas_writel(priv, priv->tx_desc_dma,
+	if (!BCMCPU_IS_6345()) {
+		enet_dmas_writel(priv, priv->rx_desc_dma,
+				 ENETDMAS_RSTART_REG(priv->rx_chan));
+		enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, priv->rx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->tx_chan));
+	}
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
-	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->tx_chan));
+	}
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
 	/* set dma maximum burst len */
-	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
-	enet_dmac_writel(priv, priv->dma_maxburst,
-			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, priv->dma_maxburst,
+				 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+		enet_dmac_writel(priv, priv->dma_maxburst,
+				 ENETDMAC_MAXBURST_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->rx_chan));
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->tx_chan));
+	}
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
 
 	/* set flow control low/high threshold to 1/3 / 2/3 */
-	val = priv->rx_ring_size / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
-	val = (priv->rx_ring_size * 2) / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345()) {
+		val = priv->rx_ring_size / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+		val = (priv->rx_ring_size * 2) / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, 5, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->rx_ring_size, ENETDMA_6345_LEN_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_ring_size, ENETDMA_6345_LEN_REG(priv->tx_chan));
+	}
 
 	/* all set, enable mac and interrupts, start dma engine and
 	 * kick rx dma channel */
@@ -1014,27 +1099,57 @@ static int bcm_enet_open(struct net_devi
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
-			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
+	}
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IR_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
-			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IRMASK_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+				 ENETDMAC_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1111,13 +1226,19 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));
+	if (!BCMCPU_IS_6345())
+		enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));
+	else
+		enet_dma_writel(priv, 0, ENETDMA_6345_CHANCFG_REG(chan));
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
+		if (!BCMCPU_IS_6345())
+			val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
+		else
+			val = enet_dma_readl(priv, ENETDMA_6345_CHANCFG_REG(chan));
 		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
@@ -1144,8 +1265,13 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
-	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+		enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1680,6 +1806,7 @@ static int __devinit bcm_enet_probe(stru
 	struct mii_bus *bus;
 	const char *clk_name;
 	int i, ret;
+	unsigned int chan_offset = 0;
 
 	/* stop if shared driver failed, assume driver->probe will be
 	 * called in the same order we register devices (correct ?) */
@@ -1722,10 +1849,13 @@ static int __devinit bcm_enet_probe(stru
 	priv->irq_tx = res_irq_tx->start;
 	priv->mac_id = pdev->id;
 
+	if (BCMCPU_IS_6345())
+		chan_offset = 1;
+
 	/* get rx & tx dma channel id for this mac */
 	if (priv->mac_id == 0) {
-		priv->rx_chan = 0;
-		priv->tx_chan = 1;
+		priv->rx_chan = 0 + chan_offset;
+		priv->tx_chan = 1 + chan_offset;
 		clk_name = "enet0";
 	} else {
 		priv->rx_chan = 2;
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -47,6 +47,9 @@ struct bcm_enet_desc {
 #define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
 #define DMADESC_WRAP_MASK	(1 << 12)
 
+/* Shift down for EOP, SOP and WRAP bits */
+#define DMADESC_6345_SHIFT	(3)
+
 #define DMADESC_UNDER_MASK	(1 << 9)
 #define DMADESC_APPEND_CRC	(1 << 8)
 #define DMADESC_OVSIZE_MASK	(1 << 4)
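
As a quick sanity check of the new define (standalone sketch; the EOP/SOP
positions at bits 14 and 13 are assumed from the surrounding header):
shifting by 3 moves WRAP from bit 12 to bit 9 and ESOP from bits 14|13 to
bits 11|10, which is where the patch places the BCM6345 flags.

#include <assert.h>

int main(void)
{
	/* WRAP: bit 12 -> bit 9 */
	assert(((1 << 12) >> 3) == (1 << 9));
	/* ESOP (EOP|SOP, assumed bits 14|13) -> bits 11|10 */
	assert((((1 << 14) | (1 << 13)) >> 3) == ((1 << 11) | (1 << 10)));
	return 0;
}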