From 33cab1696444a8e333cf0490bfe04c32d583fd51 Mon Sep 17 00:00:00 2001
From: Maxime Bizon <mbizon@freebox.fr>
Date: Tue, 4 Jun 2013 20:53:34 +0000
Subject: [PATCH 2/3] bcm63xx_enet: split DMA channel register accesses

The current bcm63xx_enet driver always uses bcm_enet_shared_base whenever
it needs to access the DMA channel configuration space or the DMA channel
state RAM. Split these registers into three parts to be more accurate:

- global DMA configuration
- per DMA channel configuration space
- per DMA channel state RAM space

This is preliminary work to support new chips where the global DMA
configuration remains the same, but the number of DMA channels varies
and they are located at a different memory offset.

Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
 arch/mips/bcm63xx/dev-enet.c                     |  23 +++-
 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h |   4 +-
 drivers/net/ethernet/broadcom/bcm63xx_enet.c     | 139 +++++++++++++---------
 3 files changed, 105 insertions(+), 61 deletions(-)

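Note (illustration only, not part of the upstream change): a minimal sketch of
how a hypothetical chip/board support file could describe the three register
regions this patch splits out, mirroring the shared_res[] layout in dev-enet.c
below. The hyp_* names, base addresses and channel count are made-up
placeholders, and the <bcm63xx_cpu.h> include path is assumed from the
mach-bcm63xx headers; only RSET_ENETDMA_SIZE, RSET_ENETDMAC_SIZE() and
RSET_ENETDMAS_SIZE() come from this patch.

#include <linux/ioport.h>
#include <bcm63xx_cpu.h>	/* RSET_ENETDMA*_SIZE macros, include path assumed */

/* hypothetical register bases and channel count, placeholders only */
#define HYP_ENETDMA_BASE	0xb000d800	/* global DMA configuration */
#define HYP_ENETDMAC_BASE	0xb000da00	/* per-channel configuration space */
#define HYP_ENETDMAS_BASE	0xb000dc00	/* per-channel state RAM */
#define HYP_DMA_CHANS		8

static struct resource hyp_enet_dma_res[] = {
	{	/* index 0: global DMA configuration, used by enet_dma_*() */
		.start	= HYP_ENETDMA_BASE,
		.end	= HYP_ENETDMA_BASE + RSET_ENETDMA_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{	/* index 1: per-channel configuration, used by enet_dmac_*() */
		.start	= HYP_ENETDMAC_BASE,
		.end	= HYP_ENETDMAC_BASE + RSET_ENETDMAC_SIZE(HYP_DMA_CHANS) - 1,
		.flags	= IORESOURCE_MEM,
	},
	{	/* index 2: per-channel state RAM, used by enet_dmas_*() */
		.start	= HYP_ENETDMAS_BASE,
		.end	= HYP_ENETDMAS_BASE + RSET_ENETDMAS_SIZE(HYP_DMA_CHANS) - 1,
		.flags	= IORESOURCE_MEM,
	},
};

Registered in this order on the shared platform device, the three regions are
mapped by bcm_enet_shared_probe() below into bcm_enet_shared_base[0..2], which
the enet_dma_*/enet_dmac_*/enet_dmas_* helpers then use.
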
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -19,6 +19,16 @@ static struct resource shared_res[] = {
 		.end		= -1, /* filled at runtime */
 		.flags		= IORESOURCE_MEM,
 	},
+	{
+		.start		= -1, /* filled at runtime */
+		.end		= -1, /* filled at runtime */
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= -1, /* filled at runtime */
+		.end		= -1, /* filled at runtime */
+		.flags		= IORESOURCE_MEM,
+	},
 };
 
 static struct platform_device bcm63xx_enet_shared_device = {
@@ -110,10 +120,15 @@ int __init bcm63xx_enet_register(int uni
 	if (!shared_device_registered) {
 		shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
 		shared_res[0].end = shared_res[0].start;
-		if (BCMCPU_IS_6338())
-			shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1;
-		else
-			shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+		shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+
+		shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
+		shared_res[1].end = shared_res[1].start;
+		shared_res[1].end += RSET_ENETDMAC_SIZE(16) - 1;
+
+		shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
+		shared_res[2].end = shared_res[2].start;
+		shared_res[2].end += RSET_ENETDMAS_SIZE(16) - 1;
 
 		ret = platform_device_register(&bcm63xx_enet_shared_device);
 		if (ret)
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -187,7 +187,9 @@ enum bcm63xx_regs_set {
 #define BCM_6358_RSET_SPI_SIZE		1804
 #define BCM_6368_RSET_SPI_SIZE		1804
 #define RSET_ENET_SIZE			2048
-#define RSET_ENETDMA_SIZE		2048
+#define RSET_ENETDMA_SIZE		256
+#define RSET_ENETDMAC_SIZE(chans)	(16 * (chans))
+#define RSET_ENETDMAS_SIZE(chans)	(16 * (chans))
 #define RSET_ENETSW_SIZE		65536
 #define RSET_UART_SIZE			24
 #define RSET_UDC_SIZE			256
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-/* io memory shared between all devices */
-static void __iomem *bcm_enet_shared_base;
+/* io registers memory shared between all devices */
+static void __iomem *bcm_enet_shared_base[3];
 
 /*
  * io helpers to access mac registers
@@ -63,13 +63,35 @@ static inline void enet_writel(struct bc
  */
 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
 {
-	return bcm_readl(bcm_enet_shared_base + off);
+	return bcm_readl(bcm_enet_shared_base[0] + off);
 }
 
 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
 				   u32 val, u32 off)
 {
-	bcm_writel(val, bcm_enet_shared_base + off);
+	bcm_writel(val, bcm_enet_shared_base[0] + off);
+}
+
+static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base[1] + off);
+}
+
+static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
+				    u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base[1] + off);
+}
+
+static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base[2] + off);
+}
+
+static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
+				    u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base[2] + off);
 }
 
 /*
@@ -353,8 +375,8 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-				ENETDMA_CHANCFG_REG(priv->rx_chan));
+		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
 	}
 
 	return processed;
@@ -429,10 +451,10 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->tx_chan));
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -451,10 +473,10 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	return rx_work_done;
 }
@@ -497,8 +519,8 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	napi_schedule(&priv->napi);
 
@@ -557,8 +579,8 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG_REG(priv->tx_chan));
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -833,8 +855,8 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -919,28 +941,28 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dma_writel(priv, priv->rx_desc_dma,
-			ENETDMA_RSTART_REG(priv->rx_chan));
-	enet_dma_writel(priv, priv->tx_desc_dma,
-			ENETDMA_RSTART_REG(priv->tx_chan));
+	enet_dmas_writel(priv, priv->rx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->rx_chan));
+	enet_dmas_writel(priv, priv->tx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->tx_chan));
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
 	/* set dma maximum burst len */
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->rx_chan));
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->tx_chan));
+	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
@@ -958,26 +980,26 @@ static int bcm_enet_open(struct net_devi
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->tx_chan));
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1057,14 +1079,14 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
-		if (!(val & ENETDMA_CHANCFG_EN_MASK))
+		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
+		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
 	} while (limit--);
@@ -1090,8 +1112,8 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1636,7 +1658,7 @@ static int bcm_enet_probe(struct platfor
 
 	/* stop if shared driver failed, assume driver->probe will be
 	 * called in the same order we register devices (correct ?) */
-	if (!bcm_enet_shared_base)
+	if (!bcm_enet_shared_base[0])
 		return -ENODEV;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1882,14 +1904,19 @@ struct platform_driver bcm63xx_enet_driv
 static int bcm_enet_shared_probe(struct platform_device *pdev)
 {
 	struct resource *res;
+	void __iomem *p[3];
+	unsigned int i;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
+	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
 
-	bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!bcm_enet_shared_base)
-		return -ENOMEM;
+	for (i = 0; i < 3; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		p[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (!p[i])
+			return -ENOMEM;
+	}
+
+	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
 
 	return 0;
 }