Patches for bgmac backported from net-next/master.

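This backport includes:
* byte queue limits (BQL): the TX path reports queued bytes via
  netdev_sent_queue(), TX completion reports reclaimed packets/bytes via
  netdev_completed_queue(), and netdev_reset_queue() is called on chip reset
* support for DMA rings with an unaligned base address: instead of only
  warning, the driver stores lower_32_bits(ring->dma_base) in
  ring->index_base and offsets all index writes and status reads by it,
  enabling the engine after programming the ring address when unaligned
* RX path rework: the filled skb is handed to the stack directly and the
  ring slot gets a freshly allocated replacement buffer, instead of copying
  every received packet; descriptor setup is factored out into
  bgmac_dma_rx_setup_desc()
* Kconfig: BGMAC now depends on BCM47XX and selects PHYLIB
* et_swtype NVRAM read fixed to use a 4-byte buffer and sizeof(buf)

For reference, a minimal sketch of the BQL accounting pattern the TX hunks
follow; my_xmit, my_tx_complete and my_reset are hypothetical driver hooks
used only for illustration, not bgmac functions:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... place skb on the hardware TX ring ... */
		netdev_sent_queue(dev, skb->len);	/* account bytes handed to HW */
		return NETDEV_TX_OK;
	}

	static void my_tx_complete(struct net_device *dev)
	{
		unsigned int pkts = 0, bytes = 0;

		/* ... reclaim finished descriptors, summing into pkts/bytes ... */
		netdev_completed_queue(dev, pkts, bytes);	/* may restart the queue */
	}

	static void my_reset(struct net_device *dev)
	{
		netdev_reset_queue(dev);	/* drop in-flight accounting after a HW reset */
	}
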
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -132,7 +132,8 @@ config BNX2X_SRIOV
 
 config BGMAC
 	tristate "BCMA bus GBit core support"
-	depends on BCMA_HOST_SOC && HAS_DMA
+	depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
+	select PHYLIB
 	---help---
 	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
 	  They can be found on BCM47xx SoCs and provide gigabit ethernet.
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
 	dma_desc->ctl0 = cpu_to_le32(ctl0);
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+	netdev_sent_queue(net_dev, skb->len);
+
 	wmb();
 
 	/* Increase ring->end to point empty slot. We tell hardware the first
@@ -157,6 +159,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
 		ring->end = 0;
 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+		    ring->index_base +
 		    ring->end * sizeof(struct bgmac_dma_desc));
 
 	/* Always keep one slot free to allow detecting bugged calls. */
@@ -177,10 +180,13 @@ static void bgmac_dma_tx_free(struct bgm
 	struct device *dma_dev = bgmac->core->dma_dev;
 	int empty_slot;
 	bool freed = false;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot -= ring->index_base;
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
 	empty_slot /= sizeof(struct bgmac_dma_desc);
 
 	while (ring->start != empty_slot) {
@@ -192,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgm
 					 slot->skb->len, DMA_TO_DEVICE);
 			slot->dma_addr = 0;
 
+			bytes_compl += slot->skb->len;
+			pkts_compl++;
+
 			/* Free memory! :) */
 			dev_kfree_skb(slot->skb);
 			slot->skb = NULL;
@@ -205,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgm
 		freed = true;
 	}
 
+	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
 	if (freed && netif_queue_stopped(bgmac->net_dev))
 		netif_wake_queue(bgmac->net_dev);
 }
@@ -241,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(str
 				     struct bgmac_slot_info *slot)
 {
 	struct device *dma_dev = bgmac->core->dma_dev;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
 	struct bgmac_rx_header *rx;
 
 	/* Alloc skb */
-	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-	if (!slot->skb)
+	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+	if (!skb)
 		return -ENOMEM;
 
 	/* Poison - if everything goes fine, hardware will overwrite it */
-	rx = (struct bgmac_rx_header *)slot->skb->data;
+	rx = (struct bgmac_rx_header *)skb->data;
 	rx->len = cpu_to_le16(0xdead);
 	rx->flags = cpu_to_le16(0xbeef);
 
 	/* Map skb for the DMA */
-	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
-					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+	dma_addr = dma_map_single(dma_dev, skb->data,
+				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev, dma_addr)) {
 		bgmac_err(bgmac, "DMA mapping error\n");
+		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
+
+	/* Update the slot */
+	slot->skb = skb;
+	slot->dma_addr = dma_addr;
+
 	if (slot->dma_addr & 0xC0000000)
 		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 	return 0;
 }
 
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+				    struct bgmac_dma_ring *ring, int desc_idx)
+{
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+	u32 ctl0 = 0, ctl1 = 0;
+
+	if (desc_idx == ring->num_slots - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+	/* Is there any BGMAC device that requires extension? */
+	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+	 * B43_DMA64_DCTL1_ADDREXT_MASK;
+	 */
+
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			     int weight)
 {
@@ -274,6 +313,8 @@ static int bgmac_dma_rx_read(struct bgma
 
 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
 	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot -= ring->index_base;
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
 	end_slot /= sizeof(struct bgmac_dma_desc);
 
 	ring->end = end_slot;
@@ -282,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgma
 		struct device *dma_dev = bgmac->core->dma_dev;
 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
 		struct sk_buff *skb = slot->skb;
-		struct sk_buff *new_skb;
 		struct bgmac_rx_header *rx;
 		u16 len, flags;
 
@@ -295,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgma
 		len = le16_to_cpu(rx->len);
 		flags = le16_to_cpu(rx->flags);
 
-		/* Check for poison and drop or pass the packet */
-		if (len == 0xdead && flags == 0xbeef) {
-			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
-				  ring->start);
-		} else {
+		do {
+			dma_addr_t old_dma_addr = slot->dma_addr;
+			int err;
+
+			/* Check for poison and drop or pass the packet */
+			if (len == 0xdead && flags == 0xbeef) {
+				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+					  ring->start);
+				dma_sync_single_for_device(dma_dev,
+							   slot->dma_addr,
+							   BGMAC_RX_BUF_SIZE,
+							   DMA_FROM_DEVICE);
+				break;
+			}
+
 			/* Omit CRC. */
 			len -= ETH_FCS_LEN;
 
-			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
-			if (new_skb) {
-				skb_put(new_skb, len);
-				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
-								 new_skb->data,
-								 len);
-				skb_checksum_none_assert(skb);
-				new_skb->protocol =
-					eth_type_trans(new_skb, bgmac->net_dev);
-				netif_receive_skb(new_skb);
-				handled++;
-			} else {
-				bgmac->net_dev->stats.rx_dropped++;
-				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+			/* Prepare new skb as replacement */
+			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+			if (err) {
+				/* Poison the old skb */
+				rx->len = cpu_to_le16(0xdead);
+				rx->flags = cpu_to_le16(0xbeef);
+
+				dma_sync_single_for_device(dma_dev,
+							   slot->dma_addr,
+							   BGMAC_RX_BUF_SIZE,
+							   DMA_FROM_DEVICE);
+				break;
 			}
+			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
 
-			/* Poison the old skb */
-			rx->len = cpu_to_le16(0xdead);
-			rx->flags = cpu_to_le16(0xbeef);
-		}
-
-		/* Make it back accessible to the hardware */
-		dma_sync_single_for_device(dma_dev, slot->dma_addr,
-					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+			/* Unmap old skb, we'll pass it to the netif */
+			dma_unmap_single(dma_dev, old_dma_addr,
+					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+
+			skb_checksum_none_assert(skb);
+			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+			netif_receive_skb(skb);
+			handled++;
+		} while (0);
 
 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
 			ring->start = 0;
@@ -418,9 +471,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->tx_ring[i];
 		ring->num_slots = BGMAC_TX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-			bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +485,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_TX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* No need to alloc TX slots yet */
 	}
 
@@ -444,9 +501,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->rx_ring[i];
 		ring->num_slots = BGMAC_RX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-			bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +516,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_RX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* Alloc RX slots */
 		for (j = 0; j < ring->num_slots; j++) {
 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -482,19 +543,19 @@ err_dma_free:
 static void bgmac_dma_init(struct bgmac *bgmac)
 {
 	struct bgmac_dma_ring *ring;
-	struct bgmac_dma_desc *dma_desc;
-	u32 ctl0, ctl1;
 	int i;
 
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
 		ring = &bgmac->tx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_tx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 
 		ring->start = 0;
 		ring->end = 0;	/* Points the slot that should *not* be read */
@@ -505,32 +566,20 @@ static void bgmac_dma_init(struct bgmac
 
 		ring = &bgmac->rx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_rx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 
-		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
-		     j++, dma_desc++) {
-			ctl0 = ctl1 = 0;
-
-			if (j == ring->num_slots - 1)
-				ctl0 |= BGMAC_DESC_CTL0_EOT;
-			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
-			/* Is there any BGMAC device that requires extension? */
-			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
-			 * B43_DMA64_DCTL1_ADDREXT_MASK;
-			 */
-
-			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
-			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
-			dma_desc->ctl0 = cpu_to_le32(ctl0);
-			dma_desc->ctl1 = cpu_to_le32(ctl1);
-		}
+		for (j = 0; j < ring->num_slots; j++)
+			bgmac_dma_rx_setup_desc(bgmac, ring, j);
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+			    ring->index_base +
 			    ring->num_slots * sizeof(struct bgmac_dma_desc));
 
 		ring->start = 0;
@@ -909,9 +958,9 @@ static void bgmac_chip_reset(struct bgma
 		u8 et_swtype = 0;
 		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
 			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
-		char buf[2];
+		char buf[4];
 
-		if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
 			if (kstrtou8(buf, 0, &et_swtype))
 				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
 					  buf);
@@ -970,6 +1019,8 @@ static void bgmac_chip_reset(struct bgma
 	bgmac_miiconfig(bgmac);
 	bgmac_phy_init(bgmac);
 
+	netdev_reset_queue(bgmac->net_dev);
+
 	bgmac->int_status = 0;
 }
 
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
 	u16 mmio_base;
 	struct bgmac_dma_desc *cpu_base;
 	dma_addr_t dma_base;
+	u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+	bool unaligned;
 
 	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };