[openwrt/svn-archive/archive.git] / target/linux/generic/patches-3.10/770-bgmac-backport.patch
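This backport updates the 3.10 bgmac driver with the upstream phylib conversion, unaligned DMA ring support (index_base), core-rev-4 DMA burst/prefetch settings, Northstar (BCM4707/BCM53018) handling, and byte queue limits (BQL) accounting on the TX path. As a reading aid for the TX hunks below, here is a minimal, hedged sketch of the BQL pairing they rely on; the my_* structures and functions are hypothetical, only the netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls are the real kernel API.

/* Minimal BQL bookkeeping sketch (hypothetical my_* driver, real netdev_* API). */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_TX_RING_SLOTS 128

struct my_tx_ring {
	unsigned int start;			/* first slot not yet reaped */
	unsigned int end;			/* next free slot            */
	struct sk_buff *skb[MY_TX_RING_SLOTS];	/* per-slot bookkeeping      */
};

/* Called from ndo_start_xmit() after the DMA descriptor has been set up. */
static void my_tx_queue_skb(struct net_device *dev, struct my_tx_ring *ring,
			    struct sk_buff *skb)
{
	ring->skb[ring->end] = skb;
	if (++ring->end >= MY_TX_RING_SLOTS)
		ring->end = 0;
	/* Tell BQL how many bytes are now in flight on this queue. */
	netdev_sent_queue(dev, skb->len);
}

/* Called from the TX-completion path (IRQ/NAPI); empty_slot is the first
 * slot the hardware has not consumed yet, as read back from the chip.
 */
static void my_tx_reap(struct net_device *dev, struct my_tx_ring *ring,
		       unsigned int empty_slot)
{
	unsigned int pkts = 0, bytes = 0;

	while (ring->start != empty_slot) {
		struct sk_buff *skb = ring->skb[ring->start];

		bytes += skb->len;
		pkts++;
		dev_kfree_skb(skb);
		ring->skb[ring->start] = NULL;
		if (++ring->start >= MY_TX_RING_SLOTS)
			ring->start = 0;
	}
	/* One completion report per reap pass, mirroring bgmac_dma_tx_free(). */
	netdev_completed_queue(dev, pkts, bytes);
}

/* After a full chip reset the ring is empty again; BQL state must match. */
static void my_tx_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);
}

In the patch itself, bgmac reports one netdev_completed_queue() per bgmac_dma_tx_free() pass and calls netdev_reset_queue() from bgmac_chip_reset(), which matches this shape. The full patch follows.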
1 --- a/drivers/net/ethernet/broadcom/Kconfig
2 +++ b/drivers/net/ethernet/broadcom/Kconfig
3 @@ -132,7 +132,8 @@ config BNX2X_SRIOV
4
5 config BGMAC
6 tristate "BCMA bus GBit core support"
7 - depends on BCMA_HOST_SOC && HAS_DMA
8 + depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
9 + select PHYLIB
10 ---help---
11 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
12 They can be found on BCM47xx SoCs and provide gigabit ethernet.
13 --- a/drivers/net/ethernet/broadcom/bgmac.c
14 +++ b/drivers/net/ethernet/broadcom/bgmac.c
15 @@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct b
16 u32 ctl;
17
18 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
19 + if (bgmac->core->id.rev >= 4) {
20 + ctl &= ~BGMAC_DMA_TX_BL_MASK;
21 + ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
22 +
23 + ctl &= ~BGMAC_DMA_TX_MR_MASK;
24 + ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
25 +
26 + ctl &= ~BGMAC_DMA_TX_PC_MASK;
27 + ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
28 +
29 + ctl &= ~BGMAC_DMA_TX_PT_MASK;
30 + ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
31 + }
32 ctl |= BGMAC_DMA_TX_ENABLE;
33 ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
34 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
35 @@ -149,6 +162,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
36 dma_desc->ctl0 = cpu_to_le32(ctl0);
37 dma_desc->ctl1 = cpu_to_le32(ctl1);
38
39 + netdev_sent_queue(net_dev, skb->len);
40 +
41 wmb();
42
43 /* Increase ring->end to point empty slot. We tell hardware the first
44 @@ -157,6 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
45 if (++ring->end >= BGMAC_TX_RING_SLOTS)
46 ring->end = 0;
47 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
48 + ring->index_base +
49 ring->end * sizeof(struct bgmac_dma_desc));
50
51 /* Always keep one slot free to allow detecting bugged calls. */
52 @@ -177,10 +193,13 @@ static void bgmac_dma_tx_free(struct bgm
53 struct device *dma_dev = bgmac->core->dma_dev;
54 int empty_slot;
55 bool freed = false;
56 + unsigned bytes_compl = 0, pkts_compl = 0;
57
58 /* The last slot that hardware didn't consume yet */
59 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
60 empty_slot &= BGMAC_DMA_TX_STATDPTR;
61 + empty_slot -= ring->index_base;
62 + empty_slot &= BGMAC_DMA_TX_STATDPTR;
63 empty_slot /= sizeof(struct bgmac_dma_desc);
64
65 while (ring->start != empty_slot) {
66 @@ -192,6 +211,9 @@ static void bgmac_dma_tx_free(struct bgm
67 slot->skb->len, DMA_TO_DEVICE);
68 slot->dma_addr = 0;
69
70 + bytes_compl += slot->skb->len;
71 + pkts_compl++;
72 +
73 /* Free memory! :) */
74 dev_kfree_skb(slot->skb);
75 slot->skb = NULL;
76 @@ -205,6 +227,8 @@ static void bgmac_dma_tx_free(struct bgm
77 freed = true;
78 }
79
80 + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
81 +
82 if (freed && netif_queue_stopped(bgmac->net_dev))
83 netif_wake_queue(bgmac->net_dev);
84 }
85 @@ -229,6 +253,16 @@ static void bgmac_dma_rx_enable(struct b
86 u32 ctl;
87
88 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
89 + if (bgmac->core->id.rev >= 4) {
90 + ctl &= ~BGMAC_DMA_RX_BL_MASK;
91 + ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
92 +
93 + ctl &= ~BGMAC_DMA_RX_PC_MASK;
94 + ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
95 +
96 + ctl &= ~BGMAC_DMA_RX_PT_MASK;
97 + ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
98 + }
99 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
100 ctl |= BGMAC_DMA_RX_ENABLE;
101 ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
102 @@ -241,31 +275,59 @@ static int bgmac_dma_rx_skb_for_slot(str
103 struct bgmac_slot_info *slot)
104 {
105 struct device *dma_dev = bgmac->core->dma_dev;
106 + struct sk_buff *skb;
107 + dma_addr_t dma_addr;
108 struct bgmac_rx_header *rx;
109
110 /* Alloc skb */
111 - slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
112 - if (!slot->skb)
113 + skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
114 + if (!skb)
115 return -ENOMEM;
116
117 /* Poison - if everything goes fine, hardware will overwrite it */
118 - rx = (struct bgmac_rx_header *)slot->skb->data;
119 + rx = (struct bgmac_rx_header *)skb->data;
120 rx->len = cpu_to_le16(0xdead);
121 rx->flags = cpu_to_le16(0xbeef);
122
123 /* Map skb for the DMA */
124 - slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
125 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
126 - if (dma_mapping_error(dma_dev, slot->dma_addr)) {
127 + dma_addr = dma_map_single(dma_dev, skb->data,
128 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
129 + if (dma_mapping_error(dma_dev, dma_addr)) {
130 bgmac_err(bgmac, "DMA mapping error\n");
131 + dev_kfree_skb(skb);
132 return -ENOMEM;
133 }
134 +
135 + /* Update the slot */
136 + slot->skb = skb;
137 + slot->dma_addr = dma_addr;
138 +
139 if (slot->dma_addr & 0xC0000000)
140 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
141
142 return 0;
143 }
144
145 +static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
146 + struct bgmac_dma_ring *ring, int desc_idx)
147 +{
148 + struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
149 + u32 ctl0 = 0, ctl1 = 0;
150 +
151 + if (desc_idx == ring->num_slots - 1)
152 + ctl0 |= BGMAC_DESC_CTL0_EOT;
153 + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
154 + /* Is there any BGMAC device that requires extension? */
155 + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
156 + * B43_DMA64_DCTL1_ADDREXT_MASK;
157 + */
158 +
159 + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
160 + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
161 + dma_desc->ctl0 = cpu_to_le32(ctl0);
162 + dma_desc->ctl1 = cpu_to_le32(ctl1);
163 +}
164 +
165 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
166 int weight)
167 {
168 @@ -274,6 +336,8 @@ static int bgmac_dma_rx_read(struct bgma
169
170 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
171 end_slot &= BGMAC_DMA_RX_STATDPTR;
172 + end_slot -= ring->index_base;
173 + end_slot &= BGMAC_DMA_RX_STATDPTR;
174 end_slot /= sizeof(struct bgmac_dma_desc);
175
176 ring->end = end_slot;
177 @@ -282,7 +346,6 @@ static int bgmac_dma_rx_read(struct bgma
178 struct device *dma_dev = bgmac->core->dma_dev;
179 struct bgmac_slot_info *slot = &ring->slots[ring->start];
180 struct sk_buff *skb = slot->skb;
181 - struct sk_buff *new_skb;
182 struct bgmac_rx_header *rx;
183 u16 len, flags;
184
185 @@ -295,38 +358,51 @@ static int bgmac_dma_rx_read(struct bgma
186 len = le16_to_cpu(rx->len);
187 flags = le16_to_cpu(rx->flags);
188
189 - /* Check for poison and drop or pass the packet */
190 - if (len == 0xdead && flags == 0xbeef) {
191 - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
192 - ring->start);
193 - } else {
194 + do {
195 + dma_addr_t old_dma_addr = slot->dma_addr;
196 + int err;
197 +
198 + /* Check for poison and drop or pass the packet */
199 + if (len == 0xdead && flags == 0xbeef) {
200 + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
201 + ring->start);
202 + dma_sync_single_for_device(dma_dev,
203 + slot->dma_addr,
204 + BGMAC_RX_BUF_SIZE,
205 + DMA_FROM_DEVICE);
206 + break;
207 + }
208 +
209 /* Omit CRC. */
210 len -= ETH_FCS_LEN;
211
212 - new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
213 - if (new_skb) {
214 - skb_put(new_skb, len);
215 - skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
216 - new_skb->data,
217 - len);
218 - skb_checksum_none_assert(skb);
219 - new_skb->protocol =
220 - eth_type_trans(new_skb, bgmac->net_dev);
221 - netif_receive_skb(new_skb);
222 - handled++;
223 - } else {
224 - bgmac->net_dev->stats.rx_dropped++;
225 - bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
226 + /* Prepare new skb as replacement */
227 + err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
228 + if (err) {
229 + /* Poison the old skb */
230 + rx->len = cpu_to_le16(0xdead);
231 + rx->flags = cpu_to_le16(0xbeef);
232 +
233 + dma_sync_single_for_device(dma_dev,
234 + slot->dma_addr,
235 + BGMAC_RX_BUF_SIZE,
236 + DMA_FROM_DEVICE);
237 + break;
238 }
239 + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
240
241 - /* Poison the old skb */
242 - rx->len = cpu_to_le16(0xdead);
243 - rx->flags = cpu_to_le16(0xbeef);
244 - }
245 -
246 - /* Make it back accessible to the hardware */
247 - dma_sync_single_for_device(dma_dev, slot->dma_addr,
248 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
249 + /* Unmap old skb, we'll pass it up to the network stack */
250 + dma_unmap_single(dma_dev, old_dma_addr,
251 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
252 +
253 + skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
254 + skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
255 +
256 + skb_checksum_none_assert(skb);
257 + skb->protocol = eth_type_trans(skb, bgmac->net_dev);
258 + netif_receive_skb(skb);
259 + handled++;
260 + } while (0);
261
262 if (++ring->start >= BGMAC_RX_RING_SLOTS)
263 ring->start = 0;
264 @@ -418,9 +494,6 @@ static int bgmac_dma_alloc(struct bgmac
265 ring = &bgmac->tx_ring[i];
266 ring->num_slots = BGMAC_TX_RING_SLOTS;
267 ring->mmio_base = ring_base[i];
268 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
269 - bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
270 - ring->mmio_base);
271
272 /* Alloc ring of descriptors */
273 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
274 @@ -435,6 +508,13 @@ static int bgmac_dma_alloc(struct bgmac
275 if (ring->dma_base & 0xC0000000)
276 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
277
278 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
279 + BGMAC_DMA_RING_TX);
280 + if (ring->unaligned)
281 + ring->index_base = lower_32_bits(ring->dma_base);
282 + else
283 + ring->index_base = 0;
284 +
285 /* No need to alloc TX slots yet */
286 }
287
288 @@ -444,9 +524,6 @@ static int bgmac_dma_alloc(struct bgmac
289 ring = &bgmac->rx_ring[i];
290 ring->num_slots = BGMAC_RX_RING_SLOTS;
291 ring->mmio_base = ring_base[i];
292 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
293 - bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
294 - ring->mmio_base);
295
296 /* Alloc ring of descriptors */
297 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
298 @@ -462,6 +539,13 @@ static int bgmac_dma_alloc(struct bgmac
299 if (ring->dma_base & 0xC0000000)
300 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
301
302 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
303 + BGMAC_DMA_RING_RX);
304 + if (ring->unaligned)
305 + ring->index_base = lower_32_bits(ring->dma_base);
306 + else
307 + ring->index_base = 0;
308 +
309 /* Alloc RX slots */
310 for (j = 0; j < ring->num_slots; j++) {
311 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
312 @@ -482,19 +566,19 @@ err_dma_free:
313 static void bgmac_dma_init(struct bgmac *bgmac)
314 {
315 struct bgmac_dma_ring *ring;
316 - struct bgmac_dma_desc *dma_desc;
317 - u32 ctl0, ctl1;
318 int i;
319
320 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
321 ring = &bgmac->tx_ring[i];
322
323 - /* We don't implement unaligned addressing, so enable first */
324 - bgmac_dma_tx_enable(bgmac, ring);
325 + if (!ring->unaligned)
326 + bgmac_dma_tx_enable(bgmac, ring);
327 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
328 lower_32_bits(ring->dma_base));
329 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
330 upper_32_bits(ring->dma_base));
331 + if (ring->unaligned)
332 + bgmac_dma_tx_enable(bgmac, ring);
333
334 ring->start = 0;
335 ring->end = 0; /* Points the slot that should *not* be read */
336 @@ -505,32 +589,20 @@ static void bgmac_dma_init(struct bgmac
337
338 ring = &bgmac->rx_ring[i];
339
340 - /* We don't implement unaligned addressing, so enable first */
341 - bgmac_dma_rx_enable(bgmac, ring);
342 + if (!ring->unaligned)
343 + bgmac_dma_rx_enable(bgmac, ring);
344 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
345 lower_32_bits(ring->dma_base));
346 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
347 upper_32_bits(ring->dma_base));
348 + if (ring->unaligned)
349 + bgmac_dma_rx_enable(bgmac, ring);
350
351 - for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
352 - j++, dma_desc++) {
353 - ctl0 = ctl1 = 0;
354 -
355 - if (j == ring->num_slots - 1)
356 - ctl0 |= BGMAC_DESC_CTL0_EOT;
357 - ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
358 - /* Is there any BGMAC device that requires extension? */
359 - /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
360 - * B43_DMA64_DCTL1_ADDREXT_MASK;
361 - */
362 -
363 - dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
364 - dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
365 - dma_desc->ctl0 = cpu_to_le32(ctl0);
366 - dma_desc->ctl1 = cpu_to_le32(ctl1);
367 - }
368 + for (j = 0; j < ring->num_slots; j++)
369 + bgmac_dma_rx_setup_desc(bgmac, ring, j);
370
371 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
372 + ring->index_base +
373 ring->num_slots * sizeof(struct bgmac_dma_desc));
374
375 ring->start = 0;
376 @@ -633,70 +705,6 @@ static int bgmac_phy_write(struct bgmac
377 return 0;
378 }
379
380 -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
381 -static void bgmac_phy_force(struct bgmac *bgmac)
382 -{
383 - u16 ctl;
384 - u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
385 - BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
386 -
387 - if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
388 - return;
389 -
390 - if (bgmac->autoneg)
391 - return;
392 -
393 - ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
394 - ctl &= mask;
395 - if (bgmac->full_duplex)
396 - ctl |= BGMAC_PHY_CTL_DUPLEX;
397 - if (bgmac->speed == BGMAC_SPEED_100)
398 - ctl |= BGMAC_PHY_CTL_SPEED_100;
399 - else if (bgmac->speed == BGMAC_SPEED_1000)
400 - ctl |= BGMAC_PHY_CTL_SPEED_1000;
401 - bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
402 -}
403 -
404 -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
405 -static void bgmac_phy_advertise(struct bgmac *bgmac)
406 -{
407 - u16 adv;
408 -
409 - if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
410 - return;
411 -
412 - if (!bgmac->autoneg)
413 - return;
414 -
415 - /* Adv selected 10/100 speeds */
416 - adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
417 - adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
418 - BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
419 - if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
420 - adv |= BGMAC_PHY_ADV_10HALF;
421 - if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
422 - adv |= BGMAC_PHY_ADV_100HALF;
423 - if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
424 - adv |= BGMAC_PHY_ADV_10FULL;
425 - if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
426 - adv |= BGMAC_PHY_ADV_100FULL;
427 - bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
428 -
429 - /* Adv selected 1000 speeds */
430 - adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
431 - adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
432 - if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
433 - adv |= BGMAC_PHY_ADV2_1000HALF;
434 - if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
435 - adv |= BGMAC_PHY_ADV2_1000FULL;
436 - bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
437 -
438 - /* Restart */
439 - bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
440 - bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
441 - BGMAC_PHY_CTL_RESTART);
442 -}
443 -
444 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
445 static void bgmac_phy_init(struct bgmac *bgmac)
446 {
447 @@ -740,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac
448 if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
449 return;
450
451 - bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
452 - BGMAC_PHY_CTL_RESET);
453 + bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
454 udelay(100);
455 - if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
456 - BGMAC_PHY_CTL_RESET)
457 + if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
458 bgmac_err(bgmac, "PHY reset failed\n");
459 bgmac_phy_init(bgmac);
460 }
461 @@ -762,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct
462 u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
463 u32 new_val = (cmdcfg & mask) | set;
464
465 - bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
466 + bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
467 udelay(2);
468
469 if (new_val != cmdcfg || force)
470 bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
471
472 - bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
473 + bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
474 udelay(2);
475 }
476
477 @@ -827,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac
478 }
479
480 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
481 -static void bgmac_speed(struct bgmac *bgmac, int speed)
482 +static void bgmac_mac_speed(struct bgmac *bgmac)
483 {
484 u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
485 u32 set = 0;
486
487 - if (speed & BGMAC_SPEED_10)
488 + switch (bgmac->mac_speed) {
489 + case SPEED_10:
490 set |= BGMAC_CMDCFG_ES_10;
491 - if (speed & BGMAC_SPEED_100)
492 + break;
493 + case SPEED_100:
494 set |= BGMAC_CMDCFG_ES_100;
495 - if (speed & BGMAC_SPEED_1000)
496 + break;
497 + case SPEED_1000:
498 set |= BGMAC_CMDCFG_ES_1000;
499 - if (!bgmac->full_duplex)
500 + break;
501 + case SPEED_2500:
502 + set |= BGMAC_CMDCFG_ES_2500;
503 + break;
504 + default:
505 + bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
506 + }
507 +
508 + if (bgmac->mac_duplex == DUPLEX_HALF)
509 set |= BGMAC_CMDCFG_HD;
510 +
511 bgmac_cmdcfg_maskset(bgmac, mask, set, true);
512 }
513
514 static void bgmac_miiconfig(struct bgmac *bgmac)
515 {
516 - u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
517 - BGMAC_DS_MM_SHIFT;
518 - if (imode == 0 || imode == 1) {
519 - if (bgmac->autoneg)
520 - bgmac_speed(bgmac, BGMAC_SPEED_100);
521 - else
522 - bgmac_speed(bgmac, bgmac->speed);
523 + struct bcma_device *core = bgmac->core;
524 + struct bcma_chipinfo *ci = &core->bus->chipinfo;
525 + u8 imode;
526 +
527 + if (ci->id == BCMA_CHIP_ID_BCM4707 ||
528 + ci->id == BCMA_CHIP_ID_BCM53018) {
529 + bcma_awrite32(core, BCMA_IOCTL,
530 + bcma_aread32(core, BCMA_IOCTL) | 0x40 |
531 + BGMAC_BCMA_IOCTL_SW_CLKEN);
532 + bgmac->mac_speed = SPEED_2500;
533 + bgmac->mac_duplex = DUPLEX_FULL;
534 + bgmac_mac_speed(bgmac);
535 + } else {
536 + imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
537 + BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
538 + if (imode == 0 || imode == 1) {
539 + bgmac->mac_speed = SPEED_100;
540 + bgmac->mac_duplex = DUPLEX_FULL;
541 + bgmac_mac_speed(bgmac);
542 + }
543 }
544 }
545
546 @@ -861,7 +892,7 @@ static void bgmac_chip_reset(struct bgma
547 struct bcma_device *core = bgmac->core;
548 struct bcma_bus *bus = core->bus;
549 struct bcma_chipinfo *ci = &bus->chipinfo;
550 - u32 flags = 0;
551 + u32 flags;
552 u32 iost;
553 int i;
554
555 @@ -884,44 +915,55 @@ static void bgmac_chip_reset(struct bgma
556 }
557
558 iost = bcma_aread32(core, BCMA_IOST);
559 - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
560 + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
561 (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
562 - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
563 + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
564 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
565
566 - if (iost & BGMAC_BCMA_IOST_ATTACHED) {
567 - flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
568 - if (!bgmac->has_robosw)
569 - flags |= BGMAC_BCMA_IOCTL_SW_RESET;
570 + /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
571 + if (ci->id != BCMA_CHIP_ID_BCM4707) {
572 + flags = 0;
573 + if (iost & BGMAC_BCMA_IOST_ATTACHED) {
574 + flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
575 + if (!bgmac->has_robosw)
576 + flags |= BGMAC_BCMA_IOCTL_SW_RESET;
577 + }
578 + bcma_core_enable(core, flags);
579 }
580
581 - bcma_core_enable(core, flags);
582 -
583 - if (core->id.rev > 2) {
584 - bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
585 - bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
586 + /* Request Misc PLL for corerev > 2 */
587 + if (core->id.rev > 2 &&
588 + ci->id != BCMA_CHIP_ID_BCM4707 &&
589 + ci->id != BCMA_CHIP_ID_BCM53018) {
590 + bgmac_set(bgmac, BCMA_CLKCTLST,
591 + BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
592 + bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
593 + BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
594 + BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
595 1000);
596 }
597
598 - if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
599 + if (ci->id == BCMA_CHIP_ID_BCM5357 ||
600 + ci->id == BCMA_CHIP_ID_BCM4749 ||
601 ci->id == BCMA_CHIP_ID_BCM53572) {
602 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
603 u8 et_swtype = 0;
604 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
605 BGMAC_CHIPCTL_1_IF_TYPE_MII;
606 - char buf[2];
607 + char buf[4];
608
609 - if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
610 + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
611 if (kstrtou8(buf, 0, &et_swtype))
612 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
613 buf);
614 et_swtype &= 0x0f;
615 et_swtype <<= 4;
616 sw_type = et_swtype;
617 - } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
618 + } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
619 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
620 - } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
621 - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
622 + } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
623 + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
624 + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
625 sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
626 BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
627 }
628 @@ -958,8 +1000,10 @@ static void bgmac_chip_reset(struct bgma
629 BGMAC_CMDCFG_PROM |
630 BGMAC_CMDCFG_NLC |
631 BGMAC_CMDCFG_CFE |
632 - BGMAC_CMDCFG_SR,
633 + BGMAC_CMDCFG_SR(core->id.rev),
634 false);
635 + bgmac->mac_speed = SPEED_UNKNOWN;
636 + bgmac->mac_duplex = DUPLEX_UNKNOWN;
637
638 bgmac_clear_mib(bgmac);
639 if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
640 @@ -970,6 +1014,8 @@ static void bgmac_chip_reset(struct bgma
641 bgmac_miiconfig(bgmac);
642 bgmac_phy_init(bgmac);
643
644 + netdev_reset_queue(bgmac->net_dev);
645 +
646 bgmac->int_status = 0;
647 }
648
649 @@ -997,7 +1043,7 @@ static void bgmac_enable(struct bgmac *b
650
651 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
652 bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
653 - BGMAC_CMDCFG_SR, true);
654 + BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
655 udelay(2);
656 cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
657 bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
658 @@ -1026,12 +1072,16 @@ static void bgmac_enable(struct bgmac *b
659 break;
660 }
661
662 - rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
663 - rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
664 - bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
665 - mdp = (bp_clk * 128 / 1000) - 3;
666 - rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
667 - bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
668 + if (ci->id != BCMA_CHIP_ID_BCM4707 &&
669 + ci->id != BCMA_CHIP_ID_BCM53018) {
670 + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
671 + rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
672 + bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
673 + 1000000;
674 + mdp = (bp_clk * 128 / 1000) - 3;
675 + rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
676 + bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
677 + }
678 }
679
680 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
681 @@ -1057,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac
682
683 bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
684
685 - if (!bgmac->autoneg) {
686 - bgmac_speed(bgmac, bgmac->speed);
687 - bgmac_phy_force(bgmac);
688 - } else if (bgmac->speed) { /* if there is anything to adv */
689 - bgmac_phy_advertise(bgmac);
690 - }
691 -
692 if (full_init) {
693 bgmac_dma_init(bgmac);
694 if (1) /* FIXME: is there any case we don't want IRQs? */
695 @@ -1153,6 +1196,8 @@ static int bgmac_open(struct net_device
696 }
697 napi_enable(&bgmac->napi);
698
699 + phy_start(bgmac->phy_dev);
700 +
701 netif_carrier_on(net_dev);
702
703 err_out:
704 @@ -1165,6 +1210,8 @@ static int bgmac_stop(struct net_device
705
706 netif_carrier_off(net_dev);
707
708 + phy_stop(bgmac->phy_dev);
709 +
710 napi_disable(&bgmac->napi);
711 bgmac_chip_intrs_off(bgmac);
712 free_irq(bgmac->core->irq, net_dev);
713 @@ -1201,27 +1248,11 @@ static int bgmac_set_mac_address(struct
714 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
715 {
716 struct bgmac *bgmac = netdev_priv(net_dev);
717 - struct mii_ioctl_data *data = if_mii(ifr);
718
719 - switch (cmd) {
720 - case SIOCGMIIPHY:
721 - data->phy_id = bgmac->phyaddr;
722 - /* fallthru */
723 - case SIOCGMIIREG:
724 - if (!netif_running(net_dev))
725 - return -EAGAIN;
726 - data->val_out = bgmac_phy_read(bgmac, data->phy_id,
727 - data->reg_num & 0x1f);
728 - return 0;
729 - case SIOCSMIIREG:
730 - if (!netif_running(net_dev))
731 - return -EAGAIN;
732 - bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
733 - data->val_in);
734 - return 0;
735 - default:
736 - return -EOPNOTSUPP;
737 - }
738 + if (!netif_running(net_dev))
739 + return -EINVAL;
740 +
741 + return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
742 }
743
744 static const struct net_device_ops bgmac_netdev_ops = {
745 @@ -1243,61 +1274,16 @@ static int bgmac_get_settings(struct net
746 {
747 struct bgmac *bgmac = netdev_priv(net_dev);
748
749 - cmd->supported = SUPPORTED_10baseT_Half |
750 - SUPPORTED_10baseT_Full |
751 - SUPPORTED_100baseT_Half |
752 - SUPPORTED_100baseT_Full |
753 - SUPPORTED_1000baseT_Half |
754 - SUPPORTED_1000baseT_Full |
755 - SUPPORTED_Autoneg;
756 -
757 - if (bgmac->autoneg) {
758 - WARN_ON(cmd->advertising);
759 - if (bgmac->full_duplex) {
760 - if (bgmac->speed & BGMAC_SPEED_10)
761 - cmd->advertising |= ADVERTISED_10baseT_Full;
762 - if (bgmac->speed & BGMAC_SPEED_100)
763 - cmd->advertising |= ADVERTISED_100baseT_Full;
764 - if (bgmac->speed & BGMAC_SPEED_1000)
765 - cmd->advertising |= ADVERTISED_1000baseT_Full;
766 - } else {
767 - if (bgmac->speed & BGMAC_SPEED_10)
768 - cmd->advertising |= ADVERTISED_10baseT_Half;
769 - if (bgmac->speed & BGMAC_SPEED_100)
770 - cmd->advertising |= ADVERTISED_100baseT_Half;
771 - if (bgmac->speed & BGMAC_SPEED_1000)
772 - cmd->advertising |= ADVERTISED_1000baseT_Half;
773 - }
774 - } else {
775 - switch (bgmac->speed) {
776 - case BGMAC_SPEED_10:
777 - ethtool_cmd_speed_set(cmd, SPEED_10);
778 - break;
779 - case BGMAC_SPEED_100:
780 - ethtool_cmd_speed_set(cmd, SPEED_100);
781 - break;
782 - case BGMAC_SPEED_1000:
783 - ethtool_cmd_speed_set(cmd, SPEED_1000);
784 - break;
785 - }
786 - }
787 -
788 - cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
789 -
790 - cmd->autoneg = bgmac->autoneg;
791 -
792 - return 0;
793 + return phy_ethtool_gset(bgmac->phy_dev, cmd);
794 }
795
796 -#if 0
797 static int bgmac_set_settings(struct net_device *net_dev,
798 struct ethtool_cmd *cmd)
799 {
800 struct bgmac *bgmac = netdev_priv(net_dev);
801
802 - return -1;
803 + return phy_ethtool_sset(bgmac->phy_dev, cmd);
804 }
805 -#endif
806
807 static void bgmac_get_drvinfo(struct net_device *net_dev,
808 struct ethtool_drvinfo *info)
809 @@ -1308,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net
810
811 static const struct ethtool_ops bgmac_ethtool_ops = {
812 .get_settings = bgmac_get_settings,
813 + .set_settings = bgmac_set_settings,
814 .get_drvinfo = bgmac_get_drvinfo,
815 };
816
817 @@ -1326,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bu
818 return bgmac_phy_write(bus->priv, mii_id, regnum, value);
819 }
820
821 +static void bgmac_adjust_link(struct net_device *net_dev)
822 +{
823 + struct bgmac *bgmac = netdev_priv(net_dev);
824 + struct phy_device *phy_dev = bgmac->phy_dev;
825 + bool update = false;
826 +
827 + if (phy_dev->link) {
828 + if (phy_dev->speed != bgmac->mac_speed) {
829 + bgmac->mac_speed = phy_dev->speed;
830 + update = true;
831 + }
832 +
833 + if (phy_dev->duplex != bgmac->mac_duplex) {
834 + bgmac->mac_duplex = phy_dev->duplex;
835 + update = true;
836 + }
837 + }
838 +
839 + if (update) {
840 + bgmac_mac_speed(bgmac);
841 + phy_print_status(phy_dev);
842 + }
843 +}
844 +
845 static int bgmac_mii_register(struct bgmac *bgmac)
846 {
847 struct mii_bus *mii_bus;
848 + struct phy_device *phy_dev;
849 + char bus_id[MII_BUS_ID_SIZE + 3];
850 int i, err = 0;
851
852 mii_bus = mdiobus_alloc();
853 @@ -1360,8 +1373,22 @@ static int bgmac_mii_register(struct bgm
854
855 bgmac->mii_bus = mii_bus;
856
857 + /* Connect to the PHY */
858 + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
859 + bgmac->phyaddr);
860 + phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
861 + PHY_INTERFACE_MODE_MII);
862 + if (IS_ERR(phy_dev)) {
863 + bgmac_err(bgmac, "PHY connection failed\n");
864 + err = PTR_ERR(phy_dev);
865 + goto err_unregister_bus;
866 + }
867 + bgmac->phy_dev = phy_dev;
868 +
869 return err;
870
871 +err_unregister_bus:
872 + mdiobus_unregister(mii_bus);
873 err_free_irq:
874 kfree(mii_bus->irq);
875 err_free_bus:
876 @@ -1416,9 +1443,6 @@ static int bgmac_probe(struct bcma_devic
877 bcma_set_drvdata(core, bgmac);
878
879 /* Defaults */
880 - bgmac->autoneg = true;
881 - bgmac->full_duplex = true;
882 - bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
883 memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
884
885 /* On BCM4706 we need common core to access PHY */
886 @@ -1449,6 +1473,27 @@ static int bgmac_probe(struct bcma_devic
887
888 bgmac_chip_reset(bgmac);
889
890 + /* For Northstar, we have to take all GMAC cores out of reset */
891 + if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
892 + core->id.id == BCMA_CHIP_ID_BCM53018) {
893 + struct bcma_device *ns_core;
894 + int ns_gmac;
895 +
896 + /* Northstar has 4 GMAC cores */
897 + for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
898 + /* As per Northstar requirements, all GMACs have to be
899 + * reset before any one of them is accessed. bgmac_chip_reset()
900 + * calls bcma_core_enable() only for this core, so reset
901 + * the other three GMACs here.
902 + */
903 + ns_core = bcma_find_core_unit(core->bus,
904 + BCMA_CORE_MAC_GBIT,
905 + ns_gmac);
906 + if (ns_core && !bcma_core_is_enabled(ns_core))
907 + bcma_core_enable(ns_core, 0);
908 + }
909 + }
910 +
911 err = bgmac_dma_alloc(bgmac);
912 if (err) {
913 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
914 @@ -1473,14 +1518,12 @@ static int bgmac_probe(struct bcma_devic
915 err = bgmac_mii_register(bgmac);
916 if (err) {
917 bgmac_err(bgmac, "Cannot register MDIO\n");
918 - err = -ENOTSUPP;
919 goto err_dma_free;
920 }
921
922 err = register_netdev(bgmac->net_dev);
923 if (err) {
924 bgmac_err(bgmac, "Cannot register net device\n");
925 - err = -ENOTSUPP;
926 goto err_mii_unregister;
927 }
928
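Before the bgmac.h hunk, one detail from the bgmac.c changes above is worth spelling out: when bgmac_dma_unaligned() reports an unaligned descriptor ring, the driver keeps ring->index_base = lower_32_bits(ring->dma_base), programs the DMA index registers relative to it, and rebases the status-pointer readback the same way before turning it back into a slot number. A small sketch of that arithmetic, assuming hypothetical my_* names and a stand-in field mask:

/* Sketch of the unaligned-ring index arithmetic used above (names hypothetical). */
#include <linux/types.h>

#define MY_DMA_STATDPTR	0x00000fff	/* stand-in for the BGMAC_DMA_*_STATDPTR mask */

struct my_desc {
	__le32 ctl0, ctl1, addr_low, addr_high;	/* 16-byte descriptor */
};

struct my_ring {
	dma_addr_t dma_base;	/* physical base of the descriptor ring */
	u32 index_base;		/* lower_32_bits(dma_base) if unaligned, else 0 */
};

/* Value to write into the DMA_*_INDEX register for a given slot. */
static u32 my_slot_to_hw_index(struct my_ring *ring, unsigned int slot)
{
	return ring->index_base + slot * sizeof(struct my_desc);
}

/* Recover the slot number from a DMA_*_STATUS readback. */
static unsigned int my_hw_status_to_slot(struct my_ring *ring, u32 status)
{
	u32 dptr = status & MY_DMA_STATDPTR;

	dptr -= ring->index_base;
	dptr &= MY_DMA_STATDPTR;	/* keep the subtraction inside the field */
	return dptr / sizeof(struct my_desc);
}

The mask applied again after the subtraction keeps the result inside the descriptor-pointer field, which is why the TX and RX status hunks above apply BGMAC_DMA_*_STATDPTR twice.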
929 --- a/drivers/net/ethernet/broadcom/bgmac.h
930 +++ b/drivers/net/ethernet/broadcom/bgmac.h
931 @@ -95,7 +95,11 @@
932 #define BGMAC_RXQ_CTL_MDP_SHIFT 24
933 #define BGMAC_GPIO_SELECT 0x194
934 #define BGMAC_GPIO_OUTPUT_EN 0x198
935 -/* For 0x1e0 see BCMA_CLKCTLST */
936 +
937 +/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
938 +#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
939 +#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
940 +
941 #define BGMAC_HW_WAR 0x1e4
942 #define BGMAC_PWR_CTL 0x1e8
943 #define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
944 @@ -185,6 +189,7 @@
945 #define BGMAC_CMDCFG_ES_10 0x00000000
946 #define BGMAC_CMDCFG_ES_100 0x00000004
947 #define BGMAC_CMDCFG_ES_1000 0x00000008
948 +#define BGMAC_CMDCFG_ES_2500 0x0000000C
949 #define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
950 #define BGMAC_CMDCFG_PAD_EN 0x00000020
951 #define BGMAC_CMDCFG_CF 0x00000040
952 @@ -193,7 +198,9 @@
953 #define BGMAC_CMDCFG_TAI 0x00000200
954 #define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
955 #define BGMAC_CMDCFG_HD_SHIFT 10
956 -#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
957 +#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
958 +#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
959 +#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
960 #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
961 #define BGMAC_CMDCFG_AE 0x00400000
962 #define BGMAC_CMDCFG_CFE 0x00800000
963 @@ -216,27 +223,6 @@
964 #define BGMAC_RX_STATUS 0xb38
965 #define BGMAC_TX_STATUS 0xb3c
966
967 -#define BGMAC_PHY_CTL 0x00
968 -#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
969 -#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
970 -#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
971 -#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
972 -#define BGMAC_PHY_CTL_SPEED 0x2000
973 -#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
974 -#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
975 -/* Helpers */
976 -#define BGMAC_PHY_CTL_SPEED_10 0
977 -#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
978 -#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
979 -#define BGMAC_PHY_ADV 0x04
980 -#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
981 -#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
982 -#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
983 -#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
984 -#define BGMAC_PHY_ADV2 0x09
985 -#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
986 -#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
987 -
988 /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
989 #define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
990 #define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
991 @@ -254,9 +240,34 @@
992 #define BGMAC_DMA_TX_SUSPEND 0x00000002
993 #define BGMAC_DMA_TX_LOOPBACK 0x00000004
994 #define BGMAC_DMA_TX_FLUSH 0x00000010
995 +#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
996 +#define BGMAC_DMA_TX_MR_SHIFT 6
997 +#define BGMAC_DMA_TX_MR_1 0
998 +#define BGMAC_DMA_TX_MR_2 1
999 #define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
1000 #define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
1001 #define BGMAC_DMA_TX_ADDREXT_SHIFT 16
1002 +#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
1003 +#define BGMAC_DMA_TX_BL_SHIFT 18
1004 +#define BGMAC_DMA_TX_BL_16 0
1005 +#define BGMAC_DMA_TX_BL_32 1
1006 +#define BGMAC_DMA_TX_BL_64 2
1007 +#define BGMAC_DMA_TX_BL_128 3
1008 +#define BGMAC_DMA_TX_BL_256 4
1009 +#define BGMAC_DMA_TX_BL_512 5
1010 +#define BGMAC_DMA_TX_BL_1024 6
1011 +#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
1012 +#define BGMAC_DMA_TX_PC_SHIFT 21
1013 +#define BGMAC_DMA_TX_PC_0 0
1014 +#define BGMAC_DMA_TX_PC_4 1
1015 +#define BGMAC_DMA_TX_PC_8 2
1016 +#define BGMAC_DMA_TX_PC_16 3
1017 +#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
1018 +#define BGMAC_DMA_TX_PT_SHIFT 24
1019 +#define BGMAC_DMA_TX_PT_1 0
1020 +#define BGMAC_DMA_TX_PT_2 1
1021 +#define BGMAC_DMA_TX_PT_4 2
1022 +#define BGMAC_DMA_TX_PT_8 3
1023 #define BGMAC_DMA_TX_INDEX 0x04
1024 #define BGMAC_DMA_TX_RINGLO 0x08
1025 #define BGMAC_DMA_TX_RINGHI 0x0C
1026 @@ -284,8 +295,33 @@
1027 #define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
1028 #define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
1029 #define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
1030 +#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
1031 +#define BGMAC_DMA_RX_MR_SHIFT 6
1032 +#define BGMAC_DMA_TX_MR_1 0
1033 +#define BGMAC_DMA_TX_MR_2 1
1034 #define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
1035 #define BGMAC_DMA_RX_ADDREXT_SHIFT 16
1036 +#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
1037 +#define BGMAC_DMA_RX_BL_SHIFT 18
1038 +#define BGMAC_DMA_RX_BL_16 0
1039 +#define BGMAC_DMA_RX_BL_32 1
1040 +#define BGMAC_DMA_RX_BL_64 2
1041 +#define BGMAC_DMA_RX_BL_128 3
1042 +#define BGMAC_DMA_RX_BL_256 4
1043 +#define BGMAC_DMA_RX_BL_512 5
1044 +#define BGMAC_DMA_RX_BL_1024 6
1045 +#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
1046 +#define BGMAC_DMA_RX_PC_SHIFT 21
1047 +#define BGMAC_DMA_RX_PC_0 0
1048 +#define BGMAC_DMA_RX_PC_4 1
1049 +#define BGMAC_DMA_RX_PC_8 2
1050 +#define BGMAC_DMA_RX_PC_16 3
1051 +#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
1052 +#define BGMAC_DMA_RX_PT_SHIFT 24
1053 +#define BGMAC_DMA_RX_PT_1 0
1054 +#define BGMAC_DMA_RX_PT_2 1
1055 +#define BGMAC_DMA_RX_PT_4 2
1056 +#define BGMAC_DMA_RX_PT_8 3
1057 #define BGMAC_DMA_RX_INDEX 0x24
1058 #define BGMAC_DMA_RX_RINGLO 0x28
1059 #define BGMAC_DMA_RX_RINGHI 0x2C
1060 @@ -342,10 +378,6 @@
1061 #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
1062 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
1063
1064 -#define BGMAC_SPEED_10 0x0001
1065 -#define BGMAC_SPEED_100 0x0002
1066 -#define BGMAC_SPEED_1000 0x0004
1067 -
1068 #define BGMAC_WEIGHT 64
1069
1070 #define ETHER_MAX_LEN 1518
1071 @@ -384,6 +416,8 @@ struct bgmac_dma_ring {
1072 u16 mmio_base;
1073 struct bgmac_dma_desc *cpu_base;
1074 dma_addr_t dma_base;
1075 + u32 index_base; /* Used for unaligned rings only, otherwise 0 */
1076 + bool unaligned;
1077
1078 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
1079 };
1080 @@ -400,6 +434,7 @@ struct bgmac {
1081 struct net_device *net_dev;
1082 struct napi_struct napi;
1083 struct mii_bus *mii_bus;
1084 + struct phy_device *phy_dev;
1085
1086 /* DMA */
1087 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
1088 @@ -414,10 +449,9 @@ struct bgmac {
1089 u32 int_mask;
1090 u32 int_status;
1091
1092 - /* Speed-related */
1093 - int speed;
1094 - bool autoneg;
1095 - bool full_duplex;
1096 + /* Current MAC state */
1097 + int mac_speed;
1098 + int mac_duplex;
1099
1100 u8 phyaddr;
1101 bool has_robosw;
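Taken together, the bgmac.c hunks replace the driver's hand-rolled PHY handling with the standard phylib pattern: connect to the PHY with an adjust_link callback when the MDIO bus is registered, start/stop the PHY state machine from ndo_open/ndo_stop, and let the callback push the negotiated speed/duplex into the MAC. A condensed, hedged sketch of that pattern follows; the my_* names are hypothetical, while phy_connect(), phy_start(), phy_stop() and phy_print_status() are the real phylib API of this kernel generation.

/* Condensed phylib glue sketch (hypothetical my_* driver, real phylib API). */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct my_priv {
	struct net_device *net_dev;
	struct phy_device *phy_dev;
	int mac_speed;
	int mac_duplex;
};

static void my_apply_mac_speed(struct my_priv *priv)
{
	/* Program the MAC speed/duplex registers here (driver specific). */
}

/* Called by phylib whenever the link state changes. */
static void my_adjust_link(struct net_device *net_dev)
{
	struct my_priv *priv = netdev_priv(net_dev);
	struct phy_device *phy_dev = priv->phy_dev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != priv->mac_speed) {
			priv->mac_speed = phy_dev->speed;
			update = true;
		}
		if (phy_dev->duplex != priv->mac_duplex) {
			priv->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}
	if (update) {
		my_apply_mac_speed(priv);
		phy_print_status(phy_dev);
	}
}

/* Called once from MDIO bus registration, like bgmac_mii_register() above. */
static int my_connect_phy(struct my_priv *priv, struct mii_bus *mii_bus, u8 phyaddr)
{
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phy_dev;

	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, phyaddr);
	phy_dev = phy_connect(priv->net_dev, bus_id, &my_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev))
		return PTR_ERR(phy_dev);
	priv->phy_dev = phy_dev;
	return 0;
}

/* ndo_open / ndo_stop counterparts. */
static void my_phy_up(struct my_priv *priv)
{
	phy_start(priv->phy_dev);
}

static void my_phy_down(struct my_priv *priv)
{
	phy_stop(priv->phy_dev);
}

With this glue in place the ethtool and MII ioctl paths can simply be forwarded to phy_ethtool_gset()/phy_ethtool_sset() and phy_mii_ioctl(), as the patch does.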