1 bgmac: implement unaligned addressing for DMA rings that support it
3 This is an important patch for new devices that support unaligned
4 addressing. Those devices suffer from a backward-compatibility bug in
5 the DMA engine. In theory we should be able to use the old mechanism, but in
6 practice the DMA address seems to be randomly copied into the status register
7 when the hardware reaches the end of a ring. This breaks reading the slot number
8 from the status register, and we can't use DMA anymore.
10 Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
12 --- a/drivers/net/ethernet/broadcom/bgmac.c
13 +++ b/drivers/net/ethernet/broadcom/bgmac.c
14 @@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
15 if (++ring->end >= BGMAC_TX_RING_SLOTS)
17 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
19 ring->end * sizeof(struct bgmac_dma_desc));
21 /* Always keep one slot free to allow detecting bugged calls. */
22 @@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgm
23 /* The last slot that hardware didn't consume yet */
24 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
25 empty_slot &= BGMAC_DMA_TX_STATDPTR;
26 + empty_slot -= ring->index_base;
27 + empty_slot &= BGMAC_DMA_TX_STATDPTR;
28 empty_slot /= sizeof(struct bgmac_dma_desc);
30 while (ring->start != empty_slot) {
31 @@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgma
33 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
34 end_slot &= BGMAC_DMA_RX_STATDPTR;
35 + end_slot -= ring->index_base;
36 + end_slot &= BGMAC_DMA_RX_STATDPTR;
37 end_slot /= sizeof(struct bgmac_dma_desc);
40 @@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac
41 ring = &bgmac->tx_ring[i];
42 ring->num_slots = BGMAC_TX_RING_SLOTS;
43 ring->mmio_base = ring_base[i];
44 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
45 - bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
46 - ring->mmio_base);
48 /* Alloc ring of descriptors */
49 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
50 @@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac
51 if (ring->dma_base & 0xC0000000)
52 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
54 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
55 + BGMAC_DMA_RING_TX);
56 + if (ring->unaligned)
57 + ring->index_base = lower_32_bits(ring->dma_base);
58 + else
59 + ring->index_base = 0;
61 /* No need to alloc TX slots yet */
64 @@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac
65 ring = &bgmac->rx_ring[i];
66 ring->num_slots = BGMAC_RX_RING_SLOTS;
67 ring->mmio_base = ring_base[i];
68 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
69 - bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
70 - ring->mmio_base);
72 /* Alloc ring of descriptors */
73 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
74 @@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac
75 if (ring->dma_base & 0xC0000000)
76 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
78 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
79 + BGMAC_DMA_RING_RX);
80 + if (ring->unaligned)
81 + ring->index_base = lower_32_bits(ring->dma_base);
82 + else
83 + ring->index_base = 0;
86 for (j = 0; j < ring->num_slots; j++) {
87 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
88 @@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac
89 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
90 ring = &bgmac->tx_ring[i];
92 - /* We don't implement unaligned addressing, so enable first */
93 - bgmac_dma_tx_enable(bgmac, ring);
94 + if (!ring->unaligned)
95 + bgmac_dma_tx_enable(bgmac, ring);
96 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
97 lower_32_bits(ring->dma_base));
98 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
99 upper_32_bits(ring->dma_base));
100 + if (ring->unaligned)
101 + bgmac_dma_tx_enable(bgmac, ring);
104 ring->end = 0; /* Points the slot that should *not* be read */
105 @@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac
107 ring = &bgmac->rx_ring[i];
109 - /* We don't implement unaligned addressing, so enable first */
110 - bgmac_dma_rx_enable(bgmac, ring);
111 + if (!ring->unaligned)
112 + bgmac_dma_rx_enable(bgmac, ring);
113 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
114 lower_32_bits(ring->dma_base));
115 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
116 upper_32_bits(ring->dma_base));
117 + if (ring->unaligned)
118 + bgmac_dma_rx_enable(bgmac, ring);
120 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
122 @@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac
125 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
127 ring->num_slots * sizeof(struct bgmac_dma_desc));
130 --- a/drivers/net/ethernet/broadcom/bgmac.h
131 +++ b/drivers/net/ethernet/broadcom/bgmac.h
132 @@ -384,6 +384,8 @@ struct bgmac_dma_ring {
134 struct bgmac_dma_desc *cpu_base;
136 + u32 index_base; /* Used for unaligned rings only, otherwise 0 */
139 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];