4987e5e720f8d02e8efff620de13a5059b11df12
[openwrt/svn-archive/archive.git] / target / linux / bcm53xx / patches-3.10 / 212-bgmac_implement_unaligned_addressing.patch
1 bgmac: implement unaligned addressing for DMA rings that support it
2
3 This is an important patch for new devices that support unaligned
4 addressing. These devices suffer from a backward-compatibility bug in
5 the DMA engine. In theory we should be able to use the old mechanism,
6 but in practice the DMA address seems to be randomly copied into the
7 status register when the hardware reaches the end of a ring. This breaks
8 reading the slot number from the status register, and we can't use DMA anymore.
9
10 Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
11
12 --- a/drivers/net/ethernet/broadcom/bgmac.c
13 +++ b/drivers/net/ethernet/broadcom/bgmac.c
14 @@ -162,6 +162,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
15 if (++ring->end >= BGMAC_TX_RING_SLOTS)
16 ring->end = 0;
17 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
18 + ring->index_base +
19 ring->end * sizeof(struct bgmac_dma_desc));
20
21 /* Always keep one slot free to allow detecting bugged calls. */
22 @@ -186,6 +187,8 @@ static void bgmac_dma_tx_free(struct bgm
23 /* The last slot that hardware didn't consume yet */
24 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
25 empty_slot &= BGMAC_DMA_TX_STATDPTR;
26 + empty_slot -= ring->index_base;
27 + empty_slot &= BGMAC_DMA_TX_STATDPTR;
28 empty_slot /= sizeof(struct bgmac_dma_desc);
29
30 while (ring->start != empty_slot) {
31 @@ -279,6 +282,8 @@ static int bgmac_dma_rx_read(struct bgma
32
33 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
34 end_slot &= BGMAC_DMA_RX_STATDPTR;
35 + end_slot -= ring->index_base;
36 + end_slot &= BGMAC_DMA_RX_STATDPTR;
37 end_slot /= sizeof(struct bgmac_dma_desc);
38
39 ring->end = end_slot;
40 @@ -423,9 +428,6 @@ static int bgmac_dma_alloc(struct bgmac
41 ring = &bgmac->tx_ring[i];
42 ring->num_slots = BGMAC_TX_RING_SLOTS;
43 ring->mmio_base = ring_base[i];
44 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
45 - bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
46 - ring->mmio_base);
47
48 /* Alloc ring of descriptors */
49 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
50 @@ -440,6 +442,13 @@ static int bgmac_dma_alloc(struct bgmac
51 if (ring->dma_base & 0xC0000000)
52 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
53
54 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
55 + BGMAC_DMA_RING_TX);
56 + if (ring->unaligned)
57 + ring->index_base = lower_32_bits(ring->dma_base);
58 + else
59 + ring->index_base = 0;
60 +
61 /* No need to alloc TX slots yet */
62 }
63
64 @@ -449,9 +458,6 @@ static int bgmac_dma_alloc(struct bgmac
65 ring = &bgmac->rx_ring[i];
66 ring->num_slots = BGMAC_RX_RING_SLOTS;
67 ring->mmio_base = ring_base[i];
68 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
69 - bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
70 - ring->mmio_base);
71
72 /* Alloc ring of descriptors */
73 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
74 @@ -467,6 +473,13 @@ static int bgmac_dma_alloc(struct bgmac
75 if (ring->dma_base & 0xC0000000)
76 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
77
78 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
79 + BGMAC_DMA_RING_RX);
80 + if (ring->unaligned)
81 + ring->index_base = lower_32_bits(ring->dma_base);
82 + else
83 + ring->index_base = 0;
84 +
85 /* Alloc RX slots */
86 for (j = 0; j < ring->num_slots; j++) {
87 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
88 @@ -494,12 +507,14 @@ static void bgmac_dma_init(struct bgmac
89 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
90 ring = &bgmac->tx_ring[i];
91
92 - /* We don't implement unaligned addressing, so enable first */
93 - bgmac_dma_tx_enable(bgmac, ring);
94 + if (!ring->unaligned)
95 + bgmac_dma_tx_enable(bgmac, ring);
96 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
97 lower_32_bits(ring->dma_base));
98 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
99 upper_32_bits(ring->dma_base));
100 + if (ring->unaligned)
101 + bgmac_dma_tx_enable(bgmac, ring);
102
103 ring->start = 0;
104 ring->end = 0; /* Points the slot that should *not* be read */
105 @@ -510,12 +525,14 @@ static void bgmac_dma_init(struct bgmac
106
107 ring = &bgmac->rx_ring[i];
108
109 - /* We don't implement unaligned addressing, so enable first */
110 - bgmac_dma_rx_enable(bgmac, ring);
111 + if (!ring->unaligned)
112 + bgmac_dma_rx_enable(bgmac, ring);
113 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
114 lower_32_bits(ring->dma_base));
115 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
116 upper_32_bits(ring->dma_base));
117 + if (ring->unaligned)
118 + bgmac_dma_rx_enable(bgmac, ring);
119
120 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
121 j++, dma_desc++) {
122 @@ -536,6 +553,7 @@ static void bgmac_dma_init(struct bgmac
123 }
124
125 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
126 + ring->index_base +
127 ring->num_slots * sizeof(struct bgmac_dma_desc));
128
129 ring->start = 0;
130 --- a/drivers/net/ethernet/broadcom/bgmac.h
131 +++ b/drivers/net/ethernet/broadcom/bgmac.h
132 @@ -386,6 +386,8 @@ struct bgmac_dma_ring {
133 u16 mmio_base;
134 struct bgmac_dma_desc *cpu_base;
135 dma_addr_t dma_base;
136 + u32 index_base; /* Used for unaligned rings only, otherwise 0 */
137 + bool unaligned;
138
139 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
140 };