Patches for bgmac backported from net-next/master. They add byte queue limits (BQL) accounting to the TX path, handle DMA rings that the core treats as unaligned by offsetting hardware ring indexes with index_base, read the et_swtype NVRAM variable into a large enough buffer, switch the default interface type from RMII to MII (and fix the BGMAC_CHIPCTL_1_IF_TYPE_MII define name), and tighten the Kconfig entry (depend on BCM47XX, select PHYLIB).

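The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added by the bgmac.c hunks are the kernel's BQL hooks. A rough sketch of that pattern, for orientation only (the example_* functions are invented for illustration, not driver code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Report bytes handed to the hardware at transmit time. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *net_dev)
{
	/* ...map skb and fill its TX descriptor here... */
	netdev_sent_queue(net_dev, skb->len);
	return NETDEV_TX_OK;
}

/* Report packets/bytes back when TX descriptors are reclaimed. */
static void example_tx_complete(struct net_device *net_dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* for each reclaimed slot: bytes_compl += skb->len; pkts_compl++; */
	netdev_completed_queue(net_dev, pkts_compl, bytes_compl);
}

/* Clear the accounting whenever a chip reset empties the TX ring. */
static void example_chip_reset(struct net_device *net_dev)
{
	netdev_reset_queue(net_dev);
}
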
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -132,7 +132,8 @@ config BNX2X_SRIOV
 
 config BGMAC
 	tristate "BCMA bus GBit core support"
-	depends on BCMA_HOST_SOC && HAS_DMA
+	depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
+	select PHYLIB
 	---help---
 	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
 	  They can be found on BCM47xx SoCs and provide gigabit ethernet.
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
 	dma_desc->ctl0 = cpu_to_le32(ctl0);
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+	netdev_sent_queue(net_dev, skb->len);
+
 	wmb();
 
 	/* Increase ring->end to point empty slot. We tell hardware the first
@@ -157,6 +159,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
 		ring->end = 0;
 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+		    ring->index_base +
 		    ring->end * sizeof(struct bgmac_dma_desc));
 
 	/* Always keep one slot free to allow detecting bugged calls. */
@@ -177,10 +180,13 @@ static void bgmac_dma_tx_free(struct bgm
 	struct device *dma_dev = bgmac->core->dma_dev;
 	int empty_slot;
 	bool freed = false;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot -= ring->index_base;
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
 	empty_slot /= sizeof(struct bgmac_dma_desc);
 
 	while (ring->start != empty_slot) {
@@ -192,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgm
 					 slot->skb->len, DMA_TO_DEVICE);
 			slot->dma_addr = 0;
 
+			bytes_compl += slot->skb->len;
+			pkts_compl++;
+
 			/* Free memory! :) */
 			dev_kfree_skb(slot->skb);
 			slot->skb = NULL;
@@ -205,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgm
 		freed = true;
 	}
 
+	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
 	if (freed && netif_queue_stopped(bgmac->net_dev))
 		netif_wake_queue(bgmac->net_dev);
 }
@@ -274,6 +285,8 @@ static int bgmac_dma_rx_read(struct bgma
 
 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
 	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot -= ring->index_base;
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
 	end_slot /= sizeof(struct bgmac_dma_desc);
 
 	ring->end = end_slot;
@@ -418,9 +431,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->tx_ring[i];
 		ring->num_slots = BGMAC_TX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-			bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +445,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_TX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* No need to alloc TX slots yet */
 	}
 
@@ -444,9 +461,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->rx_ring[i];
 		ring->num_slots = BGMAC_RX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-			bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +476,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_RX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* Alloc RX slots */
 		for (j = 0; j < ring->num_slots; j++) {
 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +510,14 @@ static void bgmac_dma_init(struct bgmac
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
 		ring = &bgmac->tx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_tx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 
 		ring->start = 0;
 		ring->end = 0;	/* Points the slot that should *not* be read */
@@ -505,12 +528,14 @@ static void bgmac_dma_init(struct bgmac
 
 		ring = &bgmac->rx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_rx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 
 		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
 		     j++, dma_desc++) {
@@ -531,6 +556,7 @@ static void bgmac_dma_init(struct bgmac
 		}
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+			    ring->index_base +
 			    ring->num_slots * sizeof(struct bgmac_dma_desc));
 
 		ring->start = 0;
@@ -908,10 +934,10 @@ static void bgmac_chip_reset(struct bgma
 		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
 		u8 et_swtype = 0;
 		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
-			     BGMAC_CHIPCTL_1_IF_TYPE_RMII;
-		char buf[2];
+			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
+		char buf[4];
 
-		if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
 			if (kstrtou8(buf, 0, &et_swtype))
 				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
 					  buf);
@@ -970,6 +996,8 @@ static void bgmac_chip_reset(struct bgma
 	bgmac_miiconfig(bgmac);
 	bgmac_phy_init(bgmac);
 
+	netdev_reset_queue(bgmac->net_dev);
+
 	bgmac->int_status = 0;
 }
 
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
 
 #define BGMAC_CHIPCTL_1_IF_TYPE_MASK	0x00000030
 #define BGMAC_CHIPCTL_1_IF_TYPE_RMII	0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI	0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII	0x00000010
 #define BGMAC_CHIPCTL_1_IF_TYPE_RGMII	0x00000020
 #define BGMAC_CHIPCTL_1_SW_TYPE_MASK	0x000000C0
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY	0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
 	u16 mmio_base;
 	struct bgmac_dma_desc *cpu_base;
 	dma_addr_t dma_base;
+	u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+	bool unaligned;
 
 	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };
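
Not part of the patch: a self-contained userspace sketch of the unaligned-ring index arithmetic added above. When bgmac_dma_unaligned() flags a ring, the driver keeps lower_32_bits(ring->dma_base) in index_base, adds it to every value written to the DMA index registers, and subtracts it (masked to the descriptor-pointer field) from the status registers before computing a slot number. DESC_SIZE and STATDPTR_MASK below are assumed values for illustration, not constants copied from bgmac.h.

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE	16U		/* assumed sizeof(struct bgmac_dma_desc) */
#define STATDPTR_MASK	0x00000fffU	/* assumed width of the STATDPTR field */

/* Slot number -> value the driver writes to the DMA index register. */
static uint32_t slot_to_hw_index(uint32_t index_base, uint32_t slot)
{
	return index_base + slot * DESC_SIZE;
}

/* DMA status register -> slot the hardware has processed up to. */
static uint32_t hw_status_to_slot(uint32_t index_base, uint32_t status)
{
	uint32_t off = status & STATDPTR_MASK;

	off -= index_base;	/* make the offset relative to the ring start */
	off &= STATDPTR_MASK;	/* wrap, since index_base exceeds the field width */
	return off / DESC_SIZE;
}

int main(void)
{
	/* Example: a ring whose descriptor table sits at bus address 0x1fe0. */
	uint32_t index_base = 0x1fe0;	/* what lower_32_bits(dma_base) would give */

	/* Write slot 3, then decode a status of 0x20 (which maps to slot 4). */
	printf("slot 3 -> index register 0x%x\n",
	       (unsigned int)slot_to_hw_index(index_base, 3));
	printf("status 0x20 -> slot %u\n",
	       (unsigned int)hw_status_to_slot(index_base, 0x20));
	return 0;
}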