target/linux/mvebu/patches-4.4/046-net-mvneta-Use-the-new-hwbm-framework.patch
From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Mon, 14 Mar 2016 09:39:05 +0100
Subject: [PATCH] net: mvneta: Use the new hwbm framework

Now that the hardware buffer management framework has been introduced,
let's use it.

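In short: the driver embeds a struct hwbm_pool in its own pool
structure, fills in the pool's size and frag_size, registers a
construct() callback plus a priv pointer, and then delegates buffer
allocation to hwbm_pool_add() and hwbm_pool_refill(). A rough sketch of
that pattern, using only names this patch introduces (error handling
elided):

	struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

	hwbm_pool->frag_size =
		SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* called by hwbm for every buffer it allocates */
	hwbm_pool->construct = mvneta_bm_construct;
	hwbm_pool->priv = new_pool;

	/* allocate hwbm_pool->size buffers, running ->construct() on
	 * each one; returns the number actually added to the pool
	 */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
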
Tested-by: Sebastian Careba <nitroshift@yahoo.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

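Note on the framework side (net/core/hwbm.c, part of the hwbm patch
this one builds on and not modified here): hwbm_pool_refill() allocates
a fragment of frag_size bytes (netdev_alloc_frag() when it fits in a
page, kmalloc() otherwise) and then runs the driver's construct() hook
on it, freeing the buffer again if the hook fails. That is why the
driver-side allocator below shrinks to just the construct() callback.
Sketched from the 4.5-era source, condensed rather than quoted:

	int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
	{
		void *buf = bm_pool->frag_size <= PAGE_SIZE ?
			netdev_alloc_frag(bm_pool->frag_size) :
			kmalloc(bm_pool->frag_size, gfp);

		if (!buf)
			return -ENOMEM;

		/* for mvneta, construct() DMA-maps the buffer and hands
		 * its physical address to the BM unit
		 */
		if (bm_pool->construct && bm_pool->construct(bm_pool, buf)) {
			hwbm_buf_free(bm_pool, buf);
			return -ENOMEM;
		}

		return 0;
	}
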
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,6 +43,7 @@ config MVMDIO
 config MVNETA_BM
 	tristate "Marvell Armada 38x/XP network interface BM support"
 	depends on MVNETA
+	select HWBM
 	---help---
 	  This driver supports auxiliary block of the network
 	  interface units in the Marvell ARMADA XP and ARMADA 38x SoC
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -1024,11 +1025,12 @@ static int mvneta_bm_port_init(struct pl
 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 {
 	struct mvneta_bm_pool *bm_pool = pp->pool_long;
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	int num;
 
 	/* Release all buffers from long pool */
 	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
-	if (bm_pool->buf_num) {
+	if (hwbm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n",
 		     bm_pool->id);
 		goto bm_mtu_err;
@@ -1036,14 +1038,14 @@ static void mvneta_bm_update_mtu(struct
 
 	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
 	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
-	bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-			     SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
+	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			       SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size);
-	if (num != bm_pool->size) {
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
-		     bm_pool->id, num, bm_pool->size);
+		     bm_pool->id, num, hwbm_pool->size);
 		goto bm_mtu_err;
 	}
 	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
@@ -2068,14 +2070,14 @@ err_drop_frame:
 		}
 
 		/* Refill processing */
-		err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool);
+		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->missed++;
 			goto err_drop_frame_ret_pool;
 		}
 
-		frag_size = bm_pool->frag_size;
+		frag_size = bm_pool->hwbm_pool.frag_size;
 
 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
 
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -10,16 +10,17 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/genalloc.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
-#include <linux/io.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 
 #define MVNETA_BM_DRIVER_NAME "mvneta_bm"
@@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(st
 	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
 }
 
-/* Allocate skb for BM pool */
-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       dma_addr_t *buf_phys_addr)
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
 {
-	void *buf;
+	struct mvneta_bm_pool *bm_pool =
+		(struct mvneta_bm_pool *)hwbm_pool->priv;
+	struct mvneta_bm *priv = bm_pool->priv;
 	dma_addr_t phys_addr;
 
-	buf = mvneta_frag_alloc(bm_pool->frag_size);
-	if (!buf)
-		return NULL;
-
 	/* In order to update buf_cookie field of RX descriptor properly,
 	 * BM hardware expects buf virtual address to be placed in the
 	 * first four bytes of mapped buffer.
@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm
 	*(u32 *)buf = (u32)buf;
 	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
 				   DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
-		mvneta_frag_free(bm_pool->frag_size, buf);
-		return NULL;
-	}
-	*buf_phys_addr = phys_addr;
-
-	return buf;
-}
-
-/* Refill processing for HW buffer management */
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool)
-{
-	dma_addr_t buf_phys_addr;
-	void *buf;
-
-	buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
-	if (!buf)
+	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
 		return -ENOMEM;
 
-	mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
-
+	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
-
-/* Allocate buffers for the pool */
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num)
-{
-	int err, i;
-
-	if (bm_pool->buf_num == bm_pool->size) {
-		dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
-			bm_pool->id);
-		return bm_pool->buf_num;
-	}
-
-	if (buf_num < 0 ||
-	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
-		dev_err(&priv->pdev->dev,
-			"cannot allocate %d buffers for pool %d\n",
-			buf_num, bm_pool->id);
-		return 0;
-	}
-
-	for (i = 0; i < buf_num; i++) {
-		err = mvneta_bm_pool_refill(priv, bm_pool);
-		if (err < 0)
-			break;
-	}
-
-	/* Update BM driver with number of buffers added to pool */
-	bm_pool->buf_num += i;
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
-		bm_pool->frag_size);
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: %d of %d buffers added\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, i, buf_num);
-
-	return i;
-}
-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
 
 /* Create pool */
 static int mvneta_bm_pool_create(struct mvneta_bm *priv,
@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct
 	struct platform_device *pdev = priv->pdev;
 	u8 target_id, attr;
 	int size_bytes, err;
-
-	size_bytes = sizeof(u32) * bm_pool->size;
+	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
 	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
 						&bm_pool->phys_addr,
 						GFP_KERNEL);
@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_us
 
 	/* Allocate buffers in case BM pool hasn't been used yet */
 	if (new_pool->type == MVNETA_BM_FREE) {
+		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+		new_pool->priv = priv;
 		new_pool->type = type;
 		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
-		new_pool->frag_size =
+		hwbm_pool->frag_size =
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		hwbm_pool->construct = mvneta_bm_construct;
+		hwbm_pool->priv = new_pool;
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_us
 		}
 
 		/* Allocate buffers for this pool */
-		num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
-		if (num != new_pool->size) {
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
-			     new_pool->id, num, new_pool->size);
+			     new_pool->id, num, hwbm_pool->size);
 			return NULL;
 		}
 	}
@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_b
 
 	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
-	for (i = 0; i < bm_pool->buf_num; i++) {
+	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
 		dma_addr_t buf_phys_addr;
 		u32 *vaddr;
 
@@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_b
 
 		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
-		mvneta_frag_free(bm_pool->frag_size, vaddr);
+		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
 	}
 
 	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
 	/* Update BM driver with number of buffers removed from pool */
-	bm_pool->buf_num -= i;
+	bm_pool->hwbm_pool.buf_num -= i;
 }
 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 
@@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map)
 {
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	bm_pool->port_map &= ~port_map;
 	if (bm_pool->port_map)
 		return;
@@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvnet
 	bm_pool->type = MVNETA_BM_FREE;
 
 	mvneta_bm_bufs_free(priv, bm_pool, port_map);
-	if (bm_pool->buf_num)
+	if (hwbm_pool->buf_num)
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 
 	if (bm_pool->virt_addr) {
-		dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
+		dma_free_coherent(&priv->pdev->dev,
+				  sizeof(u32) * hwbm_pool->size,
 				  bm_pool->virt_addr, bm_pool->phys_addr);
 		bm_pool->virt_addr = NULL;
 	}
@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct
 					     MVNETA_BM_POOL_CAP_ALIGN));
 			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
 		}
-		bm_pool->size = size;
+		bm_pool->hwbm_pool.size = size;
 
 		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
-				bm_pool->size);
+				bm_pool->hwbm_pool.size);
 
 		/* Obtain custom pkt_size from DT */
 		sprintf(prop, "pool%d,pkt-size", i);
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -108,20 +108,15 @@ struct mvneta_bm {
 };
 
 struct mvneta_bm_pool {
+	struct hwbm_pool hwbm_pool;
 	/* Pool number in the range 0-3 */
 	u8 id;
 	enum mvneta_bm_type type;
 
-	/* Buffer Pointers Pool External (BPPE) size in number of bytes */
-	int size;
-	/* Number of buffers used by this pool */
-	int buf_num;
-	/* Pool buffer size */
-	int buf_size;
 	/* Packet size */
 	int pkt_size;
-	/* Single frag size */
-	u32 frag_size;
+	/* Size of the buffer accessed through DMA */
+	u32 buf_size;
 
 	/* BPPE virtual base address */
 	u32 *virt_addr;
@@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvnet
 			    struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map);
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool);
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
@@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvnet
 			    struct mvneta_bm_pool *bm_pool, u8 port_map) {}
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map) {}
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num) { return 0; }
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool) {return 0; }
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,