target/linux/brcm2708/patches-3.14/0026-bcm2708_fb-DMA-acceleration-for-fb_copyarea.patch

From c2731f282848af32425043a2df88c1289538983e Mon Sep 17 00:00:00 2001
From: Siarhei Siamashka <siarhei.siamashka@gmail.com>
Date: Mon, 17 Jun 2013 16:00:25 +0300
Subject: [PATCH 26/54] bcm2708_fb: DMA acceleration for fb_copyarea

Based on http://www.raspberrypi.org/phpBB3/viewtopic.php?p=62425#p62425
Also used Simon's dmaer_master module as a reference for tweaking DMA
settings for better performance.

For now busy-looping only; IRQ support might be added later.
With a non-overclocked Raspberry Pi, the performance is ~360 MB/s
for a simple copy or ~260 MB/s for a two-pass copy (used when dragging
windows to the right).

When DMA channel 0 is used, the performance improves to ~440 MB/s.

For comparison, a VFP-optimized CPU copy can only do ~114 MB/s under
the same conditions (hindered by reading the uncached source buffer).
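
The two-pass case exists because the DMA engine only walks each scanline
forwards: when source and destination overlap on the same row with
dx > sx, a one-pass copy would overwrite pixels it still has to read.
As a rough CPU-side model of what the chained control blocks achieve
(illustrative only -- the names below are made up, and the driver itself
bounces whole groups of scanlines through a 48K scratch area using 2D
DMA control blocks rather than copying row by row on the CPU):

  #include <stdint.h>
  #include <string.h>

  /* Offsets are in bytes; 'scratch' must hold at least one scanline. */
  static void copy_rows_two_pass(uint8_t *fbmem, int line_length,
                                 int src_off, int dst_off,
                                 int width_bytes, int height,
                                 uint8_t *scratch)
  {
          int y;

          for (y = 0; y < height; y++) {
                  /* pass 1: forward copy of the scanline into the scratch buffer */
                  memcpy(scratch, fbmem + src_off + y * line_length, width_bytes);
                  /* pass 2: forward copy from the scratch buffer into place */
                  memcpy(fbmem + dst_off + y * line_length, scratch, width_bytes);
          }
  }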

Signed-off-by: Siarhei Siamashka <siarhei.siamashka@gmail.com>

bcm2708_fb: report number of dma copies

Add a counter (exported via debugfs) reporting the
number of dma copies that the framebuffer driver
has done, in order to help evaluate different
optimization strategies.
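
The counters end up in a single debugfs regset32 file. A minimal
userspace sketch for dumping them, assuming debugfs is mounted at
/sys/kernel/debug and that DRIVER_NAME expands to "bcm2708_fb" (verify
the directory name on the target before relying on it):

  #include <stdio.h>

  int main(void)
  {
          /* Each line should look like "dma_copies = 0x0000002a". */
          FILE *f = fopen("/sys/kernel/debug/bcm2708_fb/stats", "r");
          char line[128];

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }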

Signed-off-by: Luke Diamand <luked@broadcom.com>

bcm2708_fb: use IRQ for DMA copies

The copyarea ioctl() uses DMA to speed things along. This
was busy-waiting for completion. This change supports using
an interrupt instead for larger transfers. For small
transfers, busy-waiting is still likely to be faster.
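
Condensed from the bcm2708_fb_copyarea() hunk below: the choice is a
simple comparison of the copied area (in pixels) against the new
dma_busy_wait_threshold module parameter (default 1<<15), with comments
added here to spell out the trade-off:

  if (pixels < dma_busy_wait_threshold) {
          /* small copy: spinning is cheaper than IRQ latency plus a
           * context switch */
          bcm_dma_start(fb->dma_chan_base, fb->cb_handle);
          bcm_dma_wait_idle(fb->dma_chan_base);
  } else {
          /* large copy: have the last control block raise an interrupt,
           * then sleep; the loop re-checks in case the wait is cut short
           * by a signal */
          cb->info |= BCM2708_DMA_INT_EN;
          bcm_dma_start(fb->dma_chan_base, fb->cb_handle);
          while (bcm_dma_is_busy(fb->dma_chan_base))
                  wait_event_interruptible(fb->dma_waitq,
                                           !bcm_dma_is_busy(fb->dma_chan_base));
  }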

Signed-off-by: Luke Diamand <luke@diamand.org>
---
 arch/arm/mach-bcm2708/dma.c              |   8 +
 arch/arm/mach-bcm2708/include/mach/dma.h |   2 +
 drivers/video/bcm2708_fb.c               | 273 ++++++++++++++++++++++++++++++-
 3 files changed, 278 insertions(+), 5 deletions(-)

diff --git a/arch/arm/mach-bcm2708/dma.c b/arch/arm/mach-bcm2708/dma.c
index 51d147a..1da2413 100644
--- a/arch/arm/mach-bcm2708/dma.c
+++ b/arch/arm/mach-bcm2708/dma.c
@@ -83,6 +83,14 @@ extern void bcm_dma_wait_idle(void __iomem *dma_chan_base)
 
 EXPORT_SYMBOL_GPL(bcm_dma_start);
 
+extern bool bcm_dma_is_busy(void __iomem *dma_chan_base)
+{
+	dsb();
+
+	return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(bcm_dma_is_busy);
+
 /* Complete an ongoing DMA (assuming its results are to be ignored)
    Does nothing if there is no DMA in progress.
    This routine waits for the current AXI transfer to complete before
diff --git a/arch/arm/mach-bcm2708/include/mach/dma.h b/arch/arm/mach-bcm2708/include/mach/dma.h
index ac7a4a0..6d2f9a0 100644
--- a/arch/arm/mach-bcm2708/include/mach/dma.h
+++ b/arch/arm/mach-bcm2708/include/mach/dma.h
@@ -62,11 +62,13 @@ struct bcm2708_dma_cb {
 	unsigned long next;
 	unsigned long pad[2];
 };
+struct scatterlist;
 
 extern int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len);
 extern void bcm_dma_start(void __iomem *dma_chan_base,
			   dma_addr_t control_block);
 extern void bcm_dma_wait_idle(void __iomem *dma_chan_base);
+extern bool bcm_dma_is_busy(void __iomem *dma_chan_base);
 extern int /*rc*/ bcm_dma_abort(void __iomem *dma_chan_base);
 
 /* When listing features we can ask for when allocating DMA channels give
diff --git a/drivers/video/bcm2708_fb.c b/drivers/video/bcm2708_fb.c
index 54cd760..798eb52 100644
--- a/drivers/video/bcm2708_fb.c
+++ b/drivers/video/bcm2708_fb.c
@@ -21,13 +21,16 @@
 #include <linux/mm.h>
 #include <linux/fb.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/printk.h>
 #include <linux/console.h>
+#include <linux/debugfs.h>
 
+#include <mach/dma.h>
 #include <mach/platform.h>
 #include <mach/vcio.h>
 
@@ -51,6 +54,10 @@ static int fbheight = 480; /* module parameter */
 static int fbdepth = 16; /* module parameter */
 static int fbswap = 0; /* module parameter */
 
+static u32 dma_busy_wait_threshold = 1<<15;
+module_param(dma_busy_wait_threshold, int, 0644);
+MODULE_PARM_DESC(dma_busy_wait_threshold, "Busy-wait for DMA completion below this area");
+
 /* this data structure describes each frame buffer device we find */
 
 struct fbinfo_s {
@@ -62,16 +69,73 @@ struct fbinfo_s {
 	u16 cmap[256];
 };
 
+struct bcm2708_fb_stats {
+	struct debugfs_regset32 regset;
+	u32 dma_copies;
+	u32 dma_irqs;
+};
+
 struct bcm2708_fb {
 	struct fb_info fb;
 	struct platform_device *dev;
 	struct fbinfo_s *info;
 	dma_addr_t dma;
 	u32 cmap[16];
+	int dma_chan;
+	int dma_irq;
+	void __iomem *dma_chan_base;
+	void *cb_base; /* DMA control blocks */
+	dma_addr_t cb_handle;
+	struct dentry *debugfs_dir;
+	wait_queue_head_t dma_waitq;
+	struct bcm2708_fb_stats stats;
 };
 
 #define to_bcm2708(info) container_of(info, struct bcm2708_fb, fb)
 
+static void bcm2708_fb_debugfs_deinit(struct bcm2708_fb *fb)
+{
+	debugfs_remove_recursive(fb->debugfs_dir);
+	fb->debugfs_dir = NULL;
+}
+
+static int bcm2708_fb_debugfs_init(struct bcm2708_fb *fb)
+{
+	static struct debugfs_reg32 stats_registers[] = {
+		{
+			"dma_copies",
+			offsetof(struct bcm2708_fb_stats, dma_copies)
+		},
+		{
+			"dma_irqs",
+			offsetof(struct bcm2708_fb_stats, dma_irqs)
+		},
+	};
+
+	fb->debugfs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
+	if (!fb->debugfs_dir) {
+		pr_warn("%s: could not create debugfs entry\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	fb->stats.regset.regs = stats_registers;
+	fb->stats.regset.nregs = ARRAY_SIZE(stats_registers);
+	fb->stats.regset.base = &fb->stats;
+
+	if (!debugfs_create_regset32(
+		"stats", 0444, fb->debugfs_dir, &fb->stats.regset)) {
+		pr_warn("%s: could not create statistics registers\n",
+			__func__);
+		goto fail;
+	}
+	return 0;
+
+fail:
+	bcm2708_fb_debugfs_deinit(fb);
+	return -EFAULT;
+}
+
 static int bcm2708_fb_set_bitfields(struct fb_var_screeninfo *var)
 {
 	int ret = 0;
@@ -322,11 +386,148 @@ static void bcm2708_fb_fillrect(struct fb_info *info,
 	cfb_fillrect(info, rect);
 }
 
+/* A helper function for configuring dma control block */
+static void set_dma_cb(struct bcm2708_dma_cb *cb,
+		       int burst_size,
+		       dma_addr_t dst,
+		       int dst_stride,
+		       dma_addr_t src,
+		       int src_stride,
+		       int w,
+		       int h)
+{
+	cb->info = BCM2708_DMA_BURST(burst_size) | BCM2708_DMA_S_WIDTH |
+		   BCM2708_DMA_S_INC | BCM2708_DMA_D_WIDTH |
+		   BCM2708_DMA_D_INC | BCM2708_DMA_TDMODE;
+	cb->dst = dst;
+	cb->src = src;
+	/*
+	 * This is not really obvious from the DMA documentation,
+	 * but the top 16 bits must be programmed to "height - 1"
+	 * and not "height" in 2D mode.
+	 */
+	cb->length = ((h - 1) << 16) | w;
+	cb->stride = ((dst_stride - w) << 16) | (u16)(src_stride - w);
+	cb->pad[0] = 0;
+	cb->pad[1] = 0;
+}
+
 static void bcm2708_fb_copyarea(struct fb_info *info,
 				const struct fb_copyarea *region)
 {
-	/*print_debug("bcm2708_fb_copyarea\n"); */
-	cfb_copyarea(info, region);
+	struct bcm2708_fb *fb = to_bcm2708(info);
+	struct bcm2708_dma_cb *cb = fb->cb_base;
+	int bytes_per_pixel = (info->var.bits_per_pixel + 7) >> 3;
+	/* Channel 0 supports larger bursts and is a bit faster */
+	int burst_size = (fb->dma_chan == 0) ? 8 : 2;
+	int pixels = region->width * region->height;
+
+	/* Fallback to cfb_copyarea() if we don't like something */
+	if (bytes_per_pixel > 4 ||
+	    info->var.xres * info->var.yres > 1920 * 1200 ||
+	    region->width <= 0 || region->width > info->var.xres ||
+	    region->height <= 0 || region->height > info->var.yres ||
+	    region->sx < 0 || region->sx >= info->var.xres ||
+	    region->sy < 0 || region->sy >= info->var.yres ||
+	    region->dx < 0 || region->dx >= info->var.xres ||
+	    region->dy < 0 || region->dy >= info->var.yres ||
+	    region->sx + region->width > info->var.xres ||
+	    region->dx + region->width > info->var.xres ||
+	    region->sy + region->height > info->var.yres ||
+	    region->dy + region->height > info->var.yres) {
+		cfb_copyarea(info, region);
+		return;
+	}
+
+	if (region->dy == region->sy && region->dx > region->sx) {
+		/*
+		 * A difficult case of overlapped copy. Because DMA can't
+		 * copy individual scanlines in backwards direction, we need
+		 * two-pass processing. We do it by programming a chain of dma
+		 * control blocks in the first 16K part of the buffer and use
+		 * the remaining 48K as the intermediate temporary scratch
+		 * buffer. The buffer size is sufficient to handle up to
+		 * 1920x1200 resolution at 32bpp pixel depth.
+		 */
+		int y;
+		dma_addr_t control_block_pa = fb->cb_handle;
+		dma_addr_t scratchbuf = fb->cb_handle + 16 * 1024;
+		int scanline_size = bytes_per_pixel * region->width;
+		int scanlines_per_cb = (64 * 1024 - 16 * 1024) / scanline_size;
+
+		for (y = 0; y < region->height; y += scanlines_per_cb) {
+			dma_addr_t src =
+				fb->fb.fix.smem_start +
+				bytes_per_pixel * region->sx +
+				(region->sy + y) * fb->fb.fix.line_length;
+			dma_addr_t dst =
+				fb->fb.fix.smem_start +
+				bytes_per_pixel * region->dx +
+				(region->dy + y) * fb->fb.fix.line_length;
+
+			if (region->height - y < scanlines_per_cb)
+				scanlines_per_cb = region->height - y;
+
+			set_dma_cb(cb, burst_size, scratchbuf, scanline_size,
+				   src, fb->fb.fix.line_length,
+				   scanline_size, scanlines_per_cb);
+			control_block_pa += sizeof(struct bcm2708_dma_cb);
+			cb->next = control_block_pa;
+			cb++;
+
+			set_dma_cb(cb, burst_size, dst, fb->fb.fix.line_length,
+				   scratchbuf, scanline_size,
+				   scanline_size, scanlines_per_cb);
+			control_block_pa += sizeof(struct bcm2708_dma_cb);
+			cb->next = control_block_pa;
+			cb++;
+		}
+		/* move the pointer back to the last dma control block */
+		cb--;
+	} else {
+		/* A single dma control block is enough. */
+		int sy, dy, stride;
+		if (region->dy <= region->sy) {
+			/* processing from top to bottom */
+			dy = region->dy;
+			sy = region->sy;
+			stride = fb->fb.fix.line_length;
+		} else {
+			/* processing from bottom to top */
+			dy = region->dy + region->height - 1;
+			sy = region->sy + region->height - 1;
+			stride = -fb->fb.fix.line_length;
+		}
+		set_dma_cb(cb, burst_size,
+			   fb->fb.fix.smem_start + dy * fb->fb.fix.line_length +
+				   bytes_per_pixel * region->dx,
+			   stride,
+			   fb->fb.fix.smem_start + sy * fb->fb.fix.line_length +
+				   bytes_per_pixel * region->sx,
+			   stride,
+			   region->width * bytes_per_pixel,
+			   region->height);
+	}
+
+	/* end of dma control blocks chain */
+	cb->next = 0;
+
+
+	if (pixels < dma_busy_wait_threshold) {
+		bcm_dma_start(fb->dma_chan_base, fb->cb_handle);
+		bcm_dma_wait_idle(fb->dma_chan_base);
+	} else {
+		void __iomem *dma_chan = fb->dma_chan_base;
+		cb->info |= BCM2708_DMA_INT_EN;
+		bcm_dma_start(fb->dma_chan_base, fb->cb_handle);
+		while (bcm_dma_is_busy(dma_chan)) {
+			wait_event_interruptible(
+				fb->dma_waitq,
+				!bcm_dma_is_busy(dma_chan));
+		}
+		fb->stats.dma_irqs++;
+	}
+	fb->stats.dma_copies++;
 }
 
 static void bcm2708_fb_imageblit(struct fb_info *info,
@@ -336,6 +537,24 @@ static void bcm2708_fb_imageblit(struct fb_info *info,
 	cfb_imageblit(info, image);
 }
 
+static irqreturn_t bcm2708_fb_dma_irq(int irq, void *cxt)
+{
+	struct bcm2708_fb *fb = cxt;
+
+	/* FIXME: should read status register to check if this is
+	 * actually interrupting us or not, in case this interrupt
+	 * ever becomes shared amongst several DMA channels
+	 *
+	 * readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_IRQ;
+	 */
+
+	/* acknowledge the interrupt */
+	writel(BCM2708_DMA_INT, fb->dma_chan_base + BCM2708_DMA_CS);
+
+	wake_up(&fb->dma_waitq);
+	return IRQ_HANDLED;
+}
+
 static struct fb_ops bcm2708_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = bcm2708_fb_check_var,
@@ -365,7 +584,7 @@ static int bcm2708_fb_register(struct bcm2708_fb *fb)
 		fb->dma = dma;
 	}
 	fb->fb.fbops = &bcm2708_fb_ops;
-	fb->fb.flags = FBINFO_FLAG_DEFAULT;
+	fb->fb.flags = FBINFO_FLAG_DEFAULT | FBINFO_HWACCEL_COPYAREA;
 	fb->fb.pseudo_palette = fb->cmap;
 
 	strncpy(fb->fb.fix.id, bcm2708_name, sizeof(fb->fb.fix.id));
@@ -396,6 +615,7 @@ static int bcm2708_fb_register(struct bcm2708_fb *fb)
 	fb->fb.monspecs.dclkmax = 100000000;
 
 	bcm2708_fb_set_bitfields(&fb->fb.var);
+	init_waitqueue_head(&fb->dma_waitq);
 
 	/*
	 * Allocate colourmap.
@@ -421,14 +641,45 @@ static int bcm2708_fb_probe(struct platform_device *dev)
 	struct bcm2708_fb *fb;
 	int ret;
 
-	fb = kmalloc(sizeof(struct bcm2708_fb), GFP_KERNEL);
+	fb = kzalloc(sizeof(struct bcm2708_fb), GFP_KERNEL);
 	if (!fb) {
 		dev_err(&dev->dev,
 			"could not allocate new bcm2708_fb struct\n");
 		ret = -ENOMEM;
 		goto free_region;
 	}
-	memset(fb, 0, sizeof(struct bcm2708_fb));
+
+	bcm2708_fb_debugfs_init(fb);
+
+	fb->cb_base = dma_alloc_writecombine(&dev->dev, SZ_64K,
+					     &fb->cb_handle, GFP_KERNEL);
+	if (!fb->cb_base) {
+		dev_err(&dev->dev, "cannot allocate DMA CBs\n");
+		ret = -ENOMEM;
+		goto free_fb;
+	}
+
+	pr_info("BCM2708FB: allocated DMA memory %08x\n",
+		fb->cb_handle);
+
+	ret = bcm_dma_chan_alloc(BCM_DMA_FEATURE_BULK,
+				 &fb->dma_chan_base, &fb->dma_irq);
+	if (ret < 0) {
+		dev_err(&dev->dev, "couldn't allocate a DMA channel\n");
+		goto free_cb;
+	}
+	fb->dma_chan = ret;
+
+	ret = request_irq(fb->dma_irq, bcm2708_fb_dma_irq,
+			  0, "bcm2708_fb dma", fb);
+	if (ret) {
+		pr_err("%s: failed to request DMA irq\n", __func__);
+		goto free_dma_chan;
+	}
+
+
+	pr_info("BCM2708FB: allocated DMA channel %d @ %p\n",
+		fb->dma_chan, fb->dma_chan_base);
 
 	fb->dev = dev;
 
@@ -438,6 +689,11 @@ static int bcm2708_fb_probe(struct platform_device *dev)
 		goto out;
 	}
 
+free_dma_chan:
+	bcm_dma_chan_free(fb->dma_chan);
+free_cb:
+	dma_free_writecombine(&dev->dev, SZ_64K, fb->cb_base, fb->cb_handle);
+free_fb:
 	kfree(fb);
 free_region:
 	dev_err(&dev->dev, "probe failed, err %d\n", ret);
@@ -455,8 +711,15 @@ static int bcm2708_fb_remove(struct platform_device *dev)
 	iounmap(fb->fb.screen_base);
 	unregister_framebuffer(&fb->fb);
 
+	dma_free_writecombine(&dev->dev, SZ_64K, fb->cb_base, fb->cb_handle);
+	bcm_dma_chan_free(fb->dma_chan);
+
 	dma_free_coherent(NULL, PAGE_ALIGN(sizeof(*fb->info)), (void *)fb->info,
			  fb->dma);
+	bcm2708_fb_debugfs_deinit(fb);
+
+	free_irq(fb->dma_irq, fb);
+
 	kfree(fb);
 
 	return 0;
--
1.9.1
