From 2b97c5d7886a920adc8f7c32c2a60583475654f2 Mon Sep 17 00:00:00 2001
From: Sean Wang <sean.wang@mediatek.com>
Date: Fri, 12 May 2017 17:05:12 +0800
Subject: [PATCH 205/224] dmaengine: mediatek: Add MediaTek High-Speed DMA
 controller for MT7622 and MT7623 SoC

The MediaTek High-Speed DMA controller (HSDMA) on the MT7622 and MT7623
SoCs has a single ring dedicated to memory-to-memory transfer through
ring-based descriptor management.

Even though there is only one physical ring available inside HSDMA, the
driver can easily be extended to support multiple virtual channels
processed simultaneously by means of the DMA_VIRTUAL_CHANNELS framework.

Signed-off-by: Sean Wang <sean.wang@mediatek.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Julia Lawall <julia.lawall@lip6.fr>
---
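Note for reviewers: a minimal sketch of how a generic dmaengine client
could exercise this controller through the memcpy API. It is not part of
the patch; hsdma_memcpy_example is a hypothetical helper, error paths are
abbreviated, and dst/src are assumed to be DMA addresses already mapped
by the caller.

  static int hsdma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
  {
  	struct dma_async_tx_descriptor *tx;
  	struct dma_chan *chan;
  	dma_cap_mask_t mask;
  	dma_cookie_t cookie;

  	/* Grab any channel advertising memory-to-memory capability */
  	dma_cap_zero(mask);
  	dma_cap_set(DMA_MEMCPY, mask);
  	chan = dma_request_channel(mask, NULL, NULL);
  	if (!chan)
  		return -ENODEV;

  	/* Map the copy onto a virtual channel; the driver splits it into PDs */
  	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
  	if (!tx) {
  		dma_release_channel(chan);
  		return -ENOMEM;
  	}

  	cookie = dmaengine_submit(tx);
  	dma_async_issue_pending(chan);

  	/* Busy-wait only for brevity; a real client would use a callback */
  	dma_sync_wait(chan, cookie);
  	dma_release_channel(chan);
  	return 0;
  }
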
 drivers/dma/Kconfig              |    2 +
 drivers/dma/Makefile             |    1 +
 drivers/dma/mediatek/Kconfig     |   13 +
 drivers/dma/mediatek/Makefile    |    1 +
 drivers/dma/mediatek/mtk-hsdma.c | 1056 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 1073 insertions(+)
 create mode 100644 drivers/dma/mediatek/Kconfig
 create mode 100644 drivers/dma/mediatek/Makefile
 create mode 100644 drivers/dma/mediatek/mtk-hsdma.c

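A note on the ring arithmetic used throughout the new driver: because
MTK_DMA_SIZE is a power of 2, the next/last ring indices can be computed
with a simple mask instead of a modulo. The following standalone sketch
(not part of the patch; RING_SIZE, NEXT_IDX and LAST_IDX are hypothetical
stand-ins mirroring MTK_DMA_SIZE and the MTK_HSDMA_*_DESP_IDX macros in
mtk-hsdma.c below) shows the wraparound behaviour:

  #include <assert.h>

  #define RING_SIZE	64	/* must be a power of 2, like MTK_DMA_SIZE */
  #define NEXT_IDX(x)	(((x) + 1) & (RING_SIZE - 1))
  #define LAST_IDX(x)	(((x) - 1) & (RING_SIZE - 1))

  int main(void)
  {
  	assert(NEXT_IDX(63) == 0);	/* wraps around the ring end */
  	assert(LAST_IDX(0) == 63);	/* wraps backwards as well */
  	return 0;
  }
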
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -604,6 +604,8 @@ config ZX_DMA
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
 
+source "drivers/dma/mediatek/Kconfig"
+
 source "drivers/dma/qcom/Kconfig"
 
 source "drivers/dma/dw/Kconfig"
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -72,5 +72,6 @@ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
+obj-y += mediatek/
 obj-y += qcom/
 obj-y += xilinx/
--- /dev/null
+++ b/drivers/dma/mediatek/Kconfig
@@ -0,0 +1,13 @@
+
+config MTK_HSDMA
+	tristate "MediaTek High-Speed DMA controller support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the High-Speed DMA controller on MediaTek
+	  SoCs.
+
+	  This controller provides channels dedicated to memory-to-
+	  memory transfer to offload the CPU, through ring-based
+	  descriptor management.
--- /dev/null
+++ b/drivers/dma/mediatek/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
--- /dev/null
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018 MediaTek Inc.
+
+/*
+ * Driver for MediaTek High-Speed DMA Controller
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include "../virt-dma.h"
+
+#define MTK_HSDMA_USEC_POLL		20
+#define MTK_HSDMA_TIMEOUT_POLL		200000
+#define MTK_HSDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+/* The default number of virtual channels */
+#define MTK_HSDMA_NR_VCHANS		3
+
+/* Only one physical channel supported */
+#define MTK_HSDMA_NR_MAX_PCHANS		1
+
+/* Macros for physical descriptor (PD) manipulation */
+/* The number of PDs, which must be a power of 2 */
+#define MTK_DMA_SIZE			64
+#define MTK_HSDMA_NEXT_DESP_IDX(x, y)	(((x) + 1) & ((y) - 1))
+#define MTK_HSDMA_LAST_DESP_IDX(x, y)	(((x) - 1) & ((y) - 1))
+#define MTK_HSDMA_MAX_LEN		0x3f80
+#define MTK_HSDMA_ALIGN_SIZE		4
+#define MTK_HSDMA_PLEN_MASK		0x3fff
+#define MTK_HSDMA_DESC_PLEN(x)		(((x) & MTK_HSDMA_PLEN_MASK) << 16)
+#define MTK_HSDMA_DESC_PLEN_GET(x)	(((x) >> 16) & MTK_HSDMA_PLEN_MASK)
+
+/* Registers for underlying ring manipulation */
+#define MTK_HSDMA_TX_BASE		0x0
+#define MTK_HSDMA_TX_CNT		0x4
+#define MTK_HSDMA_TX_CPU		0x8
+#define MTK_HSDMA_TX_DMA		0xc
+#define MTK_HSDMA_RX_BASE		0x100
+#define MTK_HSDMA_RX_CNT		0x104
+#define MTK_HSDMA_RX_CPU		0x108
+#define MTK_HSDMA_RX_DMA		0x10c
+
+/* Registers for global setup */
+#define MTK_HSDMA_GLO			0x204
+#define MTK_HSDMA_GLO_MULTI_DMA		BIT(10)
+#define MTK_HSDMA_TX_WB_DDONE		BIT(6)
+#define MTK_HSDMA_BURST_64BYTES		(0x2 << 4)
+#define MTK_HSDMA_GLO_RX_BUSY		BIT(3)
+#define MTK_HSDMA_GLO_RX_DMA		BIT(2)
+#define MTK_HSDMA_GLO_TX_BUSY		BIT(1)
+#define MTK_HSDMA_GLO_TX_DMA		BIT(0)
+#define MTK_HSDMA_GLO_DMA		(MTK_HSDMA_GLO_TX_DMA |	\
+					 MTK_HSDMA_GLO_RX_DMA)
+#define MTK_HSDMA_GLO_BUSY		(MTK_HSDMA_GLO_RX_BUSY | \
+					 MTK_HSDMA_GLO_TX_BUSY)
+#define MTK_HSDMA_GLO_DEFAULT		(MTK_HSDMA_GLO_TX_DMA |	\
+					 MTK_HSDMA_GLO_RX_DMA |	\
+					 MTK_HSDMA_TX_WB_DDONE | \
+					 MTK_HSDMA_BURST_64BYTES | \
+					 MTK_HSDMA_GLO_MULTI_DMA)
+
+/* Registers for reset */
+#define MTK_HSDMA_RESET			0x208
+#define MTK_HSDMA_RST_TX		BIT(0)
+#define MTK_HSDMA_RST_RX		BIT(16)
+
+/* Registers for interrupt control */
+#define MTK_HSDMA_DLYINT		0x20c
+#define MTK_HSDMA_RXDLY_INT_EN		BIT(15)
+
+/* Interrupt fires when the number of pending PDs exceeds the specified value */
+#define MTK_HSDMA_RXMAX_PINT(x)		(((x) & 0x7f) << 8)
+
+/* Interrupt fires when the pending time exceeds the specified value, in 20 us units */
+#define MTK_HSDMA_RXMAX_PTIME(x)	((x) & 0x7f)
+#define MTK_HSDMA_DLYINT_DEFAULT	(MTK_HSDMA_RXDLY_INT_EN | \
+					 MTK_HSDMA_RXMAX_PINT(20) | \
+					 MTK_HSDMA_RXMAX_PTIME(20))
+#define MTK_HSDMA_INT_STATUS		0x220
+#define MTK_HSDMA_INT_ENABLE		0x228
+#define MTK_HSDMA_INT_RXDONE		BIT(16)
+
+enum mtk_hsdma_vdesc_flag {
+	MTK_HSDMA_VDESC_FINISHED = 0x01,
+};
+
+#define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED)
+
+/**
+ * struct mtk_hsdma_pdesc - This is the struct holding info describing physical
+ *                          descriptor (PD); its placement must be kept at
+ *                          4-byte alignment in little endian order.
+ * @desc[1-4]:              The control pad used to tell the hardware how to
+ *                          handle the descriptor, such as the source and
+ *                          destination address and the data length. The maximum
+ *                          data length each pdesc can handle is 0x3f80 bytes.
+ */
+struct mtk_hsdma_pdesc {
+	__le32 desc1;
+	__le32 desc2;
+	__le32 desc3;
+	__le32 desc4;
+} __packed __aligned(4);
+
+/**
+ * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual
+ *                          descriptor (VD)
+ * @vd:                     An instance for struct virt_dma_desc
+ * @len:                    The total data size the device wants to move
+ * @residue:                The remaining data size the device will move
+ * @dest:                   The destination address the device wants to move to
+ * @src:                    The source address the device wants to move from
+ */
+struct mtk_hsdma_vdesc {
+	struct virt_dma_desc vd;
+	size_t len;
+	size_t residue;
+	dma_addr_t dest;
+	dma_addr_t src;
+};
+
+/**
+ * struct mtk_hsdma_cb - This is the struct holding extra info required for RX
+ *                       ring to know what relevant VD the PD is being
+ *                       mapped to.
+ * @vd:                   Pointer to the relevant VD.
+ * @flag:                 Flag indicating what action should be taken when VD
+ *                        is completed.
+ */
+struct mtk_hsdma_cb {
+	struct virt_dma_desc *vd;
+	enum mtk_hsdma_vdesc_flag flag;
+};
+
+/**
+ * struct mtk_hsdma_ring - This struct holds info describing underlying ring
+ *                         space
+ * @txd:                   The descriptor TX ring which describes DMA source
+ *                         information
+ * @rxd:                   The descriptor RX ring which describes DMA
+ *                         destination information
+ * @cb:                    The extra information pointed at by RX ring
+ * @tphys:                 The physical addr of TX ring
+ * @rphys:                 The physical addr of RX ring
+ * @cur_tptr:              Pointer to the next free descriptor used by the host
+ * @cur_rptr:              Pointer to the last done descriptor by the device
+ */
+struct mtk_hsdma_ring {
+	struct mtk_hsdma_pdesc *txd;
+	struct mtk_hsdma_pdesc *rxd;
+	struct mtk_hsdma_cb *cb;
+	dma_addr_t tphys;
+	dma_addr_t rphys;
+	u16 cur_tptr;
+	u16 cur_rptr;
+};
+
+/**
+ * struct mtk_hsdma_pchan - This is the struct holding info describing physical
+ *                          channel (PC)
+ * @ring:                   An instance for the underlying ring
+ * @sz_ring:                Total size allocated for the ring
+ * @nr_free:                Total number of free rooms in the ring. It would
+ *                          be accessed and updated frequently between IRQ
+ *                          context and user context to reflect whether the
+ *                          ring can accept requests from VD.
+ */
+struct mtk_hsdma_pchan {
+	struct mtk_hsdma_ring ring;
+	size_t sz_ring;
+	atomic_t nr_free;
+};
+
+/**
+ * struct mtk_hsdma_vchan - This is the struct holding info describing virtual
+ *                          channel (VC)
+ * @vc:                     An instance for struct virt_dma_chan
+ * @issue_completion:       The wait for all issued descriptors to complete
+ * @issue_synchronize:      Bool indicating channel synchronization starts
+ * @desc_hw_processing:     List of descriptors the hardware is processing,
+ *                          protected by vc.lock
+ */
+struct mtk_hsdma_vchan {
+	struct virt_dma_chan vc;
+	struct completion issue_completion;
+	bool issue_synchronize;
+	struct list_head desc_hw_processing;
+};
+
+/**
+ * struct mtk_hsdma_soc - This is the struct holding differences among SoCs
+ * @ddone:                 Bit mask for DDONE
+ * @ls0:                   Bit mask for LS0
+ */
+struct mtk_hsdma_soc {
+	__le32 ddone;
+	__le32 ls0;
+};
+
+/**
+ * struct mtk_hsdma_device - This is the struct holding info describing HSDMA
+ *                           device
+ * @ddev:                    An instance for struct dma_device
+ * @base:                    The mapped register I/O base
+ * @clk:                     The clock the device is using
+ * @irq:                     The IRQ the device is using
+ * @dma_requests:            The number of VCs the device supports
+ * @vc:                      The pointer to all available VCs
+ * @pc:                      The pointer to the underlying PC
+ * @pc_refcnt:               Track how many VCs are using the PC
+ * @lock:                    Lock to protect against multiple VCs accessing PC
+ * @soc:                     The pointer to area holding differences among
+ *                           various platforms
+ */
+struct mtk_hsdma_device {
+	struct dma_device ddev;
+	void __iomem *base;
+	struct clk *clk;
+	u32 irq;
+
+	u32 dma_requests;
+	struct mtk_hsdma_vchan *vc;
+	struct mtk_hsdma_pchan *pc;
+	refcount_t pc_refcnt;
+
+	/* Lock used to protect against multiple VCs accessing PC */
+	spinlock_t lock;
+
+	const struct mtk_hsdma_soc *soc;
+};
+
+static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
+{
+	return container_of(chan->device, struct mtk_hsdma_device, ddev);
+}
+
+static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
+{
+	return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
+}
+
+static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct mtk_hsdma_vdesc, vd);
+}
+
+static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
+{
+	return hsdma->ddev.dev;
+}
+
+static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
+{
+	return readl(hsdma->base + reg);
+}
+
+static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+	writel(val, hsdma->base + reg);
+}
+
+static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
+			u32 mask, u32 set)
+{
+	u32 val;
+
+	val = mtk_dma_read(hsdma, reg);
+	val &= ~mask;
+	val |= set;
+	mtk_dma_write(hsdma, reg, val);
+}
+
+static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+	mtk_dma_rmw(hsdma, reg, 0, val);
+}
+
+static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+	mtk_dma_rmw(hsdma, reg, val, 0);
+}
+
+static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
+}
+
+static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
+{
+	u32 status = 0;
+
+	return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
+				  !(status & MTK_HSDMA_GLO_BUSY),
+				  MTK_HSDMA_USEC_POLL,
+				  MTK_HSDMA_TIMEOUT_POLL);
+}
+
+static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
+				 struct mtk_hsdma_pchan *pc)
+{
+	struct mtk_hsdma_ring *ring = &pc->ring;
+	int err;
+
+	memset(pc, 0, sizeof(*pc));
+
+	/*
+	 * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring
+	 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
+	 */
+	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
+	ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+					&ring->tphys, GFP_NOWAIT);
+	if (!ring->txd)
+		return -ENOMEM;
+
+	ring->rxd = &ring->txd[MTK_DMA_SIZE];
+	ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
+	ring->cur_tptr = 0;
+	ring->cur_rptr = MTK_DMA_SIZE - 1;
+
+	ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
+	if (!ring->cb) {
+		err = -ENOMEM;
+		goto err_free_dma;
+	}
+
+	atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);
+
+	/* Disable HSDMA and wait for the completion */
+	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+	err = mtk_hsdma_busy_wait(hsdma);
+	if (err)
+		goto err_free_cb;
+
+	/* Reset */
+	mtk_dma_set(hsdma, MTK_HSDMA_RESET,
+		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
+	mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
+		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
+
+	/* Setup HSDMA initial pointer in the ring */
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);
+
+	/* Enable HSDMA */
+	mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+
+	/* Setup delayed interrupt */
+	mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);
+
+	/* Enable interrupt */
+	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+
+	return 0;
+
+err_free_cb:
+	kfree(ring->cb);
+
+err_free_dma:
+	dma_free_coherent(hsdma2dev(hsdma),
+			  pc->sz_ring, ring->txd, ring->tphys);
+	return err;
+}
+
+static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
+				 struct mtk_hsdma_pchan *pc)
+{
+	struct mtk_hsdma_ring *ring = &pc->ring;
+
+	/* Disable HSDMA and then wait for the completion */
+	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+	mtk_hsdma_busy_wait(hsdma);
+
+	/* Reset pointer in the ring */
+	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);
+
+	kfree(ring->cb);
+
+	dma_free_coherent(hsdma2dev(hsdma),
+			  pc->sz_ring, ring->txd, ring->tphys);
+}
+
+static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
+					 struct mtk_hsdma_pchan *pc,
+					 struct mtk_hsdma_vdesc *hvd)
+{
+	struct mtk_hsdma_ring *ring = &pc->ring;
+	struct mtk_hsdma_pdesc *txd, *rxd;
+	u16 reserved, prev, tlen, num_sgs;
+	unsigned long flags;
+
+	/* Protect against the PC being accessed by multiple VCs simultaneously */
+	spin_lock_irqsave(&hsdma->lock, flags);
+
+	/*
+	 * Reserve rooms, where pc->nr_free is used to track how many free
+	 * rooms are left in the ring; it's updated in user and IRQ context.
+	 */
+	num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
+	reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));
+
+	if (!reserved) {
+		spin_unlock_irqrestore(&hsdma->lock, flags);
+		return -ENOSPC;
+	}
+
+	atomic_sub(reserved, &pc->nr_free);
+
+	while (reserved--) {
+		/* Limit the size by the PD capability for valid data moving */
+		tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
+		       MTK_HSDMA_MAX_LEN : hvd->len;
+
+		/*
+		 * Set up PDs using the remaining VD info mapped onto those
+		 * reserved rooms. Since the RXD is shared memory between
+		 * the host and the device, allocated by the
+		 * dma_alloc_coherent call, the helper macro WRITE_ONCE
+		 * ensures the data written to it really reaches RAM.
+		 */
+		txd = &ring->txd[ring->cur_tptr];
+		WRITE_ONCE(txd->desc1, hvd->src);
+		WRITE_ONCE(txd->desc2,
+			   hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));
+
+		rxd = &ring->rxd[ring->cur_tptr];
+		WRITE_ONCE(rxd->desc1, hvd->dest);
+		WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));
+
+		/* Associate the VD with the PD it belongs to */
+		ring->cb[ring->cur_tptr].vd = &hvd->vd;
+
+		/* Move forward the pointer of the TX ring */
+		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
+							 MTK_DMA_SIZE);
+
+		/* Update the VD with the remaining data */
+		hvd->src += tlen;
+		hvd->dest += tlen;
+		hvd->len -= tlen;
+	}
+
+	/*
+	 * Tag the flag on the last PD of the VD; that PD is responsible
+	 * for completing the VD.
+	 */
+	if (!hvd->len) {
+		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
+		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
+	}
+
+	/* Ensure all changes are indeed done before we go on */
+	wmb();
+
+	/*
+	 * Updating the TX ring pointer in hardware lets the HSDMA take
+	 * action on those pending PDs.
+	 */
+	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
+
+	spin_unlock_irqrestore(&hsdma->lock, flags);
+
+	return 0;
+}
+
+static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
+					  struct mtk_hsdma_vchan *hvc)
+{
+	struct virt_dma_desc *vd, *vd2;
+	int err;
+
+	lockdep_assert_held(&hvc->vc.lock);
+
+	list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
+		struct mtk_hsdma_vdesc *hvd;
+
+		hvd = to_hsdma_vdesc(vd);
+
+		/* Map the VD onto the PC; all VCs share a single PC */
+		err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);
+
+		/*
+		 * Move the VD from desc_issued to desc_hw_processing when
+		 * the entire VD fits into the available PDs. Otherwise,
+		 * uncompleted VDs stay in the desc_issued list and their
+		 * processing restarts as soon as possible once underlying
+		 * ring space gets freed.
+		 */
+		if (err == -ENOSPC || hvd->len > 0)
+			break;
+
+		/*
+		 * The extra list desc_hw_processing is used because the
+		 * hardware can't provide sufficient information allowing us
+		 * to know what VDs are still working on the underlying ring.
+		 * The additional list helps us implement terminate_all,
+		 * residue calculation and anything else that needs to know
+		 * the detailed descriptor status on the hardware.
+		 */
+		list_move_tail(&vd->node, &hvc->desc_hw_processing);
+	}
+}
+
+static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
+{
+	struct mtk_hsdma_vchan *hvc;
+	struct mtk_hsdma_pdesc *rxd;
+	struct mtk_hsdma_vdesc *hvd;
+	struct mtk_hsdma_pchan *pc;
+	struct mtk_hsdma_cb *cb;
+	int i = MTK_DMA_SIZE;
+	__le32 desc2;
+	u32 status;
+	u16 next;
+
+	/* Read IRQ status */
+	status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
+	if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
+		goto rx_done;
+
+	pc = hsdma->pc;
+
+	/*
+	 * Using a fail-safe loop with iterations of up to MTK_DMA_SIZE to
+	 * reclaim these finished descriptors: the maximum number of PDs
+	 * the ISR can handle at one time shouldn't be more than
+	 * MTK_DMA_SIZE, so we take it as a bounded count instead of using
+	 * a dangerous infinite poll.
+	 */
+	while (i--) {
+		next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
+					       MTK_DMA_SIZE);
+		rxd = &pc->ring.rxd[next];
+
+		/*
+		 * If MTK_HSDMA_DESC_DDONE is not set, the data move for
+		 * the PD is still ongoing.
+		 */
+		desc2 = READ_ONCE(rxd->desc2);
+		if (!(desc2 & hsdma->soc->ddone))
+			break;
+
+		cb = &pc->ring.cb[next];
+		if (unlikely(!cb->vd)) {
+			dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
+			break;
+		}
+
+		/* Update the residue of the VD the finished PD belongs to */
+		hvd = to_hsdma_vdesc(cb->vd);
+		hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);
+
+		/* Complete the VD when its last relevant PD is finished */
+		if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
+			hvc = to_hsdma_vchan(cb->vd->tx.chan);
+
+			spin_lock(&hvc->vc.lock);
+
+			/* Remove the VD from list desc_hw_processing */
+			list_del(&cb->vd->node);
+
+			/* Add the VD into list desc_completed */
+			vchan_cookie_complete(cb->vd);
+
+			if (hvc->issue_synchronize &&
+			    list_empty(&hvc->desc_hw_processing)) {
+				complete(&hvc->issue_completion);
+				hvc->issue_synchronize = false;
+			}
+			spin_unlock(&hvc->vc.lock);
+
+			cb->flag = 0;
+		}
+
+		cb->vd = NULL;
+
+		/*
+		 * Recycle the RXD with the helper WRITE_ONCE, which ensures
+		 * the data written to it really reaches RAM.
+		 */
+		WRITE_ONCE(rxd->desc1, 0);
+		WRITE_ONCE(rxd->desc2, 0);
+		pc->ring.cur_rptr = next;
+
+		/* Release rooms */
+		atomic_inc(&pc->nr_free);
+	}
+
+	/* Ensure all changes are indeed done before we go on */
+	wmb();
+
+	/* Update the CPU pointer for those completed PDs */
+	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
+
+	/*
+	 * Acking the pending IRQ lets the hardware stop keeping the used
+	 * IRQ line in a certain trigger state once software has completed
+	 * all the finished physical descriptors.
+	 */
+	if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
+		mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);
+
+	/* Handle pending VDs in all VCs ASAP once some rooms are freed */
+	for (i = 0; i < hsdma->dma_requests; i++) {
+		hvc = &hsdma->vc[i];
+		spin_lock(&hvc->vc.lock);
+		mtk_hsdma_issue_vchan_pending(hsdma, hvc);
+		spin_unlock(&hvc->vc.lock);
+	}
+
+rx_done:
+	/* All completed PDs are cleaned up, so enable the interrupt again */
+	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+}
+
+static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+{
+	struct mtk_hsdma_device *hsdma = devid;
+
+	/*
+	 * Disable the interrupt until all completed PDs are cleaned up in
+	 * the mtk_hsdma_free_rooms call.
+	 */
+	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+
+	mtk_hsdma_free_rooms_in_ring(hsdma);
+
+	return IRQ_HANDLED;
+}
+
+static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
+							dma_cookie_t cookie)
+{
+	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+	struct virt_dma_desc *vd;
+
+	list_for_each_entry(vd, &hvc->desc_hw_processing, node)
+		if (vd->tx.cookie == cookie)
+			return vd;
+
+	list_for_each_entry(vd, &hvc->vc.desc_issued, node)
+		if (vd->tx.cookie == cookie)
+			return vd;
+
+	return NULL;
+}
+
+static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *txstate)
+{
+	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+	struct mtk_hsdma_vdesc *hvd;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&hvc->vc.lock, flags);
+	vd = mtk_hsdma_find_active_desc(c, cookie);
+	spin_unlock_irqrestore(&hvc->vc.lock, flags);
+
+	if (vd) {
+		hvd = to_hsdma_vdesc(vd);
+		bytes = hvd->residue;
+	}
+
+	dma_set_residue(txstate, bytes);
+
+	return ret;
+}
+
+static void mtk_hsdma_issue_pending(struct dma_chan *c)
+{
+	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hvc->vc.lock, flags);
+
+	if (vchan_issue_pending(&hvc->vc))
+		mtk_hsdma_issue_vchan_pending(hsdma, hvc);
+
+	spin_unlock_irqrestore(&hvc->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *
+mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
+			  dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct mtk_hsdma_vdesc *hvd;
+
+	hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
+	if (!hvd)
+		return NULL;
+
+	hvd->len = len;
+	hvd->residue = len;
+	hvd->src = src;
+	hvd->dest = dest;
+
+	return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
+}
+
+static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
+{
+	struct virt_dma_chan *vc = to_virt_chan(c);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_splice_tail_init(&vc->desc_allocated, &head);
+	list_splice_tail_init(&vc->desc_submitted, &head);
+	list_splice_tail_init(&vc->desc_issued, &head);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	/* At this point, we don't expect users to put descriptors into the VC */
+	vchan_dma_desc_free_list(vc, &head);
+
+	return 0;
+}
+
+static void mtk_hsdma_free_active_desc(struct dma_chan *c)
+{
+	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+	bool sync_needed = false;
+
+	/*
+	 * Once issue_synchronize is set, the synchronization must be
+	 * notified immediately once the hardware has consumed all the
+	 * descriptors for the channel in the ring.
+	 */
+	spin_lock(&hvc->vc.lock);
+	if (!list_empty(&hvc->desc_hw_processing)) {
+		hvc->issue_synchronize = true;
+		sync_needed = true;
+	}
+	spin_unlock(&hvc->vc.lock);
+
+	if (sync_needed)
+		wait_for_completion(&hvc->issue_completion);
+	/*
+	 * At this point, all the remaining descriptors in the ring for the
+	 * channel should have been processed completely.
+	 */
+	WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
+		  "Desc pending still in list desc_hw_processing\n");
+
+	/* Free all descriptors in list desc_completed */
+	vchan_synchronize(&hvc->vc);
+
+	WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
+		  "Desc pending still in list desc_completed\n");
+}
+
+static int mtk_hsdma_terminate_all(struct dma_chan *c)
+{
+	/*
+	 * Free pending descriptors not processed yet by hardware that have
+	 * previously been submitted to the channel.
+	 */
+	mtk_hsdma_free_inactive_desc(c);
+
+	/*
+	 * However, the DMA engine doesn't provide any way to stop these
+	 * descriptors currently being processed by the hardware. The only
+	 * way is to wait until all of them have been processed completely,
+	 * via the mtk_hsdma_free_active_desc call.
+	 */
+	mtk_hsdma_free_active_desc(c);
+
+	return 0;
+}
+
+static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
+{
+	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+	int err;
+
+	/*
+	 * Since the HSDMA has only one PC, the resource for the PC is
+	 * allocated when the first VC is created, and the other VCs run
+	 * on the same PC.
+	 */
+	if (!refcount_read(&hsdma->pc_refcnt)) {
+		err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
+		if (err)
+			return err;
+		/*
+		 * refcount_inc would complain about incrementing from 0
+		 * (a potential use-after-free), so set it to 1 explicitly.
+		 */
+		refcount_set(&hsdma->pc_refcnt, 1);
+	} else {
+		refcount_inc(&hsdma->pc_refcnt);
+	}
+
+	return 0;
+}
+
+static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+{
+	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+
+	/* Free all descriptors in all lists on the VC */
+	mtk_hsdma_terminate_all(c);
+
+	/* The resource for the PC is not freed until all VCs are destroyed */
+	if (!refcount_dec_and_test(&hsdma->pc_refcnt))
+		return;
+
+	mtk_hsdma_free_pchan(hsdma, hsdma->pc);
+}
+
+static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
+{
+	int err;
+
+	pm_runtime_enable(hsdma2dev(hsdma));
+	pm_runtime_get_sync(hsdma2dev(hsdma));
+
+	err = clk_prepare_enable(hsdma->clk);
+	if (err)
+		return err;
+
+	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
+	mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);
+
+	return 0;
+}
+
+static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
+{
+	mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);
+
+	clk_disable_unprepare(hsdma->clk);
+
+	pm_runtime_put_sync(hsdma2dev(hsdma));
+	pm_runtime_disable(hsdma2dev(hsdma));
+
+	return 0;
+}
+
+static const struct mtk_hsdma_soc mt7623_soc = {
+	.ddone = BIT(31),
+	.ls0 = BIT(30),
+};
+
+static const struct mtk_hsdma_soc mt7622_soc = {
+	.ddone = BIT(15),
+	.ls0 = BIT(14),
+};
+
+static const struct of_device_id mtk_hsdma_match[] = {
+	{ .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
+	{ .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_hsdma_match);
+
+static int mtk_hsdma_probe(struct platform_device *pdev)
+{
+	struct mtk_hsdma_device *hsdma;
+	struct mtk_hsdma_vchan *vc;
+	struct dma_device *dd;
+	struct resource *res;
+	int i, err;
+
+	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+	if (!hsdma)
+		return -ENOMEM;
+
+	dd = &hsdma->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hsdma->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hsdma->base))
+		return PTR_ERR(hsdma->base);
+
+	hsdma->soc = of_device_get_match_data(&pdev->dev);
+	if (!hsdma->soc) {
+		dev_err(&pdev->dev, "No device match found\n");
+		return -ENODEV;
+	}
+
+	hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
+	if (IS_ERR(hsdma->clk)) {
+		dev_err(&pdev->dev, "No clock for %s\n",
+			dev_name(&pdev->dev));
+		return PTR_ERR(hsdma->clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "No irq resource for %s\n",
+			dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+	hsdma->irq = res->start;
+
+	refcount_set(&hsdma->pc_refcnt, 0);
+	spin_lock_init(&hsdma->lock);
+
+	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+
+	dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
+	dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
+	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+	dd->device_tx_status = mtk_hsdma_tx_status;
+	dd->device_issue_pending = mtk_hsdma_issue_pending;
+	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+	dd->device_terminate_all = mtk_hsdma_terminate_all;
+	dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
+	dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
+	dd->directions = BIT(DMA_MEM_TO_MEM);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	dd->dev = &pdev->dev;
+	INIT_LIST_HEAD(&dd->channels);
+
+	hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
+	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+						      "dma-requests",
+						      &hsdma->dma_requests)) {
+		dev_info(&pdev->dev,
+			 "dma-requests property missing, using default %u\n",
+			 MTK_HSDMA_NR_VCHANS);
+	}
+
+	hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
+				 sizeof(*hsdma->pc), GFP_KERNEL);
+	if (!hsdma->pc)
+		return -ENOMEM;
+
+	hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
+				 sizeof(*hsdma->vc), GFP_KERNEL);
+	if (!hsdma->vc)
+		return -ENOMEM;
+
+	for (i = 0; i < hsdma->dma_requests; i++) {
+		vc = &hsdma->vc[i];
+		vc->vc.desc_free = mtk_hsdma_vdesc_free;
+		vchan_init(&vc->vc, dd);
+		init_completion(&vc->issue_completion);
+		INIT_LIST_HEAD(&vc->desc_hw_processing);
+	}
+
+	err = dma_async_device_register(dd);
+	if (err)
+		return err;
+
+	err = of_dma_controller_register(pdev->dev.of_node,
+					 of_dma_xlate_by_chan_id, hsdma);
+	if (err) {
+		dev_err(&pdev->dev,
+			"MediaTek HSDMA OF registration failed %d\n", err);
+		goto err_unregister;
+	}
+
+	mtk_hsdma_hw_init(hsdma);
+
+	err = devm_request_irq(&pdev->dev, hsdma->irq,
+			       mtk_hsdma_irq, 0,
+			       dev_name(&pdev->dev), hsdma);
+	if (err) {
+		dev_err(&pdev->dev,
+			"request_irq failed with err %d\n", err);
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, hsdma);
+
+	dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+
+	return err;
+}
+
+static int mtk_hsdma_remove(struct platform_device *pdev)
+{
+	struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
+	struct mtk_hsdma_vchan *vc;
+	int i;
+
+	/* Kill VC task */
+	for (i = 0; i < hsdma->dma_requests; i++) {
+		vc = &hsdma->vc[i];
+
+		list_del(&vc->vc.chan.device_node);
+		tasklet_kill(&vc->vc.task);
+	}
+
+	/* Disable DMA interrupt */
+	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
+
+	/* Waits for any pending IRQ handlers to complete */
+	synchronize_irq(hsdma->irq);
+
+	/* Disable hardware */
+	mtk_hsdma_hw_deinit(hsdma);
+
+	dma_async_device_unregister(&hsdma->ddev);
+	of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static struct platform_driver mtk_hsdma_driver = {
+	.probe = mtk_hsdma_probe,
+	.remove = mtk_hsdma_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = mtk_hsdma_match,
+	},
+};
+module_platform_driver(mtk_hsdma_driver);
+
+MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
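
For context, a device tree node binding to this driver could look roughly
like the sketch below. The unit address, interrupt specifier and clock
phandles are illustrative placeholders, not values taken from this patch;
consult the SoC's binding documentation for the real ones. Consumers would
then reference a channel by index, matching of_dma_xlate_by_chan_id.

  hsdma: dma-controller@1b007000 {
  	compatible = "mediatek,mt7623-hsdma";
  	reg = <0 0x1b007000 0 0x1000>;
  	interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>;
  	clocks = <&ethsys CLK_ETHSYS_HSDMA>;
  	clock-names = "hsdma";
  	#dma-cells = <1>;
  };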