From 8d6b6d7e135e9bbfc923d34a45cb0e72695e63ed Mon Sep 17 00:00:00 2001
From: Abhishek Sahu <absahu@codeaurora.org>
Date: Mon, 25 Sep 2017 13:21:26 +0530
Subject: [PATCH 3/7] mtd: nand: qcom: support for command descriptor formation

1. Add the function for command descriptor preparation which will
   be used only by BAM DMA and it will form the DMA descriptors
   containing command elements
2. DMA_PREP_CMD flag should be used for forming command DMA
   descriptors

Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
 drivers/mtd/nand/qcom_nandc.c | 108 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 92 insertions(+), 16 deletions(-)

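The sketch below shows, in isolation, how a command descriptor of the kind
this patch builds is handed to the dmaengine API: one BAM command element is
prepared, wrapped in a scatterlist, and submitted with DMA_PREP_CMD. It is an
illustrative sketch only, not part of the patch: my_write_reg_cmd() and its
parameters are made-up names, the channel is assumed to be a BAM command
channel obtained elsewhere, and unmapping on completion is omitted for
brevity.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: build one BAM command element that writes 'value'
 * into the controller register at physical address 'reg_phys', wrap it
 * in a scatterlist and ask the command channel for a descriptor. The
 * DMA_PREP_CMD flag is what marks the descriptor as carrying command
 * elements rather than data.
 */
static int my_write_reg_cmd(struct device *dev, struct dma_chan *cmd_chan,
			    u32 reg_phys, __le32 value)
{
	static struct bam_cmd_element ce;	/* must outlive the transfer */
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sgl;

	/* one command element: "write 'value' to register at reg_phys" */
	bam_prep_ce_le32(&ce, reg_phys, BAM_WRITE_COMMAND, value);

	sg_init_one(&sgl, &ce, sizeof(ce));
	if (!dma_map_sg(dev, &sgl, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(cmd_chan, &sgl, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_CMD);
	if (!desc) {
		dma_unmap_sg(dev, &sgl, 1, DMA_TO_DEVICE);
		return -EIO;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(cmd_chan);

	return 0;
}

The patch amortizes exactly this pattern: many command elements are collected
in one scatterlist per codeword and submitted together, instead of one
descriptor per register access.
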
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -200,6 +200,14 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
  */
 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
 
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+	((chip)->reg_read_dma + \
+	 ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
 #define QPIC_PER_CW_CMD_ELEMENTS	32
 #define QPIC_PER_CW_CMD_SGL		32
 #define QPIC_PER_CW_DATA_SGL		8
@@ -317,7 +325,8 @@ struct nandc_regs {
  *				controller
  * @dev:			parent device
  * @base:			MMIO base
- * @base_dma:			physical base address of controller registers
+ * @base_phys:			physical base address of controller registers
+ * @base_dma:			dma base address of controller registers
  * @core_clk:			controller clock
  * @aon_clk:			another controller clock
  *
@@ -350,6 +359,7 @@ struct qcom_nand_controller {
 	struct device *dev;
 
 	void __iomem *base;
+	phys_addr_t base_phys;
 	dma_addr_t base_dma;
 
 	struct clk *core_clk;
@@ -751,6 +761,66 @@ static int prepare_bam_async_desc(struct
 }
 
 /*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed in command element type so this function uses the command
+ * element from bam transaction ce array and fills the same with required
+ * data. A single SGL can contain multiple command elements so
+ * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+ * after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+				 int reg_off, const void *vaddr,
+				 int size, unsigned int flags)
+{
+	int bam_ce_size;
+	int i, ret;
+	struct bam_cmd_element *bam_ce_buffer;
+	struct bam_transaction *bam_txn = nandc->bam_txn;
+
+	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+	/* fill the command desc */
+	for (i = 0; i < size; i++) {
+		if (read)
+			bam_prep_ce(&bam_ce_buffer[i],
+				    nandc_reg_phys(nandc, reg_off + 4 * i),
+				    BAM_READ_COMMAND,
+				    reg_buf_dma_addr(nandc,
+						     (__le32 *)vaddr + i));
+		else
+			bam_prep_ce_le32(&bam_ce_buffer[i],
+					 nandc_reg_phys(nandc, reg_off + 4 * i),
+					 BAM_WRITE_COMMAND,
+					 *((__le32 *)vaddr + i));
+	}
+
+	bam_txn->bam_ce_pos += size;
+
+	/* use the separate sgl after this command */
+	if (flags & NAND_BAM_NEXT_SGL) {
+		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+		bam_ce_size = (bam_txn->bam_ce_pos -
+				bam_txn->bam_ce_start) *
+				sizeof(struct bam_cmd_element);
+		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+			   bam_ce_buffer, bam_ce_size);
+		bam_txn->cmd_sgl_pos++;
+		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+		if (flags & NAND_BAM_NWD) {
+			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+						     DMA_PREP_FENCE |
+						     DMA_PREP_CMD);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
  * Prepares the data descriptor for BAM DMA which will be used for NAND
  * data reads and writes.
  */
@@ -868,19 +938,22 @@ static int read_reg_dma(struct qcom_nand
 {
 	bool flow_control = false;
 	void *vaddr;
-	int size;
 
-	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-		flow_control = true;
+	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+	nandc->reg_read_pos += num_regs;
 
 	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
 		first = dev_cmd_reg_addr(nandc, first);
 
-	size = num_regs * sizeof(u32);
-	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-	nandc->reg_read_pos += num_regs;
+	if (nandc->props->is_bam)
+		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+					     num_regs, flags);
+
+	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+		flow_control = true;
 
-	return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+	return prep_adm_dma_desc(nandc, true, first, vaddr,
+				 num_regs * sizeof(u32), flow_control);
 }
 
 /*
@@ -897,13 +970,9 @@ static int write_reg_dma(struct qcom_nan
 	bool flow_control = false;
 	struct nandc_regs *regs = nandc->regs;
 	void *vaddr;
-	int size;
 
 	vaddr = offset_to_nandc_reg(regs, first);
 
-	if (first == NAND_FLASH_CMD)
-		flow_control = true;
-
 	if (first == NAND_ERASED_CW_DETECT_CFG) {
 		if (flags & NAND_ERASED_CW_SET)
 			vaddr = &regs->erased_cw_detect_cfg_set;
@@ -920,10 +989,15 @@ static int write_reg_dma(struct qcom_nan
 	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
 
-	size = num_regs * sizeof(u32);
+	if (nandc->props->is_bam)
+		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+					     num_regs, flags);
+
+	if (first == NAND_FLASH_CMD)
+		flow_control = true;
 
-	return prep_adm_dma_desc(nandc, false, first, vaddr, size,
-				 flow_control);
+	return prep_adm_dma_desc(nandc, false, first, vaddr,
+				 num_regs * sizeof(u32), flow_control);
 }
 
 /*
@@ -1187,7 +1261,8 @@ static int submit_descs(struct qcom_nand
 	}
 
 	if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-		r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+		r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+					   DMA_PREP_CMD);
		if (r)
			return r;
 	}
@@ -2725,6 +2800,7 @@ static int qcom_nandc_probe(struct platf
 	if (IS_ERR(nandc->base))
 		return PTR_ERR(nandc->base);
 
+	nandc->base_phys = res->start;
 	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
 
 	nandc->core_clk = devm_clk_get(dev, "core");
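
For register reads the same mechanism runs in reverse: each BAM_READ_COMMAND
element names a register's physical address as the source and a slot in the
driver's DMA-allocated reg_read_buf as the destination, which is the offset
arithmetic the reg_buf_dma_addr() macro above performs. A self-contained
sketch of that calculation follows; my_reg_reader and my_prep_reg_reads are
made-up names, and the buffer is assumed to come from dma_alloc_coherent().

#include <linux/dma-mapping.h>
#include <linux/dma/qcom_bam_dma.h>

/*
 * Illustrative only: registers are latched into a DMA-coherent buffer,
 * one __le32 slot per register. The command element is given the *dma*
 * address of a slot while the CPU later reads the matching *virtual*
 * address; the two views stay in step through plain offset arithmetic.
 */
struct my_reg_reader {
	__le32 *buf;		/* CPU view, from dma_alloc_coherent() */
	dma_addr_t buf_dma;	/* device view of the same memory */
};

static void my_prep_reg_reads(struct my_reg_reader *rd, int pos,
			      struct bam_cmd_element *ce,
			      u32 reg_phys, int num_regs)
{
	int i;

	for (i = 0; i < num_regs; i++) {
		/* dma address of slot pos + i: base + byte offset */
		dma_addr_t slot = rd->buf_dma + (pos + i) * sizeof(__le32);

		/* "copy register at reg_phys + 4 * i into that slot" */
		bam_prep_ce(&ce[i], reg_phys + 4 * i, BAM_READ_COMMAND, slot);
	}
}

Because the buffer is coherent and the dma/virtual views share one offset,
the driver avoids a per-register mapping operation on the hot path.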