openwrt/staging/wigyori.git: target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch
From e640f4bcfa0088ff696bc5da6063a1ea8d782189 Mon Sep 17 00:00:00 2001
From: Iuliana Prodan <iuliana.prodan@nxp.com>
Date: Thu, 26 Sep 2019 15:26:29 +0300
Subject: [PATCH] crypto: caam - use mapped_{src,dst}_nents for descriptor
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The mapped_{src,dst}_nents _returned_ from the dma_map_sg
call (which could be less than src/dst_nents) have to be
used to generate the job descriptors.

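For illustration, a minimal sketch of the pattern (hypothetical
driver code, not taken from this patch; 'desc_len' is a made-up
stand-in for the h/w link table length the driver computes):

  int src_nents = sg_nents_for_len(req->src, req->src_len);
  int mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
  				    DMA_TO_DEVICE);

  if (unlikely(!mapped_src_nents))
  	return ERR_PTR(-ENOMEM);

  /* Wrong: sizes the descriptor from the s/w scatterlist count. */
  /* desc_len = src_nents; */

  /* Right: dma_map_sg() may coalesce adjacent segments, so the
   * count it returns describes what the hardware will actually see.
   */
  desc_len = mapped_src_nents;
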
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
(cherry picked from commit eff9771d51529acf7f6f58a60b2923b98da28f0e)
---
 drivers/crypto/caam/caampkc.c | 72 +++++++++++++++++++++++--------------------
 drivers/crypto/caam/caampkc.h |  8 +++--
 2 files changed, 45 insertions(+), 35 deletions(-)

--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
-	int sgc;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 	int src_nents, dst_nents;
+	int mapped_src_nents, mapped_dst_nents;
 	unsigned int diff_size = 0;
 	int lzeros;
 
@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc
 				     req_ctx->fixup_src_len);
 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
 
-	if (!diff_size && src_nents == 1)
+	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
+				      DMA_TO_DEVICE);
+	if (unlikely(!mapped_src_nents)) {
+		dev_err(dev, "unable to map source\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+				      DMA_FROM_DEVICE);
+	if (unlikely(!mapped_dst_nents)) {
+		dev_err(dev, "unable to map destination\n");
+		goto src_fail;
+	}
+
+	if (!diff_size && mapped_src_nents == 1)
 		sec4_sg_len = 0; /* no need for an input hw s/g table */
 	else
-		sec4_sg_len = src_nents + !!diff_size;
+		sec4_sg_len = mapped_src_nents + !!diff_size;
 	sec4_sg_index = sec4_sg_len;
-	if (dst_nents > 1)
-		sec4_sg_len += pad_sg_nents(dst_nents);
+
+	if (mapped_dst_nents > 1)
+		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
 	else
 		sec4_sg_len = pad_sg_nents(sec4_sg_len);
 
@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc
 	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
 			GFP_DMA | flags);
 	if (!edesc)
-		return ERR_PTR(-ENOMEM);
-
-	sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
-	if (unlikely(!sgc)) {
-		dev_err(dev, "unable to map source\n");
-		goto src_fail;
-	}
-
-	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
-	if (unlikely(!sgc)) {
-		dev_err(dev, "unable to map destination\n");
 		goto dst_fail;
-	}
 
 	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
 	if (diff_size)
@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc
 		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
 				   edesc->sec4_sg + !!diff_size, 0);
 
-	if (dst_nents > 1)
+	if (mapped_dst_nents > 1)
 		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc
 	if (!sec4_sg_bytes)
 		return edesc;
 
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
+
 	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc
 	return edesc;
 
 sec4_sg_fail:
-	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+	kfree(edesc);
 dst_fail:
-	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
 src_fail:
-	kfree(edesc);
+	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akciph
 		return -ENOMEM;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
 		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma +
 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct ak
 		return -ENOMEM;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
+
 	} else {
 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 
 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct ak
 		goto unmap_tmp1;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 
 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct ak
 		goto unmap_tmp1;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 
 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
 
 /**
  * rsa_edesc - s/w-extended rsa descriptor
- * @src_nents     : number of segments in input scatterlist
- * @dst_nents     : number of segments in output scatterlist
+ * @src_nents     : number of segments in input s/w scatterlist
+ * @dst_nents     : number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
  * @sec4_sg_bytes : length of h/w link table
  * @sec4_sg_dma   : dma address of h/w link table
  * @sec4_sg       : pointer to h/w link table
@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
 struct rsa_edesc {
 	int src_nents;
 	int dst_nents;
+	int mapped_src_nents;
+	int mapped_dst_nents;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
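
One subtlety worth keeping in mind when reading the diff (editorial
note, not part of the upstream commit): the mapped counts drive
descriptor generation only. dma_unmap_sg() must still be passed the
original nents value that was given to dma_map_sg(), never the count
it returned, which is why the error path above unmaps with
src_nents/dst_nents. A minimal sketch of that contract, with
hypothetical names:

  int nents = sg_nents_for_len(sgl, len);	/* s/w view */
  int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

  if (unlikely(!mapped))
  	return -ENOMEM;

  /* Build h/w descriptors from 'mapped' ... */

  /* ... but always unmap with the original 'nents'. */
  dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);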