kernel: add Intel/Lantiq VRX518 EP driver
package/kernel/lantiq/vrx518_ep/src/aca.c
1 /*******************************************************************************
2
3 Intel SmartPHY DSL PCIe Endpoint/ACA Linux driver
4 Copyright(c) 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 *******************************************************************************/
23 #define DEBUG
24 #include <linux/init.h>
25 #include <linux/types.h>
26 #include <linux/delay.h>
27 #include <linux/firmware.h>
28
29 #include <net/dc_ep.h>
30
31 #include "regs.h"
32 #include "ep.h"
33 #include "misc.h"
34 #include "aca.h"
35
36 #define ACA_FW_FILE "aca_fw.bin"
37
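/*
 * Replace the bit field of 'val' selected by 'mask' at bit offset 'bits'
 * with the (masked) value 'set'.
 */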
38 #define set_mask_bit(val, set, mask, bits) \
39 (val = (((val) & (~((mask) << (bits)))) \
40 | (((set) & (mask)) << (bits))))
41
42 static char soc_str[128];
43
44 static const char *const aca_img_type_str[ACA_IMG_MAX] = {
45 "vrx518",
46 "vrx618",
47 "falcon-mx",
48 "pmua",
49 };
50
51 static void soc_type_to_str(u32 soc)
52 {
53 memset(soc_str, 0, sizeof(soc_str));
54
55 if ((soc & ACA_SOC_XRX300))
56 strcat(soc_str, "xrx300 ");
57
58 if ((soc & ACA_SOC_XRX500))
59 strcat(soc_str, "xrx500 ");
60
61 if ((soc & ACA_SOC_PUMA))
62 strcat(soc_str, "puma ");
63
64 if ((soc & ACA_SOC_3RD_PARTY))
65 strcat(soc_str, "third party SoC ");
66 }
67
68 static const char *fw_id_to_str(u32 fw_id)
69 {
70 switch (fw_id) {
71 case ACA_FW_TXIN:
72 return "txin";
73
74 case ACA_FW_TXOUT:
75 return "txout";
76
77 case ACA_FW_RXIN:
78 return "rxin";
79
80 case ACA_FW_RXOUT:
81 return "rxout";
82
83 case ACA_FW_GNRC:
84 return "Genrisc";
85
86 default:
87 return "unknow";
88 }
89 }
90
91 static const char * const sec_id_str[] = {
92 "Unknown", "HIF", "GenRisc", "MAC_HT", "TXIN", "TXIN_PDRING", "TXOUT",
93 "TXOUT_PDRING", "RXIN", "RXIN_PDRING", "RXOUT", "RXOUT_PDRING", "DMA",
94 "FW_INIT",
95 };
96 static const char *sec_id_to_str(u32 sec_id)
97 {
98 switch (sec_id) {
99 case ACA_SEC_HIF:
100 case ACA_SEC_GNR:
101 case ACA_SEC_MAC_HT:
102 case ACA_SEC_MEM_TXIN:
103 case ACA_SEC_MEM_TXIN_PDRING:
104 case ACA_SEC_MEM_TXOUT:
105 case ACA_SEC_MEM_TXOUT_PDRING:
106 case ACA_SEC_MEM_RXIN:
107 case ACA_SEC_MEM_RXIN_PDRING:
108 case ACA_SEC_MEM_RXOUT:
109 case ACA_SEC_MEM_RXOUT_PDRING:
110 case ACA_SEC_DMA:
111 case ACA_SEC_FW_INIT:
112 return sec_id_str[sec_id];
113 case ACA_SEC_FW:
114 return "ACA FW";
115
116 default:
117 return "unknown";
118 }
119 }
120
121 static inline struct aca_fw_info *to_fw_info(struct dc_ep_priv *priv)
122 {
123 return &priv->aca.fw_info;
124 }
125
126 static inline struct aca_fw_dl_addr *to_fw_addr(struct dc_ep_priv *priv)
127 {
128 return &priv->aca.fw_info.fw_dl;
129 }
130
131 static inline struct aca_mem_layout *to_mem_layout(struct dc_ep_priv *priv)
132 {
133 return &priv->aca.fw_info.mem_layout;
134 }
135
136 static inline struct aca_pdmem_layout *to_pdmem_layout(struct dc_ep_priv *priv)
137 {
138 return &priv->aca.fw_info.pdmem_layout;
139 }
140
141 static inline struct aca_fw_param *to_aca_fw_param(struct dc_ep_priv *priv)
142 {
143 return &priv->aca.fw_info.fw_param;
144 }
145
146 static inline struct aca_hif_params *to_hif_params(struct dc_ep_priv *priv)
147 {
148 return priv->aca.hif_params;
149 }
150
151 static const struct firmware *aca_fetch_fw_file(struct dc_ep_priv *priv,
152 char *dir, const char *file)
153 {
154 int ret;
155 char filename[100] = {0};
156 const struct firmware *fw;
157
158 if (file == NULL)
159 return ERR_PTR(-ENOENT);
160
161 if (dir == NULL)
162 dir = ".";
163
164 snprintf(filename, sizeof(filename), "%s/%s", dir, file);
165 ret = request_firmware(&fw, filename, priv->dev);
166 if (ret)
167 return ERR_PTR(ret);
168
169 return fw;
170 }
171
172 void dc_aca_free_fw_file(struct dc_ep_priv *priv)
173 {
174 struct aca_fw_info *fw_info = to_fw_info(priv);
175
176 if (fw_info->fw && !IS_ERR(fw_info->fw))
177 release_firmware(fw_info->fw);
178
179 fw_info->fw = NULL;
180 fw_info->fw_data = NULL;
181 fw_info->fw_len = 0;
182 }
183
184 static void aca_dma_parse(struct dc_ep_priv *priv, const char *data, int chn)
185 {
186 int i;
187 u32 cid, dbase;
188 struct aca_fw_dma *fw_dma;
189 struct aca_fw_info *fw_info = to_fw_info(priv);
190
191 fw_info->chan_num = chn;
192
193 for (i = 0; i < fw_info->chan_num; i++) {
194 fw_dma = (struct aca_fw_dma *)(data + i * sizeof(*fw_dma));
195 cid = be32_to_cpu(fw_dma->cid);
196 dbase = be32_to_cpu(fw_dma->base);
197 fw_info->adma_desc_base[cid] = dbase;
198 dev_dbg(priv->dev, "dma channel %d desc base 0x%08x\n",
199 cid, dbase);
200 }
201 }
202
203 static void aca_sram_desc_parse(struct dc_ep_priv *priv, const char *data,
204 u32 sid)
205 {
206 u32 dbase, dnum;
207 struct aca_sram_desc *desc_base;
208 struct aca_mem_layout *mem_layout = to_mem_layout(priv);
209 struct aca_pdmem_layout *pdmem = to_pdmem_layout(priv);
210
211 desc_base = (struct aca_sram_desc *)data;
212 dbase = be32_to_cpu(desc_base->dbase);
213 dnum = be32_to_cpu(desc_base->dnum);
214
215 dev_dbg(priv->dev, "Sec %s desc base 0x%08x, desc_num: %u\n",
216 sec_id_to_str(sid), dbase, dnum);
217
218 switch (sid) {
219 case ACA_SEC_MEM_TXIN:
220 mem_layout->txin_host_desc_base = dbase;
221 mem_layout->txin_host_dnum = dnum;
222 break;
223
224 case ACA_SEC_MEM_TXOUT:
225 mem_layout->txout_host_desc_base = dbase;
226 mem_layout->txout_host_dnum = dnum;
227 break;
228
229 case ACA_SEC_MEM_RXIN:
230 mem_layout->rxin_host_desc_base = dbase;
231 mem_layout->rxin_host_dnum = dnum;
232 break;
233
234 case ACA_SEC_MEM_RXOUT:
235 mem_layout->rxout_host_desc_base = dbase;
236 mem_layout->rxout_host_dnum = dnum;
237 break;
238 case ACA_SEC_MEM_TXIN_PDRING:
239 pdmem->txin_pd_desc_base = dbase;
240 pdmem->txin_pd_dnum = dnum;
241 break;
242 case ACA_SEC_MEM_TXOUT_PDRING:
243 pdmem->txout_pd_desc_base = dbase;
244 pdmem->txout_pd_dnum = dnum;
245 break;
246 case ACA_SEC_MEM_RXIN_PDRING:
247 pdmem->rxin_pd_desc_base = dbase;
248 pdmem->rxin_pd_dnum = dnum;
249 break;
250 case ACA_SEC_MEM_RXOUT_PDRING:
251 pdmem->rxout_pd_desc_base = dbase;
252 pdmem->rxout_pd_dnum = dnum;
253 break;
254 default:
255 dev_err(priv->dev, "Unknown aca sram section %u\n", sid);
256 break;
257 }
258 }
259
260 static void aca_init_parse(struct dc_ep_priv *priv, const char *data,
261 u32 sid)
262 {
263 struct aca_fw_param *fw_param = to_aca_fw_param(priv);
264 struct aca_fw_param *param;
265 u32 hdr_sz, hdr_addr;
266
267 param = (struct aca_fw_param *)data;
268 hdr_sz = be32_to_cpu(param->st_sz);
269 hdr_addr = be32_to_cpu(param->init_addr);
270
271 fw_param->init_addr = hdr_addr;
272 fw_param->st_sz = hdr_sz;
273 dev_dbg(priv->dev, "init st size: %d, addr: 0x%x\n",
274 hdr_sz, hdr_addr);
275 }
276
277 static void aca_fw_parse(struct dc_ep_priv *priv, const char *data,
278 const char *fw_base, int fw_num)
279 {
280 int i;
281 size_t size;
282 u32 id, offset, addr;
283 struct aca_int_hdr *hdr;
284 struct aca_fw_dl_addr *fw_dl = to_fw_addr(priv);
285
286 fw_dl->fw_num = fw_num;
287
288 for (i = 0; i < fw_dl->fw_num; i++) {
289 hdr = (struct aca_int_hdr *)(data + i * sizeof(*hdr));
290 id = be32_to_cpu(hdr->id);
291 offset = be32_to_cpu(hdr->offset);
292 size = be32_to_cpu(hdr->size);
293 addr = be32_to_cpu(hdr->load_addr);
294
295 fw_dl->fw_addr[i].fw_id = id;
296 fw_dl->fw_addr[i].fw_load_addr = addr;
297 fw_dl->fw_addr[i].fw_size = size;
298 fw_dl->fw_addr[i].fw_base = fw_base + offset;
299 dev_dbg(priv->dev,
300 "aca %s fw offset 0x%x size %zd loc 0x%x fw base %p\n",
301 fw_id_to_str(id), offset, size, addr, fw_base + offset);
302 }
303 }
304
305 /* --------------------------------------------------------
306 | Fixed header (20 bytes) |
307 ---------------------------------------------------------
308 | Variable header |
309 | IE / payload |
310 |-------------------------------------------------------|
311 | Actual ACA FW |
312 ---------------------------------------------------------
313 */
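/*
 * The variable header is a list of information elements; each IE carries a
 * 32-bit section id and a 32-bit entry count, followed by 'count'
 * section-specific records. All fields are stored big-endian.
 */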
314 static int aca_section_parse(struct dc_ep_priv *priv, const char *fw_data)
315 {
316 int ret = 0;
317 u32 fixed_hlen;
318 u32 var_hlen;
319 u32 ie_id;
320 size_t ie_len, ie_hlen, ie_dlen;
321 u32 fw_hlen;
322 struct aca_fw_f_hdr *fw_f_hdr;
323 struct aca_fw_ie *ie_hdr;
324 struct aca_int_hdr *aca_hdr;
325 const char *data = fw_data;
326 const char *aca_fw_data;
327 struct device *dev = priv->dev;
328
329 fw_f_hdr = (struct aca_fw_f_hdr *)data;
330
331 fw_hlen = be32_to_cpu(fw_f_hdr->hdr_size);
332 fixed_hlen = sizeof(*fw_f_hdr);
333 var_hlen = fw_hlen - fixed_hlen;
334 ie_hlen = sizeof(*ie_hdr);
335
336 /* Record actual ACA fw data pointer */
337 aca_fw_data = data + fw_hlen;
338
339 /* Point to variable header and parse them */
340 data += fixed_hlen;
341
342 while (var_hlen > ie_hlen) {
343 /* Variable header information element */
344 ie_hdr = (struct aca_fw_ie *)data;
345 ie_id = be32_to_cpu(ie_hdr->id);
346 ie_len = be32_to_cpu(ie_hdr->len);
347 dev_dbg(dev, "Section %s ie_len %zd\n", sec_id_to_str(ie_id),
348 ie_len);
349
350 /* Variable header data contents */
351 data += ie_hlen;
352 var_hlen -= ie_hlen;
353
354 switch (ie_id) {
355 case ACA_SEC_HIF:
356 case ACA_SEC_GNR:
357 case ACA_SEC_MAC_HT:
358 ie_dlen = ie_len * sizeof(struct aca_fw_reg);
359 data += ie_dlen;
360 var_hlen -= ie_dlen;
361
362 break;
363
364 case ACA_SEC_MEM_TXIN:
365 case ACA_SEC_MEM_TXOUT:
366 case ACA_SEC_MEM_RXIN:
367 case ACA_SEC_MEM_RXOUT:
368 case ACA_SEC_MEM_TXIN_PDRING:
369 case ACA_SEC_MEM_TXOUT_PDRING:
370 case ACA_SEC_MEM_RXIN_PDRING:
371 case ACA_SEC_MEM_RXOUT_PDRING:
372 aca_sram_desc_parse(priv, data, ie_id);
373 ie_dlen = ie_len * sizeof(struct aca_sram_desc);
374 data += ie_dlen;
375 var_hlen -= ie_dlen;
376 break;
377
378 case ACA_SEC_DMA:
379 if (ie_len > ACA_DMA_CHAN_MAX) {
380 dev_err(dev, "invalid dma channel count %zu\n",
381 ie_len);
382 ret = -EINVAL;
383 goto done;
384 }
385 aca_dma_parse(priv, data, ie_len);
386 ie_dlen = ie_len * sizeof(struct aca_fw_dma);
387 data += ie_dlen;
388 var_hlen -= ie_dlen;
389 break;
390
391 case ACA_SEC_FW_INIT:
392 aca_init_parse(priv, data, ie_id);
393 ie_dlen = ie_len * sizeof(struct aca_fw_param);
394 data += ie_dlen;
395 var_hlen -= ie_dlen;
396 break;
397
398 case ACA_SEC_FW:
399 if (ie_len > ACA_FW_MAX) {
400 dev_err(dev, "Too many aca fws %zu\n", ie_len);
401 ret = -EINVAL;
402 goto done;
403 }
404 aca_fw_parse(priv, data, aca_fw_data, ie_len);
405 ie_dlen = ie_len * sizeof(*aca_hdr);
406 data += ie_dlen;
407 var_hlen -= ie_dlen;
408 break;
409
410 default:
411 dev_warn(dev, "Unknown Sec id: %u\n", ie_id);
412 break;
413 }
414 }
415 done:
416 return ret;
417 }
418
419 static int aca_fetch_fw_api(struct dc_ep_priv *priv, const char *name)
420 {
421 int ret;
422 size_t hdr_len;
423 const u8 *fw_data;
424 size_t fw_len;
425 char dir[8] = {0};
426 union fw_ver ver;
427 union img_soc_type type;
428 struct device *dev = priv->dev;
429 struct aca_fw_f_hdr *fw_f_hdr;
430 struct aca_fw_info *fw_info = to_fw_info(priv);
431
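/* The blob is requested from a per-device subdirectory: "<pci device id>/aca_fw.bin" */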
432 sprintf(dir, "%04x", priv->pdev->device);
433 fw_info->fw = aca_fetch_fw_file(priv, dir, name);
434 if (IS_ERR(fw_info->fw)) {
435 dev_err(dev, "Could not fetch firmware file '%s': %ld\n",
436 name, PTR_ERR(fw_info->fw));
437 return PTR_ERR(fw_info->fw);
438 }
439
440 fw_data = fw_info->fw->data;
441 fw_len = fw_info->fw->size;
442
443 /* Parse the fixed header part */
444 fw_f_hdr = (struct aca_fw_f_hdr *)fw_data;
445 ver.all = be32_to_cpu(fw_f_hdr->ver);
446
447 dev_info(dev, "ACA fw build %d branch %d major 0x%02x minor 0x%04x\n",
448 ver.field.build, ver.field.branch,
449 ver.field.major, ver.field.minor);
450
451 type.all = be32_to_cpu(fw_f_hdr->type);
452
453 if (type.field.img_type > (ACA_IMG_MAX - 1)
454 || ((type.field.soc_type & ACA_SOC_MASK) == 0)) {
455 dev_err(dev, "Invalid aca fw img %d soc %d\n",
456 type.field.img_type, type.field.soc_type);
457 ret = -EINVAL;
458 goto err;
459 }
460
461 soc_type_to_str(type.field.soc_type);
462
463 dev_info(priv->dev, "ACA fw for %s supported SoC type %s\n",
464 aca_img_type_str[type.field.img_type], soc_str);
465
466 hdr_len = be32_to_cpu(fw_f_hdr->hdr_size);
467 /* Sanity Check */
468 if (fw_len < hdr_len) {
469 dev_err(dev, "Invalid aca fw hdr len %zd fw len %zd\n",
470 hdr_len, fw_len);
471 ret = -EINVAL;
472 goto err;
473 }
474 dev_dbg(dev, "Header size 0x%08zx fw size 0x%08x\n",
475 hdr_len, be32_to_cpu(fw_f_hdr->fw_size));
476 dev_dbg(dev, "section number %d\n",
477 be32_to_cpu(fw_f_hdr->num_section));
478
479 ret = aca_section_parse(priv, fw_data);
if (ret)
goto err;
480 return 0;
481 err:
482 dc_aca_free_fw_file(priv);
483 return ret;
484 }
485
486 static int aca_fetch_fw(struct dc_ep_priv *priv)
487 {
488 return aca_fetch_fw_api(priv, ACA_FW_FILE);
489 }
490
491 static int aca_fw_download(struct dc_ep_priv *priv)
492 {
493 int i, j;
494 u32 val;
495 size_t size;
496 u32 id, load_addr;
497 const char *fw_base;
498 struct aca_fw_dl_addr *fw_dl = to_fw_addr(priv);
499
500 for (i = 0; i < fw_dl->fw_num; i++) {
501 id = fw_dl->fw_addr[i].fw_id;
502 load_addr = fw_dl->fw_addr[i].fw_load_addr;
503 size = fw_dl->fw_addr[i].fw_size;
504 fw_base = fw_dl->fw_addr[i].fw_base;
505
506 if (size % 4) {
507 dev_err(priv->dev,
508 "aca %s fw size is not a multiple of 4\n",
509 fw_id_to_str(id));
510 return -EINVAL;
511 }
512
513 for (j = 0; j < size; j += 4) {
514 val = *((u32 *)(fw_base + j));
515 wr32(cpu_to_be32(val), load_addr + j);
516 }
517 /* Write flush */
518 rd32(load_addr);
519 #ifdef DEBUG
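/*
 * DEBUG is defined at the top of this file, so every word just written
 * is read back and compared; bail out on the first mismatch.
 */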
520 {
521 u32 src, dst;
522
523 for (j = 0; j < size; j += 4) {
524 dst = rd32(load_addr + j);
525 src = *((u32 *)(fw_base + j));
526 if (dst != cpu_to_be32(src)) {
527 dev_info(priv->dev,
528 "dst 0x%08x != src 0x%08x\n", dst, src);
529 return -EIO;
530 }
531 }
532 }
533 #endif /* DEBUG */
534 }
535 return 0;
536 }
537
538 static void aca_dma_ctrl_init(struct dc_ep_priv *priv)
539 {
540 u32 val;
541 struct dc_aca *aca = to_aca(priv);
542
543 /* Global software reset CDMA */
544 wr32_mask(0, BIT(CTRL_RST), ADMA_CTRL);
545 while ((rd32(ADMA_CTRL) & BIT(CTRL_RST)))
546 ;
547
548 val = rd32(ADMA_ID);
549 /* Record max dma channels for later usage */
550 aca->adma_chans = MS(val, ADMA_ID_CHNR);
551 val = rd32(ADMA_CTRL);
552 /*
553 * Enable Packet Arbitration
554 * Enable Meta data copy
555 * Enable Dedicated Descriptor port
556 */
557 val |= BIT(CTRL_PKTARB) | BIT(CTRL_MDC) | BIT(CTRL_DSRAM);
558 set_mask_bit(val, 1, 1, CTRL_ENBE); /* Enable byte enable */
559 set_mask_bit(val, 1, 1, CTRL_DCNF); /* 2DW descriptor format */
560 set_mask_bit(val, 1, 1, CTRL_DDBR); /* Descriptor read back */
561 set_mask_bit(val, 1, 1, CTRL_DRB); /* Dynamic burst read */
562 wr32(val, ADMA_CTRL);
563
564 /* Polling cnt cfg */
565 wr32(ADMA_CPOLL_EN | SM(ADMA_DEFAULT_POLL, ADMA_CPOLL_CNT),
566 ADMA_CPOLL);
567 }
568
569 static void aca_dma_port_init(struct dc_ep_priv *priv)
570 {
571 u32 val;
572
573 /* Only one port /port 0 */
574 wr32(0, ADMA_PS);
575 val = rd32(ADMA_PCTRL);
576 set_mask_bit(val, 1, 1, PCTRL_RXBL16);
577 set_mask_bit(val, 1, 1, PCTRL_TXBL16);
578 set_mask_bit(val, 0, 3, PCTRL_RXBL);
579 set_mask_bit(val, 0, 3, PCTRL_TXBL);
580
581 set_mask_bit(val, 0, 3, PCTRL_TXENDI);
582 set_mask_bit(val, 0, 3, PCTRL_RXENDI);
583 wr32(val, ADMA_PCTRL);
584 }
585
586 static void aca_dma_ch_init(struct dc_ep_priv *priv, u32 cid,
587 u32 dbase, u32 dlen)
588 {
589 /* Select channel */
590 wr32(cid, ADMA_CS);
591
592 /* Reset Channel */
593 wr32_mask(0, BIT(CCTRL_RST), ADMA_CCTRL);
594 while ((rd32(ADMA_CCTRL) & BIT(CCTRL_RST)))
595 ;
596
597 /* Set descriptor list base and length */
598 wr32(dbase, ADMA_CDBA);
599 wr32(dlen, ADMA_CDLEN);
600
601 /*Clear Intr */
602 wr32(ADMA_CI_ALL, ADMA_CIS);
603 /* Enable Intr */
604 wr32(ADMA_CI_ALL, ADMA_CIE);
605
606 /* Enable Channel */
607 wr32_mask(0, BIT(CCTRL_ONOFF), ADMA_CCTRL);
608 mb();
609 }
610
611 static void aca_dma_ch_off(struct dc_ep_priv *priv)
612 {
613 int i;
614 struct dc_aca *aca = to_aca(priv);
615
616 /* Shared between OS and ACA FW. Stop ACA first */
617 for (i = 0; i < aca->adma_chans; i++) {
618 wr32(i, ADMA_CS);
619 wr32_mask(BIT(CCTRL_ONOFF), 0, ADMA_CCTRL);
620 while (rd32(ADMA_CCTRL) & BIT(CCTRL_ONOFF))
621 ;
622 }
623 dev_dbg(priv->dev, "aca dma channel done\n");
624 }
625
626 static void aca_xbar_ia_reject_set(struct dc_ep_priv *priv, int ia_id)
627 {
628 u32 val;
629 int timeout = 1000;
630 struct device *dev = priv->dev;
631
632 /* Set reject bit */
633 wr32(XBAR_CTRL_REJECT, ACA_AGENT_CTRL(ia_id));
634
635 /* Poll burst, readex, resp_waiting, req_active */
636 val = XBAR_STAT_REQ_ACTIVE | XBAR_STAT_RESP_WAITING
637 | XBAR_STAT_BURST | XBAR_STAT_READEX;
638 while (--timeout && !!(rd32(ACA_AGENT_STATUS(ia_id)) & val))
639 udelay(1);
640
641 if (timeout <= 0) {
642 dev_dbg(dev,
643 "ACA XBAR IA: %d reset timeout, pending on 0x%x\n",
644 ia_id, rd32(ACA_AGENT_STATUS(ia_id)));
645 return;
646 }
647 }
648
649 static void aca_xbar_ia_reject_clr(struct dc_ep_priv *priv, int ia_id)
650 {
651 u32 val;
652
653 /* Check reject bit */
654 val = rd32(ACA_AGENT_CTRL(ia_id));
655 if ((val & XBAR_CTRL_REJECT) == 0)
656 return;
657
658 /* Clear reject bit */
659 val &= ~XBAR_CTRL_REJECT;
660 wr32(val, ACA_AGENT_CTRL(ia_id));
661 rd32(ACA_AGENT_CTRL(ia_id));
662 }
663
664 static void aca_xbar_ia_reset(struct dc_ep_priv *priv, int ia_id)
665 {
666 /* ACA IA reset */
667 wr32(XBAR_CTRL_CORE_RESET, ACA_AGENT_CTRL(ia_id));
668
669 /* Read till status become 1 */
670 while ((rd32(ACA_AGENT_STATUS(ia_id)) & XBAR_STAT_CORE_RESET) == 0)
671 ;
672
673 /* Clear the IA Reset signal */
674 wr32(0, ACA_AGENT_CTRL(ia_id));
675
676 /* Read till status become 0 */
677 while ((rd32(ACA_AGENT_STATUS(ia_id)) & XBAR_STAT_CORE_RESET) != 0)
678 ;
679
680 dev_dbg(priv->dev, "ACA XBAR IA(%d) reset done\n", ia_id);
681 }
682
683 void dc_aca_shutdown(struct dc_ep_priv *priv)
684 {
685 struct dc_aca *aca = to_aca(priv);
686
687 if (aca->initialized) {
688 aca_xbar_ia_reset(priv, ACA_ACC_IA04);
689 aca_xbar_ia_reset(priv, ACA_M_IA06);
690 }
691 }
692
693 static void aca_dma_init(struct dc_ep_priv *priv)
694 {
695 int i;
696 struct aca_fw_info *fw_info = to_fw_info(priv);
697
698 aca_dma_ctrl_init(priv);
699 aca_dma_port_init(priv);
700
701 for (i = 0; i < fw_info->chan_num; i++) {
702 aca_dma_ch_init(priv, i,
703 fw_info->adma_desc_base[i] | priv->phymem,
704 DESC_NUM_PER_CH);
705 }
706
707 dev_dbg(priv->dev, "aca dma init done\n");
708 }
709
710 static void aca_basic_init(struct dc_ep_priv *priv)
711 {
712 u32 addr, mask;
713
714 /* Low 32 is RX, High 32 is TX */
715 wr32(0x1, UMT_ORDER_CFG);
716 /* TXIN/TXOUT/RXIN/RXOUT All Controlled by Genrisc */
717 wr32(0xF, HOST_TYPE);
718 /* Enable Host Gate CLK */
719 wr32(0x4000, HT_GCLK_ENABLE);
720 /* Host Page/MASK */
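/*
 * For a power-of-two memsize, ~memsize + 1 is the mask covering the
 * address bits above the window. The arbiter page registers appear to
 * take that mask in the upper bits and the aligned physical base
 * (shifted right by 16) in the low 16 bits; this is inferred from the
 * computation below, not from a datasheet.
 */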
721 mask = ~priv->memsize + 1;
722 addr = mask | ((priv->phymem & mask) >> 16);
723 wr32(addr, AHB_ARB_HP_REG);
724 wr32(addr, OCP_ARB_ACC_PAGE_REG);
725 /* Stop all functions first */
726 wr32(0, GNRC_EN_TASK_BITMAP);
727
728 /* Enable XBAR */
729 aca_xbar_ia_reject_clr(priv, ACA_ACC_IA04);
730 aca_xbar_ia_reject_clr(priv, ACA_M_IA06);
731
732 dev_dbg(priv->dev, "aca basic config done\n");
733 }
734
735 static int aca_hif_param_init(struct dc_ep_priv *priv)
736 {
737 struct dc_aca *aca = to_aca(priv);
738
739 aca->hif_params = kzalloc(sizeof(struct aca_hif_params), GFP_KERNEL);
740 if (!aca->hif_params)
741 return -ENOMEM;
742 aca->hif_params->task_mask = 0x0000000F;
743 dev_dbg(priv->dev, "%s\n", __func__);
744 return 0;
745 }
746
747 static void aca_hif_param_init_done(struct dc_ep_priv *priv)
748 {
749 u32 addr;
750 struct aca_hif_params *hif_params = to_hif_params(priv);
751 struct aca_fw_param *fw_param = to_aca_fw_param(priv);
752
753 /* wr32(ACA_HIF_PARAM_ADDR, ACA_HIF_LOC_POS);*/
754 /* addr = rd32(ACA_HIF_LOC_POS);*/
755
756 addr = fw_param->init_addr;
757 dev_dbg(priv->dev, "init_addr: %x\n", addr);
758 memcpy_toio(priv->mem + addr, hif_params, sizeof(*hif_params));
759 kzfree(hif_params);
760 dev_dbg(priv->dev, "%s\n", __func__);
761 }
762
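/*
 * Init handshake: once the Genrisc is started via GNRC_START_OP, the ACA
 * firmware is expected to write ACA_MAGIC into the magic field of the
 * hif_params block that was copied to init_addr; poll for it here.
 */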
763 static bool aca_hif_param_init_check(struct dc_ep_priv *priv)
764 {
765 u32 addr;
766 int timeout = ACA_LOOP_CNT;
767 u32 offset = offsetof(struct aca_hif_params, magic);
768 struct aca_fw_param *fw_param = to_aca_fw_param(priv);
769
770 /* addr = rd32(ACA_HIF_LOC_POS);*/
771 addr = fw_param->init_addr;
772 while (--timeout && (rd32(addr + offset) != ACA_MAGIC))
773 udelay(1);
774
775 if (timeout <= 0) {
776 dev_err(priv->dev, "aca hif params init failed\n");
777 return false;
778 }
779
780 return true;
781 }
782
783 static void aca_txin_init(struct dc_ep_priv *priv,
784 struct aca_cfg_param *aca_txin)
785 {
786 u32 val = 0;
787 struct aca_mem_layout *mem_layout = to_mem_layout(priv);
788 struct aca_hif_params *hif_params = to_hif_params(priv);
789 struct aca_hif_param *txin_param = &hif_params->txin;
790
791 if (aca_txin->byteswap)
792 val = BYTE_SWAP_EN;
793
794 val |= (aca_txin->hd_size_in_dw - 1)
795 | SM((aca_txin->pd_size_in_dw - 1), PD_DESC_IN_DW);
796 wr32(val, TXIN_CONV_CFG);
797
798 /* SoC cumulative counter address */
799 wr32(aca_txin->soc_cmlt_cnt_addr, GNRC_TXIN_CMLT_CNT_ADDR);
800
801
802 /* SoC descriptors */
803 txin_param->soc_desc_base = aca_txin->soc_desc_base;
804 txin_param->soc_desc_num = aca_txin->soc_desc_num;
805
806 /* Ping/pong buffer */
807 txin_param->pp_buf_base = priv->phymem
808 + mem_layout->txin_host_desc_base;
809
810 txin_param->pp_buf_num = mem_layout->txin_host_dnum;
811
812 /* PD ring */
813 txin_param->pd_desc_base = priv->phymem
814 + aca_txin->pd_desc_base;
815 txin_param->pd_desc_num = aca_txin->pd_desc_num;
816
817 dev_dbg(priv->dev, "aca txin init done\n");
818 }
819
820 static void aca_txout_init(struct dc_ep_priv *priv,
821 struct aca_cfg_param *aca_txout)
822 {
823 u32 val = 0;
824 struct aca_mem_layout *mem_layout = to_mem_layout(priv);
825 struct aca_hif_params *hif_params = to_hif_params(priv);
826 struct aca_hif_param *txout_param = &hif_params->txout;
827
828 if (aca_txout->byteswap)
829 val = BYTE_SWAP_EN;
830
831 val |= (aca_txout->hd_size_in_dw - 1)
832 | SM((aca_txout->pd_size_in_dw - 1), PD_DESC_IN_DW);
833 wr32(val, TXOUT_CONV_CFG);
834
835 /* SoC Ring size */
836 val = aca_txout->soc_desc_num;
837 wr32(val, TXOUT_RING_CFG);
838
839 /* SoC cumulative counter address */
840 wr32(aca_txout->soc_cmlt_cnt_addr, GNRC_TXOUT_CMLT_CNT_ADDR);
841 /* SoC descriptors */
842 txout_param->soc_desc_base = aca_txout->soc_desc_base;
843 txout_param->soc_desc_num = aca_txout->soc_desc_num;
844
845 /* Ping/pong buffer */
846 txout_param->pp_buf_base = priv->phymem
847 + mem_layout->txout_host_desc_base;
848
849 txout_param->pp_buf_num = mem_layout->txout_host_dnum;
850
851 /* PD ring */
852 txout_param->pd_desc_base = priv->phymem
853 + aca_txout->pd_desc_base;
854 txout_param->pd_desc_num = aca_txout->pd_desc_num;
855
856 txout_param->pd_desc_threshold = aca_txout->pp_buf_desc_num;
857
858 dev_dbg(priv->dev, "aca txout init done\n");
859 }
860
861 static void aca_rxin_init(struct dc_ep_priv *priv,
862 struct aca_cfg_param *aca_rxin)
863 {
864 u32 val = 0;
865 struct aca_mem_layout *mem_layout = to_mem_layout(priv);
866 struct aca_hif_params *hif_params = to_hif_params(priv);
867 struct aca_hif_param *rxin_param = &hif_params->rxin;
868
869 if (aca_rxin->byteswap)
870 val = BYTE_SWAP_EN;
871
872 val |= (aca_rxin->hd_size_in_dw - 1)
873 | SM((aca_rxin->pd_size_in_dw - 1), PD_DESC_IN_DW);
874 wr32(val, RXIN_CONV_CFG);
875
876 /* SoC cumulative counter address */
877 wr32(aca_rxin->soc_cmlt_cnt_addr, GNRC_RXIN_CMLT_CNT_ADDR);
878
879 /* RXIN may not be used */
880 if (!(aca_rxin->soc_desc_base))
881 goto __RXIN_DONE;
882 /* SoC descriptors */
883 rxin_param->soc_desc_base = aca_rxin->soc_desc_base;
884 rxin_param->soc_desc_num = aca_rxin->soc_desc_num;
885
886 /* Ping/pong buffer */
887 rxin_param->pp_buf_base = (u32)priv->phymem
888 + mem_layout->rxin_host_desc_base;
889
890 rxin_param->pp_buf_num = mem_layout->rxin_host_dnum;
891
892 /* PD ring */
893 rxin_param->pd_desc_base = (u32)priv->phymem
894 + aca_rxin->pd_desc_base;
895 rxin_param->pd_desc_num = aca_rxin->pd_desc_num;
896
897 rxin_param->pd_desc_threshold = aca_rxin->pp_buf_desc_num;
898
899 __RXIN_DONE:
900 dev_dbg(priv->dev, "aca rxin init done\n");
901 }
902
903 static void aca_rxout_init(struct dc_ep_priv *priv,
904 struct aca_cfg_param *aca_rxout)
905 {
906 u32 val = 0;
907 struct aca_mem_layout *mem_layout = to_mem_layout(priv);
908 struct aca_hif_params *hif_params = to_hif_params(priv);
909 struct aca_hif_param *rxout_param = &hif_params->rxout;
910
911 if (aca_rxout->byteswap)
912 val = BYTE_SWAP_EN;
913
914 val |= (aca_rxout->hd_size_in_dw - 1)
915 | SM((aca_rxout->pd_size_in_dw - 1), PD_DESC_IN_DW);
916 wr32(val, RXOUT_CONV_CFG);
917
918 /* SoC Ring size */
919 val = aca_rxout->soc_desc_num;
920 wr32(val, RXOUT_RING_CFG);
921
922 /* SoC cumulative counter address */
923 wr32(aca_rxout->soc_cmlt_cnt_addr, GNRC_RXOUT_CMLT_CNT_ADDR);
924 /* SoC descriptors */
925 rxout_param->soc_desc_base = aca_rxout->soc_desc_base;
926 rxout_param->soc_desc_num = aca_rxout->soc_desc_num;
927
928 /* Ping/pong buffer */
929 rxout_param->pp_buf_base = (u32)priv->phymem
930 + mem_layout->rxout_host_desc_base;
931
932 rxout_param->pp_buf_num = mem_layout->rxout_host_dnum;
933
934 /* PD ring */
935 rxout_param->pd_desc_base = (u32)priv->phymem
936 + aca_rxout->pd_desc_base;
937 rxout_param->pd_desc_num = aca_rxout->pd_desc_num;
938
939 rxout_param->pd_desc_threshold = aca_rxout->pp_buf_desc_num;
940 dev_dbg(priv->dev, "aca rxout init done\n");
941 }
942
943 static void aca_mdm_init(struct dc_ep_priv *priv, struct aca_modem_param *mdm)
944 {
945 struct aca_proj_param *param;
946
947 if (!mdm)
948 return;
949
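/*
 * The modem parameters look like offsets inside the EP window; OR-ing in
 * phymem (an aligned base) turns them into bus addresses for the Genrisc.
 */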
950 param = &mdm->mdm_txout;
951 wr32(param->stat | priv->phymem, GNRC_TXOUT_TGT_STAT);
952 wr32(param->pd | priv->phymem, GNRC_TXOUT_TGT_PD_OFF);
953 wr32(param->acc_cnt | priv->phymem, GNRC_TXOUT_TGT_ACCM_CNT);
954
955 param = &mdm->mdm_rxin;
956 wr32(param->stat | priv->phymem, GNRC_RXIN_TGT_STAT);
957 wr32(param->pd | priv->phymem, GNRC_RXIN_TGT_PD_OFF);
958 wr32(param->acc_cnt | priv->phymem, GNRC_RXIN_TGT_ACCM_CNT);
959
960 param = &mdm->mdm_rxout;
961 wr32(param->stat | priv->phymem, GNRC_RXOUT_TGT_STAT);
962 wr32(param->pd | priv->phymem, GNRC_RXOUT_TGT_PD_OFF);
963 wr32(param->acc_cnt | priv->phymem, GNRC_RXOUT_TGT_ACCM_CNT);
964 dev_dbg(priv->dev, "aca mdm init done\n");
965 }
966
967 static void dc_aca_clk_on(struct dc_ep_priv *priv)
968 {
969 dc_ep_clk_on(priv, PMU_ADMA);
970 }
971
972 static void dc_aca_clk_off(struct dc_ep_priv *priv)
973 {
974 dc_ep_clk_off(priv, PMU_ADMA);
975 }
976
977 static void dc_aca_reset(struct dc_ep_priv *priv)
978 {
979 dc_ep_reset_device(priv, RST_ACA_DMA | RST_ACA_HOSTIF);
980 }
981
982 static void aca_mem_clear(struct dc_ep_priv *priv)
983 {
984 struct aca_fw_dl_addr *fw_dl = to_fw_addr(priv);
985
986 memset_io(priv->mem + fw_dl->fw_addr[0].fw_load_addr,
987 0, ACA_ACC_FW_SIZE);
988 memset_io(priv->mem + ACA_SRAM_BASE, 0, ACA_SRAM_SIZE);
989 }
990
991 int dc_aca_start(struct dc_ep_priv *priv, u32 func, int start)
992 {
993 if (!func)
994 return -EINVAL;
995
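/*
 * Each bit of GNRC_EN_TASK_BITMAP enables one ACA function
 * (TXIN/TXOUT/RXIN/RXOUT); the optional write to GNRC_START_OP below
 * kicks the Genrisc firmware.
 */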
996 wr32_mask(0, func, GNRC_EN_TASK_BITMAP);
997
998 /* Only do if requested by caller */
999 if (start) {
1000 wr32(0x1, GNRC_START_OP); /* Any write will trigger */
1001 rd32(GNRC_START_OP);
1002 if (!aca_hif_param_init_check(priv))
1003 return -EIO;
1004 }
1005 return 0;
1006 }
1007
1008 static void aca_sw_reset(struct dc_ep_priv *priv)
1009 {
1010 u32 val = SW_RST_GENRISC | SW_RST_HOSTIF_REG | SW_RST_RXIN
1011 | SW_RST_RXOUT | SW_RST_TXIN | SW_RST_TXOUT;
1012
1013 wr32(val, HT_SW_RST_ASSRT);
1014 udelay(1);
1015 wr32(val, HT_SW_RST_RELEASE);
1016 wmb();
1017 }
1018
1019 int dc_aca_stop(struct dc_ep_priv *priv, u32 *func, int reset)
1020 {
1021 u32 val = *func;
1022 u32 reg;
1023
1024 if (!val)
1025 return 0;
1026
1027 *func = 0;
1028
1029 /* Only do it if reset is required. Otherwise, pending is fine */
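/*
 * A function counts as busy while its counter register still reports
 * pending jobs or while not all ping/pong buffers are back
 * (ACA_AVAIL_BUF != ACA_PP_BUFS).
 */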
1030 if (reset) {
1031 if (val & ACA_TXIN_EN) {
1032 reg = rd32(TXIN_COUNTERS);
1033 if (MS(reg, ACA_PENDING_JOB)
1034 || (MS(reg, ACA_AVAIL_BUF) != ACA_PP_BUFS)) {
1035 *func = ACA_TXIN_EN;
1036 return -EBUSY;
1037 }
1038 }
1039
1040 if (val & ACA_TXOUT_EN) {
1041 reg = rd32(TXOUT_COUNTERS);
1042 if (MS(reg, ACA_PENDING_JOB)
1043 || (MS(reg, ACA_AVAIL_BUF) != ACA_PP_BUFS)) {
1044 *func = ACA_TXOUT_EN;
1045 return -EBUSY;
1046 }
1047 }
1048
1049
1050 if (val & ACA_RXIN_EN) {
1051 reg = rd32(RXIN_COUNTERS);
1052 if (MS(reg, ACA_PENDING_JOB)
1053 || (MS(reg, ACA_AVAIL_BUF) != ACA_PP_BUFS)) {
1054 *func = ACA_RXIN_EN;
1055 return -EBUSY;
1056 }
1057 }
1058
1059 if (val & ACA_RXOUT_EN) {
1060 reg = rd32(RXOUT_COUNTERS);
1061 if (MS(reg, ACA_PENDING_JOB)
1062 || (MS(reg, ACA_AVAIL_BUF) != ACA_PP_BUFS)) {
1063 *func = ACA_RXOUT_EN;
1064 return -EBUSY;
1065 }
1066 }
1067 }
1068
1069 wr32_mask(val, 0, GNRC_EN_TASK_BITMAP);
1070
1071 if (reset) {
1072 aca_dma_ch_off(priv);
1073 aca_xbar_ia_reject_set(priv, ACA_ACC_IA04);
1074 aca_xbar_ia_reject_set(priv, ACA_M_IA06);
1075 aca_sw_reset(priv);
1076 }
1077 return 0;
1078 }
1079
1080 #ifdef CONFIG_SOC_TYPE_XWAY
1081 static void aca_grx330_init(struct dc_ep_priv *priv)
1082 {
1083 wr32(0x0044001E, TXIN_CFG1);
1084 wr32(0x0040041F, TXIN_CFG2);
1085 wr32(0x007FE020, TXIN_CFG3);
1086
1087 wr32(0x0044001F, TXOUT_CFG1);
1088 wr32(0x0040041F, TXOUT_CFG2);
1089 wr32(0x007BE020, TXOUT_CFG3);
1090
1091 wr32(0x0044001F, RXOUT_CFG1);
1092 wr32(0x0040041F, RXOUT_CFG2);
1093 wr32(0x007BE020, RXOUT_CFG3);
1094
1095 wr32(0x0044001E, RXIN_CFG1);
1096 wr32(0x0040041F, RXIN_CFG2);
1097 wr32(0x007FE020, RXIN_CFG3);
1098
1099 wr32(0x1, TXIN_DST_OWWBIT_CFG4);
1100 wr32(0x1, TXOUT_DST_OWWBIT_CFG4);
1101 wr32(0x1, RXOUT_SRC_OWNBIT_CFG3);
1102 wr32(0x1, RXIN_SRC_OWNBIT_CFG3);
1103
1104 wr32(0x0, GNRC_TXIN_BUF_PREFILL);
1105 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x4);
1106 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x8);
1107 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0xc);
1108 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x10);
1109 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x14);
1110 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x18);
1111 wr32(0x0, GNRC_TXIN_BUF_PREFILL + 0x1c);
1112 }
1113 #endif
1114
1115 int dc_aca_init(struct dc_ep_priv *priv, struct aca_param *param,
1116 struct aca_modem_param *mdm)
1117 {
1118 int ret;
1119 struct dc_aca *aca = to_aca(priv);
1120
1121 dc_aca_clk_on(priv);
1122 dc_aca_reset(priv);
1123
1124 ret = aca_fetch_fw(priv);
1125 if (ret) {
1126 dev_err(priv->dev,
1127 "could not fetch firmware files %d\n", ret);
1128 dc_aca_clk_off(priv);
1129 return ret;
1130 }
1131
1132 aca_mem_clear(priv);
1133 aca_dma_init(priv);
1134 aca_basic_init(priv);
1135 ret = aca_fw_download(priv);
if (!ret)
1136 ret = aca_hif_param_init(priv);
if (ret) {
dc_aca_free_fw_file(priv);
dc_aca_clk_off(priv);
return ret;
}
1137 aca_txin_init(priv, &param->aca_txin);
1138 aca_txout_init(priv, &param->aca_txout);
1139 aca_rxout_init(priv, &param->aca_rxout);
1140 aca_rxin_init(priv, &param->aca_rxin);
1141 aca_hif_param_init_done(priv);
1142 aca_mdm_init(priv, mdm);
1143 #ifdef CONFIG_SOC_TYPE_XWAY
1144 aca_grx330_init(priv);
1145 #endif
1146 aca->initialized = true;
1147 dev_info(priv->dev, "aca init done\n");
1148 return 0;
1149 }
1150
1151 static int aca_max_gpio(struct dc_ep_priv *priv)
1152 {
1153 return fls(rd32(PADC_AVAIL));
1154 }
1155
1156 void dc_aca_info_init(struct dc_ep_priv *priv)
1157 {
1158 struct dc_aca *aca = to_aca(priv);
1159
1160 aca->initialized = false;
1161 spin_lock_init(&aca->clk_lock);
1162 spin_lock_init(&aca->rcu_lock);
1163 mutex_init(&aca->pin_lock);
1164 aca->max_gpio = aca_max_gpio(priv);
1165 }
1166
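/*
 * Expands to a return statement in the caller: returns the big-endian
 * register alias (<addr>_BE) when 'endian' is ACA_BIG_ENDIAN, the normal
 * address otherwise.
 */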
1167 #define ACA_ENDIAN_ADDR(addr, endian) \
1168 { \
1169 if (endian == ACA_BIG_ENDIAN) \
1170 return addr##_BE; \
1171 else \
1172 return addr; \
1173 }
1174
1175 u32 aca_umt_msg_addr(struct dc_ep_priv *priv, u32 endian, u32 type)
1176 {
1177 switch (type) {
1178 case ACA_TXIN:
1179 ACA_ENDIAN_ADDR(TXIN_HD_ACCUM_ADD, endian);
1180 case ACA_RXIN:
1181 ACA_ENDIAN_ADDR(RXIN_HD_ACCUM_ADD, endian);
1182 case ACA_TXOUT:
1183 ACA_ENDIAN_ADDR(TXOUT_HD_ACCUM_SUB, endian);
1184 case ACA_RXOUT:
1185 ACA_ENDIAN_ADDR(RXOUT_HD_ACCUM_SUB, endian);
1186 default:
1187 ACA_ENDIAN_ADDR(RXIN_HD_ACCUM_ADD, endian);
1188 }
1189 }
1190
1191 void dc_aca_event_addr_get(struct dc_ep_priv *priv,
1192 struct aca_event_reg_addr *regs)
1193 {
1194 regs->txin_acc_sub = TXIN_ACA_ACCUM_SUB;
1195 regs->txout_acc_add = TXOUT_ACA_ACCUM_ADD;
1196 regs->rxin_acc_sub = RXIN_ACA_ACCUM_SUB;
1197 regs->rxout_acc_add = RXOUT_ACA_ACCUM_ADD;
1198 }
1199
1200 void dc_aca_txin_sub_ack(struct dc_ep_priv *priv, u32 val)
1201 {
1202 wr32(val, TXIN_ACA_ACCUM_SUB);
1203 }
1204
1205 u32 dc_aca_txin_hd_cnt(struct dc_ep_priv *priv)
1206 {
1207 return rd32(TXIN_ACA_HD_ACC_CNT);
1208 }
1209