496cd3cacf03159a4d922e553953afd8e46af087
[openwrt/openwrt.git] / target / linux / layerscape / patches-5.4 / 814-qe-0005-QE-remove-PPCisms-for-QE.patch
1 From 3fb2f44e30cc3a151a0fa8160d8bf70062722ed7 Mon Sep 17 00:00:00 2001
2 From: Zhao Qiang <qiang.zhao@nxp.com>
3 Date: Thu, 27 Apr 2017 09:47:29 +0800
4 Subject: [PATCH] QE: remove PPCisms for QE
5
6 QE was initially supported only on PowerPC and depended on PPC.
7 Now it is supported on other platforms as well, so remove the PPCisms.
8
9 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
10 ---
11 drivers/soc/fsl/qe/Kconfig | 2 +-
12 drivers/soc/fsl/qe/qe.c | 70 +++++++++++++++++++++++++---------------
13 drivers/soc/fsl/qe/qe_io.c | 42 +++++++++++-------------
14 drivers/soc/fsl/qe/qe_tdm.c | 8 ++---
15 drivers/soc/fsl/qe/ucc.c | 10 +++---
16 drivers/soc/fsl/qe/ucc_fast.c | 74 ++++++++++++++++++++++---------------------
17 drivers/tty/serial/ucc_uart.c | 1 +
18 include/soc/fsl/qe/qe.h | 1 -
19 8 files changed, 112 insertions(+), 96 deletions(-)
20
21 --- a/drivers/soc/fsl/qe/Kconfig
22 +++ b/drivers/soc/fsl/qe/Kconfig
23 @@ -5,7 +5,7 @@
24
25 config QUICC_ENGINE
26 bool "QUICC Engine (QE) framework support"
27 - depends on FSL_SOC && PPC32
28 + depends on OF && HAS_IOMEM
29 select GENERIC_ALLOCATOR
30 select CRC32
31 help
32 --- a/drivers/soc/fsl/qe/qe.c
33 +++ b/drivers/soc/fsl/qe/qe.c
34 @@ -30,8 +30,6 @@
35 #include <asm/pgtable.h>
36 #include <soc/fsl/qe/immap_qe.h>
37 #include <soc/fsl/qe/qe.h>
38 -#include <asm/prom.h>
39 -#include <asm/rheap.h>
40
41 static void qe_snums_init(void);
42 static int qe_sdma_init(void);
43 @@ -104,15 +102,27 @@ void qe_reset(void)
44 panic("sdma init failed!");
45 }
46
47 +/* issue commands to QE, return 0 on success while -EIO on error
48 + *
49 + * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
50 + * @device: which sub-block will run the command, QE_CR_SUBBLOCK_UCCFAST1 - 8
51 + * , QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
52 + * QE_CR_SUBBLOCK_IDMA1 - 4 and so on.
53 + * @mcn_protocol: specifies mode for the command for non-MCC, should be
54 + * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
55 + * and so on.
56 + * @cmd_input: command related data.
57 + */
58 int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
59 {
60 unsigned long flags;
61 u8 mcn_shift = 0, dev_shift = 0;
62 - u32 ret;
63 + int ret;
64 + int i;
65
66 spin_lock_irqsave(&qe_lock, flags);
67 if (cmd == QE_RESET) {
68 - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
69 + iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
70 } else {
71 if (cmd == QE_ASSIGN_PAGE) {
72 /* Here device is the SNUM, not sub-block */
73 @@ -129,20 +139,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
74 mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
75 }
76
77 - out_be32(&qe_immr->cp.cecdr, cmd_input);
78 - out_be32(&qe_immr->cp.cecr,
79 - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
80 - mcn_protocol << mcn_shift));
81 + iowrite32be(cmd_input, &qe_immr->cp.cecdr);
82 + iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
83 + (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
84 }
85
86 /* wait for the QE_CR_FLG to clear */
87 - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
88 - 100, 0);
89 + ret = -EIO;
90 + for (i = 0; i < 100; i++) {
91 + if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
92 + ret = 0;
93 + break;
94 + }
95 + udelay(1);
96 + }
97 +
98 /* On timeout (e.g. failure), the expression will be false (ret == 0),
99 otherwise it will be true (ret == 1). */
100 spin_unlock_irqrestore(&qe_lock, flags);
101
102 - return ret == 1;
103 + return ret;
104 }
105 EXPORT_SYMBOL(qe_issue_cmd);
106
107 @@ -167,6 +183,8 @@ unsigned int qe_get_brg_clk(void)
108 int size;
109 const u32 *prop;
110 unsigned int mod;
111 + u32 val;
112 + int ret;
113
114 if (brg_clk)
115 return brg_clk;
116 @@ -175,9 +193,9 @@ unsigned int qe_get_brg_clk(void)
117 if (!qe)
118 return brg_clk;
119
120 - prop = of_get_property(qe, "brg-frequency", &size);
121 - if (prop && size == sizeof(*prop))
122 - brg_clk = *prop;
123 + ret = of_property_read_u32(qe, "brg-frequency", &val);
124 + if (!ret)
125 + brg_clk = val;
126
127 of_node_put(qe);
128
129 @@ -223,14 +241,16 @@ int qe_setbrg(enum qe_clock brg, unsigne
130 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
131 that the BRG divisor must be even if you're not using divide-by-16
132 mode. */
133 +#ifdef CONFIG_PPC
134 if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
135 if (!div16 && (divisor & 1) && (divisor > 3))
136 divisor++;
137 +#endif
138
139 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
140 QE_BRGC_ENABLE | div16;
141
142 - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
143 + iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
144
145 return 0;
146 }
147 @@ -377,9 +397,9 @@ static int qe_sdma_init(void)
148 return -ENOMEM;
149 }
150
151 - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
152 - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
153 - (0x1 << QE_SDMR_CEN_SHIFT)));
154 + iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
155 + iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
156 + &sdma->sdmr);
157
158 return 0;
159 }
160 @@ -417,14 +437,14 @@ static void qe_upload_microcode(const vo
161 "uploading microcode '%s'\n", ucode->id);
162
163 /* Use auto-increment */
164 - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
165 - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
166 + iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
167 + QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
168
169 for (i = 0; i < be32_to_cpu(ucode->count); i++)
170 - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
171 + iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
172
173 /* Set I-RAM Ready Register */
174 - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
175 + iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
176 }
177
178 /*
179 @@ -509,7 +529,7 @@ int qe_upload_firmware(const struct qe_f
180 * If the microcode calls for it, split the I-RAM.
181 */
182 if (!firmware->split)
183 - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
184 + qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
185
186 if (firmware->soc.model)
187 printk(KERN_INFO
188 @@ -543,11 +563,11 @@ int qe_upload_firmware(const struct qe_f
189 u32 trap = be32_to_cpu(ucode->traps[j]);
190
191 if (trap)
192 - out_be32(&qe_immr->rsp[i].tibcr[j], trap);
193 + iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
194 }
195
196 /* Enable traps */
197 - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
198 + iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
199 }
200
201 qe_firmware_uploaded = 1;
202 --- a/drivers/soc/fsl/qe/qe_io.c
203 +++ b/drivers/soc/fsl/qe/qe_io.c
204 @@ -18,8 +18,6 @@
205
206 #include <asm/io.h>
207 #include <soc/fsl/qe/qe.h>
208 -#include <asm/prom.h>
209 -#include <sysdev/fsl_soc.h>
210
211 #undef DEBUG
212
213 @@ -57,16 +55,16 @@ void __par_io_config_pin(struct qe_pio_r
214 pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
215
216 /* Set open drain, if required */
217 - tmp_val = in_be32(&par_io->cpodr);
218 + tmp_val = ioread32be(&par_io->cpodr);
219 if (open_drain)
220 - out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
221 + iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
222 else
223 - out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
224 + iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
225
226 /* define direction */
227 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
228 - in_be32(&par_io->cpdir2) :
229 - in_be32(&par_io->cpdir1);
230 + ioread32be(&par_io->cpdir2) :
231 + ioread32be(&par_io->cpdir1);
232
233 /* get all bits mask for 2 bit per port */
234 pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
235 @@ -78,34 +76,30 @@ void __par_io_config_pin(struct qe_pio_r
236
237 /* clear and set 2 bits mask */
238 if (pin > (QE_PIO_PINS / 2) - 1) {
239 - out_be32(&par_io->cpdir2,
240 - ~pin_mask2bits & tmp_val);
241 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
242 tmp_val &= ~pin_mask2bits;
243 - out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
244 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
245 } else {
246 - out_be32(&par_io->cpdir1,
247 - ~pin_mask2bits & tmp_val);
248 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
249 tmp_val &= ~pin_mask2bits;
250 - out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
251 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
252 }
253 /* define pin assignment */
254 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
255 - in_be32(&par_io->cppar2) :
256 - in_be32(&par_io->cppar1);
257 + ioread32be(&par_io->cppar2) :
258 + ioread32be(&par_io->cppar1);
259
260 new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
261 (pin % (QE_PIO_PINS / 2) + 1) * 2));
262 /* clear and set 2 bits mask */
263 if (pin > (QE_PIO_PINS / 2) - 1) {
264 - out_be32(&par_io->cppar2,
265 - ~pin_mask2bits & tmp_val);
266 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
267 tmp_val &= ~pin_mask2bits;
268 - out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
269 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
270 } else {
271 - out_be32(&par_io->cppar1,
272 - ~pin_mask2bits & tmp_val);
273 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
274 tmp_val &= ~pin_mask2bits;
275 - out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
276 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
277 }
278 }
279 EXPORT_SYMBOL(__par_io_config_pin);
280 @@ -133,12 +127,12 @@ int par_io_data_set(u8 port, u8 pin, u8
281 /* calculate pin location */
282 pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
283
284 - tmp_val = in_be32(&par_io[port].cpdata);
285 + tmp_val = ioread32be(&par_io[port].cpdata);
286
287 if (val == 0) /* clear */
288 - out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
289 + iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
290 else /* set */
291 - out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
292 + iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
293
294 return 0;
295 }
296 --- a/drivers/soc/fsl/qe/qe_tdm.c
297 +++ b/drivers/soc/fsl/qe/qe_tdm.c
298 @@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
299 &siram[siram_entry_id * 32 + 0x200 + i]);
300 }
301
302 - setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
303 - SIR_LAST);
304 - setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
305 - SIR_LAST);
306 + qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
307 + SIR_LAST);
308 + qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
309 + (utdm->num_of_ts - 1)], SIR_LAST);
310
311 /* Set SIxMR register */
312 sixmr = SIMR_SAD(siram_entry_id);
313 --- a/drivers/soc/fsl/qe/ucc.c
314 +++ b/drivers/soc/fsl/qe/ucc.c
315 @@ -35,7 +35,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
316 return -EINVAL;
317
318 spin_lock_irqsave(&cmxgcr_lock, flags);
319 - clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
320 + qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
321 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
322 spin_unlock_irqrestore(&cmxgcr_lock, flags);
323
324 @@ -80,7 +80,7 @@ int ucc_set_type(unsigned int ucc_num, e
325 return -EINVAL;
326 }
327
328 - clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
329 + qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
330 UCC_GUEMR_SET_RESERVED3 | speed);
331
332 return 0;
333 @@ -109,9 +109,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
334 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
335
336 if (set)
337 - setbits32(cmxucr, mask << shift);
338 + qe_setbits32(cmxucr, mask << shift);
339 else
340 - clrbits32(cmxucr, mask << shift);
341 + qe_clrbits32(cmxucr, mask << shift);
342
343 return 0;
344 }
345 @@ -207,7 +207,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
346 if (mode == COMM_DIR_RX)
347 shift += 4;
348
349 - clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
350 + qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
351 clock_bits << shift);
352
353 return 0;
354 --- a/drivers/soc/fsl/qe/ucc_fast.c
355 +++ b/drivers/soc/fsl/qe/ucc_fast.c
356 @@ -29,41 +29,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
357 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
358
359 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
360 - &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
361 + &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
362 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
363 - &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
364 + &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
365 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
366 - &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
367 + &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
368 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
369 - &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
370 + &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
371 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
372 - &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
373 + &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
374 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
375 - &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
376 + &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
377 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
378 - &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
379 + &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
380 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
381 - &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
382 + &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
383 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
384 - &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
385 + &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
386 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
387 - &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
388 + &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
389 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
390 - &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
391 + &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
392 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
393 - &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
394 + &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
395 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
396 - &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
397 + &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
398 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
399 - &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
400 + &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
401 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
402 - &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
403 + &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
404 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
405 - &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
406 + &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
407 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
408 - &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
409 + &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
410 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
411 - &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
412 + &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
413 }
414 EXPORT_SYMBOL(ucc_fast_dump_regs);
415
416 @@ -85,7 +85,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
417
418 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
419 {
420 - out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
421 + iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
422 }
423 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
424
425 @@ -97,7 +97,7 @@ void ucc_fast_enable(struct ucc_fast_pri
426 uf_regs = uccf->uf_regs;
427
428 /* Enable reception and/or transmission on this UCC. */
429 - gumr = in_be32(&uf_regs->gumr);
430 + gumr = ioread32be(&uf_regs->gumr);
431 if (mode & COMM_DIR_TX) {
432 gumr |= UCC_FAST_GUMR_ENT;
433 uccf->enabled_tx = 1;
434 @@ -106,7 +106,7 @@ void ucc_fast_enable(struct ucc_fast_pri
435 gumr |= UCC_FAST_GUMR_ENR;
436 uccf->enabled_rx = 1;
437 }
438 - out_be32(&uf_regs->gumr, gumr);
439 + iowrite32be(gumr, &uf_regs->gumr);
440 }
441 EXPORT_SYMBOL(ucc_fast_enable);
442
443 @@ -118,7 +118,7 @@ void ucc_fast_disable(struct ucc_fast_pr
444 uf_regs = uccf->uf_regs;
445
446 /* Disable reception and/or transmission on this UCC. */
447 - gumr = in_be32(&uf_regs->gumr);
448 + gumr = ioread32be(&uf_regs->gumr);
449 if (mode & COMM_DIR_TX) {
450 gumr &= ~UCC_FAST_GUMR_ENT;
451 uccf->enabled_tx = 0;
452 @@ -127,7 +127,7 @@ void ucc_fast_disable(struct ucc_fast_pr
453 gumr &= ~UCC_FAST_GUMR_ENR;
454 uccf->enabled_rx = 0;
455 }
456 - out_be32(&uf_regs->gumr, gumr);
457 + iowrite32be(gumr, &uf_regs->gumr);
458 }
459 EXPORT_SYMBOL(ucc_fast_disable);
460
461 @@ -259,12 +259,13 @@ int ucc_fast_init(struct ucc_fast_info *
462 gumr |= uf_info->tenc;
463 gumr |= uf_info->tcrc;
464 gumr |= uf_info->mode;
465 - out_be32(&uf_regs->gumr, gumr);
466 + iowrite32be(gumr, &uf_regs->gumr);
467
468 /* Allocate memory for Tx Virtual Fifo */
469 uccf->ucc_fast_tx_virtual_fifo_base_offset =
470 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
471 - if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
472 + if (IS_ERR_VALUE((unsigned long)uccf->
473 + ucc_fast_tx_virtual_fifo_base_offset)) {
474 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
475 __func__);
476 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
477 @@ -277,7 +278,8 @@ int ucc_fast_init(struct ucc_fast_info *
478 qe_muram_alloc(uf_info->urfs +
479 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
480 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
481 - if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
482 + if (IS_ERR_VALUE((unsigned long)uccf->
483 + ucc_fast_rx_virtual_fifo_base_offset)) {
484 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
485 __func__);
486 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
487 @@ -286,15 +288,15 @@ int ucc_fast_init(struct ucc_fast_info *
488 }
489
490 /* Set Virtual Fifo registers */
491 - out_be16(&uf_regs->urfs, uf_info->urfs);
492 - out_be16(&uf_regs->urfet, uf_info->urfet);
493 - out_be16(&uf_regs->urfset, uf_info->urfset);
494 - out_be16(&uf_regs->utfs, uf_info->utfs);
495 - out_be16(&uf_regs->utfet, uf_info->utfet);
496 - out_be16(&uf_regs->utftt, uf_info->utftt);
497 + iowrite16be(uf_info->urfs, &uf_regs->urfs);
498 + iowrite16be(uf_info->urfet, &uf_regs->urfet);
499 + iowrite16be(uf_info->urfset, &uf_regs->urfset);
500 + iowrite16be(uf_info->utfs, &uf_regs->utfs);
501 + iowrite16be(uf_info->utfet, &uf_regs->utfet);
502 + iowrite16be(uf_info->utftt, &uf_regs->utftt);
503 /* utfb, urfb are offsets from MURAM base */
504 - out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
505 - out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
506 + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
507 + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
508
509 /* Mux clocking */
510 /* Grant Support */
511 @@ -362,14 +364,14 @@ int ucc_fast_init(struct ucc_fast_info *
512 }
513
514 /* Set interrupt mask register at UCC level. */
515 - out_be32(&uf_regs->uccm, uf_info->uccm_mask);
516 + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
517
518 /* First, clear anything pending at UCC level,
519 * otherwise, old garbage may come through
520 * as soon as the dam is opened. */
521
522 /* Writing '1' clears */
523 - out_be32(&uf_regs->ucce, 0xffffffff);
524 + iowrite32be(0xffffffff, &uf_regs->ucce);
525
526 *uccf_ret = uccf;
527 return 0;
528 --- a/drivers/tty/serial/ucc_uart.c
529 +++ b/drivers/tty/serial/ucc_uart.c
530 @@ -32,6 +32,7 @@
531 #include <soc/fsl/qe/ucc_slow.h>
532
533 #include <linux/firmware.h>
534 +#include <asm/cpm.h>
535 #include <asm/reg.h>
536
537 /*
538 --- a/include/soc/fsl/qe/qe.h
539 +++ b/include/soc/fsl/qe/qe.h
540 @@ -17,7 +17,6 @@
541 #include <linux/spinlock.h>
542 #include <linux/errno.h>
543 #include <linux/err.h>
544 -#include <asm/cpm.h>
545 #include <soc/fsl/qe/immap_qe.h>
546 #include <linux/of.h>
547 #include <linux/of_address.h>