2efbba304ae766700728292e4a730a21cbe09988
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.9 / 813-qe-support-layerscape.patch
1 From 2ab544f7e943c63c300933d34815e78451cc0c26 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:37:56 +0800
4 Subject: [PATCH 25/30] qe: support layerscape
5
6 This is an integrated patch adding QUICC Engine (QE) support for NXP Layerscape platforms: it moves the QE interrupt controller from drivers/soc/fsl/qe/qe_ic.c to drivers/irqchip/irq-qeic.c, converts it to the IRQCHIP_DECLARE probing model with iowrite32be/ioread32be accessors, and relaxes the QUICC_ENGINE Kconfig dependency from FSL_SOC && PPC32 to OF && HAS_IOMEM so it builds on ARM64.
7
8 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++++--------
12 drivers/net/wan/fsl_ucc_hdlc.c | 4 +-
13 drivers/soc/fsl/qe/Kconfig | 2 +-
14 drivers/soc/fsl/qe/Makefile | 2 +-
15 drivers/soc/fsl/qe/qe.c | 80 +++--
16 drivers/soc/fsl/qe/qe_ic.h | 103 ------
17 drivers/soc/fsl/qe/qe_io.c | 42 +--
18 drivers/soc/fsl/qe/qe_tdm.c | 8 +-
19 drivers/soc/fsl/qe/ucc.c | 10 +-
20 drivers/soc/fsl/qe/ucc_fast.c | 74 ++--
21 drivers/tty/serial/ucc_uart.c | 1 +
22 include/soc/fsl/qe/qe.h | 1 -
23 include/soc/fsl/qe/qe_ic.h | 139 --------
24 13 files changed, 359 insertions(+), 496 deletions(-)
25 rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
26 delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
27 delete mode 100644 include/soc/fsl/qe/qe_ic.h
28
29 --- a/drivers/soc/fsl/qe/qe_ic.c
30 +++ /dev/null
31 @@ -1,512 +0,0 @@
32 -/*
33 - * arch/powerpc/sysdev/qe_lib/qe_ic.c
34 - *
35 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
36 - *
37 - * Author: Li Yang <leoli@freescale.com>
38 - * Based on code from Shlomi Gridish <gridish@freescale.com>
39 - *
40 - * QUICC ENGINE Interrupt Controller
41 - *
42 - * This program is free software; you can redistribute it and/or modify it
43 - * under the terms of the GNU General Public License as published by the
44 - * Free Software Foundation; either version 2 of the License, or (at your
45 - * option) any later version.
46 - */
47 -
48 -#include <linux/of_irq.h>
49 -#include <linux/of_address.h>
50 -#include <linux/kernel.h>
51 -#include <linux/init.h>
52 -#include <linux/errno.h>
53 -#include <linux/reboot.h>
54 -#include <linux/slab.h>
55 -#include <linux/stddef.h>
56 -#include <linux/sched.h>
57 -#include <linux/signal.h>
58 -#include <linux/device.h>
59 -#include <linux/spinlock.h>
60 -#include <asm/irq.h>
61 -#include <asm/io.h>
62 -#include <soc/fsl/qe/qe_ic.h>
63 -
64 -#include "qe_ic.h"
65 -
66 -static DEFINE_RAW_SPINLOCK(qe_ic_lock);
67 -
68 -static struct qe_ic_info qe_ic_info[] = {
69 - [1] = {
70 - .mask = 0x00008000,
71 - .mask_reg = QEIC_CIMR,
72 - .pri_code = 0,
73 - .pri_reg = QEIC_CIPWCC,
74 - },
75 - [2] = {
76 - .mask = 0x00004000,
77 - .mask_reg = QEIC_CIMR,
78 - .pri_code = 1,
79 - .pri_reg = QEIC_CIPWCC,
80 - },
81 - [3] = {
82 - .mask = 0x00002000,
83 - .mask_reg = QEIC_CIMR,
84 - .pri_code = 2,
85 - .pri_reg = QEIC_CIPWCC,
86 - },
87 - [10] = {
88 - .mask = 0x00000040,
89 - .mask_reg = QEIC_CIMR,
90 - .pri_code = 1,
91 - .pri_reg = QEIC_CIPZCC,
92 - },
93 - [11] = {
94 - .mask = 0x00000020,
95 - .mask_reg = QEIC_CIMR,
96 - .pri_code = 2,
97 - .pri_reg = QEIC_CIPZCC,
98 - },
99 - [12] = {
100 - .mask = 0x00000010,
101 - .mask_reg = QEIC_CIMR,
102 - .pri_code = 3,
103 - .pri_reg = QEIC_CIPZCC,
104 - },
105 - [13] = {
106 - .mask = 0x00000008,
107 - .mask_reg = QEIC_CIMR,
108 - .pri_code = 4,
109 - .pri_reg = QEIC_CIPZCC,
110 - },
111 - [14] = {
112 - .mask = 0x00000004,
113 - .mask_reg = QEIC_CIMR,
114 - .pri_code = 5,
115 - .pri_reg = QEIC_CIPZCC,
116 - },
117 - [15] = {
118 - .mask = 0x00000002,
119 - .mask_reg = QEIC_CIMR,
120 - .pri_code = 6,
121 - .pri_reg = QEIC_CIPZCC,
122 - },
123 - [20] = {
124 - .mask = 0x10000000,
125 - .mask_reg = QEIC_CRIMR,
126 - .pri_code = 3,
127 - .pri_reg = QEIC_CIPRTA,
128 - },
129 - [25] = {
130 - .mask = 0x00800000,
131 - .mask_reg = QEIC_CRIMR,
132 - .pri_code = 0,
133 - .pri_reg = QEIC_CIPRTB,
134 - },
135 - [26] = {
136 - .mask = 0x00400000,
137 - .mask_reg = QEIC_CRIMR,
138 - .pri_code = 1,
139 - .pri_reg = QEIC_CIPRTB,
140 - },
141 - [27] = {
142 - .mask = 0x00200000,
143 - .mask_reg = QEIC_CRIMR,
144 - .pri_code = 2,
145 - .pri_reg = QEIC_CIPRTB,
146 - },
147 - [28] = {
148 - .mask = 0x00100000,
149 - .mask_reg = QEIC_CRIMR,
150 - .pri_code = 3,
151 - .pri_reg = QEIC_CIPRTB,
152 - },
153 - [32] = {
154 - .mask = 0x80000000,
155 - .mask_reg = QEIC_CIMR,
156 - .pri_code = 0,
157 - .pri_reg = QEIC_CIPXCC,
158 - },
159 - [33] = {
160 - .mask = 0x40000000,
161 - .mask_reg = QEIC_CIMR,
162 - .pri_code = 1,
163 - .pri_reg = QEIC_CIPXCC,
164 - },
165 - [34] = {
166 - .mask = 0x20000000,
167 - .mask_reg = QEIC_CIMR,
168 - .pri_code = 2,
169 - .pri_reg = QEIC_CIPXCC,
170 - },
171 - [35] = {
172 - .mask = 0x10000000,
173 - .mask_reg = QEIC_CIMR,
174 - .pri_code = 3,
175 - .pri_reg = QEIC_CIPXCC,
176 - },
177 - [36] = {
178 - .mask = 0x08000000,
179 - .mask_reg = QEIC_CIMR,
180 - .pri_code = 4,
181 - .pri_reg = QEIC_CIPXCC,
182 - },
183 - [40] = {
184 - .mask = 0x00800000,
185 - .mask_reg = QEIC_CIMR,
186 - .pri_code = 0,
187 - .pri_reg = QEIC_CIPYCC,
188 - },
189 - [41] = {
190 - .mask = 0x00400000,
191 - .mask_reg = QEIC_CIMR,
192 - .pri_code = 1,
193 - .pri_reg = QEIC_CIPYCC,
194 - },
195 - [42] = {
196 - .mask = 0x00200000,
197 - .mask_reg = QEIC_CIMR,
198 - .pri_code = 2,
199 - .pri_reg = QEIC_CIPYCC,
200 - },
201 - [43] = {
202 - .mask = 0x00100000,
203 - .mask_reg = QEIC_CIMR,
204 - .pri_code = 3,
205 - .pri_reg = QEIC_CIPYCC,
206 - },
207 -};
208 -
209 -static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
210 -{
211 - return in_be32(base + (reg >> 2));
212 -}
213 -
214 -static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
215 - u32 value)
216 -{
217 - out_be32(base + (reg >> 2), value);
218 -}
219 -
220 -static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
221 -{
222 - return irq_get_chip_data(virq);
223 -}
224 -
225 -static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
226 -{
227 - return irq_data_get_irq_chip_data(d);
228 -}
229 -
230 -static void qe_ic_unmask_irq(struct irq_data *d)
231 -{
232 - struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
233 - unsigned int src = irqd_to_hwirq(d);
234 - unsigned long flags;
235 - u32 temp;
236 -
237 - raw_spin_lock_irqsave(&qe_ic_lock, flags);
238 -
239 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
240 - qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
241 - temp | qe_ic_info[src].mask);
242 -
243 - raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
244 -}
245 -
246 -static void qe_ic_mask_irq(struct irq_data *d)
247 -{
248 - struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
249 - unsigned int src = irqd_to_hwirq(d);
250 - unsigned long flags;
251 - u32 temp;
252 -
253 - raw_spin_lock_irqsave(&qe_ic_lock, flags);
254 -
255 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
256 - qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
257 - temp & ~qe_ic_info[src].mask);
258 -
259 - /* Flush the above write before enabling interrupts; otherwise,
260 - * spurious interrupts will sometimes happen. To be 100% sure
261 - * that the write has reached the device before interrupts are
262 - * enabled, the mask register would have to be read back; however,
263 - * this is not required for correctness, only to avoid wasting
264 - * time on a large number of spurious interrupts. In testing,
265 - * a sync reduced the observed spurious interrupts to zero.
266 - */
267 - mb();
268 -
269 - raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
270 -}
271 -
272 -static struct irq_chip qe_ic_irq_chip = {
273 - .name = "QEIC",
274 - .irq_unmask = qe_ic_unmask_irq,
275 - .irq_mask = qe_ic_mask_irq,
276 - .irq_mask_ack = qe_ic_mask_irq,
277 -};
278 -
279 -static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
280 - enum irq_domain_bus_token bus_token)
281 -{
282 - /* Exact match, unless qe_ic node is NULL */
283 - struct device_node *of_node = irq_domain_get_of_node(h);
284 - return of_node == NULL || of_node == node;
285 -}
286 -
287 -static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
288 - irq_hw_number_t hw)
289 -{
290 - struct qe_ic *qe_ic = h->host_data;
291 - struct irq_chip *chip;
292 -
293 - if (hw >= ARRAY_SIZE(qe_ic_info)) {
294 - pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
295 - return -EINVAL;
296 - }
297 -
298 - if (qe_ic_info[hw].mask == 0) {
299 - printk(KERN_ERR "Can't map reserved IRQ\n");
300 - return -EINVAL;
301 - }
302 - /* Default chip */
303 - chip = &qe_ic->hc_irq;
304 -
305 - irq_set_chip_data(virq, qe_ic);
306 - irq_set_status_flags(virq, IRQ_LEVEL);
307 -
308 - irq_set_chip_and_handler(virq, chip, handle_level_irq);
309 -
310 - return 0;
311 -}
312 -
313 -static const struct irq_domain_ops qe_ic_host_ops = {
314 - .match = qe_ic_host_match,
315 - .map = qe_ic_host_map,
316 - .xlate = irq_domain_xlate_onetwocell,
317 -};
318 -
319 -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
320 -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
321 -{
322 - int irq;
323 -
324 - BUG_ON(qe_ic == NULL);
325 -
326 - /* get the interrupt source vector. */
327 - irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
328 -
329 - if (irq == 0)
330 - return NO_IRQ;
331 -
332 - return irq_linear_revmap(qe_ic->irqhost, irq);
333 -}
334 -
335 -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
336 -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
337 -{
338 - int irq;
339 -
340 - BUG_ON(qe_ic == NULL);
341 -
342 - /* get the interrupt source vector. */
343 - irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
344 -
345 - if (irq == 0)
346 - return NO_IRQ;
347 -
348 - return irq_linear_revmap(qe_ic->irqhost, irq);
349 -}
350 -
351 -void __init qe_ic_init(struct device_node *node, unsigned int flags,
352 - void (*low_handler)(struct irq_desc *desc),
353 - void (*high_handler)(struct irq_desc *desc))
354 -{
355 - struct qe_ic *qe_ic;
356 - struct resource res;
357 - u32 temp = 0, ret, high_active = 0;
358 -
359 - ret = of_address_to_resource(node, 0, &res);
360 - if (ret)
361 - return;
362 -
363 - qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
364 - if (qe_ic == NULL)
365 - return;
366 -
367 - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
368 - &qe_ic_host_ops, qe_ic);
369 - if (qe_ic->irqhost == NULL) {
370 - kfree(qe_ic);
371 - return;
372 - }
373 -
374 - qe_ic->regs = ioremap(res.start, resource_size(&res));
375 -
376 - qe_ic->hc_irq = qe_ic_irq_chip;
377 -
378 - qe_ic->virq_high = irq_of_parse_and_map(node, 0);
379 - qe_ic->virq_low = irq_of_parse_and_map(node, 1);
380 -
381 - if (qe_ic->virq_low == NO_IRQ) {
382 - printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
383 - kfree(qe_ic);
384 - return;
385 - }
386 -
387 - /* default priority scheme is grouped. If spread mode is */
388 - /* required, configure cicr accordingly. */
389 - if (flags & QE_IC_SPREADMODE_GRP_W)
390 - temp |= CICR_GWCC;
391 - if (flags & QE_IC_SPREADMODE_GRP_X)
392 - temp |= CICR_GXCC;
393 - if (flags & QE_IC_SPREADMODE_GRP_Y)
394 - temp |= CICR_GYCC;
395 - if (flags & QE_IC_SPREADMODE_GRP_Z)
396 - temp |= CICR_GZCC;
397 - if (flags & QE_IC_SPREADMODE_GRP_RISCA)
398 - temp |= CICR_GRTA;
399 - if (flags & QE_IC_SPREADMODE_GRP_RISCB)
400 - temp |= CICR_GRTB;
401 -
402 - /* choose destination signal for highest priority interrupt */
403 - if (flags & QE_IC_HIGH_SIGNAL) {
404 - temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
405 - high_active = 1;
406 - }
407 -
408 - qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
409 -
410 - irq_set_handler_data(qe_ic->virq_low, qe_ic);
411 - irq_set_chained_handler(qe_ic->virq_low, low_handler);
412 -
413 - if (qe_ic->virq_high != NO_IRQ &&
414 - qe_ic->virq_high != qe_ic->virq_low) {
415 - irq_set_handler_data(qe_ic->virq_high, qe_ic);
416 - irq_set_chained_handler(qe_ic->virq_high, high_handler);
417 - }
418 -}
419 -
420 -void qe_ic_set_highest_priority(unsigned int virq, int high)
421 -{
422 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
423 - unsigned int src = virq_to_hw(virq);
424 - u32 temp = 0;
425 -
426 - temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
427 -
428 - temp &= ~CICR_HP_MASK;
429 - temp |= src << CICR_HP_SHIFT;
430 -
431 - temp &= ~CICR_HPIT_MASK;
432 - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
433 -
434 - qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
435 -}
436 -
437 -/* Set Priority level within its group, from 1 to 8 */
438 -int qe_ic_set_priority(unsigned int virq, unsigned int priority)
439 -{
440 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
441 - unsigned int src = virq_to_hw(virq);
442 - u32 temp;
443 -
444 - if (priority > 8 || priority == 0)
445 - return -EINVAL;
446 - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
447 - "%s: Invalid hw irq number for QEIC\n", __func__))
448 - return -EINVAL;
449 - if (qe_ic_info[src].pri_reg == 0)
450 - return -EINVAL;
451 -
452 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
453 -
454 - if (priority < 4) {
455 - temp &= ~(0x7 << (32 - priority * 3));
456 - temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
457 - } else {
458 - temp &= ~(0x7 << (24 - priority * 3));
459 - temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
460 - }
461 -
462 - qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
463 -
464 - return 0;
465 -}
466 -
467 -/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
468 -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
469 -{
470 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
471 - unsigned int src = virq_to_hw(virq);
472 - u32 temp, control_reg = QEIC_CICNR, shift = 0;
473 -
474 - if (priority > 2 || priority == 0)
475 - return -EINVAL;
476 - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
477 - "%s: Invalid hw irq number for QEIC\n", __func__))
478 - return -EINVAL;
479 -
480 - switch (qe_ic_info[src].pri_reg) {
481 - case QEIC_CIPZCC:
482 - shift = CICNR_ZCC1T_SHIFT;
483 - break;
484 - case QEIC_CIPWCC:
485 - shift = CICNR_WCC1T_SHIFT;
486 - break;
487 - case QEIC_CIPYCC:
488 - shift = CICNR_YCC1T_SHIFT;
489 - break;
490 - case QEIC_CIPXCC:
491 - shift = CICNR_XCC1T_SHIFT;
492 - break;
493 - case QEIC_CIPRTA:
494 - shift = CRICR_RTA1T_SHIFT;
495 - control_reg = QEIC_CRICR;
496 - break;
497 - case QEIC_CIPRTB:
498 - shift = CRICR_RTB1T_SHIFT;
499 - control_reg = QEIC_CRICR;
500 - break;
501 - default:
502 - return -EINVAL;
503 - }
504 -
505 - shift += (2 - priority) * 2;
506 - temp = qe_ic_read(qe_ic->regs, control_reg);
507 - temp &= ~(SIGNAL_MASK << shift);
508 - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
509 - qe_ic_write(qe_ic->regs, control_reg, temp);
510 -
511 - return 0;
512 -}
513 -
514 -static struct bus_type qe_ic_subsys = {
515 - .name = "qe_ic",
516 - .dev_name = "qe_ic",
517 -};
518 -
519 -static struct device device_qe_ic = {
520 - .id = 0,
521 - .bus = &qe_ic_subsys,
522 -};
523 -
524 -static int __init init_qe_ic_sysfs(void)
525 -{
526 - int rc;
527 -
528 - printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
529 -
530 - rc = subsys_system_register(&qe_ic_subsys, NULL);
531 - if (rc) {
532 - printk(KERN_ERR "Failed registering qe_ic sys class\n");
533 - return -ENODEV;
534 - }
535 - rc = device_register(&device_qe_ic);
536 - if (rc) {
537 - printk(KERN_ERR "Failed registering qe_ic sys device\n");
538 - return -ENODEV;
539 - }
540 - return 0;
541 -}
542 -
543 -subsys_initcall(init_qe_ic_sysfs);
544 --- /dev/null
545 +++ b/drivers/irqchip/irq-qeic.c
546 @@ -0,0 +1,605 @@
547 +/*
548 + * drivers/irqchip/irq-qeic.c
549 + *
550 + * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
551 + *
552 + * Author: Li Yang <leoli@freescale.com>
553 + * Based on code from Shlomi Gridish <gridish@freescale.com>
554 + *
555 + * QUICC ENGINE Interrupt Controller
556 + *
557 + * This program is free software; you can redistribute it and/or modify it
558 + * under the terms of the GNU General Public License as published by the
559 + * Free Software Foundation; either version 2 of the License, or (at your
560 + * option) any later version.
561 + */
562 +
563 +#include <linux/of_irq.h>
564 +#include <linux/of_address.h>
565 +#include <linux/kernel.h>
566 +#include <linux/init.h>
567 +#include <linux/irqdomain.h>
568 +#include <linux/irqchip.h>
569 +#include <linux/errno.h>
570 +#include <linux/of_address.h>
571 +#include <linux/of_irq.h>
572 +#include <linux/reboot.h>
573 +#include <linux/slab.h>
574 +#include <linux/stddef.h>
575 +#include <linux/sched.h>
576 +#include <linux/signal.h>
577 +#include <linux/device.h>
578 +#include <linux/spinlock.h>
579 +#include <linux/irq.h>
580 +#include <asm/io.h>
581 +
582 +#define NR_QE_IC_INTS 64
583 +
584 +/* QE IC registers offset */
585 +#define QEIC_CICR 0x00
586 +#define QEIC_CIVEC 0x04
587 +#define QEIC_CRIPNR 0x08
588 +#define QEIC_CIPNR 0x0c
589 +#define QEIC_CIPXCC 0x10
590 +#define QEIC_CIPYCC 0x14
591 +#define QEIC_CIPWCC 0x18
592 +#define QEIC_CIPZCC 0x1c
593 +#define QEIC_CIMR 0x20
594 +#define QEIC_CRIMR 0x24
595 +#define QEIC_CICNR 0x28
596 +#define QEIC_CIPRTA 0x30
597 +#define QEIC_CIPRTB 0x34
598 +#define QEIC_CRICR 0x3c
599 +#define QEIC_CHIVEC 0x60
600 +
601 +/* Interrupt priority registers */
602 +#define CIPCC_SHIFT_PRI0 29
603 +#define CIPCC_SHIFT_PRI1 26
604 +#define CIPCC_SHIFT_PRI2 23
605 +#define CIPCC_SHIFT_PRI3 20
606 +#define CIPCC_SHIFT_PRI4 13
607 +#define CIPCC_SHIFT_PRI5 10
608 +#define CIPCC_SHIFT_PRI6 7
609 +#define CIPCC_SHIFT_PRI7 4
610 +
611 +/* CICR priority modes */
612 +#define CICR_GWCC 0x00040000
613 +#define CICR_GXCC 0x00020000
614 +#define CICR_GYCC 0x00010000
615 +#define CICR_GZCC 0x00080000
616 +#define CICR_GRTA 0x00200000
617 +#define CICR_GRTB 0x00400000
618 +#define CICR_HPIT_SHIFT 8
619 +#define CICR_HPIT_MASK 0x00000300
620 +#define CICR_HP_SHIFT 24
621 +#define CICR_HP_MASK 0x3f000000
622 +
623 +/* CICNR */
624 +#define CICNR_WCC1T_SHIFT 20
625 +#define CICNR_ZCC1T_SHIFT 28
626 +#define CICNR_YCC1T_SHIFT 12
627 +#define CICNR_XCC1T_SHIFT 4
628 +
629 +/* CRICR */
630 +#define CRICR_RTA1T_SHIFT 20
631 +#define CRICR_RTB1T_SHIFT 28
632 +
633 +/* Signal indicator */
634 +#define SIGNAL_MASK 3
635 +#define SIGNAL_HIGH 2
636 +#define SIGNAL_LOW 0
637 +
638 +#define NUM_OF_QE_IC_GROUPS 6
639 +
640 +/* Flags when we init the QE IC */
641 +#define QE_IC_SPREADMODE_GRP_W 0x00000001
642 +#define QE_IC_SPREADMODE_GRP_X 0x00000002
643 +#define QE_IC_SPREADMODE_GRP_Y 0x00000004
644 +#define QE_IC_SPREADMODE_GRP_Z 0x00000008
645 +#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
646 +#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
647 +
648 +#define QE_IC_LOW_SIGNAL 0x00000100
649 +#define QE_IC_HIGH_SIGNAL 0x00000200
650 +
651 +#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
652 +#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
653 +#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
654 +#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
655 +#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
656 +#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
657 +#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
658 +#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
659 +#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
660 +#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
661 +#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
662 +#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
663 +#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
664 +
665 +/* QE interrupt sources groups */
666 +enum qe_ic_grp_id {
667 + QE_IC_GRP_W = 0, /* QE interrupt controller group W */
668 + QE_IC_GRP_X, /* QE interrupt controller group X */
669 + QE_IC_GRP_Y, /* QE interrupt controller group Y */
670 + QE_IC_GRP_Z, /* QE interrupt controller group Z */
671 + QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
672 + QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
673 +};
674 +
675 +struct qe_ic {
676 + /* Control registers offset */
677 + u32 __iomem *regs;
678 +
679 + /* The remapper for this QEIC */
680 + struct irq_domain *irqhost;
681 +
682 + /* The "linux" controller struct */
683 + struct irq_chip hc_irq;
684 +
685 + /* VIRQ numbers of QE high/low irqs */
686 + unsigned int virq_high;
687 + unsigned int virq_low;
688 +};
689 +
690 +/*
691 + * QE interrupt controller internal structure
692 + */
693 +struct qe_ic_info {
694 + /* location of this source at the QIMR register. */
695 + u32 mask;
696 +
697 + /* Mask register offset */
698 + u32 mask_reg;
699 +
700 + /*
701 + * for grouped interrupts sources - the interrupt
702 + * code as appears at the group priority register
703 + */
704 + u8 pri_code;
705 +
706 + /* Group priority register offset */
707 + u32 pri_reg;
708 +};
709 +
710 +static DEFINE_RAW_SPINLOCK(qe_ic_lock);
711 +
712 +static struct qe_ic_info qe_ic_info[] = {
713 + [1] = {
714 + .mask = 0x00008000,
715 + .mask_reg = QEIC_CIMR,
716 + .pri_code = 0,
717 + .pri_reg = QEIC_CIPWCC,
718 + },
719 + [2] = {
720 + .mask = 0x00004000,
721 + .mask_reg = QEIC_CIMR,
722 + .pri_code = 1,
723 + .pri_reg = QEIC_CIPWCC,
724 + },
725 + [3] = {
726 + .mask = 0x00002000,
727 + .mask_reg = QEIC_CIMR,
728 + .pri_code = 2,
729 + .pri_reg = QEIC_CIPWCC,
730 + },
731 + [10] = {
732 + .mask = 0x00000040,
733 + .mask_reg = QEIC_CIMR,
734 + .pri_code = 1,
735 + .pri_reg = QEIC_CIPZCC,
736 + },
737 + [11] = {
738 + .mask = 0x00000020,
739 + .mask_reg = QEIC_CIMR,
740 + .pri_code = 2,
741 + .pri_reg = QEIC_CIPZCC,
742 + },
743 + [12] = {
744 + .mask = 0x00000010,
745 + .mask_reg = QEIC_CIMR,
746 + .pri_code = 3,
747 + .pri_reg = QEIC_CIPZCC,
748 + },
749 + [13] = {
750 + .mask = 0x00000008,
751 + .mask_reg = QEIC_CIMR,
752 + .pri_code = 4,
753 + .pri_reg = QEIC_CIPZCC,
754 + },
755 + [14] = {
756 + .mask = 0x00000004,
757 + .mask_reg = QEIC_CIMR,
758 + .pri_code = 5,
759 + .pri_reg = QEIC_CIPZCC,
760 + },
761 + [15] = {
762 + .mask = 0x00000002,
763 + .mask_reg = QEIC_CIMR,
764 + .pri_code = 6,
765 + .pri_reg = QEIC_CIPZCC,
766 + },
767 + [20] = {
768 + .mask = 0x10000000,
769 + .mask_reg = QEIC_CRIMR,
770 + .pri_code = 3,
771 + .pri_reg = QEIC_CIPRTA,
772 + },
773 + [25] = {
774 + .mask = 0x00800000,
775 + .mask_reg = QEIC_CRIMR,
776 + .pri_code = 0,
777 + .pri_reg = QEIC_CIPRTB,
778 + },
779 + [26] = {
780 + .mask = 0x00400000,
781 + .mask_reg = QEIC_CRIMR,
782 + .pri_code = 1,
783 + .pri_reg = QEIC_CIPRTB,
784 + },
785 + [27] = {
786 + .mask = 0x00200000,
787 + .mask_reg = QEIC_CRIMR,
788 + .pri_code = 2,
789 + .pri_reg = QEIC_CIPRTB,
790 + },
791 + [28] = {
792 + .mask = 0x00100000,
793 + .mask_reg = QEIC_CRIMR,
794 + .pri_code = 3,
795 + .pri_reg = QEIC_CIPRTB,
796 + },
797 + [32] = {
798 + .mask = 0x80000000,
799 + .mask_reg = QEIC_CIMR,
800 + .pri_code = 0,
801 + .pri_reg = QEIC_CIPXCC,
802 + },
803 + [33] = {
804 + .mask = 0x40000000,
805 + .mask_reg = QEIC_CIMR,
806 + .pri_code = 1,
807 + .pri_reg = QEIC_CIPXCC,
808 + },
809 + [34] = {
810 + .mask = 0x20000000,
811 + .mask_reg = QEIC_CIMR,
812 + .pri_code = 2,
813 + .pri_reg = QEIC_CIPXCC,
814 + },
815 + [35] = {
816 + .mask = 0x10000000,
817 + .mask_reg = QEIC_CIMR,
818 + .pri_code = 3,
819 + .pri_reg = QEIC_CIPXCC,
820 + },
821 + [36] = {
822 + .mask = 0x08000000,
823 + .mask_reg = QEIC_CIMR,
824 + .pri_code = 4,
825 + .pri_reg = QEIC_CIPXCC,
826 + },
827 + [40] = {
828 + .mask = 0x00800000,
829 + .mask_reg = QEIC_CIMR,
830 + .pri_code = 0,
831 + .pri_reg = QEIC_CIPYCC,
832 + },
833 + [41] = {
834 + .mask = 0x00400000,
835 + .mask_reg = QEIC_CIMR,
836 + .pri_code = 1,
837 + .pri_reg = QEIC_CIPYCC,
838 + },
839 + [42] = {
840 + .mask = 0x00200000,
841 + .mask_reg = QEIC_CIMR,
842 + .pri_code = 2,
843 + .pri_reg = QEIC_CIPYCC,
844 + },
845 + [43] = {
846 + .mask = 0x00100000,
847 + .mask_reg = QEIC_CIMR,
848 + .pri_code = 3,
849 + .pri_reg = QEIC_CIPYCC,
850 + },
851 +};
852 +
853 +static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
854 +{
855 + return ioread32be(base + (reg >> 2));
856 +}
857 +
858 +static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
859 + u32 value)
860 +{
861 + iowrite32be(value, base + (reg >> 2));
862 +}
863 +
864 +static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
865 +{
866 + return irq_get_chip_data(virq);
867 +}
868 +
869 +static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
870 +{
871 + return irq_data_get_irq_chip_data(d);
872 +}
873 +
874 +static void qe_ic_unmask_irq(struct irq_data *d)
875 +{
876 + struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
877 + unsigned int src = irqd_to_hwirq(d);
878 + unsigned long flags;
879 + u32 temp;
880 +
881 + raw_spin_lock_irqsave(&qe_ic_lock, flags);
882 +
883 + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
884 + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
885 + temp | qe_ic_info[src].mask);
886 +
887 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
888 +}
889 +
890 +static void qe_ic_mask_irq(struct irq_data *d)
891 +{
892 + struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
893 + unsigned int src = irqd_to_hwirq(d);
894 + unsigned long flags;
895 + u32 temp;
896 +
897 + raw_spin_lock_irqsave(&qe_ic_lock, flags);
898 +
899 + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
900 + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
901 + temp & ~qe_ic_info[src].mask);
902 +
903 + /* Flush the above write before enabling interrupts; otherwise,
904 + * spurious interrupts will sometimes happen. To be 100% sure
905 + * that the write has reached the device before interrupts are
906 + * enabled, the mask register would have to be read back; however,
907 + * this is not required for correctness, only to avoid wasting
908 + * time on a large number of spurious interrupts. In testing,
909 + * a sync reduced the observed spurious interrupts to zero.
910 + */
911 + mb();
912 +
913 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
914 +}
915 +
916 +static struct irq_chip qe_ic_irq_chip = {
917 + .name = "QEIC",
918 + .irq_unmask = qe_ic_unmask_irq,
919 + .irq_mask = qe_ic_mask_irq,
920 + .irq_mask_ack = qe_ic_mask_irq,
921 +};
922 +
923 +static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
924 + enum irq_domain_bus_token bus_token)
925 +{
926 + /* Exact match, unless qe_ic node is NULL */
927 + struct device_node *of_node = irq_domain_get_of_node(h);
928 + return of_node == NULL || of_node == node;
929 +}
930 +
931 +static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
932 + irq_hw_number_t hw)
933 +{
934 + struct qe_ic *qe_ic = h->host_data;
935 + struct irq_chip *chip;
936 +
937 + if (hw >= ARRAY_SIZE(qe_ic_info)) {
938 + pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
939 + return -EINVAL;
940 + }
941 +
942 + if (qe_ic_info[hw].mask == 0) {
943 + printk(KERN_ERR "Can't map reserved IRQ\n");
944 + return -EINVAL;
945 + }
946 + /* Default chip */
947 + chip = &qe_ic->hc_irq;
948 +
949 + irq_set_chip_data(virq, qe_ic);
950 + irq_set_status_flags(virq, IRQ_LEVEL);
951 +
952 + irq_set_chip_and_handler(virq, chip, handle_level_irq);
953 +
954 + return 0;
955 +}
956 +
957 +static const struct irq_domain_ops qe_ic_host_ops = {
958 + .match = qe_ic_host_match,
959 + .map = qe_ic_host_map,
960 + .xlate = irq_domain_xlate_onetwocell,
961 +};
962 +
963 +/* Return an interrupt vector or 0 if no interrupt is pending. */
964 +static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
965 +{
966 + int irq;
967 +
968 + BUG_ON(qe_ic == NULL);
969 +
970 + /* get the interrupt source vector. */
971 + irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
972 +
973 + if (irq == 0)
974 + return 0;
975 +
976 + return irq_linear_revmap(qe_ic->irqhost, irq);
977 +}
978 +
979 +/* Return an interrupt vector or 0 if no interrupt is pending. */
980 +static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
981 +{
982 + int irq;
983 +
984 + BUG_ON(qe_ic == NULL);
985 +
986 + /* get the interrupt source vector. */
987 + irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
988 +
989 + if (irq == 0)
990 + return 0;
991 +
992 + return irq_linear_revmap(qe_ic->irqhost, irq);
993 +}
994 +
995 +static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
996 +{
997 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
998 + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
999 +
1000 + if (cascade_irq != 0)
1001 + generic_handle_irq(cascade_irq);
1002 +}
1003 +
1004 +static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
1005 +{
1006 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1007 + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1008 +
1009 + if (cascade_irq != 0)
1010 + generic_handle_irq(cascade_irq);
1011 +}
1012 +
1013 +static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
1014 +{
1015 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1016 + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1017 + struct irq_chip *chip = irq_desc_get_chip(desc);
1018 +
1019 + if (cascade_irq != 0)
1020 + generic_handle_irq(cascade_irq);
1021 +
1022 + chip->irq_eoi(&desc->irq_data);
1023 +}
1024 +
1025 +static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
1026 +{
1027 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1028 + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1029 + struct irq_chip *chip = irq_desc_get_chip(desc);
1030 +
1031 + if (cascade_irq != 0)
1032 + generic_handle_irq(cascade_irq);
1033 +
1034 + chip->irq_eoi(&desc->irq_data);
1035 +}
1036 +
1037 +static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
1038 +{
1039 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1040 + unsigned int cascade_irq;
1041 + struct irq_chip *chip = irq_desc_get_chip(desc);
1042 +
1043 + cascade_irq = qe_ic_get_high_irq(qe_ic);
1044 + if (cascade_irq == 0)
1045 + cascade_irq = qe_ic_get_low_irq(qe_ic);
1046 +
1047 + if (cascade_irq != 0)
1048 + generic_handle_irq(cascade_irq);
1049 +
1050 + chip->irq_eoi(&desc->irq_data);
1051 +}
1052 +
1053 +static int __init qe_ic_init(struct device_node *node, unsigned int flags)
1054 +{
1055 + struct qe_ic *qe_ic;
1056 + struct resource res;
1057 + u32 temp = 0, high_active = 0;
1058 + int ret = 0;
1059 +
1060 + if (!node)
1061 + return -ENODEV;
1062 +
1063 + ret = of_address_to_resource(node, 0, &res);
1064 + if (ret) {
1065 + ret = -ENODEV;
1066 + goto err_put_node;
1067 + }
1068 +
1069 + qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
1070 + if (qe_ic == NULL) {
1071 + ret = -ENOMEM;
1072 + goto err_put_node;
1073 + }
1074 +
1075 + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
1076 + &qe_ic_host_ops, qe_ic);
1077 + if (qe_ic->irqhost == NULL) {
1078 + ret = -ENOMEM;
1079 + goto err_free_qe_ic;
1080 + }
1081 +
1082 + qe_ic->regs = ioremap(res.start, resource_size(&res));
1083 +
1084 + qe_ic->hc_irq = qe_ic_irq_chip;
1085 +
1086 + qe_ic->virq_high = irq_of_parse_and_map(node, 0);
1087 + qe_ic->virq_low = irq_of_parse_and_map(node, 1);
1088 +
1089 + if (qe_ic->virq_low == 0) {
1090 + pr_err("Failed to map QE_IC low IRQ\n");
1091 + ret = -ENOMEM;
1092 + goto err_domain_remove;
1093 + }
1094 +
1095 + /* default priority scheme is grouped. If spread mode is */
1096 + /* required, configure cicr accordingly. */
1097 + if (flags & QE_IC_SPREADMODE_GRP_W)
1098 + temp |= CICR_GWCC;
1099 + if (flags & QE_IC_SPREADMODE_GRP_X)
1100 + temp |= CICR_GXCC;
1101 + if (flags & QE_IC_SPREADMODE_GRP_Y)
1102 + temp |= CICR_GYCC;
1103 + if (flags & QE_IC_SPREADMODE_GRP_Z)
1104 + temp |= CICR_GZCC;
1105 + if (flags & QE_IC_SPREADMODE_GRP_RISCA)
1106 + temp |= CICR_GRTA;
1107 + if (flags & QE_IC_SPREADMODE_GRP_RISCB)
1108 + temp |= CICR_GRTB;
1109 +
1110 + /* choose destination signal for highest priority interrupt */
1111 + if (flags & QE_IC_HIGH_SIGNAL) {
1112 + temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
1113 + high_active = 1;
1114 + }
1115 +
1116 + qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
1117 +
1118 + irq_set_handler_data(qe_ic->virq_low, qe_ic);
1119 + irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
1120 +
1121 + if (qe_ic->virq_high != 0 &&
1122 + qe_ic->virq_high != qe_ic->virq_low) {
1123 + irq_set_handler_data(qe_ic->virq_high, qe_ic);
1124 + irq_set_chained_handler(qe_ic->virq_high,
1125 + qe_ic_cascade_high_mpic);
1126 + }
1127 + of_node_put(node);
1128 + return 0;
1129 +
1130 +err_domain_remove:
1131 + irq_domain_remove(qe_ic->irqhost);
1132 +err_free_qe_ic:
1133 + kfree(qe_ic);
1134 +err_put_node:
1135 + of_node_put(node);
1136 + return ret;
1137 +}
1138 +
1139 +static int __init init_qe_ic(struct device_node *node,
1140 + struct device_node *parent)
1141 +{
1142 + int ret;
1143 +
1144 + ret = qe_ic_init(node, 0);
1145 + if (ret)
1146 + return ret;
1147 +
1148 + return 0;
1149 +}
1150 +
1151 +IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
1152 --- a/drivers/net/wan/fsl_ucc_hdlc.c
1153 +++ b/drivers/net/wan/fsl_ucc_hdlc.c
1154 @@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk
1155 /* set bd status and length */
1156 bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
1157
1158 - iowrite16be(bd_status, &bd->status);
1159 iowrite16be(skb->len, &bd->length);
1160 + iowrite16be(bd_status, &bd->status);
1161
1162 /* Move to next BD in the ring */
1163 if (!(bd_status & T_W_S))
1164 @@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_
1165 struct sk_buff *skb;
1166 hdlc_device *hdlc = dev_to_hdlc(dev);
1167 struct qe_bd *bd;
1168 - u32 bd_status;
1169 + u16 bd_status;
1170 u16 length, howmany = 0;
1171 u8 *bdbuffer;
1172 int i;
1173 --- a/drivers/soc/fsl/qe/Kconfig
1174 +++ b/drivers/soc/fsl/qe/Kconfig
1175 @@ -4,7 +4,7 @@
1176
1177 config QUICC_ENGINE
1178 bool "Freescale QUICC Engine (QE) Support"
1179 - depends on FSL_SOC && PPC32
1180 + depends on OF && HAS_IOMEM
1181 select GENERIC_ALLOCATOR
1182 select CRC32
1183 help
1184 --- a/drivers/soc/fsl/qe/Makefile
1185 +++ b/drivers/soc/fsl/qe/Makefile
1186 @@ -1,7 +1,7 @@
1187 #
1188 # Makefile for the linux ppc-specific parts of QE
1189 #
1190 -obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
1191 +obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
1192 obj-$(CONFIG_CPM) += qe_common.o
1193 obj-$(CONFIG_UCC) += ucc.o
1194 obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
1195 --- a/drivers/soc/fsl/qe/qe.c
1196 +++ b/drivers/soc/fsl/qe/qe.c
1197 @@ -33,8 +33,6 @@
1198 #include <asm/pgtable.h>
1199 #include <soc/fsl/qe/immap_qe.h>
1200 #include <soc/fsl/qe/qe.h>
1201 -#include <asm/prom.h>
1202 -#include <asm/rheap.h>
1203
1204 static void qe_snums_init(void);
1205 static int qe_sdma_init(void);
1206 @@ -109,15 +107,27 @@ void qe_reset(void)
1207 panic("sdma init failed!");
1208 }
1209
1210 +/* issue commands to QE, return 0 on success while -EIO on error
1211 + *
1212 + * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
1213 + * @device: which sub-block will run the command, QE_CR_SUBBLOCK_UCCFAST1 - 8
1214 + * , QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
1215 + * QE_CR_SUBBLOCK_IDMA1 - 4 and such on.
1216 + * @mcn_protocol: specifies mode for the command for non-MCC, should be
1217 + * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
1218 + * and such on.
1219 + * @cmd_input: command related data.
1220 + */
1221 int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
1222 {
1223 unsigned long flags;
1224 u8 mcn_shift = 0, dev_shift = 0;
1225 - u32 ret;
1226 + int ret;
1227 + int i;
1228
1229 spin_lock_irqsave(&qe_lock, flags);
1230 if (cmd == QE_RESET) {
1231 - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
1232 + iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
1233 } else {
1234 if (cmd == QE_ASSIGN_PAGE) {
1235 /* Here device is the SNUM, not sub-block */
1236 @@ -134,20 +144,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
1237 mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
1238 }
1239
1240 - out_be32(&qe_immr->cp.cecdr, cmd_input);
1241 - out_be32(&qe_immr->cp.cecr,
1242 - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
1243 - mcn_protocol << mcn_shift));
1244 + iowrite32be(cmd_input, &qe_immr->cp.cecdr);
1245 + iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
1246 + (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
1247 }
1248
1249 /* wait for the QE_CR_FLG to clear */
1250 - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
1251 - 100, 0);
1252 + ret = -EIO;
1253 + for (i = 0; i < 100; i++) {
1254 + if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
1255 + ret = 0;
1256 + break;
1257 + }
1258 + udelay(1);
1259 + }
1260 +
1261 /* On timeout (e.g. failure), the expression will be false (ret == 0),
1262 otherwise it will be true (ret == 1). */
1263 spin_unlock_irqrestore(&qe_lock, flags);
1264
1265 - return ret == 1;
1266 + return ret;
1267 }
1268 EXPORT_SYMBOL(qe_issue_cmd);
1269
1270 @@ -166,8 +182,8 @@ static unsigned int brg_clk = 0;
1271 unsigned int qe_get_brg_clk(void)
1272 {
1273 struct device_node *qe;
1274 - int size;
1275 - const u32 *prop;
1276 + u32 val;
1277 + int ret;
1278
1279 if (brg_clk)
1280 return brg_clk;
1281 @@ -179,9 +195,9 @@ unsigned int qe_get_brg_clk(void)
1282 return brg_clk;
1283 }
1284
1285 - prop = of_get_property(qe, "brg-frequency", &size);
1286 - if (prop && size == sizeof(*prop))
1287 - brg_clk = *prop;
1288 + ret = of_property_read_u32(qe, "brg-frequency", &val);
1289 + if (!ret)
1290 + brg_clk = val;
1291
1292 of_node_put(qe);
1293
1294 @@ -221,7 +237,7 @@ int qe_setbrg(enum qe_clock brg, unsigne
1295 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
1296 QE_BRGC_ENABLE | div16;
1297
1298 - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
1299 + iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
1300
1301 return 0;
1302 }
1303 @@ -355,9 +371,9 @@ static int qe_sdma_init(void)
1304 return -ENOMEM;
1305 }
1306
1307 - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
1308 - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
1309 - (0x1 << QE_SDMR_CEN_SHIFT)));
1310 + iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
1311 + iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
1312 + &sdma->sdmr);
1313
1314 return 0;
1315 }
1316 @@ -395,14 +411,14 @@ static void qe_upload_microcode(const vo
1317 "uploading microcode '%s'\n", ucode->id);
1318
1319 /* Use auto-increment */
1320 - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
1321 - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
1322 + iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
1323 + QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
1324
1325 for (i = 0; i < be32_to_cpu(ucode->count); i++)
1326 - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
1327 + iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
1328
1329 /* Set I-RAM Ready Register */
1330 - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
1331 + iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
1332 }
1333
1334 /*
1335 @@ -487,7 +503,7 @@ int qe_upload_firmware(const struct qe_f
1336 * If the microcode calls for it, split the I-RAM.
1337 */
1338 if (!firmware->split)
1339 - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
1340 + qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
1341
1342 if (firmware->soc.model)
1343 printk(KERN_INFO
1344 @@ -521,11 +537,11 @@ int qe_upload_firmware(const struct qe_f
1345 u32 trap = be32_to_cpu(ucode->traps[j]);
1346
1347 if (trap)
1348 - out_be32(&qe_immr->rsp[i].tibcr[j], trap);
1349 + iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
1350 }
1351
1352 /* Enable traps */
1353 - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
1354 + iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
1355 }
1356
1357 qe_firmware_uploaded = 1;
1358 @@ -644,9 +660,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
1359 unsigned int qe_get_num_of_snums(void)
1360 {
1361 struct device_node *qe;
1362 - int size;
1363 unsigned int num_of_snums;
1364 - const u32 *prop;
1365 + u32 val;
1366 + int ret;
1367
1368 num_of_snums = 28; /* The default number of snum for threads is 28 */
1369 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
1370 @@ -660,9 +676,9 @@ unsigned int qe_get_num_of_snums(void)
1371 return num_of_snums;
1372 }
1373
1374 - prop = of_get_property(qe, "fsl,qe-num-snums", &size);
1375 - if (prop && size == sizeof(*prop)) {
1376 - num_of_snums = *prop;
1377 + ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val);
1378 + if (!ret) {
1379 + num_of_snums = val;
1380 if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
1381 /* No QE ever has fewer than 28 SNUMs */
1382 pr_err("QE: number of snum is invalid\n");
1383 --- a/drivers/soc/fsl/qe/qe_ic.h
1384 +++ /dev/null
1385 @@ -1,103 +0,0 @@
1386 -/*
1387 - * drivers/soc/fsl/qe/qe_ic.h
1388 - *
1389 - * QUICC ENGINE Interrupt Controller Header
1390 - *
1391 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
1392 - *
1393 - * Author: Li Yang <leoli@freescale.com>
1394 - * Based on code from Shlomi Gridish <gridish@freescale.com>
1395 - *
1396 - * This program is free software; you can redistribute it and/or modify it
1397 - * under the terms of the GNU General Public License as published by the
1398 - * Free Software Foundation; either version 2 of the License, or (at your
1399 - * option) any later version.
1400 - */
1401 -#ifndef _POWERPC_SYSDEV_QE_IC_H
1402 -#define _POWERPC_SYSDEV_QE_IC_H
1403 -
1404 -#include <soc/fsl/qe/qe_ic.h>
1405 -
1406 -#define NR_QE_IC_INTS 64
1407 -
1408 -/* QE IC registers offset */
1409 -#define QEIC_CICR 0x00
1410 -#define QEIC_CIVEC 0x04
1411 -#define QEIC_CRIPNR 0x08
1412 -#define QEIC_CIPNR 0x0c
1413 -#define QEIC_CIPXCC 0x10
1414 -#define QEIC_CIPYCC 0x14
1415 -#define QEIC_CIPWCC 0x18
1416 -#define QEIC_CIPZCC 0x1c
1417 -#define QEIC_CIMR 0x20
1418 -#define QEIC_CRIMR 0x24
1419 -#define QEIC_CICNR 0x28
1420 -#define QEIC_CIPRTA 0x30
1421 -#define QEIC_CIPRTB 0x34
1422 -#define QEIC_CRICR 0x3c
1423 -#define QEIC_CHIVEC 0x60
1424 -
1425 -/* Interrupt priority registers */
1426 -#define CIPCC_SHIFT_PRI0 29
1427 -#define CIPCC_SHIFT_PRI1 26
1428 -#define CIPCC_SHIFT_PRI2 23
1429 -#define CIPCC_SHIFT_PRI3 20
1430 -#define CIPCC_SHIFT_PRI4 13
1431 -#define CIPCC_SHIFT_PRI5 10
1432 -#define CIPCC_SHIFT_PRI6 7
1433 -#define CIPCC_SHIFT_PRI7 4
1434 -
1435 -/* CICR priority modes */
1436 -#define CICR_GWCC 0x00040000
1437 -#define CICR_GXCC 0x00020000
1438 -#define CICR_GYCC 0x00010000
1439 -#define CICR_GZCC 0x00080000
1440 -#define CICR_GRTA 0x00200000
1441 -#define CICR_GRTB 0x00400000
1442 -#define CICR_HPIT_SHIFT 8
1443 -#define CICR_HPIT_MASK 0x00000300
1444 -#define CICR_HP_SHIFT 24
1445 -#define CICR_HP_MASK 0x3f000000
1446 -
1447 -/* CICNR */
1448 -#define CICNR_WCC1T_SHIFT 20
1449 -#define CICNR_ZCC1T_SHIFT 28
1450 -#define CICNR_YCC1T_SHIFT 12
1451 -#define CICNR_XCC1T_SHIFT 4
1452 -
1453 -/* CRICR */
1454 -#define CRICR_RTA1T_SHIFT 20
1455 -#define CRICR_RTB1T_SHIFT 28
1456 -
1457 -/* Signal indicator */
1458 -#define SIGNAL_MASK 3
1459 -#define SIGNAL_HIGH 2
1460 -#define SIGNAL_LOW 0
1461 -
1462 -struct qe_ic {
1463 - /* Control registers offset */
1464 - volatile u32 __iomem *regs;
1465 -
1466 - /* The remapper for this QEIC */
1467 - struct irq_domain *irqhost;
1468 -
1469 - /* The "linux" controller struct */
1470 - struct irq_chip hc_irq;
1471 -
1472 - /* VIRQ numbers of QE high/low irqs */
1473 - unsigned int virq_high;
1474 - unsigned int virq_low;
1475 -};
1476 -
1477 -/*
1478 - * QE interrupt controller internal structure
1479 - */
1480 -struct qe_ic_info {
1481 - u32 mask; /* location of this source at the QIMR register. */
1482 - u32 mask_reg; /* Mask register offset */
1483 - u8 pri_code; /* for grouped interrupts sources - the interrupt
1484 - code as appears at the group priority register */
1485 - u32 pri_reg; /* Group priority register offset */
1486 -};
1487 -
1488 -#endif /* _POWERPC_SYSDEV_QE_IC_H */
1489 --- a/drivers/soc/fsl/qe/qe_io.c
1490 +++ b/drivers/soc/fsl/qe/qe_io.c
1491 @@ -22,8 +22,6 @@
1492
1493 #include <asm/io.h>
1494 #include <soc/fsl/qe/qe.h>
1495 -#include <asm/prom.h>
1496 -#include <sysdev/fsl_soc.h>
1497
1498 #undef DEBUG
1499
1500 @@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_r
1501 pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
1502
1503 /* Set open drain, if required */
1504 - tmp_val = in_be32(&par_io->cpodr);
1505 + tmp_val = ioread32be(&par_io->cpodr);
1506 if (open_drain)
1507 - out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
1508 + iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
1509 else
1510 - out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
1511 + iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
1512
1513 /* define direction */
1514 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
1515 - in_be32(&par_io->cpdir2) :
1516 - in_be32(&par_io->cpdir1);
1517 + ioread32be(&par_io->cpdir2) :
1518 + ioread32be(&par_io->cpdir1);
1519
1520 /* get all bits mask for 2 bit per port */
1521 pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
1522 @@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_r
1523
1524 /* clear and set 2 bits mask */
1525 if (pin > (QE_PIO_PINS / 2) - 1) {
1526 - out_be32(&par_io->cpdir2,
1527 - ~pin_mask2bits & tmp_val);
1528 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
1529 tmp_val &= ~pin_mask2bits;
1530 - out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
1531 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
1532 } else {
1533 - out_be32(&par_io->cpdir1,
1534 - ~pin_mask2bits & tmp_val);
1535 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
1536 tmp_val &= ~pin_mask2bits;
1537 - out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
1538 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
1539 }
1540 /* define pin assignment */
1541 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
1542 - in_be32(&par_io->cppar2) :
1543 - in_be32(&par_io->cppar1);
1544 + ioread32be(&par_io->cppar2) :
1545 + ioread32be(&par_io->cppar1);
1546
1547 new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
1548 (pin % (QE_PIO_PINS / 2) + 1) * 2));
1549 /* clear and set 2 bits mask */
1550 if (pin > (QE_PIO_PINS / 2) - 1) {
1551 - out_be32(&par_io->cppar2,
1552 - ~pin_mask2bits & tmp_val);
1553 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
1554 tmp_val &= ~pin_mask2bits;
1555 - out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
1556 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
1557 } else {
1558 - out_be32(&par_io->cppar1,
1559 - ~pin_mask2bits & tmp_val);
1560 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
1561 tmp_val &= ~pin_mask2bits;
1562 - out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
1563 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
1564 }
1565 }
1566 EXPORT_SYMBOL(__par_io_config_pin);
1567 @@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8
1568 /* calculate pin location */
1569 pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
1570
1571 - tmp_val = in_be32(&par_io[port].cpdata);
1572 + tmp_val = ioread32be(&par_io[port].cpdata);
1573
1574 if (val == 0) /* clear */
1575 - out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
1576 + iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
1577 else /* set */
1578 - out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
1579 + iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
1580
1581 return 0;
1582 }
1583 --- a/drivers/soc/fsl/qe/qe_tdm.c
1584 +++ b/drivers/soc/fsl/qe/qe_tdm.c
1585 @@ -227,10 +227,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
1586 &siram[siram_entry_id * 32 + 0x200 + i]);
1587 }
1588
1589 - setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
1590 - SIR_LAST);
1591 - setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
1592 - SIR_LAST);
1593 + qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
1594 + SIR_LAST);
1595 + qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
1596 + (utdm->num_of_ts - 1)], SIR_LAST);
1597
1598 /* Set SIxMR register */
1599 sixmr = SIMR_SAD(siram_entry_id);
1600 --- a/drivers/soc/fsl/qe/ucc.c
1601 +++ b/drivers/soc/fsl/qe/ucc.c
1602 @@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
1603 return -EINVAL;
1604
1605 spin_lock_irqsave(&cmxgcr_lock, flags);
1606 - clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
1607 + qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
1608 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
1609 spin_unlock_irqrestore(&cmxgcr_lock, flags);
1610
1611 @@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, e
1612 return -EINVAL;
1613 }
1614
1615 - clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
1616 + qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
1617 UCC_GUEMR_SET_RESERVED3 | speed);
1618
1619 return 0;
1620 @@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
1621 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
1622
1623 if (set)
1624 - setbits32(cmxucr, mask << shift);
1625 + qe_setbits32(cmxucr, mask << shift);
1626 else
1627 - clrbits32(cmxucr, mask << shift);
1628 + qe_clrbits32(cmxucr, mask << shift);
1629
1630 return 0;
1631 }
1632 @@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
1633 if (mode == COMM_DIR_RX)
1634 shift += 4;
1635
1636 - clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
1637 + qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
1638 clock_bits << shift);
1639
1640 return 0;
1641 --- a/drivers/soc/fsl/qe/ucc_fast.c
1642 +++ b/drivers/soc/fsl/qe/ucc_fast.c
1643 @@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
1644 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
1645
1646 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
1647 - &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
1648 + &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
1649 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
1650 - &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
1651 + &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
1652 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
1653 - &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
1654 + &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
1655 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
1656 - &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
1657 + &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
1658 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
1659 - &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
1660 + &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
1661 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
1662 - &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
1663 + &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
1664 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
1665 - &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
1666 + &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
1667 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
1668 - &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
1669 + &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
1670 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
1671 - &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
1672 + &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
1673 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
1674 - &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
1675 + &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
1676 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
1677 - &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
1678 + &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
1679 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
1680 - &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
1681 + &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
1682 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
1683 - &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
1684 + &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
1685 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
1686 - &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
1687 + &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
1688 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
1689 - &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
1690 + &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
1691 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
1692 - &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
1693 + &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
1694 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
1695 - &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
1696 + &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
1697 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
1698 - &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
1699 + &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
1700 }
1701 EXPORT_SYMBOL(ucc_fast_dump_regs);
1702
1703 @@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
1704
1705 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
1706 {
1707 - out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
1708 + iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
1709 }
1710 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
1711
1712 @@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_pri
1713 uf_regs = uccf->uf_regs;
1714
1715 /* Enable reception and/or transmission on this UCC. */
1716 - gumr = in_be32(&uf_regs->gumr);
1717 + gumr = ioread32be(&uf_regs->gumr);
1718 if (mode & COMM_DIR_TX) {
1719 gumr |= UCC_FAST_GUMR_ENT;
1720 uccf->enabled_tx = 1;
1721 @@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_pri
1722 gumr |= UCC_FAST_GUMR_ENR;
1723 uccf->enabled_rx = 1;
1724 }
1725 - out_be32(&uf_regs->gumr, gumr);
1726 + iowrite32be(gumr, &uf_regs->gumr);
1727 }
1728 EXPORT_SYMBOL(ucc_fast_enable);
1729
1730 @@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_pr
1731 uf_regs = uccf->uf_regs;
1732
1733 /* Disable reception and/or transmission on this UCC. */
1734 - gumr = in_be32(&uf_regs->gumr);
1735 + gumr = ioread32be(&uf_regs->gumr);
1736 if (mode & COMM_DIR_TX) {
1737 gumr &= ~UCC_FAST_GUMR_ENT;
1738 uccf->enabled_tx = 0;
1739 @@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_pr
1740 gumr &= ~UCC_FAST_GUMR_ENR;
1741 uccf->enabled_rx = 0;
1742 }
1743 - out_be32(&uf_regs->gumr, gumr);
1744 + iowrite32be(gumr, &uf_regs->gumr);
1745 }
1746 EXPORT_SYMBOL(ucc_fast_disable);
1747
1748 @@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info *
1749 gumr |= uf_info->tenc;
1750 gumr |= uf_info->tcrc;
1751 gumr |= uf_info->mode;
1752 - out_be32(&uf_regs->gumr, gumr);
1753 + iowrite32be(gumr, &uf_regs->gumr);
1754
1755 /* Allocate memory for Tx Virtual Fifo */
1756 uccf->ucc_fast_tx_virtual_fifo_base_offset =
1757 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
1758 - if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
1759 + if (IS_ERR_VALUE((unsigned long)uccf->
1760 + ucc_fast_tx_virtual_fifo_base_offset)) {
1761 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
1762 __func__);
1763 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
1764 @@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info *
1765 qe_muram_alloc(uf_info->urfs +
1766 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
1767 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
1768 - if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
1769 + if (IS_ERR_VALUE((unsigned long)uccf->
1770 + ucc_fast_rx_virtual_fifo_base_offset)) {
1771 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
1772 __func__);
1773 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
1774 @@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info *
1775 }
1776
1777 /* Set Virtual Fifo registers */
1778 - out_be16(&uf_regs->urfs, uf_info->urfs);
1779 - out_be16(&uf_regs->urfet, uf_info->urfet);
1780 - out_be16(&uf_regs->urfset, uf_info->urfset);
1781 - out_be16(&uf_regs->utfs, uf_info->utfs);
1782 - out_be16(&uf_regs->utfet, uf_info->utfet);
1783 - out_be16(&uf_regs->utftt, uf_info->utftt);
1784 + iowrite16be(uf_info->urfs, &uf_regs->urfs);
1785 + iowrite16be(uf_info->urfet, &uf_regs->urfet);
1786 + iowrite16be(uf_info->urfset, &uf_regs->urfset);
1787 + iowrite16be(uf_info->utfs, &uf_regs->utfs);
1788 + iowrite16be(uf_info->utfet, &uf_regs->utfet);
1789 + iowrite16be(uf_info->utftt, &uf_regs->utftt);
1790 /* utfb, urfb are offsets from MURAM base */
1791 - out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
1792 - out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
1793 + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
1794 + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
1795
1796 /* Mux clocking */
1797 /* Grant Support */
1798 @@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info *
1799 }
1800
1801 /* Set interrupt mask register at UCC level. */
1802 - out_be32(&uf_regs->uccm, uf_info->uccm_mask);
1803 + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
1804
1805 /* First, clear anything pending at UCC level,
1806 * otherwise, old garbage may come through
1807 * as soon as the dam is opened. */
1808
1809 /* Writing '1' clears */
1810 - out_be32(&uf_regs->ucce, 0xffffffff);
1811 + iowrite32be(0xffffffff, &uf_regs->ucce);
1812
1813 *uccf_ret = uccf;
1814 return 0;
1815 --- a/drivers/tty/serial/ucc_uart.c
1816 +++ b/drivers/tty/serial/ucc_uart.c
1817 @@ -34,6 +34,7 @@
1818 #include <soc/fsl/qe/ucc_slow.h>
1819
1820 #include <linux/firmware.h>
1821 +#include <asm/cpm.h>
1822 #include <asm/reg.h>
1823
1824 /*
1825 --- a/include/soc/fsl/qe/qe.h
1826 +++ b/include/soc/fsl/qe/qe.h
1827 @@ -21,7 +21,6 @@
1828 #include <linux/spinlock.h>
1829 #include <linux/errno.h>
1830 #include <linux/err.h>
1831 -#include <asm/cpm.h>
1832 #include <soc/fsl/qe/immap_qe.h>
1833 #include <linux/of.h>
1834 #include <linux/of_address.h>
1835 --- a/include/soc/fsl/qe/qe_ic.h
1836 +++ /dev/null
1837 @@ -1,139 +0,0 @@
1838 -/*
1839 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
1840 - *
1841 - * Authors: Shlomi Gridish <gridish@freescale.com>
1842 - * Li Yang <leoli@freescale.com>
1843 - *
1844 - * Description:
1845 - * QE IC external definitions and structure.
1846 - *
1847 - * This program is free software; you can redistribute it and/or modify it
1848 - * under the terms of the GNU General Public License as published by the
1849 - * Free Software Foundation; either version 2 of the License, or (at your
1850 - * option) any later version.
1851 - */
1852 -#ifndef _ASM_POWERPC_QE_IC_H
1853 -#define _ASM_POWERPC_QE_IC_H
1854 -
1855 -#include <linux/irq.h>
1856 -
1857 -struct device_node;
1858 -struct qe_ic;
1859 -
1860 -#define NUM_OF_QE_IC_GROUPS 6
1861 -
1862 -/* Flags when we init the QE IC */
1863 -#define QE_IC_SPREADMODE_GRP_W 0x00000001
1864 -#define QE_IC_SPREADMODE_GRP_X 0x00000002
1865 -#define QE_IC_SPREADMODE_GRP_Y 0x00000004
1866 -#define QE_IC_SPREADMODE_GRP_Z 0x00000008
1867 -#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
1868 -#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
1869 -
1870 -#define QE_IC_LOW_SIGNAL 0x00000100
1871 -#define QE_IC_HIGH_SIGNAL 0x00000200
1872 -
1873 -#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
1874 -#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
1875 -#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
1876 -#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
1877 -#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
1878 -#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
1879 -#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
1880 -#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
1881 -#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
1882 -#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
1883 -#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
1884 -#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
1885 -#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
1886 -
1887 -/* QE interrupt sources groups */
1888 -enum qe_ic_grp_id {
1889 - QE_IC_GRP_W = 0, /* QE interrupt controller group W */
1890 - QE_IC_GRP_X, /* QE interrupt controller group X */
1891 - QE_IC_GRP_Y, /* QE interrupt controller group Y */
1892 - QE_IC_GRP_Z, /* QE interrupt controller group Z */
1893 - QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
1894 - QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
1895 -};
1896 -
1897 -#ifdef CONFIG_QUICC_ENGINE
1898 -void qe_ic_init(struct device_node *node, unsigned int flags,
1899 - void (*low_handler)(struct irq_desc *desc),
1900 - void (*high_handler)(struct irq_desc *desc));
1901 -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
1902 -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
1903 -#else
1904 -static inline void qe_ic_init(struct device_node *node, unsigned int flags,
1905 - void (*low_handler)(struct irq_desc *desc),
1906 - void (*high_handler)(struct irq_desc *desc))
1907 -{}
1908 -static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
1909 -{ return 0; }
1910 -static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
1911 -{ return 0; }
1912 -#endif /* CONFIG_QUICC_ENGINE */
1913 -
1914 -void qe_ic_set_highest_priority(unsigned int virq, int high);
1915 -int qe_ic_set_priority(unsigned int virq, unsigned int priority);
1916 -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
1917 -
1918 -static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
1919 -{
1920 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1921 - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1922 -
1923 - if (cascade_irq != NO_IRQ)
1924 - generic_handle_irq(cascade_irq);
1925 -}
1926 -
1927 -static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
1928 -{
1929 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1930 - unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1931 -
1932 - if (cascade_irq != NO_IRQ)
1933 - generic_handle_irq(cascade_irq);
1934 -}
1935 -
1936 -static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
1937 -{
1938 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1939 - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1940 - struct irq_chip *chip = irq_desc_get_chip(desc);
1941 -
1942 - if (cascade_irq != NO_IRQ)
1943 - generic_handle_irq(cascade_irq);
1944 -
1945 - chip->irq_eoi(&desc->irq_data);
1946 -}
1947 -
1948 -static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
1949 -{
1950 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1951 - unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1952 - struct irq_chip *chip = irq_desc_get_chip(desc);
1953 -
1954 - if (cascade_irq != NO_IRQ)
1955 - generic_handle_irq(cascade_irq);
1956 -
1957 - chip->irq_eoi(&desc->irq_data);
1958 -}
1959 -
1960 -static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
1961 -{
1962 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1963 - unsigned int cascade_irq;
1964 - struct irq_chip *chip = irq_desc_get_chip(desc);
1965 -
1966 - cascade_irq = qe_ic_get_high_irq(qe_ic);
1967 - if (cascade_irq == NO_IRQ)
1968 - cascade_irq = qe_ic_get_low_irq(qe_ic);
1969 -
1970 - if (cascade_irq != NO_IRQ)
1971 - generic_handle_irq(cascade_irq);
1972 -
1973 - chip->irq_eoi(&desc->irq_data);
1974 -}
1975 -
1976 -#endif /* _ASM_POWERPC_QE_IC_H */