bcm63xx: add linux v5.15 support
target/linux/bcm63xx/patches-5.15/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch (openwrt/staging/ldir.git)
From d2d2489e0a4b740abd980e9d1cad952d15bc2d9e Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jogo@openwrt.org>
Date: Sun, 30 Nov 2014 14:55:02 +0100
Subject: [PATCH] MIPS: BCM63XX: switch to IRQ_DOMAIN

Now that we have working IRQ_DOMAIN drivers for both interrupt controllers,
switch to using them.

Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
 arch/mips/Kconfig       |   3 +
 arch/mips/bcm63xx/irq.c | 612 +++++++++---------------------------------------
 2 files changed, 108 insertions(+), 507 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -331,6 +331,9 @@ config BCM63XX
 	select SYNC_R4K
 	select DMA_NONCOHERENT
 	select IRQ_MIPS_CPU
+	select BCM6345_EXT_IRQ
+	select BCM6345_PERIPH_IRQ
+	select IRQ_DOMAIN
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -11,7 +11,9 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/spinlock.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-bcm6345-ext.h>
+#include <linux/irqchip/irq-bcm6345-periph.h>
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
 #include <bcm63xx_cpu.h>
@@ -19,535 +21,140 @@
 #include <bcm63xx_io.h>
 #include <bcm63xx_irq.h>
 
-
-static DEFINE_SPINLOCK(ipic_lock);
-static DEFINE_SPINLOCK(epic_lock);
-
-static u32 irq_stat_addr[2];
-static u32 irq_mask_addr[2];
-static void (*dispatch_internal)(int cpu);
-static int is_ext_irq_cascaded;
-static unsigned int ext_irq_count;
-static unsigned int ext_irq_start, ext_irq_end;
-static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
-static void (*internal_irq_mask)(struct irq_data *d);
-static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
-
-
-static inline u32 get_ext_irq_perf_reg(int irq)
-{
-	if (irq < 4)
-		return ext_irq_cfg_reg1;
-	return ext_irq_cfg_reg2;
-}
-
-static inline void handle_internal(int intbit)
-{
-	if (is_ext_irq_cascaded &&
-	    intbit >= ext_irq_start && intbit <= ext_irq_end)
-		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
-	else
-		do_IRQ(intbit + IRQ_INTERNAL_BASE);
-}
-
-static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
-				     const struct cpumask *m)
-{
-	bool enable = cpu_online(cpu);
-
-#ifdef CONFIG_SMP
-	if (m)
-		enable &= cpumask_test_cpu(cpu, m);
-	else if (irqd_affinity_was_set(d))
-		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
-#endif
-	return enable;
-}
-
-/*
- * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
- * prioritize any interrupt relatively to another. the static counter
- * will resume the loop where it ended the last time we left this
- * function.
- */
-
-#define BUILD_IPIC_INTERNAL(width)					\
-void __dispatch_internal_##width(int cpu)				\
-{									\
-	u32 pending[width / 32];					\
-	unsigned int src, tgt;						\
-	bool irqs_pending = false;					\
-	static unsigned int i[2];					\
-	unsigned int *next = &i[cpu];					\
-	unsigned long flags;						\
-									\
-	/* read registers in reverse order */				\
-	spin_lock_irqsave(&ipic_lock, flags);				\
-	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
-		u32 val;						\
-									\
-		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
-		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
-		pending[--tgt] = val;					\
-									\
-		if (val)						\
-			irqs_pending = true;				\
-	}								\
-	spin_unlock_irqrestore(&ipic_lock, flags);			\
-									\
-	if (!irqs_pending)						\
-		return;							\
-									\
-	while (1) {							\
-		unsigned int to_call = *next;				\
-									\
-		*next = (*next + 1) & (width - 1);			\
-		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
-			handle_internal(to_call);			\
-			break;						\
-		}							\
-	}								\
-}									\
-									\
-static void __internal_irq_mask_##width(struct irq_data *d)		\
-{									\
-	u32 val;							\
-	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
-	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
-	unsigned bit = irq & 0x1f;					\
-	unsigned long flags;						\
-	int cpu;							\
-									\
-	spin_lock_irqsave(&ipic_lock, flags);				\
-	for_each_present_cpu(cpu) {					\
-		if (!irq_mask_addr[cpu])				\
-			break;						\
-									\
-		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
-		val &= ~(1 << bit);					\
-		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
-	}								\
-	spin_unlock_irqrestore(&ipic_lock, flags);			\
-}									\
-									\
-static void __internal_irq_unmask_##width(struct irq_data *d,		\
-					  const struct cpumask *m)	\
-{									\
-	u32 val;							\
-	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
-	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
-	unsigned bit = irq & 0x1f;					\
-	unsigned long flags;						\
-	int cpu;							\
-									\
-	spin_lock_irqsave(&ipic_lock, flags);				\
-	for_each_present_cpu(cpu) {					\
-		if (!irq_mask_addr[cpu])				\
-			break;						\
-									\
-		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
-		if (enable_irq_for_cpu(cpu, d, m))			\
-			val |= (1 << bit);				\
-		else							\
-			val &= ~(1 << bit);				\
-		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
-	}								\
-	spin_unlock_irqrestore(&ipic_lock, flags);			\
-}
-
-BUILD_IPIC_INTERNAL(32);
-BUILD_IPIC_INTERNAL(64);
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	u32 cause;
-
-	do {
-		cause = read_c0_cause() & read_c0_status() & ST0_IM;
-
-		if (!cause)
-			break;
-
-		if (cause & CAUSEF_IP7)
-			do_IRQ(7);
-		if (cause & CAUSEF_IP0)
-			do_IRQ(0);
-		if (cause & CAUSEF_IP1)
-			do_IRQ(1);
-		if (cause & CAUSEF_IP2)
-			dispatch_internal(0);
-		if (is_ext_irq_cascaded) {
-			if (cause & CAUSEF_IP3)
-				dispatch_internal(1);
-		} else {
-			if (cause & CAUSEF_IP3)
-				do_IRQ(IRQ_EXT_0);
-			if (cause & CAUSEF_IP4)
-				do_IRQ(IRQ_EXT_1);
-			if (cause & CAUSEF_IP5)
-				do_IRQ(IRQ_EXT_2);
-			if (cause & CAUSEF_IP6)
-				do_IRQ(IRQ_EXT_3);
-		}
-	} while (1);
-}
-
-/*
- * internal IRQs operations: only mask/unmask on PERF irq mask
- * register.
- */
-static void bcm63xx_internal_irq_mask(struct irq_data *d)
-{
-	internal_irq_mask(d);
-}
-
-static void bcm63xx_internal_irq_unmask(struct irq_data *d)
-{
-	internal_irq_unmask(d, NULL);
-}
-
-/*
- * external IRQs operations: mask/unmask and clear on PERF external
- * irq control register.
- */
-static void bcm63xx_external_irq_mask(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
-	else
-		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	if (is_ext_irq_cascaded)
-		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
-}
-
-static void bcm63xx_external_irq_unmask(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
-	else
-		reg |= EXTIRQ_CFG_MASK(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	if (is_ext_irq_cascaded)
-		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
-				    NULL);
-}
-
-static void bcm63xx_external_irq_clear(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
-	else
-		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-}
-
-static int bcm63xx_external_irq_set_type(struct irq_data *d,
-					 unsigned int flow_type)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	int levelsense, sense, bothedge;
-	unsigned long flags;
-
-	flow_type &= IRQ_TYPE_SENSE_MASK;
-
-	if (flow_type == IRQ_TYPE_NONE)
-		flow_type = IRQ_TYPE_LEVEL_LOW;
-
-	levelsense = sense = bothedge = 0;
-	switch (flow_type) {
-	case IRQ_TYPE_EDGE_BOTH:
-		bothedge = 1;
-		break;
-
-	case IRQ_TYPE_EDGE_RISING:
-		sense = 1;
-		break;
-
-	case IRQ_TYPE_EDGE_FALLING:
-		break;
-
-	case IRQ_TYPE_LEVEL_HIGH:
-		levelsense = 1;
-		sense = 1;
-		break;
-
-	case IRQ_TYPE_LEVEL_LOW:
-		levelsense = 1;
-		break;
-
-	default:
-		pr_err("bogus flow type combination given !\n");
-		return -EINVAL;
-	}
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-	irq %= 4;
-
-	switch (bcm63xx_get_cpu_id()) {
-	case BCM6348_CPU_ID:
-		if (levelsense)
-			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
-		if (sense)
-			reg |= EXTIRQ_CFG_SENSE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
-		if (bothedge)
-			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
-		break;
-
-	case BCM3368_CPU_ID:
-	case BCM6328_CPU_ID:
-	case BCM6338_CPU_ID:
-	case BCM6345_CPU_ID:
-	case BCM6358_CPU_ID:
-	case BCM6362_CPU_ID:
-	case BCM6368_CPU_ID:
-		if (levelsense)
-			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
-		if (sense)
-			reg |= EXTIRQ_CFG_SENSE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_SENSE(irq);
-		if (bothedge)
-			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
-		break;
-	default:
-		BUG();
-	}
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	irqd_set_trigger_type(d, flow_type);
-	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		irq_set_handler_locked(d, handle_level_irq);
-	else
-		irq_set_handler_locked(d, handle_edge_irq);
-
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-#ifdef CONFIG_SMP
-static int bcm63xx_internal_set_affinity(struct irq_data *data,
-					 const struct cpumask *dest,
-					 bool force)
-{
-	if (!irqd_irq_disabled(data))
-		internal_irq_unmask(data, dest);
-
-	return 0;
-}
-#endif
-
-static struct irq_chip bcm63xx_internal_irq_chip = {
-	.name		= "bcm63xx_ipic",
-	.irq_mask	= bcm63xx_internal_irq_mask,
-	.irq_unmask	= bcm63xx_internal_irq_unmask,
-};
-
-static struct irq_chip bcm63xx_external_irq_chip = {
-	.name		= "bcm63xx_epic",
-	.irq_ack	= bcm63xx_external_irq_clear,
-
-	.irq_mask	= bcm63xx_external_irq_mask,
-	.irq_unmask	= bcm63xx_external_irq_unmask,
-
-	.irq_set_type	= bcm63xx_external_irq_set_type,
-};
-
-static void bcm63xx_init_irq(void)
+void __init arch_init_irq(void)
 {
-	int irq_bits;
-
-	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
-	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
-	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
-	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
+	void __iomem *periph_bases[2];
+	void __iomem *ext_intc_bases[2];
+	int periph_irq_count, periph_width, ext_irq_count, ext_shift;
+	int periph_irqs[2] = { 2, 3 };
+	int ext_irqs[6];
+
+	periph_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	periph_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	ext_intc_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	ext_intc_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
 
 	switch (bcm63xx_get_cpu_id()) {
 	case BCM3368_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
+		periph_bases[0] += PERF_IRQMASK_3368_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_3368;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_3368_EXT_IRQ0;
+		ext_irqs[1] = BCM_3368_EXT_IRQ1;
+		ext_irqs[2] = BCM_3368_EXT_IRQ2;
+		ext_irqs[3] = BCM_3368_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6328_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
-		irq_bits = 64;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
+		periph_bases[0] += PERF_IRQMASK_6328_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6328_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6328;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6328_EXT_IRQ0;
+		ext_irqs[1] = BCM_6328_EXT_IRQ1;
+		ext_irqs[2] = BCM_6328_EXT_IRQ2;
+		ext_irqs[3] = BCM_6328_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6338_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
+		periph_bases[0] += PERF_IRQMASK_6338_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6338;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 4;
 		break;
 	case BCM6345_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
+		periph_bases[0] += PERF_IRQMASK_6345_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6345;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 4;
 		break;
 	case BCM6348_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
+		periph_bases[0] += PERF_IRQMASK_6348_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6348;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 5;
 		break;
 	case BCM6358_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
-		irq_bits = 32;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
+		periph_bases[0] += PERF_IRQMASK_6358_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6358_REG(1);
+		periph_irq_count = 2;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6358;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6358_EXT_IRQ0;
+		ext_irqs[1] = BCM_6358_EXT_IRQ1;
+		ext_irqs[2] = BCM_6358_EXT_IRQ2;
+		ext_irqs[3] = BCM_6358_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6362_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
-		irq_bits = 64;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+		periph_bases[0] += PERF_IRQMASK_6362_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6362_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6362;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6362_EXT_IRQ0;
+		ext_irqs[1] = BCM_6362_EXT_IRQ1;
+		ext_irqs[2] = BCM_6362_EXT_IRQ2;
+		ext_irqs[3] = BCM_6362_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6368_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
-		irq_bits = 64;
-		ext_irq_count = 6;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
-		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
+		periph_bases[0] += PERF_IRQMASK_6368_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6368_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6368;
+		ext_intc_bases[1] += PERF_EXTIRQ_CFG_REG2_6368;
+		ext_irq_count = 6;
+		ext_irqs[0] = BCM_6368_EXT_IRQ0;
+		ext_irqs[1] = BCM_6368_EXT_IRQ1;
+		ext_irqs[2] = BCM_6368_EXT_IRQ2;
+		ext_irqs[3] = BCM_6368_EXT_IRQ3;
+		ext_irqs[4] = BCM_6368_EXT_IRQ4;
+		ext_irqs[5] = BCM_6368_EXT_IRQ5;
+		ext_shift = 4;
 		break;
 	default:
 		BUG();
 	}
 
-	if (irq_bits == 32) {
-		dispatch_internal = __dispatch_internal_32;
-		internal_irq_mask = __internal_irq_mask_32;
-		internal_irq_unmask = __internal_irq_unmask_32;
-	} else {
-		dispatch_internal = __dispatch_internal_64;
-		internal_irq_mask = __internal_irq_mask_64;
-		internal_irq_unmask = __internal_irq_unmask_64;
-	}
-}
-
-void __init arch_init_irq(void)
-{
-	int i, irq;
-
-	bcm63xx_init_irq();
 	mips_cpu_irq_init();
-	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
-		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
-					 handle_level_irq);
-
-	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
-		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
-					 handle_edge_irq);
-
-	if (!is_ext_irq_cascaded) {
-		for (i = 3; i < 3 + ext_irq_count; ++i) {
-			irq = MIPS_CPU_IRQ_BASE + i;
-			if (request_irq(irq, no_action, IRQF_NO_THREAD,
-					"cascade_extirq", NULL)) {
-				pr_err("Failed to request irq %d (cascade_extirq)\n",
-				       irq);
-			}
-		}
-	}
-
-	irq = MIPS_CPU_IRQ_BASE + 2;
-	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
-		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
-#ifdef CONFIG_SMP
-	if (is_ext_irq_cascaded) {
-		irq = MIPS_CPU_IRQ_BASE + 3;
-		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
-				NULL))
-			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
-		bcm63xx_internal_irq_chip.irq_set_affinity =
-			bcm63xx_internal_set_affinity;
-
-		cpumask_clear(irq_default_affinity);
-		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-	}
-#endif
+	bcm6345_periph_intc_init(periph_irq_count, periph_irqs, periph_bases,
+				 periph_width);
+	bcm6345_ext_intc_init(4, ext_irqs, ext_intc_bases[0], ext_shift);
+	if (ext_irq_count > 4)
+		bcm6345_ext_intc_init(2, &ext_irqs[4], ext_intc_bases[1],
+				      ext_shift);
 }