target/linux/brcm63xx/patches-3.14/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch
From d93661c9e164ccc41820eeb4f1881e59a34a9e5c Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jogo@openwrt.org>
Date: Sun, 30 Nov 2014 14:55:02 +0100
Subject: [PATCH 19/20] MIPS: BCM63XX: switch to IRQ_DOMAIN

Now that we have working IRQ_DOMAIN drivers for both interrupt controllers,
switch to using them.

Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
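Note: the two helpers called from the rewritten bcm63xx_init_irq() come from
the BCM6345 irqchip drivers selected above (BCM6345_PERIPH_IRQ and
BCM6345_EXT_IRQ), which are added earlier in this series. As a rough sketch,
their prototypes inferred from the call sites in this patch (parameter names
are illustrative; the authoritative declarations live in
<linux/irqchip/irq-bcm6345-periph.h> and <linux/irqchip/irq-bcm6345-ext.h>)
look like this:

	/* Sketch only: signatures inferred from how this patch calls them,
	 * not copied from the driver headers. */
	void bcm6345_periph_intc_init(int num_blocks, int *parent_irqs,
				      void __iomem **bases, int block_words);
	void bcm6345_ext_intc_init(int num_irqs, int *irqs,
				   void __iomem *base, int shift);

Here num_blocks/block_words describe how many mask/status register blocks
exist (one per CPU on the SMP-capable chips) and how many 32-bit words each
block covers, parent_irqs are the MIPS CPU IRQs the blocks cascade from
(IP2/IP3, matching the old dispatch code), and shift appears to select the
per-IRQ field width in the external IRQ config register (4 bits on most
chips, 5 on BCM6348).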
 arch/mips/Kconfig       |   3 +
 arch/mips/bcm63xx/irq.c | 608 ++++++++---------------------------------------
 2 files changed, 108 insertions(+), 503 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -135,6 +135,9 @@ config BCM63XX
 	select SYNC_R4K
 	select DMA_NONCOHERENT
 	select IRQ_CPU
+	select BCM6345_EXT_IRQ
+	select BCM6345_PERIPH_IRQ
+	select IRQ_DOMAIN
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -12,7 +12,9 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/irq.h>
-#include <linux/spinlock.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-bcm6345-ext.h>
+#include <linux/irqchip/irq-bcm6345-periph.h>
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
 #include <bcm63xx_cpu.h>
@@ -20,544 +22,144 @@
 #include <bcm63xx_io.h>
 #include <bcm63xx_irq.h>
 
-
-static DEFINE_SPINLOCK(ipic_lock);
-static DEFINE_SPINLOCK(epic_lock);
-
-static u32 irq_stat_addr[2];
-static u32 irq_mask_addr[2];
-static void (*dispatch_internal)(int cpu);
-static int is_ext_irq_cascaded;
-static unsigned int ext_irq_count;
-static unsigned int ext_irq_start, ext_irq_end;
-static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
-static void (*internal_irq_mask)(struct irq_data *d);
-static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
-
-
-static inline u32 get_ext_irq_perf_reg(int irq)
-{
-	if (irq < 4)
-		return ext_irq_cfg_reg1;
-	return ext_irq_cfg_reg2;
-}
-
-static inline void handle_internal(int intbit)
-{
-	if (is_ext_irq_cascaded &&
-	    intbit >= ext_irq_start && intbit <= ext_irq_end)
-		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
-	else
-		do_IRQ(intbit + IRQ_INTERNAL_BASE);
-}
-
-static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
-				     const struct cpumask *m)
-{
-	bool enable = cpu_online(cpu);
-
-#ifdef CONFIG_SMP
-	if (m)
-		enable &= cpu_isset(cpu, *m);
-	else if (irqd_affinity_was_set(d))
-		enable &= cpu_isset(cpu, *d->affinity);
-#endif
-	return enable;
-}
-
-/*
- * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
- * prioritize any interrupt relatively to another. the static counter
- * will resume the loop where it ended the last time we left this
- * function.
- */
-
-#define BUILD_IPIC_INTERNAL(width) \
-void __dispatch_internal_##width(int cpu) \
-{ \
-	u32 pending[width / 32]; \
-	unsigned int src, tgt; \
-	bool irqs_pending = false; \
-	static unsigned int i[2]; \
-	unsigned int *next = &i[cpu]; \
-	unsigned long flags; \
- \
-	/* read registers in reverse order */ \
-	spin_lock_irqsave(&ipic_lock, flags); \
-	for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
-		u32 val; \
- \
-		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
-		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
-		pending[--tgt] = val; \
- \
-		if (val) \
-			irqs_pending = true; \
-	} \
-	spin_unlock_irqrestore(&ipic_lock, flags); \
- \
-	if (!irqs_pending) \
-		return; \
- \
-	while (1) { \
-		unsigned int to_call = *next; \
- \
-		*next = (*next + 1) & (width - 1); \
-		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
-			handle_internal(to_call); \
-			break; \
-		} \
-	} \
-} \
- \
-static void __internal_irq_mask_##width(struct irq_data *d) \
-{ \
-	u32 val; \
-	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
-	unsigned reg = (irq / 32) ^ (width/32 - 1); \
-	unsigned bit = irq & 0x1f; \
-	unsigned long flags; \
-	int cpu; \
- \
-	spin_lock_irqsave(&ipic_lock, flags); \
-	for_each_present_cpu(cpu) { \
-		if (!irq_mask_addr[cpu]) \
-			break; \
- \
-		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
-		val &= ~(1 << bit); \
-		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
-	} \
-	spin_unlock_irqrestore(&ipic_lock, flags); \
-} \
- \
-static void __internal_irq_unmask_##width(struct irq_data *d, \
-		const struct cpumask *m) \
-{ \
-	u32 val; \
-	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
-	unsigned reg = (irq / 32) ^ (width/32 - 1); \
-	unsigned bit = irq & 0x1f; \
-	unsigned long flags; \
-	int cpu; \
- \
-	spin_lock_irqsave(&ipic_lock, flags); \
-	for_each_present_cpu(cpu) { \
-		if (!irq_mask_addr[cpu]) \
-			break; \
- \
-		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
-		if (enable_irq_for_cpu(cpu, d, m)) \
-			val |= (1 << bit); \
-		else \
-			val &= ~(1 << bit); \
-		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
-	} \
-	spin_unlock_irqrestore(&ipic_lock, flags); \
-}
-
-BUILD_IPIC_INTERNAL(32);
-BUILD_IPIC_INTERNAL(64);
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	u32 cause;
-
-	do {
-		cause = read_c0_cause() & read_c0_status() & ST0_IM;
-
-		if (!cause)
-			break;
-
-		if (cause & CAUSEF_IP7)
-			do_IRQ(7);
-		if (cause & CAUSEF_IP0)
-			do_IRQ(0);
-		if (cause & CAUSEF_IP1)
-			do_IRQ(1);
-		if (cause & CAUSEF_IP2)
-			dispatch_internal(0);
-		if (is_ext_irq_cascaded) {
-			if (cause & CAUSEF_IP3)
-				dispatch_internal(1);
-		} else {
-			if (cause & CAUSEF_IP3)
-				do_IRQ(IRQ_EXT_0);
-			if (cause & CAUSEF_IP4)
-				do_IRQ(IRQ_EXT_1);
-			if (cause & CAUSEF_IP5)
-				do_IRQ(IRQ_EXT_2);
-			if (cause & CAUSEF_IP6)
-				do_IRQ(IRQ_EXT_3);
-		}
-	} while (1);
-}
-
-/*
- * internal IRQs operations: only mask/unmask on PERF irq mask
- * register.
- */
-static void bcm63xx_internal_irq_mask(struct irq_data *d)
-{
-	internal_irq_mask(d);
-}
-
-static void bcm63xx_internal_irq_unmask(struct irq_data *d)
-{
-	internal_irq_unmask(d, NULL);
-}
-
-/*
- * external IRQs operations: mask/unmask and clear on PERF external
- * irq control register.
- */
-static void bcm63xx_external_irq_mask(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
-	else
-		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	if (is_ext_irq_cascaded)
-		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
-}
-
-static void bcm63xx_external_irq_unmask(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
-	else
-		reg |= EXTIRQ_CFG_MASK(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	if (is_ext_irq_cascaded)
-		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
-				    NULL);
-}
-
-static void bcm63xx_external_irq_clear(struct irq_data *d)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	unsigned long flags;
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-
-	if (BCMCPU_IS_6348())
-		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
-	else
-		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-}
-
-static int bcm63xx_external_irq_set_type(struct irq_data *d,
-					 unsigned int flow_type)
-{
-	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
-	u32 reg, regaddr;
-	int levelsense, sense, bothedge;
-	unsigned long flags;
-
-	flow_type &= IRQ_TYPE_SENSE_MASK;
-
-	if (flow_type == IRQ_TYPE_NONE)
-		flow_type = IRQ_TYPE_LEVEL_LOW;
-
-	levelsense = sense = bothedge = 0;
-	switch (flow_type) {
-	case IRQ_TYPE_EDGE_BOTH:
-		bothedge = 1;
-		break;
-
-	case IRQ_TYPE_EDGE_RISING:
-		sense = 1;
-		break;
-
-	case IRQ_TYPE_EDGE_FALLING:
-		break;
-
-	case IRQ_TYPE_LEVEL_HIGH:
-		levelsense = 1;
-		sense = 1;
-		break;
-
-	case IRQ_TYPE_LEVEL_LOW:
-		levelsense = 1;
-		break;
-
-	default:
-		printk(KERN_ERR "bogus flow type combination given !\n");
-		return -EINVAL;
-	}
-
-	regaddr = get_ext_irq_perf_reg(irq);
-	spin_lock_irqsave(&epic_lock, flags);
-	reg = bcm_perf_readl(regaddr);
-	irq %= 4;
-
-	switch (bcm63xx_get_cpu_id()) {
-	case BCM6348_CPU_ID:
-		if (levelsense)
-			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
-		if (sense)
-			reg |= EXTIRQ_CFG_SENSE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
-		if (bothedge)
-			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
-		else
-			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
-		break;
-
-	case BCM3368_CPU_ID:
-	case BCM6328_CPU_ID:
-	case BCM6338_CPU_ID:
-	case BCM6345_CPU_ID:
-	case BCM6358_CPU_ID:
-	case BCM6362_CPU_ID:
-	case BCM6368_CPU_ID:
-		if (levelsense)
-			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
-		if (sense)
-			reg |= EXTIRQ_CFG_SENSE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_SENSE(irq);
-		if (bothedge)
-			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
-		else
-			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
-		break;
-	default:
-		BUG();
-	}
-
-	bcm_perf_writel(reg, regaddr);
-	spin_unlock_irqrestore(&epic_lock, flags);
-
-	irqd_set_trigger_type(d, flow_type);
-	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
-	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
-
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-#ifdef CONFIG_SMP
-static int bcm63xx_internal_set_affinity(struct irq_data *data,
-					 const struct cpumask *dest,
-					 bool force)
-{
-	if (!irqd_irq_disabled(data))
-		internal_irq_unmask(data, dest);
-
-	return 0;
-}
-#endif
-
-static struct irq_chip bcm63xx_internal_irq_chip = {
-	.name = "bcm63xx_ipic",
-	.irq_mask = bcm63xx_internal_irq_mask,
-	.irq_unmask = bcm63xx_internal_irq_unmask,
-};
-
-static struct irq_chip bcm63xx_external_irq_chip = {
-	.name = "bcm63xx_epic",
-	.irq_ack = bcm63xx_external_irq_clear,
-
-	.irq_mask = bcm63xx_external_irq_mask,
-	.irq_unmask = bcm63xx_external_irq_unmask,
-
-	.irq_set_type = bcm63xx_external_irq_set_type,
-};
-
-static struct irqaction cpu_ip2_cascade_action = {
-	.handler = no_action,
-	.name = "cascade_ip2",
-	.flags = IRQF_NO_THREAD,
-};
-
-#ifdef CONFIG_SMP
-static struct irqaction cpu_ip3_cascade_action = {
-	.handler = no_action,
-	.name = "cascade_ip3",
-	.flags = IRQF_NO_THREAD,
-};
-#endif
-
-static struct irqaction cpu_ext_cascade_action = {
-	.handler = no_action,
-	.name = "cascade_extirq",
-	.flags = IRQF_NO_THREAD,
-};
-
 static void bcm63xx_init_irq(void)
 {
-	int irq_bits;
-
-	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
-	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
-	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
-	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
+	void __iomem *periph_bases[2];
+	void __iomem *ext_intc_bases[2];
+	int periph_irq_count, periph_width, ext_irq_count, ext_shift;
+	int periph_irqs[2] = { 2, 3 };
+	int ext_irqs[6];
+
+	periph_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	periph_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	ext_intc_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+	ext_intc_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
 
 	switch (bcm63xx_get_cpu_id()) {
 	case BCM3368_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
-		irq_stat_addr[1] = 0;
-		irq_stat_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
+		periph_bases[0] += PERF_IRQMASK_3368_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_3368;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_3368_EXT_IRQ0;
+		ext_irqs[1] = BCM_3368_EXT_IRQ1;
+		ext_irqs[2] = BCM_3368_EXT_IRQ2;
+		ext_irqs[3] = BCM_3368_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6328_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
-		irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1);
-		irq_bits = 64;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
+		periph_bases[0] += PERF_IRQMASK_6328_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6328_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6328;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6328_EXT_IRQ0;
+		ext_irqs[1] = BCM_6328_EXT_IRQ1;
+		ext_irqs[2] = BCM_6328_EXT_IRQ2;
+		ext_irqs[3] = BCM_6328_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6338_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
+		periph_bases[0] += PERF_IRQMASK_6338_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6338;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 4;
 		break;
 	case BCM6345_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
+		periph_bases[0] += PERF_IRQMASK_6345_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6345;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 4;
 		break;
 	case BCM6348_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
-		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
-		irq_stat_addr[1] = 0;
-		irq_mask_addr[1] = 0;
-		irq_bits = 32;
-		ext_irq_count = 4;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
+		periph_bases[0] += PERF_IRQMASK_6348_REG;
+		periph_irq_count = 1;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6348;
+		ext_irq_count = 4;
+		ext_irqs[0] = 3;
+		ext_irqs[1] = 4;
+		ext_irqs[2] = 5;
+		ext_irqs[3] = 6;
+		ext_shift = 5;
 		break;
 	case BCM6358_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
-		irq_bits = 32;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
+		periph_bases[0] += PERF_IRQMASK_6358_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6358_REG(1);
+		periph_irq_count = 2;
+		periph_width = 1;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6358;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6358_EXT_IRQ0;
+		ext_irqs[1] = BCM_6358_EXT_IRQ1;
+		ext_irqs[2] = BCM_6358_EXT_IRQ2;
+		ext_irqs[3] = BCM_6358_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6362_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
-		irq_bits = 64;
-		ext_irq_count = 4;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+		periph_bases[0] += PERF_IRQMASK_6362_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6362_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6362;
+		ext_irq_count = 4;
+		ext_irqs[0] = BCM_6362_EXT_IRQ0;
+		ext_irqs[1] = BCM_6362_EXT_IRQ1;
+		ext_irqs[2] = BCM_6362_EXT_IRQ2;
+		ext_irqs[3] = BCM_6362_EXT_IRQ3;
+		ext_shift = 4;
 		break;
 	case BCM6368_CPU_ID:
-		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
-		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
-		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
-		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
-		irq_bits = 64;
+		periph_bases[0] += PERF_IRQMASK_6368_REG(0);
+		periph_bases[1] += PERF_IRQMASK_6368_REG(1);
+		periph_irq_count = 2;
+		periph_width = 2;
+
+		ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6368;
+		ext_intc_bases[1] += PERF_EXTIRQ_CFG_REG2_6368;
 		ext_irq_count = 6;
-		is_ext_irq_cascaded = 1;
-		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
-		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
-		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
-		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
+		ext_irqs[0] = BCM_6368_EXT_IRQ0;
+		ext_irqs[1] = BCM_6368_EXT_IRQ1;
+		ext_irqs[2] = BCM_6368_EXT_IRQ2;
+		ext_irqs[3] = BCM_6368_EXT_IRQ3;
+		ext_irqs[4] = BCM_6368_EXT_IRQ4;
+		ext_irqs[5] = BCM_6368_EXT_IRQ5;
+		ext_shift = 4;
 		break;
 	default:
 		BUG();
 	}
 
-	if (irq_bits == 32) {
-		dispatch_internal = __dispatch_internal_32;
-		internal_irq_mask = __internal_irq_mask_32;
-		internal_irq_unmask = __internal_irq_unmask_32;
-	} else {
-		dispatch_internal = __dispatch_internal_64;
-		internal_irq_mask = __internal_irq_mask_64;
-		internal_irq_unmask = __internal_irq_unmask_64;
-	}
+	mips_cpu_irq_init();
+	bcm6345_periph_intc_init(periph_irq_count, periph_irqs, periph_bases, periph_width);
+	bcm6345_ext_intc_init(4, ext_irqs, ext_intc_bases[0], ext_shift);
+	if (ext_irq_count > 4)
+		bcm6345_ext_intc_init(2, &ext_irqs[4], ext_intc_bases[1],
+				      ext_shift);
 }
 
 void __init arch_init_irq(void)
 {
-	int i;
-
 	bcm63xx_init_irq();
-	mips_cpu_irq_init();
-	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
-		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
-					 handle_level_irq);
-
-	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
-		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
-					 handle_edge_irq);
-
-	if (!is_ext_irq_cascaded) {
-		for (i = 3; i < 3 + ext_irq_count; ++i)
-			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
-	}
-
-	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
-#ifdef CONFIG_SMP
-	if (is_ext_irq_cascaded) {
-		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
-		bcm63xx_internal_irq_chip.irq_set_affinity =
-			bcm63xx_internal_set_affinity;
-
-		cpumask_clear(irq_default_affinity);
-		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-	}
-#endif
 }