/*
 * arch/ubicom32/kernel/irq.c
 *	Ubicom32 architecture IRQ support.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com>
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *	arch/m68knommu
 */
30 #include <linux/types.h>
31 #include <linux/irq.h>
32 #include <linux/init.h>
33 #include <linux/kernel.h>
34 #include <linux/kernel_stat.h>
35 #include <linux/module.h>
36 #include <linux/seq_file.h>
37 #include <linux/proc_fs.h>
38 #include <asm/system.h>
39 #include <asm/traps.h>
41 #include <asm/ip5000.h>
42 #include <asm/machdep.h>
43 #include <asm/asm-offsets.h>
44 #include <asm/thread.h>
45 #include <asm/devtree.h>
47 unsigned int irq_soft_avail
;
48 static struct irqaction ubicom32_reserve_action
[NR_IRQS
];
#if !defined(CONFIG_DEBUG_IRQMEASURE)
/* Measurement disabled: all hooks compile away to nothing. */
#define IRQ_DECLARE_MEASUREMENT
#define IRQ_MEASUREMENT_START()
#define IRQ_MEASUREMENT_END(irq)
#else
#define IRQ_DECLARE_MEASUREMENT \
	int __diff; \
	unsigned int __tstart;

#define IRQ_MEASUREMENT_START() \
	__tstart = UBICOM32_IO_TIMER->sysval;

#define IRQ_MEASUREMENT_END(irq) \
	__diff = (int)UBICOM32_IO_TIMER->sysval - (int)__tstart; \
	irq_measurement_update((irq), __diff);

/*
 * We keep track of the time spent in both irq_enter()
 * and irq_exit() style processing, per IRQ.
 */
struct irq_measurement {
	volatile unsigned int min;	/* smallest sample seen (0 == unset) */
	volatile unsigned int avg;	/* exponential moving average */
	volatile unsigned int max;	/* largest sample seen */
};

static DEFINE_SPINLOCK(irq_measurement_lock);

/*
 * Add 1 in for softirq (irq_exit());
 */
static struct irq_measurement irq_measurements[NR_IRQS + 1];

/*
 * Weight of the exponential moving average used in
 * irq_measurement_update().
 * NOTE(review): the original definition was lost from this copy of the
 * file; 32 restores a buildable default — confirm against the original.
 */
#if !defined(IRQ_WEIGHT)
#define IRQ_WEIGHT 32
#endif

/*
 * irq_measurement_update()
 *	Update an entry in the measurement array for this irq.
 */
static void irq_measurement_update(int irq, int sample)
{
	struct irq_measurement *im = &irq_measurements[irq];
	spin_lock(&irq_measurement_lock);
	if ((im->min == 0) || (im->min > sample)) {
		im->min = sample;
	}
	if (im->max < sample) {
		im->max = sample;
	}
	im->avg = ((im->avg * (IRQ_WEIGHT - 1)) + sample) / IRQ_WEIGHT;
	spin_unlock(&irq_measurement_lock);
}
#endif /* CONFIG_DEBUG_IRQMEASURE */
/*
 * irq_kernel_stack_check()
 *	See if the kernel stack is within STACK_WARN of the end.
 *
 * No-op unless CONFIG_DEBUG_STACKOVERFLOW is enabled.
 */
static void irq_kernel_stack_check(int irq, struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	unsigned long sp;

	/*
	 * Make sure that we are not close to the top of the stack and thus
	 * can not really service this interrupt.
	 *
	 * SP & (THREAD_SIZE - 1) yields the offset of the stack pointer
	 * within the thread stack, i.e. the space remaining before the
	 * thread_info at the bottom.
	 * NOTE(review): output constraint reconstructed — confirm "=d".
	 */
	asm volatile (
		"and.4 %0, SP, %1 \n\t"
		: "=d" (sp)
		: "d" (THREAD_SIZE - 1)
	);

	if (sp < (sizeof(struct thread_info) + STACK_WARN)) {
		printk(KERN_WARNING
			"cpu[%d]: possible overflow detected sp remain: %p, "
			"irq: %d, regs: %p\n",
			thread_get_self(), (void *)sp, irq, regs);
		dump_stack();
	}

	/*
	 * Almost no room left at all: stop before we corrupt thread_info.
	 * NOTE(review): original panic text lost from this copy — restore it.
	 */
	if (sp < (sizeof(struct thread_info) + 16)) {
		panic("kernel stack overflow: irq: %d, regs: %p\n", irq, regs);
	}
#endif
}
/*
 * irq_get_lsb()
 *	Get the LSB set in value
 *
 * Returns the bit index (0-31) of the least significant set bit.
 * Callers must pass a non-zero value (irq_soft_alloc() checks first).
 *
 * Fix: dropped the original "rotate by nextbit" line — with nextbit == 0
 * it was a no-op whose `value << 32` is undefined behavior for a 32-bit
 * unsigned int.
 */
static int irq_get_lsb(unsigned int value)
{
	static unsigned char irq_bits[8] = {
		3, 0, 1, 0, 2, 0, 1, 0
	};
	int nextbit = 0;

	/*
	 * It's unlikely that we find that we execute the body of this while
	 * loop.  50% of the time we won't take this at all and then of the
	 * cases where we do about 50% of those we only execute once.
	 */
	if (!(value & 0xffff)) {
		nextbit += 16;
		value >>= 16;
	}

	if (!(value & 0xff)) {
		nextbit += 8;
		value >>= 8;
	}

	if (!(value & 0xf)) {
		nextbit += 4;
		value >>= 4;
	}

	/* The low 3 bits are resolved by table lookup. */
	nextbit += irq_bits[value & 0x7];
	if (nextbit > 31) {
		panic("nextbit out of range: %d\n", nextbit);
	}
	return nextbit;
}
179 * ubicom32_reserve_handler()
180 * Bogus handler associated with pre-reserved IRQ(s).
182 static irqreturn_t
ubicom32_reserve_handler(int irq
, void *dev_id
)
/*
 * __irq_disable_vector()
 *	Disable the interrupt by clearing the appropriate bit in the
 *	LDSR Mask Register.
 */
static void __irq_disable_vector(unsigned int irq)
{
	ldsr_disable_vector(irq);
}
200 * Acknowledge the specific interrupt by clearing the associate bit in
203 static void __irq_ack_vector(unsigned int irq
)
206 asm volatile ("move.4 INT_CLR0, %0" : : "d" (1 << irq
));
208 asm volatile ("move.4 INT_CLR1, %0" : : "d" (1 << (irq
- 32)));
/*
 * __irq_enable_vector()
 *	Clean and then enable the interrupt by setting the appropriate bit in
 *	the LDSR Mask Register.
 */
static void __irq_enable_vector(unsigned int irq)
{
	/*
	 * Acknowledge, really clear the vector.
	 */
	__irq_ack_vector(irq);
	ldsr_enable_vector(irq);
}
/*
 * __irq_mask_vector()
 *	Temporarily mask the interrupt in the LDSR.
 */
static void __irq_mask_vector(unsigned int irq)
{
	ldsr_mask_vector(irq);
}
/*
 * __irq_unmask_vector()
 *	Unmask the interrupt in the LDSR.
 */
static void __irq_unmask_vector(unsigned int irq)
{
	ldsr_unmask_vector(irq);
}
/*
 * __irq_end_vector()
 *	Called once an interrupt is completed (reset the LDSR mask).
 */
static void __irq_end_vector(unsigned int irq)
{
	ldsr_unmask_vector(irq);
}
#if defined(CONFIG_SMP)
/*
 * __irq_set_affinity()
 *	Set the cpu affinity for this interrupt.
 *	affinity container allocated at boot
 */
static void __irq_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	smp_set_affinity(irq, dest);
	cpumask_copy(irq_desc[irq].affinity, dest);
}
#endif
265 * On-Chip Generic Interrupt function handling.
267 static struct irq_chip ubicom32_irq_chip
= {
271 .enable
= __irq_enable_vector
,
272 .disable
= __irq_disable_vector
,
273 .ack
= __irq_ack_vector
,
274 .mask
= __irq_mask_vector
,
275 .unmask
= __irq_unmask_vector
,
276 .end
= __irq_end_vector
,
277 #if defined(CONFIG_SMP)
278 .set_affinity
= __irq_set_affinity
,
284 * Primary interface for handling IRQ() requests.
286 asmlinkage
void do_IRQ(int irq
, struct pt_regs
*regs
)
288 struct pt_regs
*oldregs
;
289 struct thread_info
*ti
= current_thread_info();
291 IRQ_DECLARE_MEASUREMENT
;
294 * Mark that we are inside of an interrupt and
295 * that interrupts are disabled.
297 oldregs
= set_irq_regs(regs
);
298 ti
->interrupt_nesting
++;
299 trace_hardirqs_off();
300 irq_kernel_stack_check(irq
, regs
);
303 * Start the interrupt sequence
308 * Execute the IRQ handler and any pending SoftIRQ requests.
310 BUG_ON(!irqs_disabled());
311 IRQ_MEASUREMENT_START();
313 IRQ_MEASUREMENT_END(irq
);
314 BUG_ON(!irqs_disabled());
317 * TODO: Since IRQ's are disabled when calling irq_exit()
318 * modify Kconfig to set __ARCH_IRQ_EXIT_IRQS_DISABLED flag.
319 * This will slightly improve performance by enabling
320 * softirq handling to avoid disabling/disabled interrupts.
322 IRQ_MEASUREMENT_START();
324 IRQ_MEASUREMENT_END(NR_IRQS
);
325 BUG_ON(!irqs_disabled());
328 * Outside of an interrupt (or nested exit).
330 set_irq_regs(oldregs
);
332 ti
->interrupt_nesting
--;
337 * Allocate a soft IRQ.
339 int irq_soft_alloc(unsigned int *soft
)
341 if (irq_soft_avail
== 0) {
342 printk(KERN_NOTICE
"no soft irqs to allocate\n");
346 *soft
= irq_get_lsb(irq_soft_avail
);
347 irq_soft_avail
&= ~(1 << *soft
);
353 * Called to handle an bad irq request.
355 void ack_bad_irq(unsigned int irq
)
357 printk(KERN_ERR
"IRQ: unexpected irq=%d\n", irq
);
358 __irq_end_vector(irq
);
363 * Return a string that displays the state of each of the interrupts.
365 int show_interrupts(struct seq_file
*p
, void *v
)
367 struct irqaction
*ap
;
368 int irq
= *((loff_t
*) v
);
371 if (irq
>= NR_IRQS
) {
377 for_each_online_cpu(j
) {
378 seq_printf(p
, "CPU%d ", j
);
383 ap
= irq_desc
[irq
].action
;
385 seq_printf(p
, "%3d: ", irq
);
386 for_each_online_cpu(j
) {
387 seq_printf(p
, "%10u ", kstat_irqs_cpu(irq
, j
));
389 seq_printf(p
, "%14s ", irq_desc
[irq
].chip
->name
);
390 seq_printf(p
, "%s", ap
->name
);
391 for (ap
= ap
->next
; ap
; ap
= ap
->next
) {
392 seq_printf(p
, ", %s", ap
->name
);
399 #if defined(CONFIG_DEBUG_IRQMEASURE)
/*
 * irq_cycles_to_micro()
 *	Convert a cycle count to micro-seconds at the given core frequency.
 *
 * Integer math: (cycles / cycles-per-microsecond), truncating.
 * Assumes frequency >= 1 MHz; a lower value would divide by zero.
 */
static unsigned int irq_cycles_to_micro(unsigned int cycles, unsigned int frequency)
{
	unsigned int micro = (cycles / (frequency / 1000000));
	return micro;
}
407 * irq_measurement_show()
408 * Print out the min, avg, max values for each IRQ
410 * By request, the max value is reset after each dump.
412 static int irq_measurement_show(struct seq_file
*p
, void *v
)
414 struct irqaction
*ap
;
415 unsigned int freq
= processor_frequency();
416 int irq
= *((loff_t
*) v
);
420 seq_puts(p
, "\tmin\tavg\tmax\t(micro-seconds)\n");
427 if (irq
== NR_IRQS
) {
428 unsigned int min
, avg
, max
;
429 spin_lock(&irq_measurement_lock
);
430 min
= irq_cycles_to_micro(irq_measurements
[irq
].min
, freq
);
431 avg
= irq_cycles_to_micro(irq_measurements
[irq
].avg
, freq
);
432 max
= irq_cycles_to_micro(irq_measurements
[irq
].max
, freq
);
433 irq_measurements
[irq
].max
= 0;
434 spin_unlock(&irq_measurement_lock
);
435 seq_printf(p
, " \t%u\t%u\t%u\tsoftirq\n", min
, avg
, max
);
439 ap
= irq_desc
[irq
].action
;
441 unsigned int min
, avg
, max
;
442 spin_lock(&irq_measurement_lock
);
443 min
= irq_cycles_to_micro(irq_measurements
[irq
].min
, freq
);
444 avg
= irq_cycles_to_micro(irq_measurements
[irq
].avg
, freq
);
445 max
= irq_cycles_to_micro(irq_measurements
[irq
].max
, freq
);
446 irq_measurements
[irq
].max
= 0;
447 spin_unlock(&irq_measurement_lock
);
448 seq_printf(p
, "%2u:\t%u\t%u\t%u\t%s\n", irq
, min
, avg
, max
, ap
->name
);
453 static void *irq_measurement_start(struct seq_file
*f
, loff_t
*pos
)
455 return (*pos
<= NR_IRQS
) ? pos
: NULL
;
458 static void *irq_measurement_next(struct seq_file
*f
, void *v
, loff_t
*pos
)
/*
 * irq_measurement_stop()
 *	seq_file ->stop: nothing to release.
 */
static void irq_measurement_stop(struct seq_file *f, void *v)
{
}
471 static const struct seq_operations irq_measurement_seq_ops
= {
472 .start
= irq_measurement_start
,
473 .next
= irq_measurement_next
,
474 .stop
= irq_measurement_stop
,
475 .show
= irq_measurement_show
,
478 static int irq_measurement_open(struct inode
*inode
, struct file
*filp
)
480 return seq_open(filp
, &irq_measurement_seq_ops
);
483 static const struct file_operations irq_measurement_fops
= {
484 .open
= irq_measurement_open
,
487 .release
= seq_release
,
490 static int __init
irq_measurement_init(void)
492 proc_create("irq_measurements", 0, NULL
, &irq_measurement_fops
);
495 module_init(irq_measurement_init
);
500 * Initialize the on-chip IRQ subsystem.
502 void __init
init_IRQ(void)
505 struct devtree_node
*p
= NULL
;
506 struct devtree_node
*iter
= NULL
;
507 unsigned int mask
= 0;
508 unsigned int reserved
= 0;
511 * Pull out the list of software interrupts that are avialable to
512 * Linux and provide an allocation function for them. The first
513 * 24 interrupts of INT0 are software interrupts.
516 if (processor_interrupts(&irq_soft_avail
, NULL
) < 0) {
517 printk(KERN_WARNING
"No Soft IRQ(s) available\n");
519 irq_soft_avail
&= ((1 << 24) - 1);
522 * Initialize all of the on-chip interrupt handling
523 * to use a common set of interrupt functions.
525 for (irq
= 0; irq
< NR_IRQS
; irq
++) {
526 irq_desc
[irq
].status
= IRQ_DISABLED
;
527 irq_desc
[irq
].action
= NULL
;
528 irq_desc
[irq
].depth
= 1;
529 set_irq_chip(irq
, &ubicom32_irq_chip
);
533 * The sendirq of a devnode is not registered within Linux but instead
534 * is used by the software I/O thread. These interrupts are reserved.
535 * The recvirq is used by Linux and registered by a device driver, these
538 * recvirq(s) that are in the software interrupt range are not supposed
539 * to be marked as reserved. We track this while we scan the device
542 p
= devtree_find_next(&iter
);
544 unsigned char sendirq
, recvirq
;
545 devtree_irq(p
, &sendirq
, &recvirq
);
548 * If the sendirq is valid, mark that irq as taken by the
551 if (sendirq
< NR_IRQS
) {
552 ubicom32_reserve_action
[sendirq
].handler
=
553 ubicom32_reserve_handler
;
554 ubicom32_reserve_action
[sendirq
].name
= p
->name
;
555 irq_desc
[sendirq
].action
=
556 &ubicom32_reserve_action
[sendirq
];
557 mask
|= (1 << sendirq
);
561 * Track the relevant recieve IRQ(s)
564 mask
|= (1 << recvirq
);
568 * Move to the next node.
570 p
= devtree_find_next(&iter
);
574 * Remove these bits from the irq_soft_avail list and then use the
575 * result as the list of pre-reserved IRQ(s).
577 reserved
= ~irq_soft_avail
& ~mask
;
578 for (irq
= 0; irq
< 24; irq
++) {
579 if ((reserved
& (1 << irq
))) {
580 ubicom32_reserve_action
[irq
].handler
=
581 ubicom32_reserve_handler
;
582 ubicom32_reserve_action
[irq
].name
= "reserved";
583 irq_desc
[irq
].action
= &ubicom32_reserve_action
[irq
];
588 * Initialize the LDSR which is the Ubicom32 programmable
589 * interrupt controller.
594 * The Ubicom trap code needs a 2nd init after IRQ(s) are setup.
596 trap_init_interrupt();