1 #include <linux/irqdomain.h>
4 #include <linux/of_address.h>
5 #include <linux/of_irq.h>
6 #include <linux/irqchip/chained_irq.h>
9 #include <linux/version.h>
11 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
14 # include <linux/irqchip.h>
17 struct rps_chip_data
{
20 struct irq_domain
*domain
;
35 * Routines to acknowledge, disable and enable interrupts
37 static void rps_mask_irq(struct irq_data
*d
)
39 struct rps_chip_data
*chip_data
= irq_data_get_irq_chip_data(d
);
40 u32 mask
= BIT(d
->hwirq
);
42 iowrite32(mask
, chip_data
->base
+ RPS_MASK
);
45 static void rps_unmask_irq(struct irq_data
*d
)
47 struct rps_chip_data
*chip_data
= irq_data_get_irq_chip_data(d
);
48 u32 mask
= BIT(d
->hwirq
);
50 iowrite32(mask
, chip_data
->base
+ RPS_UNMASK
);
53 static struct irq_chip rps_chip
= {
55 .irq_mask
= rps_mask_irq
,
56 .irq_unmask
= rps_unmask_irq
,
59 static int rps_irq_domain_xlate(struct irq_domain
*d
,
60 struct device_node
*controller
,
61 const u32
*intspec
, unsigned int intsize
,
62 unsigned long *out_hwirq
,
63 unsigned int *out_type
)
65 if (d
->of_node
!= controller
)
70 *out_hwirq
= intspec
[0];
71 /* Honestly I do not know the type */
72 *out_type
= IRQ_TYPE_LEVEL_HIGH
;
77 static int rps_irq_domain_map(struct irq_domain
*d
, unsigned int irq
,
80 irq_set_chip_and_handler(irq
, &rps_chip
, handle_level_irq
);
81 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
82 set_irq_flags(irq
, IRQF_VALID
| IRQF_PROBE
);
86 irq_set_chip_data(irq
, d
->host_data
);
90 const struct irq_domain_ops rps_irq_domain_ops
= {
91 .map
= rps_irq_domain_map
,
92 .xlate
= rps_irq_domain_xlate
,
95 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
96 static void rps_handle_cascade_irq(unsigned int irq
, struct irq_desc
*desc
)
98 static void rps_handle_cascade_irq(struct irq_desc
*desc
)
101 struct rps_chip_data
*chip_data
= irq_desc_get_handler_data(desc
);
102 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
103 unsigned int cascade_irq
, rps_irq
;
106 chained_irq_enter(chip
, desc
);
108 status
= ioread32(chip_data
->base
+ RPS_STATUS
);
109 rps_irq
= __ffs(status
);
110 cascade_irq
= irq_find_mapping(chip_data
->domain
, rps_irq
);
112 if (unlikely(rps_irq
>= RPS_IRQ_COUNT
))
113 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
114 handle_bad_irq(cascade_irq
, desc
);
116 handle_bad_irq(desc
);
119 generic_handle_irq(cascade_irq
);
121 chained_irq_exit(chip
, desc
);
125 int __init
rps_of_init(struct device_node
*node
, struct device_node
*parent
)
127 void __iomem
*rps_base
;
128 int irq_start
= RPS_IRQ_BASE
;
135 rps_base
= of_iomap(node
, 0);
136 WARN(!rps_base
, "unable to map rps registers\n");
137 rps_data
.base
= rps_base
;
139 irq_base
= irq_alloc_descs(irq_start
, 0, RPS_IRQ_COUNT
, numa_node_id());
140 if (IS_ERR_VALUE(irq_base
)) {
141 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
143 irq_base
= irq_start
;
146 rps_data
.domain
= irq_domain_add_legacy(node
, RPS_IRQ_COUNT
, irq_base
,
147 PRS_HWIRQ_BASE
, &rps_irq_domain_ops
, &rps_data
);
149 if (WARN_ON(!rps_data
.domain
))
153 irq
= irq_of_parse_and_map(node
, 0);
154 if (irq_set_handler_data(irq
, &rps_data
) != 0)
156 irq_set_chained_handler(irq
, rps_handle_cascade_irq
);
/* Register rps_of_init() for "plxtech,nas782x-rps" devicetree nodes. */
IRQCHIP_DECLARE(nas782x, "plxtech,nas782x-rps", rps_of_init);