[openwrt/openwrt.git] / target / linux / bcm53xx / patches-4.4 / 153-PCI-iproc-Add-iProc-PCIe-MSI-support.patch
1 From c81922174d61127ff5baad6059ae148794c72276 Mon Sep 17 00:00:00 2001
2 From: Ray Jui <rjui@broadcom.com>
3 Date: Tue, 17 Nov 2015 13:14:37 -0800
4 Subject: [PATCH 153/154] PCI: iproc: Add iProc PCIe MSI support
5
6 This patch adds PCIe MSI support for both PAXB and PAXC interfaces on
7 all iProc-based platforms.
8
9 The iProc PCIe MSI support uses an event-queue-based implementation.
10 Each event queue is serviced by a GIC interrupt and can support up to 64
11 MSI vectors. Host memory is allocated for the event queues, and each event
12 queue consists of 64 word-sized entries. MSI data is written to the
13 lower 16 bits of each entry, whereas the upper 16 bits are
14 reserved for the controller's internal processing.
15
16 Each event queue is tracked by a head pointer and a tail pointer. The head
17 pointer indicates the next entry in the event queue to be processed by
18 the driver and is updated by the driver after processing is done.
19 The controller uses the tail pointer as the next MSI data insertion
20 point. The controller ensures MSI data is flushed to host memory before
21 updating the tail pointer and then triggering the interrupt.
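
A minimal standalone C sketch of this head/tail accounting (illustration
only, not part of the driver; the helper name is made up for the example):

    #include <stdio.h>

    #define EQ_LEN 64

    /* number of valid entries between the SW head and the HW tail */
    static unsigned int eq_pending(unsigned int head, unsigned int tail)
    {
            return (tail < head) ? EQ_LEN - (head - tail) : tail - head;
    }

    int main(void)
    {
            printf("%u\n", eq_pending(10, 42)); /* 32: no wrap-around */
            printf("%u\n", eq_pending(60, 2));  /* 6: tail wrapped past the end */
            return 0;
    }

With head = 60 and tail = 2, entries 60..63 and 0..1 hold valid MSI data;
after consuming them the driver writes the new head (2) back to the
controller.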
22
23 MSI IRQ affinity is supported by evenly distributing the interrupts to
24 each CPU core. An MSI vector is moved from one GIC interrupt to another in
25 order to steer it to the target CPU.
26
27 Therefore, the actual number of supported MSI vectors is:
28
29 M * 64 / N
30
31 where M denotes the number of GIC interrupts (event queues), and N
32 denotes the number of CPU cores.
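
For example (purely illustrative numbers), a platform with M = 6 GIC
interrupts (the maximum the driver supports) and N = 2 CPU cores has
6 * 64 = 384 raw vectors but can hand out (6 * 64) / 2 = 192 of them,
because each allocated vector reserves one hwirq per CPU so that it can
later be steered between cores.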
33
34 This iProc event-queue-based MSI support should not be used on newer
35 platforms with MSI support integrated into the GIC (e.g., gicv2m or
36 gicv3-its).
37
38 Signed-off-by: Ray Jui <rjui@broadcom.com>
39 Reviewed-by: Anup Patel <anup.patel@broadcom.com>
40 Reviewed-by: Vikram Prakash <vikramp@broadcom.com>
41 Reviewed-by: Scott Branden <sbranden@broadcom.com>
42 ---
43 drivers/pci/host/Kconfig | 9 +
44 drivers/pci/host/Makefile | 1 +
45 drivers/pci/host/pcie-iproc-bcma.c | 1 +
46 drivers/pci/host/pcie-iproc-msi.c | 675 +++++++++++++++++++++++++++++++++
47 drivers/pci/host/pcie-iproc-platform.c | 1 +
48 drivers/pci/host/pcie-iproc.c | 26 ++
49 drivers/pci/host/pcie-iproc.h | 23 +-
50 7 files changed, 734 insertions(+), 2 deletions(-)
51 create mode 100644 drivers/pci/host/pcie-iproc-msi.c
52
53 --- a/drivers/pci/host/Kconfig
54 +++ b/drivers/pci/host/Kconfig
55 @@ -126,6 +126,15 @@ config PCIE_IPROC
56 iProc family of SoCs. An appropriate bus interface driver also needs
57 to be enabled
58
59 +config PCIE_IPROC_MSI
60 + bool "Broadcom iProc PCIe MSI support"
61 + depends on ARCH_BCM_IPROC && PCI_MSI
62 + select PCI_MSI_IRQ_DOMAIN
63 + default ARCH_BCM_IPROC
64 + help
65 + Say Y here if you want to enable MSI support for Broadcom's iProc
66 + PCIe controller
67 +
68 config PCIE_IPROC_PLATFORM
69 tristate "Broadcom iProc PCIe platform bus driver"
70 depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
71 --- a/drivers/pci/host/Makefile
72 +++ b/drivers/pci/host/Makefile
73 @@ -15,6 +15,7 @@ obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene
74 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
75 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
76 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
77 +obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
78 obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
79 obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
80 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
81 --- a/drivers/pci/host/pcie-iproc-bcma.c
82 +++ b/drivers/pci/host/pcie-iproc-bcma.c
83 @@ -55,6 +55,7 @@ static int iproc_pcie_bcma_probe(struct
84 bcma_set_drvdata(bdev, pcie);
85
86 pcie->base = bdev->io_addr;
87 + pcie->base_addr = bdev->addr;
88
89 res_mem.start = bdev->addr_s[0];
90 res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
91 --- /dev/null
92 +++ b/drivers/pci/host/pcie-iproc-msi.c
93 @@ -0,0 +1,675 @@
94 +/*
95 + * Copyright (C) 2015 Broadcom Corporation
96 + *
97 + * This program is free software; you can redistribute it and/or
98 + * modify it under the terms of the GNU General Public License as
99 + * published by the Free Software Foundation version 2.
100 + *
101 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any
102 + * kind, whether express or implied; without even the implied warranty
103 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
104 + * GNU General Public License for more details.
105 + */
106 +
107 +#include <linux/interrupt.h>
108 +#include <linux/irqchip/chained_irq.h>
109 +#include <linux/irqdomain.h>
110 +#include <linux/msi.h>
111 +#include <linux/of_irq.h>
112 +#include <linux/of_pci.h>
113 +#include <linux/pci.h>
114 +
115 +#include "pcie-iproc.h"
116 +
117 +#define IPROC_MSI_INTR_EN_SHIFT 11
118 +#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT)
119 +#define IPROC_MSI_INT_N_EVENT_SHIFT 1
120 +#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
121 +#define IPROC_MSI_EQ_EN_SHIFT 0
122 +#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT)
123 +
124 +#define IPROC_MSI_EQ_MASK 0x3f
125 +
126 +/* max number of GIC interrupts */
127 +#define NR_HW_IRQS 6
128 +
129 +/* number of entries in each event queue */
130 +#define EQ_LEN 64
131 +
132 +/* size of each event queue memory region */
133 +#define EQ_MEM_REGION_SIZE SZ_4K
134 +
135 +/* size of each MSI address region */
136 +#define MSI_MEM_REGION_SIZE SZ_4K
137 +
138 +enum iproc_msi_reg {
139 + IPROC_MSI_EQ_PAGE = 0,
140 + IPROC_MSI_EQ_PAGE_UPPER,
141 + IPROC_MSI_PAGE,
142 + IPROC_MSI_PAGE_UPPER,
143 + IPROC_MSI_CTRL,
144 + IPROC_MSI_EQ_HEAD,
145 + IPROC_MSI_EQ_TAIL,
146 + IPROC_MSI_INTS_EN,
147 + IPROC_MSI_REG_SIZE,
148 +};
149 +
150 +struct iproc_msi;
151 +
152 +/**
153 + * iProc MSI group
154 + *
155 + * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
156 + * event queue
157 + *
158 + * @msi: pointer to iProc MSI data
159 + * @gic_irq: GIC interrupt
160 + * @eq: Event queue number
161 + */
162 +struct iproc_msi_grp {
163 + struct iproc_msi *msi;
164 + int gic_irq;
165 + unsigned int eq;
166 +};
167 +
168 +/**
169 + * iProc event queue based MSI
170 + *
171 + * Only meant to be used on platforms without MSI support integrated into the
172 + * GIC
173 + *
174 + * @pcie: pointer to iProc PCIe data
175 + * @reg_offsets: MSI register offsets
176 + * @grps: MSI groups
177 + * @nr_irqs: number of total interrupts connected to GIC
178 + * @nr_cpus: number of total CPUs
179 + * @has_inten_reg: indicates the MSI interrupt enable register needs to be
180 + * set explicitly (required for some legacy platforms)
181 + * @bitmap: MSI vector bitmap
182 + * @bitmap_lock: lock to protect access to the MSI bitmap
183 + * @nr_msi_vecs: total number of MSI vectors
184 + * @inner_domain: inner IRQ domain
185 + * @msi_domain: MSI IRQ domain
186 + * @nr_eq_region: required number of 4K-aligned memory regions for MSI event
187 + * queues
188 + * @nr_msi_region: required number of 4K-aligned address regions for MSI posted
189 + * writes
190 + * @eq_cpu: pointer to allocated memory region for MSI event queues
191 + * @eq_dma: DMA address of MSI event queues
192 + * @msi_addr: MSI address
193 + */
194 +struct iproc_msi {
195 + struct iproc_pcie *pcie;
196 + const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
197 + struct iproc_msi_grp *grps;
198 + int nr_irqs;
199 + int nr_cpus;
200 + bool has_inten_reg;
201 + unsigned long *bitmap;
202 + struct mutex bitmap_lock;
203 + unsigned int nr_msi_vecs;
204 + struct irq_domain *inner_domain;
205 + struct irq_domain *msi_domain;
206 + unsigned int nr_eq_region;
207 + unsigned int nr_msi_region;
208 + void *eq_cpu;
209 + dma_addr_t eq_dma;
210 + phys_addr_t msi_addr;
211 +};
212 +
213 +static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
214 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
215 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
216 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
217 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
218 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
219 + { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
220 +};
221 +
222 +static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
223 + { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
224 + { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
225 + { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
226 + { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
227 +};
228 +
229 +static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
230 + enum iproc_msi_reg reg,
231 + unsigned int eq)
232 +{
233 + struct iproc_pcie *pcie = msi->pcie;
234 +
235 + return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
236 +}
237 +
238 +static inline void iproc_msi_write_reg(struct iproc_msi *msi,
239 + enum iproc_msi_reg reg,
240 + int eq, u32 val)
241 +{
242 + struct iproc_pcie *pcie = msi->pcie;
243 +
244 + writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
245 +}
246 +
247 +static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
248 +{
249 + return (hwirq % msi->nr_irqs);
250 +}
251 +
252 +static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
253 + unsigned long hwirq)
254 +{
255 + if (msi->nr_msi_region > 1)
256 + return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
257 + else
258 + return hwirq_to_group(msi, hwirq) * sizeof(u32);
259 +}
260 +
261 +static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
262 +{
263 + if (msi->nr_eq_region > 1)
264 + return eq * EQ_MEM_REGION_SIZE;
265 + else
266 + return eq * EQ_LEN * sizeof(u32);
267 +}
268 +
269 +static struct irq_chip iproc_msi_irq_chip = {
270 + .name = "iProc-MSI",
271 +};
272 +
273 +static struct msi_domain_info iproc_msi_domain_info = {
274 + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
275 + MSI_FLAG_PCI_MSIX,
276 + .chip = &iproc_msi_irq_chip,
277 +};
278 +
279 +/*
280 + * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
281 + * dedicated event queue. Each MSI group can support up to 64 MSI vectors
282 + *
283 + * The number of MSI groups varies between different iProc SoCs. The total
284 + * number of CPU cores also varies. To support MSI IRQ affinity, we
285 + * distribute GIC interrupts across all available CPUs. An MSI vector is moved
286 + * from one GIC interrupt to another to steer it to the target CPU
287 + *
288 + * Assuming:
289 + * - the number of MSI groups is M
290 + * - the number of CPU cores is N
291 + * - M is always a multiple of N
292 + *
293 + * Total number of raw MSI vectors = M * 64
294 + * Total number of supported MSI vectors = (M * 64) / N
295 + */
296 +static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
297 +{
298 + return (hwirq % msi->nr_cpus);
299 +}
300 +
301 +static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
302 + unsigned long hwirq)
303 +{
304 + return (hwirq - hwirq_to_cpu(msi, hwirq));
305 +}
306 +
307 +static int iproc_msi_irq_set_affinity(struct irq_data *data,
308 + const struct cpumask *mask, bool force)
309 +{
310 + struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
311 + int target_cpu = cpumask_first(mask);
312 + int curr_cpu;
313 +
314 + curr_cpu = hwirq_to_cpu(msi, data->hwirq);
315 + if (curr_cpu == target_cpu)
316 + return IRQ_SET_MASK_OK_DONE;
317 +
318 + /* steer MSI to the target CPU */
319 + data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
320 +
321 + return IRQ_SET_MASK_OK;
322 +}
323 +
324 +static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
325 + struct msi_msg *msg)
326 +{
327 + struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
328 + dma_addr_t addr;
329 +
330 + addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
331 + msg->address_lo = lower_32_bits(addr);
332 + msg->address_hi = upper_32_bits(addr);
333 + msg->data = data->hwirq;
334 +}
335 +
336 +static struct irq_chip iproc_msi_bottom_irq_chip = {
337 + .name = "MSI",
338 + .irq_set_affinity = iproc_msi_irq_set_affinity,
339 + .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
340 +};
341 +
342 +static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
343 + unsigned int virq, unsigned int nr_irqs,
344 + void *args)
345 +{
346 + struct iproc_msi *msi = domain->host_data;
347 + int hwirq;
348 +
349 + mutex_lock(&msi->bitmap_lock);
350 +
351 + /* allocate 'nr_cpus' number of MSI vectors each time */
352 + hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
353 + msi->nr_cpus, 0);
354 + if (hwirq < msi->nr_msi_vecs) {
355 + bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
356 + } else {
357 + mutex_unlock(&msi->bitmap_lock);
358 + return -ENOSPC;
359 + }
360 +
361 + mutex_unlock(&msi->bitmap_lock);
362 +
363 + irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
364 + domain->host_data, handle_simple_irq, NULL, NULL);
365 +
366 + return 0;
367 +}
368 +
369 +static void iproc_msi_irq_domain_free(struct irq_domain *domain,
370 + unsigned int virq, unsigned int nr_irqs)
371 +{
372 + struct irq_data *data = irq_domain_get_irq_data(domain, virq);
373 + struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
374 + unsigned int hwirq;
375 +
376 + mutex_lock(&msi->bitmap_lock);
377 +
378 + hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
379 + bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
380 +
381 + mutex_unlock(&msi->bitmap_lock);
382 +
383 + irq_domain_free_irqs_parent(domain, virq, nr_irqs);
384 +}
385 +
386 +static const struct irq_domain_ops msi_domain_ops = {
387 + .alloc = iproc_msi_irq_domain_alloc,
388 + .free = iproc_msi_irq_domain_free,
389 +};
390 +
391 +static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
392 +{
393 + u32 *msg, hwirq;
394 + unsigned int offs;
395 +
396 + offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
397 + msg = (u32 *)(msi->eq_cpu + offs);
398 + hwirq = *msg & IPROC_MSI_EQ_MASK;
399 +
400 + /*
401 + * Since we have multiple hwirqs mapped to a single MSI vector,
402 + * we need to derive the hwirq at CPU0. It can then be used to
403 + * map back to the virq
404 + */
405 + return hwirq_to_canonical_hwirq(msi, hwirq);
406 +}
407 +
408 +static void iproc_msi_handler(struct irq_desc *desc)
409 +{
410 + struct irq_chip *chip = irq_desc_get_chip(desc);
411 + struct iproc_msi_grp *grp;
412 + struct iproc_msi *msi;
413 + struct iproc_pcie *pcie;
414 + u32 eq, head, tail, nr_events;
415 + unsigned long hwirq;
416 + int virq;
417 +
418 + chained_irq_enter(chip, desc);
419 +
420 + grp = irq_desc_get_handler_data(desc);
421 + msi = grp->msi;
422 + pcie = msi->pcie;
423 + eq = grp->eq;
424 +
425 + /*
426 + * iProc MSI event queue is tracked by head and tail pointers. Head
427 + * pointer indicates the next entry (MSI data) to be consumed by SW in
428 + * the queue and needs to be updated by SW. iProc MSI core uses the
429 + * tail pointer as the next data insertion point
430 + *
431 + * Entries between head and tail pointers contain valid MSI data. MSI
432 + * data is guaranteed to be in the event queue memory before the tail
433 + * pointer is updated by the iProc MSI core
434 + */
435 + head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
436 + eq) & IPROC_MSI_EQ_MASK;
437 + do {
438 + tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
439 + eq) & IPROC_MSI_EQ_MASK;
440 +
441 + /*
442 + * Figure out total number of events (MSI data) to be
443 + * processed
444 + */
445 + nr_events = (tail < head) ?
446 + (EQ_LEN - (head - tail)) : (tail - head);
447 + if (!nr_events)
448 + break;
449 +
450 + /* process all outstanding events */
451 + while (nr_events--) {
452 + hwirq = decode_msi_hwirq(msi, eq, head);
453 + virq = irq_find_mapping(msi->inner_domain, hwirq);
454 + generic_handle_irq(virq);
455 +
456 + head++;
457 + head %= EQ_LEN;
458 + }
459 +
460 + /*
461 + * Now all outstanding events have been processed. Update the
462 + * head pointer
463 + */
464 + iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);
465 +
466 + /*
467 + * Now go read the tail pointer again to see if there are new
468 + * outstanding events that came in during the above window
469 + */
470 + } while (true);
471 +
472 + chained_irq_exit(chip, desc);
473 +}
474 +
475 +static void iproc_msi_enable(struct iproc_msi *msi)
476 +{
477 + int i, eq;
478 + u32 val;
479 +
480 + /* program memory region for each event queue */
481 + for (i = 0; i < msi->nr_eq_region; i++) {
482 + dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);
483 +
484 + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
485 + lower_32_bits(addr));
486 + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
487 + upper_32_bits(addr));
488 + }
489 +
490 + /* program address region for MSI posted writes */
491 + for (i = 0; i < msi->nr_msi_region; i++) {
492 + phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);
493 +
494 + iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
495 + lower_32_bits(addr));
496 + iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
497 + upper_32_bits(addr));
498 + }
499 +
500 + for (eq = 0; eq < msi->nr_irqs; eq++) {
501 + /* enable MSI event queue */
502 + val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
503 + IPROC_MSI_EQ_EN;
504 + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
505 +
506 + /*
507 + * Some legacy platforms require the MSI interrupt enable
508 + * register to be set explicitly
509 + */
510 + if (msi->has_inten_reg) {
511 + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
512 + val |= BIT(eq);
513 + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
514 + }
515 + }
516 +}
517 +
518 +static void iproc_msi_disable(struct iproc_msi *msi)
519 +{
520 + u32 eq, val;
521 +
522 + for (eq = 0; eq < msi->nr_irqs; eq++) {
523 + if (msi->has_inten_reg) {
524 + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
525 + val &= ~BIT(eq);
526 + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
527 + }
528 +
529 + val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
530 + val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
531 + IPROC_MSI_EQ_EN);
532 + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
533 + }
534 +}
535 +
536 +static int iproc_msi_alloc_domains(struct device_node *node,
537 + struct iproc_msi *msi)
538 +{
539 + msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
540 + &msi_domain_ops, msi);
541 + if (!msi->inner_domain)
542 + return -ENOMEM;
543 +
544 + msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
545 + &iproc_msi_domain_info,
546 + msi->inner_domain);
547 + if (!msi->msi_domain) {
548 + irq_domain_remove(msi->inner_domain);
549 + return -ENOMEM;
550 + }
551 +
552 + return 0;
553 +}
554 +
555 +static void iproc_msi_free_domains(struct iproc_msi *msi)
556 +{
557 + if (msi->msi_domain)
558 + irq_domain_remove(msi->msi_domain);
559 +
560 + if (msi->inner_domain)
561 + irq_domain_remove(msi->inner_domain);
562 +}
563 +
564 +static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
565 +{
566 + int i;
567 +
568 + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
569 + irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
570 + NULL, NULL);
571 + }
572 +}
573 +
574 +static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
575 +{
576 + int i, ret;
577 + cpumask_var_t mask;
578 + struct iproc_pcie *pcie = msi->pcie;
579 +
580 + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
581 + irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
582 + iproc_msi_handler,
583 + &msi->grps[i]);
584 + /* dedicate GIC interrupt to each CPU core */
585 + if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
586 + cpumask_clear(mask);
587 + cpumask_set_cpu(cpu, mask);
588 + ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
589 + if (ret)
590 + dev_err(pcie->dev,
591 + "failed to set affinity for IRQ%d\n",
592 + msi->grps[i].gic_irq);
593 + free_cpumask_var(mask);
594 + } else {
595 + dev_err(pcie->dev, "failed to alloc CPU mask\n");
596 + ret = -EINVAL;
597 + }
598 +
599 + if (ret) {
600 + /* free all configured/unconfigured irqs */
601 + iproc_msi_irq_free(msi, cpu);
602 + return ret;
603 + }
604 + }
605 +
606 + return 0;
607 +}
608 +
609 +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
610 +{
611 + struct iproc_msi *msi;
612 + int i, ret;
613 + unsigned int cpu;
614 +
615 + if (!of_device_is_compatible(node, "brcm,iproc-msi"))
616 + return -ENODEV;
617 +
618 + if (!of_find_property(node, "msi-controller", NULL))
619 + return -ENODEV;
620 +
621 + if (pcie->msi)
622 + return -EBUSY;
623 +
624 + msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
625 + if (!msi)
626 + return -ENOMEM;
627 +
628 + msi->pcie = pcie;
629 + pcie->msi = msi;
630 + msi->msi_addr = pcie->base_addr;
631 + mutex_init(&msi->bitmap_lock);
632 + msi->nr_cpus = num_possible_cpus();
633 +
634 + msi->nr_irqs = of_irq_count(node);
635 + if (!msi->nr_irqs) {
636 + dev_err(pcie->dev, "found no MSI GIC interrupt\n");
637 + return -ENODEV;
638 + }
639 +
640 + if (msi->nr_irqs > NR_HW_IRQS) {
641 + dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
642 + msi->nr_irqs);
643 + msi->nr_irqs = NR_HW_IRQS;
644 + }
645 +
646 + if (msi->nr_irqs < msi->nr_cpus) {
647 + dev_err(pcie->dev,
648 + "not enough GIC interrupts for MSI affinity\n");
649 + return -EINVAL;
650 + }
651 +
652 + if (msi->nr_irqs % msi->nr_cpus != 0) {
653 + msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
654 + dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
655 + msi->nr_irqs);
656 + }
657 +
658 + switch (pcie->type) {
659 + case IPROC_PCIE_PAXB:
660 + msi->reg_offsets = iproc_msi_reg_paxb;
661 + msi->nr_eq_region = 1;
662 + msi->nr_msi_region = 1;
663 + break;
664 + case IPROC_PCIE_PAXC:
665 + msi->reg_offsets = iproc_msi_reg_paxc;
666 + msi->nr_eq_region = msi->nr_irqs;
667 + msi->nr_msi_region = msi->nr_irqs;
668 + break;
669 + default:
670 + dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
671 + return -EINVAL;
672 + }
673 +
674 + if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
675 + msi->has_inten_reg = true;
676 +
677 + msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
678 + msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
679 + sizeof(*msi->bitmap), GFP_KERNEL);
680 + if (!msi->bitmap)
681 + return -ENOMEM;
682 +
683 + msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
684 + GFP_KERNEL);
685 + if (!msi->grps)
686 + return -ENOMEM;
687 +
688 + for (i = 0; i < msi->nr_irqs; i++) {
689 + unsigned int irq = irq_of_parse_and_map(node, i);
690 +
691 + if (!irq) {
692 + dev_err(pcie->dev, "unable to parse/map interrupt\n");
693 + ret = -ENODEV;
694 + goto free_irqs;
695 + }
696 + msi->grps[i].gic_irq = irq;
697 + msi->grps[i].msi = msi;
698 + msi->grps[i].eq = i;
699 + }
700 +
701 + /* reserve memory for the event queues and make sure they are zeroed */
702 + msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
703 + msi->nr_eq_region * EQ_MEM_REGION_SIZE,
704 + &msi->eq_dma, GFP_KERNEL);
705 + if (!msi->eq_cpu) {
706 + ret = -ENOMEM;
707 + goto free_irqs;
708 + }
709 +
710 + ret = iproc_msi_alloc_domains(node, msi);
711 + if (ret) {
712 + dev_err(pcie->dev, "failed to create MSI domains\n");
713 + goto free_eq_dma;
714 + }
715 +
716 + for_each_online_cpu(cpu) {
717 + ret = iproc_msi_irq_setup(msi, cpu);
718 + if (ret)
719 + goto free_msi_irq;
720 + }
721 +
722 + iproc_msi_enable(msi);
723 +
724 + return 0;
725 +
726 +free_msi_irq:
727 + for_each_online_cpu(cpu)
728 + iproc_msi_irq_free(msi, cpu);
729 + iproc_msi_free_domains(msi);
730 +
731 +free_eq_dma:
732 + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
733 + msi->eq_cpu, msi->eq_dma);
734 +
735 +free_irqs:
736 + for (i = 0; i < msi->nr_irqs; i++) {
737 + if (msi->grps[i].gic_irq)
738 + irq_dispose_mapping(msi->grps[i].gic_irq);
739 + }
740 + pcie->msi = NULL;
741 + return ret;
742 +}
743 +EXPORT_SYMBOL(iproc_msi_init);
744 +
745 +void iproc_msi_exit(struct iproc_pcie *pcie)
746 +{
747 + struct iproc_msi *msi = pcie->msi;
748 + unsigned int i, cpu;
749 +
750 + if (!msi)
751 + return;
752 +
753 + iproc_msi_disable(msi);
754 +
755 + for_each_online_cpu(cpu)
756 + iproc_msi_irq_free(msi, cpu);
757 +
758 + iproc_msi_free_domains(msi);
759 +
760 + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
761 + msi->eq_cpu, msi->eq_dma);
762 +
763 + for (i = 0; i < msi->nr_irqs; i++) {
764 + if (msi->grps[i].gic_irq)
765 + irq_dispose_mapping(msi->grps[i].gic_irq);
766 + }
767 +}
768 +EXPORT_SYMBOL(iproc_msi_exit);
769 --- a/drivers/pci/host/pcie-iproc-platform.c
770 +++ b/drivers/pci/host/pcie-iproc-platform.c
771 @@ -71,6 +71,7 @@ static int iproc_pcie_pltfm_probe(struct
772 dev_err(pcie->dev, "unable to map controller registers\n");
773 return -ENOMEM;
774 }
775 + pcie->base_addr = reg.start;
776
777 if (of_property_read_bool(np, "brcm,pcie-ob")) {
778 u32 val;
779 --- a/drivers/pci/host/pcie-iproc.c
780 +++ b/drivers/pci/host/pcie-iproc.c
781 @@ -440,6 +440,26 @@ static int iproc_pcie_map_ranges(struct
782 return 0;
783 }
784
785 +static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
786 +{
787 + struct device_node *msi_node;
788 +
789 + msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
790 + if (!msi_node)
791 + return -ENODEV;
792 +
793 + /*
794 + * If another MSI controller is being used, the call below should fail
795 + * but that is okay
796 + */
797 + return iproc_msi_init(pcie, msi_node);
798 +}
799 +
800 +static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
801 +{
802 + iproc_msi_exit(pcie);
803 +}
804 +
805 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
806 {
807 int ret;
808 @@ -507,6 +527,10 @@ int iproc_pcie_setup(struct iproc_pcie *
809
810 iproc_pcie_enable(pcie);
811
812 + if (IS_ENABLED(CONFIG_PCI_MSI))
813 + if (iproc_pcie_msi_enable(pcie))
814 + dev_info(pcie->dev, "not using iProc MSI\n");
815 +
816 pci_scan_child_bus(bus);
817 pci_assign_unassigned_bus_resources(bus);
818 pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
819 @@ -531,6 +555,8 @@ int iproc_pcie_remove(struct iproc_pcie
820 pci_stop_root_bus(pcie->root_bus);
821 pci_remove_root_bus(pcie->root_bus);
822
823 + iproc_pcie_msi_disable(pcie);
824 +
825 phy_power_off(pcie->phy);
826 phy_exit(pcie->phy);
827
828 --- a/drivers/pci/host/pcie-iproc.h
829 +++ b/drivers/pci/host/pcie-iproc.h
830 @@ -41,6 +41,8 @@ struct iproc_pcie_ob {
831 resource_size_t window_size;
832 };
833
834 +struct iproc_msi;
835 +
836 /**
837 * iProc PCIe device
838 *
839 @@ -48,19 +50,21 @@ struct iproc_pcie_ob {
840 * @type: iProc PCIe interface type
841 * @reg_offsets: register offsets
842 * @base: PCIe host controller I/O register base
843 + * @base_addr: PCIe host controller register base physical address
844 * @sysdata: Per PCI controller data (ARM-specific)
845 * @root_bus: pointer to root bus
846 * @phy: optional PHY device that controls the Serdes
847 - * @irqs: interrupt IDs
848 * @map_irq: function callback to map interrupts
849 - * @need_ob_cfg: indidates SW needs to configure the outbound mapping window
850 + * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
851 * @ob: outbound mapping parameters
852 + * @msi: MSI data
853 */
854 struct iproc_pcie {
855 struct device *dev;
856 enum iproc_pcie_type type;
857 const u16 *reg_offsets;
858 void __iomem *base;
859 + phys_addr_t base_addr;
860 #ifdef CONFIG_ARM
861 struct pci_sys_data sysdata;
862 #endif
863 @@ -69,9 +73,24 @@ struct iproc_pcie {
864 int (*map_irq)(const struct pci_dev *, u8, u8);
865 bool need_ob_cfg;
866 struct iproc_pcie_ob ob;
867 + struct iproc_msi *msi;
868 };
869
870 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
871 int iproc_pcie_remove(struct iproc_pcie *pcie);
872
873 +#ifdef CONFIG_PCI_MSI
874 +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
875 +void iproc_msi_exit(struct iproc_pcie *pcie);
876 +#else
877 +static inline int iproc_msi_init(struct iproc_pcie *pcie,
878 + struct device_node *node)
879 +{
880 + return -ENODEV;
881 +}
882 +static void iproc_msi_exit(struct iproc_pcie *pcie)
883 +{
884 +}
885 +#endif
886 +
887 #endif /* _PCIE_IPROC_H */