kernel: bump 4.14 to 4.14.126
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.14 / 816-pcie-support-layerscape.patch
1 From c54a010fe105281259b996d318ed85efc4103fee Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 6 May 2019 15:18:05 +0800
4 Subject: [PATCH] pcie: support layerscape
5
6 This is an integrated patch of pcie for layerscape
7
8 Signed-off-by: Bao Xiaowei <xiaowei.bao@nxp.com>
9 Signed-off-by: Bhumika Goyal <bhumirks@gmail.com>
10 Signed-off-by: Biwen Li <biwen.li@nxp.com>
11 Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
12 Signed-off-by: Christoph Hellwig <hch@lst.de>
13 Signed-off-by: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
14 Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
15 Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
16 Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
17 Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
18 Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
19 Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
20 Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
21 Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
22 Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
23 Signed-off-by: Po Liu <po.liu@nxp.com>
24 Signed-off-by: Rob Herring <robh@kernel.org>
25 Signed-off-by: Rolf Evers-Fischer <rolf.evers.fischer@aptiv.com>
26 Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
27 Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
28 Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
29 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
30 ---
31 arch/arm/kernel/bios32.c | 43 ++
32 arch/arm64/kernel/pci.c | 43 ++
33 drivers/misc/pci_endpoint_test.c | 332 ++++++++++---
34 drivers/pci/Kconfig | 1 +
35 drivers/pci/dwc/Kconfig | 39 +-
36 drivers/pci/dwc/Makefile | 2 +-
37 drivers/pci/dwc/pci-dra7xx.c | 9 -
38 drivers/pci/dwc/pci-layerscape-ep.c | 146 ++++++
39 drivers/pci/dwc/pci-layerscape.c | 12 +
40 drivers/pci/dwc/pcie-designware-ep.c | 338 ++++++++++++--
41 drivers/pci/dwc/pcie-designware-host.c | 5 +-
42 drivers/pci/dwc/pcie-designware-plat.c | 159 ++++++-
43 drivers/pci/dwc/pcie-designware.c | 5 +-
44 drivers/pci/dwc/pcie-designware.h | 57 ++-
45 drivers/pci/endpoint/Kconfig | 1 +
46 drivers/pci/endpoint/Makefile | 1 +
47 drivers/pci/endpoint/functions/Kconfig | 1 +
48 drivers/pci/endpoint/functions/Makefile | 1 +
49 drivers/pci/endpoint/functions/pci-epf-test.c | 191 +++++---
50 drivers/pci/endpoint/pci-ep-cfs.c | 95 +++-
51 drivers/pci/endpoint/pci-epc-core.c | 159 +++++--
52 drivers/pci/endpoint/pci-epc-mem.c | 13 +-
53 drivers/pci/endpoint/pci-epf-core.c | 116 +++--
54 drivers/pci/host/pci-host-common.c | 8 -
55 drivers/pci/host/pcie-xilinx-nwl.c | 9 -
56 drivers/pci/host/pcie-xilinx.c | 7 -
57 drivers/pci/mobiveil/Kconfig | 50 ++
58 drivers/pci/mobiveil/Makefile | 7 +
59 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c | 178 +++++++
60 drivers/pci/mobiveil/pci-layerscape-gen4.c | 292 ++++++++++++
61 drivers/pci/mobiveil/pcie-mobiveil-ep.c | 512 +++++++++++++++++++++
62 drivers/pci/mobiveil/pcie-mobiveil-host.c | 640 ++++++++++++++++++++++++++
63 drivers/pci/mobiveil/pcie-mobiveil-plat.c | 54 +++
64 drivers/pci/mobiveil/pcie-mobiveil.c | 334 ++++++++++++++
65 drivers/pci/mobiveil/pcie-mobiveil.h | 296 ++++++++++++
66 drivers/pci/pcie/portdrv_core.c | 29 ++
67 drivers/pci/quirks.c | 15 +
68 include/linux/pci-ep-cfs.h | 5 +-
69 include/linux/pci-epc.h | 73 +--
70 include/linux/pci-epf.h | 12 +-
71 include/linux/pci.h | 1 +
72 include/uapi/linux/pcitest.h | 3 +
73 tools/pci/pcitest.c | 51 +-
74 tools/pci/pcitest.sh | 15 +
75 44 files changed, 3917 insertions(+), 443 deletions(-)
76 create mode 100644 drivers/pci/dwc/pci-layerscape-ep.c
77 create mode 100644 drivers/pci/mobiveil/Kconfig
78 create mode 100644 drivers/pci/mobiveil/Makefile
79 create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
80 create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4.c
81 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-ep.c
82 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-host.c
83 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-plat.c
84 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.c
85 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.h
86
87 --- a/arch/arm/kernel/bios32.c
88 +++ b/arch/arm/kernel/bios32.c
89 @@ -12,6 +12,8 @@
90 #include <linux/slab.h>
91 #include <linux/init.h>
92 #include <linux/io.h>
93 +#include <linux/of_irq.h>
94 +#include <linux/pcieport_if.h>
95
96 #include <asm/mach-types.h>
97 #include <asm/mach/map.h>
98 @@ -65,6 +67,47 @@ void pcibios_report_status(u_int status_
99 }
100
101 /*
102 + * Check device tree if the service interrupts are there
103 + */
104 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
105 +{
106 + int ret, count = 0;
107 + struct device_node *np = NULL;
108 +
109 + if (dev->bus->dev.of_node)
110 + np = dev->bus->dev.of_node;
111 +
112 + if (np == NULL)
113 + return 0;
114 +
115 + if (!IS_ENABLED(CONFIG_OF_IRQ))
116 + return 0;
117 +
118 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
119 + * request irq for aer
120 + */
121 + if (mask & PCIE_PORT_SERVICE_AER) {
122 + ret = of_irq_get_byname(np, "aer");
123 + if (ret > 0) {
124 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
125 + count++;
126 + }
127 + }
128 +
129 + if (mask & PCIE_PORT_SERVICE_PME) {
130 + ret = of_irq_get_byname(np, "pme");
131 + if (ret > 0) {
132 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
133 + count++;
134 + }
135 + }
136 +
137 + /* TODO: add more service interrupts if they are present in the device tree */
138 +
139 + return count;
140 +}
141 +
142 +/*
143 * We don't use this to fix the device, but initialisation of it.
144 * It's not the correct use for this, but it works.
145 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
146 --- a/arch/arm64/kernel/pci.c
147 +++ b/arch/arm64/kernel/pci.c
148 @@ -17,6 +17,8 @@
149 #include <linux/mm.h>
150 #include <linux/of_pci.h>
151 #include <linux/of_platform.h>
152 +#include <linux/of_irq.h>
153 +#include <linux/pcieport_if.h>
154 #include <linux/pci.h>
155 #include <linux/pci-acpi.h>
156 #include <linux/pci-ecam.h>
157 @@ -36,6 +38,47 @@ int pcibios_alloc_irq(struct pci_dev *de
158 #endif
159
160 /*
161 + * Check device tree if the service interrupts are there
162 + */
163 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
164 +{
165 + int ret, count = 0;
166 + struct device_node *np = NULL;
167 +
168 + if (dev->bus->dev.of_node)
169 + np = dev->bus->dev.of_node;
170 +
171 + if (np == NULL)
172 + return 0;
173 +
174 + if (!IS_ENABLED(CONFIG_OF_IRQ))
175 + return 0;
176 +
177 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
178 + * request irq for aer
179 + */
180 + if (mask & PCIE_PORT_SERVICE_AER) {
181 + ret = of_irq_get_byname(np, "aer");
182 + if (ret > 0) {
183 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
184 + count++;
185 + }
186 + }
187 +
188 + if (mask & PCIE_PORT_SERVICE_PME) {
189 + ret = of_irq_get_byname(np, "pme");
190 + if (ret > 0) {
191 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
192 + count++;
193 + }
194 + }
195 +
196 + /* TODO: add more service interrupts if they are present in the device tree */
197 +
198 + return count;
199 +}
200 +
201 +/*
202 * raw_pci_read/write - Platform-specific PCI config space access.
203 */
204 int raw_pci_read(unsigned int domain, unsigned int bus,
205 --- a/drivers/misc/pci_endpoint_test.c
206 +++ b/drivers/misc/pci_endpoint_test.c
207 @@ -35,38 +35,45 @@
208
209 #include <uapi/linux/pcitest.h>
210
211 -#define DRV_MODULE_NAME "pci-endpoint-test"
212 +#define DRV_MODULE_NAME "pci-endpoint-test"
213
214 -#define PCI_ENDPOINT_TEST_MAGIC 0x0
215 +#define IRQ_TYPE_UNDEFINED -1
216 +#define IRQ_TYPE_LEGACY 0
217 +#define IRQ_TYPE_MSI 1
218 +#define IRQ_TYPE_MSIX 2
219 +
220 +#define PCI_ENDPOINT_TEST_MAGIC 0x0
221 +
222 +#define PCI_ENDPOINT_TEST_COMMAND 0x4
223 +#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
224 +#define COMMAND_RAISE_MSI_IRQ BIT(1)
225 +#define COMMAND_RAISE_MSIX_IRQ BIT(2)
226 +#define COMMAND_READ BIT(3)
227 +#define COMMAND_WRITE BIT(4)
228 +#define COMMAND_COPY BIT(5)
229 +
230 +#define PCI_ENDPOINT_TEST_STATUS 0x8
231 +#define STATUS_READ_SUCCESS BIT(0)
232 +#define STATUS_READ_FAIL BIT(1)
233 +#define STATUS_WRITE_SUCCESS BIT(2)
234 +#define STATUS_WRITE_FAIL BIT(3)
235 +#define STATUS_COPY_SUCCESS BIT(4)
236 +#define STATUS_COPY_FAIL BIT(5)
237 +#define STATUS_IRQ_RAISED BIT(6)
238 +#define STATUS_SRC_ADDR_INVALID BIT(7)
239 +#define STATUS_DST_ADDR_INVALID BIT(8)
240
241 -#define PCI_ENDPOINT_TEST_COMMAND 0x4
242 -#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
243 -#define COMMAND_RAISE_MSI_IRQ BIT(1)
244 -#define MSI_NUMBER_SHIFT 2
245 -/* 6 bits for MSI number */
246 -#define COMMAND_READ BIT(8)
247 -#define COMMAND_WRITE BIT(9)
248 -#define COMMAND_COPY BIT(10)
249 -
250 -#define PCI_ENDPOINT_TEST_STATUS 0x8
251 -#define STATUS_READ_SUCCESS BIT(0)
252 -#define STATUS_READ_FAIL BIT(1)
253 -#define STATUS_WRITE_SUCCESS BIT(2)
254 -#define STATUS_WRITE_FAIL BIT(3)
255 -#define STATUS_COPY_SUCCESS BIT(4)
256 -#define STATUS_COPY_FAIL BIT(5)
257 -#define STATUS_IRQ_RAISED BIT(6)
258 -#define STATUS_SRC_ADDR_INVALID BIT(7)
259 -#define STATUS_DST_ADDR_INVALID BIT(8)
260 -
261 -#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
262 +#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
263 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
264
265 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
266 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
267
268 -#define PCI_ENDPOINT_TEST_SIZE 0x1c
269 -#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
270 +#define PCI_ENDPOINT_TEST_SIZE 0x1c
271 +#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
272 +
273 +#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
274 +#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
275
276 static DEFINE_IDA(pci_endpoint_test_ida);
277
278 @@ -77,6 +84,10 @@ static bool no_msi;
279 module_param(no_msi, bool, 0444);
280 MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
281
282 +static int irq_type = IRQ_TYPE_MSI;
283 +module_param(irq_type, int, 0444);
284 +MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
285 +
286 enum pci_barno {
287 BAR_0,
288 BAR_1,
289 @@ -92,6 +103,7 @@ struct pci_endpoint_test {
290 void __iomem *bar[6];
291 struct completion irq_raised;
292 int last_irq;
293 + int num_irqs;
294 /* mutex to protect the ioctls */
295 struct mutex mutex;
296 struct miscdevice miscdev;
297 @@ -102,7 +114,7 @@ struct pci_endpoint_test {
298 struct pci_endpoint_test_data {
299 enum pci_barno test_reg_bar;
300 size_t alignment;
301 - bool no_msi;
302 + int irq_type;
303 };
304
305 static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
306 @@ -146,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irq
307 return IRQ_HANDLED;
308 }
309
310 +static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
311 +{
312 + struct pci_dev *pdev = test->pdev;
313 +
314 + pci_free_irq_vectors(pdev);
315 +}
316 +
317 +static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
318 + int type)
319 +{
320 + int irq = -1;
321 + struct pci_dev *pdev = test->pdev;
322 + struct device *dev = &pdev->dev;
323 + bool res = true;
324 +
325 + switch (type) {
326 + case IRQ_TYPE_LEGACY:
327 + irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
328 + if (irq < 0)
329 + dev_err(dev, "Failed to get Legacy interrupt\n");
330 + break;
331 + case IRQ_TYPE_MSI:
332 + irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
333 + if (irq < 0)
334 + dev_err(dev, "Failed to get MSI interrupts\n");
335 + break;
336 + case IRQ_TYPE_MSIX:
337 + irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
338 + if (irq < 0)
339 + dev_err(dev, "Failed to get MSI-X interrupts\n");
340 + break;
341 + default:
342 + dev_err(dev, "Invalid IRQ type selected\n");
343 + }
344 +
345 + if (irq < 0) {
346 + irq = 0;
347 + res = false;
348 + }
349 + test->num_irqs = irq;
350 +
351 + return res;
352 +}
353 +
354 +static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
355 +{
356 + int i;
357 + struct pci_dev *pdev = test->pdev;
358 + struct device *dev = &pdev->dev;
359 +
360 + for (i = 0; i < test->num_irqs; i++)
361 + devm_free_irq(dev, pci_irq_vector(pdev, i), test);
362 +
363 + test->num_irqs = 0;
364 +}
365 +
366 +static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
367 +{
368 + int i;
369 + int err;
370 + struct pci_dev *pdev = test->pdev;
371 + struct device *dev = &pdev->dev;
372 +
373 + for (i = 0; i < test->num_irqs; i++) {
374 + err = devm_request_irq(dev, pci_irq_vector(pdev, i),
375 + pci_endpoint_test_irqhandler,
376 + IRQF_SHARED, DRV_MODULE_NAME, test);
377 + if (err)
378 + goto fail;
379 + }
380 +
381 + return true;
382 +
383 +fail:
384 + switch (irq_type) {
385 + case IRQ_TYPE_LEGACY:
386 + dev_err(dev, "Failed to request IRQ %d for Legacy\n",
387 + pci_irq_vector(pdev, i));
388 + break;
389 + case IRQ_TYPE_MSI:
390 + dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
391 + pci_irq_vector(pdev, i),
392 + i + 1);
393 + break;
394 + case IRQ_TYPE_MSIX:
395 + dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
396 + pci_irq_vector(pdev, i),
397 + i + 1);
398 + break;
399 + }
400 +
401 + return false;
402 +}
403 +
404 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
405 enum pci_barno barno)
406 {
407 @@ -178,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq
408 {
409 u32 val;
410
411 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
412 + IRQ_TYPE_LEGACY);
413 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
414 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
415 COMMAND_RAISE_LEGACY_IRQ);
416 val = wait_for_completion_timeout(&test->irq_raised,
417 @@ -189,20 +298,24 @@ static bool pci_endpoint_test_legacy_irq
418 }
419
420 static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
421 - u8 msi_num)
422 + u16 msi_num, bool msix)
423 {
424 u32 val;
425 struct pci_dev *pdev = test->pdev;
426
427 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
428 + msix == false ? IRQ_TYPE_MSI :
429 + IRQ_TYPE_MSIX);
430 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
431 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
432 - msi_num << MSI_NUMBER_SHIFT |
433 - COMMAND_RAISE_MSI_IRQ);
434 + msix == false ? COMMAND_RAISE_MSI_IRQ :
435 + COMMAND_RAISE_MSIX_IRQ);
436 val = wait_for_completion_timeout(&test->irq_raised,
437 msecs_to_jiffies(1000));
438 if (!val)
439 return false;
440
441 - if (test->last_irq - pdev->irq == msi_num - 1)
442 + if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
443 return true;
444
445 return false;
446 @@ -226,10 +339,18 @@ static bool pci_endpoint_test_copy(struc
447 u32 src_crc32;
448 u32 dst_crc32;
449
450 + if (size > SIZE_MAX - alignment)
451 + goto err;
452 +
453 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
454 + dev_err(dev, "Invalid IRQ type option\n");
455 + goto err;
456 + }
457 +
458 orig_src_addr = dma_alloc_coherent(dev, size + alignment,
459 &orig_src_phys_addr, GFP_KERNEL);
460 if (!orig_src_addr) {
461 - dev_err(dev, "failed to allocate source buffer\n");
462 + dev_err(dev, "Failed to allocate source buffer\n");
463 ret = false;
464 goto err;
465 }
466 @@ -255,7 +376,7 @@ static bool pci_endpoint_test_copy(struc
467 orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
468 &orig_dst_phys_addr, GFP_KERNEL);
469 if (!orig_dst_addr) {
470 - dev_err(dev, "failed to allocate destination address\n");
471 + dev_err(dev, "Failed to allocate destination address\n");
472 ret = false;
473 goto err_orig_src_addr;
474 }
475 @@ -277,8 +398,10 @@ static bool pci_endpoint_test_copy(struc
476 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
477 size);
478
479 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
480 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
481 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
482 - 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
483 + COMMAND_COPY);
484
485 wait_for_completion(&test->irq_raised);
486
487 @@ -311,10 +434,18 @@ static bool pci_endpoint_test_write(stru
488 size_t alignment = test->alignment;
489 u32 crc32;
490
491 + if (size > SIZE_MAX - alignment)
492 + goto err;
493 +
494 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
495 + dev_err(dev, "Invalid IRQ type option\n");
496 + goto err;
497 + }
498 +
499 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
500 GFP_KERNEL);
501 if (!orig_addr) {
502 - dev_err(dev, "failed to allocate address\n");
503 + dev_err(dev, "Failed to allocate address\n");
504 ret = false;
505 goto err;
506 }
507 @@ -341,8 +472,10 @@ static bool pci_endpoint_test_write(stru
508
509 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
510
511 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
512 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
513 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
514 - 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
515 + COMMAND_READ);
516
517 wait_for_completion(&test->irq_raised);
518
519 @@ -369,10 +502,18 @@ static bool pci_endpoint_test_read(struc
520 size_t alignment = test->alignment;
521 u32 crc32;
522
523 + if (size > SIZE_MAX - alignment)
524 + goto err;
525 +
526 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
527 + dev_err(dev, "Invalid IRQ type option\n");
528 + goto err;
529 + }
530 +
531 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
532 GFP_KERNEL);
533 if (!orig_addr) {
534 - dev_err(dev, "failed to allocate destination address\n");
535 + dev_err(dev, "Failed to allocate destination address\n");
536 ret = false;
537 goto err;
538 }
539 @@ -393,8 +534,10 @@ static bool pci_endpoint_test_read(struc
540
541 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
542
543 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
544 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
545 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
546 - 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
547 + COMMAND_WRITE);
548
549 wait_for_completion(&test->irq_raised);
550
551 @@ -407,6 +550,38 @@ err:
552 return ret;
553 }
554
555 +static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
556 + int req_irq_type)
557 +{
558 + struct pci_dev *pdev = test->pdev;
559 + struct device *dev = &pdev->dev;
560 +
561 + if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
562 + dev_err(dev, "Invalid IRQ type option\n");
563 + return false;
564 + }
565 +
566 + if (irq_type == req_irq_type)
567 + return true;
568 +
569 + pci_endpoint_test_release_irq(test);
570 + pci_endpoint_test_free_irq_vectors(test);
571 +
572 + if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
573 + goto err;
574 +
575 + if (!pci_endpoint_test_request_irq(test))
576 + goto err;
577 +
578 + irq_type = req_irq_type;
579 + return true;
580 +
581 +err:
582 + pci_endpoint_test_free_irq_vectors(test);
583 + irq_type = IRQ_TYPE_UNDEFINED;
584 + return false;
585 +}
586 +
587 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
588 unsigned long arg)
589 {
590 @@ -426,7 +601,8 @@ static long pci_endpoint_test_ioctl(stru
591 ret = pci_endpoint_test_legacy_irq(test);
592 break;
593 case PCITEST_MSI:
594 - ret = pci_endpoint_test_msi_irq(test, arg);
595 + case PCITEST_MSIX:
596 + ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
597 break;
598 case PCITEST_WRITE:
599 ret = pci_endpoint_test_write(test, arg);
600 @@ -437,6 +613,12 @@ static long pci_endpoint_test_ioctl(stru
601 case PCITEST_COPY:
602 ret = pci_endpoint_test_copy(test, arg);
603 break;
604 + case PCITEST_SET_IRQTYPE:
605 + ret = pci_endpoint_test_set_irq(test, arg);
606 + break;
607 + case PCITEST_GET_IRQTYPE:
608 + ret = irq_type;
609 + break;
610 }
611
612 ret:
613 @@ -452,9 +634,7 @@ static const struct file_operations pci_
614 static int pci_endpoint_test_probe(struct pci_dev *pdev,
615 const struct pci_device_id *ent)
616 {
617 - int i;
618 int err;
619 - int irq = 0;
620 int id;
621 char name[20];
622 enum pci_barno bar;
623 @@ -476,12 +656,15 @@ static int pci_endpoint_test_probe(struc
624 test->alignment = 0;
625 test->pdev = pdev;
626
627 + if (no_msi)
628 + irq_type = IRQ_TYPE_LEGACY;
629 +
630 data = (struct pci_endpoint_test_data *)ent->driver_data;
631 if (data) {
632 test_reg_bar = data->test_reg_bar;
633 test->test_reg_bar = test_reg_bar;
634 test->alignment = data->alignment;
635 - no_msi = data->no_msi;
636 + irq_type = data->irq_type;
637 }
638
639 init_completion(&test->irq_raised);
640 @@ -501,35 +684,21 @@ static int pci_endpoint_test_probe(struc
641
642 pci_set_master(pdev);
643
644 - if (!no_msi) {
645 - irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
646 - if (irq < 0)
647 - dev_err(dev, "failed to get MSI interrupts\n");
648 - }
649 + if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
650 + goto err_disable_irq;
651
652 - err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
653 - IRQF_SHARED, DRV_MODULE_NAME, test);
654 - if (err) {
655 - dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
656 - goto err_disable_msi;
657 - }
658 -
659 - for (i = 1; i < irq; i++) {
660 - err = devm_request_irq(dev, pdev->irq + i,
661 - pci_endpoint_test_irqhandler,
662 - IRQF_SHARED, DRV_MODULE_NAME, test);
663 - if (err)
664 - dev_err(dev, "failed to request IRQ %d for MSI %d\n",
665 - pdev->irq + i, i + 1);
666 - }
667 + if (!pci_endpoint_test_request_irq(test))
668 + goto err_disable_irq;
669
670 for (bar = BAR_0; bar <= BAR_5; bar++) {
671 - base = pci_ioremap_bar(pdev, bar);
672 - if (!base) {
673 - dev_err(dev, "failed to read BAR%d\n", bar);
674 - WARN_ON(bar == test_reg_bar);
675 + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
676 + base = pci_ioremap_bar(pdev, bar);
677 + if (!base) {
678 + dev_err(dev, "Failed to read BAR%d\n", bar);
679 + WARN_ON(bar == test_reg_bar);
680 + }
681 + test->bar[bar] = base;
682 }
683 - test->bar[bar] = base;
684 }
685
686 test->base = test->bar[test_reg_bar];
687 @@ -545,24 +714,31 @@ static int pci_endpoint_test_probe(struc
688 id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
689 if (id < 0) {
690 err = id;
691 - dev_err(dev, "unable to get id\n");
692 + dev_err(dev, "Unable to get id\n");
693 goto err_iounmap;
694 }
695
696 snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
697 misc_device = &test->miscdev;
698 misc_device->minor = MISC_DYNAMIC_MINOR;
699 - misc_device->name = name;
700 + misc_device->name = kstrdup(name, GFP_KERNEL);
701 + if (!misc_device->name) {
702 + err = -ENOMEM;
703 + goto err_ida_remove;
704 + }
705 misc_device->fops = &pci_endpoint_test_fops,
706
707 err = misc_register(misc_device);
708 if (err) {
709 - dev_err(dev, "failed to register device\n");
710 - goto err_ida_remove;
711 + dev_err(dev, "Failed to register device\n");
712 + goto err_kfree_name;
713 }
714
715 return 0;
716
717 +err_kfree_name:
718 + kfree(misc_device->name);
719 +
720 err_ida_remove:
721 ida_simple_remove(&pci_endpoint_test_ida, id);
722
723 @@ -571,9 +747,10 @@ err_iounmap:
724 if (test->bar[bar])
725 pci_iounmap(pdev, test->bar[bar]);
726 }
727 + pci_endpoint_test_release_irq(test);
728
729 -err_disable_msi:
730 - pci_disable_msi(pdev);
731 +err_disable_irq:
732 + pci_endpoint_test_free_irq_vectors(test);
733 pci_release_regions(pdev);
734
735 err_disable_pdev:
736 @@ -595,12 +772,16 @@ static void pci_endpoint_test_remove(str
737 return;
738
739 misc_deregister(&test->miscdev);
740 + kfree(misc_device->name);
741 ida_simple_remove(&pci_endpoint_test_ida, id);
742 for (bar = BAR_0; bar <= BAR_5; bar++) {
743 if (test->bar[bar])
744 pci_iounmap(pdev, test->bar[bar]);
745 }
746 - pci_disable_msi(pdev);
747 +
748 + pci_endpoint_test_release_irq(test);
749 + pci_endpoint_test_free_irq_vectors(test);
750 +
751 pci_release_regions(pdev);
752 pci_disable_device(pdev);
753 }
754 @@ -608,6 +789,7 @@ static void pci_endpoint_test_remove(str
755 static const struct pci_device_id pci_endpoint_test_tbl[] = {
756 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
757 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
758 + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID) },
759 { }
760 };
761 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
762 --- a/drivers/pci/Kconfig
763 +++ b/drivers/pci/Kconfig
764 @@ -142,6 +142,7 @@ config PCI_HYPERV
765
766 source "drivers/pci/hotplug/Kconfig"
767 source "drivers/pci/dwc/Kconfig"
768 +source "drivers/pci/mobiveil/Kconfig"
769 source "drivers/pci/host/Kconfig"
770 source "drivers/pci/endpoint/Kconfig"
771 source "drivers/pci/switch/Kconfig"
772 --- a/drivers/pci/dwc/Kconfig
773 +++ b/drivers/pci/dwc/Kconfig
774 @@ -50,17 +50,36 @@ config PCI_DRA7XX_EP
775 endif
776
777 config PCIE_DW_PLAT
778 - bool "Platform bus based DesignWare PCIe Controller"
779 - depends on PCI
780 - depends on PCI_MSI_IRQ_DOMAIN
781 - select PCIE_DW_HOST
782 - ---help---
783 - This selects the DesignWare PCIe controller support. Select this if
784 - you have a PCIe controller on Platform bus.
785 + bool
786
787 - If you have a controller with this interface, say Y or M here.
788 +config PCIE_DW_PLAT_HOST
789 + bool "Platform bus based DesignWare PCIe Controller - Host mode"
790 + depends on PCI && PCI_MSI_IRQ_DOMAIN
791 + select PCIE_DW_HOST
792 + select PCIE_DW_PLAT
793 + help
794 + Enables support for the PCIe controller in the Designware IP to
795 + work in host mode. There are two instances of PCIe controller in
796 + Designware IP.
797 + This controller can work either as EP or RC. In order to enable
798 + host-specific features PCIE_DW_PLAT_HOST must be selected and in
799 + order to enable device-specific features PCIE_DW_PLAT_EP must be
800 + selected.
801
802 - If unsure, say N.
803 +config PCIE_DW_PLAT_EP
804 + bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
805 + depends on PCI && PCI_MSI_IRQ_DOMAIN
806 + depends on PCI_ENDPOINT
807 + select PCIE_DW_EP
808 + select PCIE_DW_PLAT
809 + help
810 + Enables support for the PCIe controller in the Designware IP to
811 + work in endpoint mode. There are two instances of PCIe controller
812 + in Designware IP.
813 + This controller can work either as EP or RC. In order to enable
814 + host-specific features PCIE_DW_PLAT_HOST must be selected and in
815 + order to enable device-specific features PCIE_DW_PLAT_EP must be
816 + selected.
817
818 config PCI_EXYNOS
819 bool "Samsung Exynos PCIe controller"
820 --- a/drivers/pci/dwc/Makefile
821 +++ b/drivers/pci/dwc/Makefile
822 @@ -10,7 +10,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
823 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
824 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
825 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
826 -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
827 +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o
828 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
829 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
830 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
831 --- a/drivers/pci/dwc/pci-dra7xx.c
832 +++ b/drivers/pci/dwc/pci-dra7xx.c
833 @@ -337,15 +337,6 @@ static irqreturn_t dra7xx_pcie_irq_handl
834 return IRQ_HANDLED;
835 }
836
837 -static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
838 -{
839 - u32 reg;
840 -
841 - reg = PCI_BASE_ADDRESS_0 + (4 * bar);
842 - dw_pcie_writel_dbi2(pci, reg, 0x0);
843 - dw_pcie_writel_dbi(pci, reg, 0x0);
844 -}
845 -
846 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
847 {
848 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
849 --- /dev/null
850 +++ b/drivers/pci/dwc/pci-layerscape-ep.c
851 @@ -0,0 +1,146 @@
852 +// SPDX-License-Identifier: GPL-2.0
853 +/*
854 + * PCIe controller EP driver for Freescale Layerscape SoCs
855 + *
856 + * Copyright (C) 2018 NXP Semiconductor.
857 + *
858 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
859 + */
860 +
861 +#include <linux/kernel.h>
862 +#include <linux/init.h>
863 +#include <linux/of_pci.h>
864 +#include <linux/of_platform.h>
865 +#include <linux/of_address.h>
866 +#include <linux/pci.h>
867 +#include <linux/platform_device.h>
868 +#include <linux/resource.h>
869 +
870 +#include "pcie-designware.h"
871 +
872 +#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address */
873 +
874 +struct ls_pcie_ep {
875 + struct dw_pcie *pci;
876 +};
877 +
878 +#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
879 +
880 +static int ls_pcie_establish_link(struct dw_pcie *pci)
881 +{
882 + return 0;
883 +}
884 +
885 +static const struct dw_pcie_ops ls_pcie_ep_ops = {
886 + .start_link = ls_pcie_establish_link,
887 +};
888 +
889 +static const struct of_device_id ls_pcie_ep_of_match[] = {
890 + { .compatible = "fsl,ls-pcie-ep",},
891 + { },
892 +};
893 +
894 +static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
895 +{
896 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
897 + struct pci_epc *epc = ep->epc;
898 + enum pci_barno bar;
899 +
900 + for (bar = BAR_0; bar <= BAR_5; bar++)
901 + dw_pcie_ep_reset_bar(pci, bar);
902 +
903 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
904 +}
905 +
906 +static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
907 + enum pci_epc_irq_type type, u16 interrupt_num)
908 +{
909 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
910 +
911 + switch (type) {
912 + case PCI_EPC_IRQ_LEGACY:
913 + return dw_pcie_ep_raise_legacy_irq(ep, func_no);
914 + case PCI_EPC_IRQ_MSI:
915 + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
916 + case PCI_EPC_IRQ_MSIX:
917 + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
918 + default:
919 + dev_err(pci->dev, "UNKNOWN IRQ type\n");
920 + return -EINVAL;
921 + }
922 +}
923 +
924 +static struct dw_pcie_ep_ops pcie_ep_ops = {
925 + .ep_init = ls_pcie_ep_init,
926 + .raise_irq = ls_pcie_ep_raise_irq,
927 +};
928 +
929 +static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
930 + struct platform_device *pdev)
931 +{
932 + struct dw_pcie *pci = pcie->pci;
933 + struct device *dev = pci->dev;
934 + struct dw_pcie_ep *ep;
935 + struct resource *res;
936 + int ret;
937 +
938 + ep = &pci->ep;
939 + ep->ops = &pcie_ep_ops;
940 +
941 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
942 + if (!res)
943 + return -EINVAL;
944 +
945 + ep->phys_base = res->start;
946 + ep->addr_size = resource_size(res);
947 +
948 + ret = dw_pcie_ep_init(ep);
949 + if (ret) {
950 + dev_err(dev, "failed to initialize endpoint\n");
951 + return ret;
952 + }
953 +
954 + return 0;
955 +}
956 +
957 +static int __init ls_pcie_ep_probe(struct platform_device *pdev)
958 +{
959 + struct device *dev = &pdev->dev;
960 + struct dw_pcie *pci;
961 + struct ls_pcie_ep *pcie;
962 + struct resource *dbi_base;
963 + int ret;
964 +
965 + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
966 + if (!pcie)
967 + return -ENOMEM;
968 +
969 + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
970 + if (!pci)
971 + return -ENOMEM;
972 +
973 + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
974 + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
975 + if (IS_ERR(pci->dbi_base))
976 + return PTR_ERR(pci->dbi_base);
977 +
978 + pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
979 + pci->dev = dev;
980 + pci->ops = &ls_pcie_ep_ops;
981 + pcie->pci = pci;
982 +
983 + platform_set_drvdata(pdev, pcie);
984 +
985 + ret = ls_add_pcie_ep(pcie, pdev);
986 +
987 + return ret;
988 +}
989 +
990 +static struct platform_driver ls_pcie_ep_driver = {
991 + .driver = {
992 + .name = "layerscape-pcie-ep",
993 + .of_match_table = ls_pcie_ep_of_match,
994 + .suppress_bind_attrs = true,
995 + },
996 +};
997 +builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
998 --- a/drivers/pci/dwc/pci-layerscape.c
999 +++ b/drivers/pci/dwc/pci-layerscape.c
1000 @@ -33,6 +33,8 @@
1001
1002 /* PEX Internal Configuration Registers */
1003 #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
1004 +#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
1005 +#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
1006
1007 #define PCIE_IATU_NUM 6
1008
1009 @@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pci
1010 return 1;
1011 }
1012
1013 +/* Forward error response of outbound non-posted requests */
1014 +static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
1015 +{
1016 + struct dw_pcie *pci = pcie->pci;
1017 +
1018 + iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
1019 +}
1020 +
1021 static int ls_pcie_host_init(struct pcie_port *pp)
1022 {
1023 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1024 @@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie
1025 * dw_pcie_setup_rc() will reconfigure the outbound windows.
1026 */
1027 ls_pcie_disable_outbound_atus(pcie);
1028 + ls_pcie_fix_error_response(pcie);
1029
1030 dw_pcie_dbi_ro_wr_en(pci);
1031 ls_pcie_clear_multifunction(pcie);
1032 @@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drv
1033 };
1034
1035 static const struct of_device_id ls_pcie_of_match[] = {
1036 + { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
1037 { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
1038 { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
1039 { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
1040 --- a/drivers/pci/dwc/pcie-designware-ep.c
1041 +++ b/drivers/pci/dwc/pcie-designware-ep.c
1042 @@ -1,20 +1,9 @@
1043 +// SPDX-License-Identifier: GPL-2.0
1044 /**
1045 * Synopsys DesignWare PCIe Endpoint controller driver
1046 *
1047 * Copyright (C) 2017 Texas Instruments
1048 * Author: Kishon Vijay Abraham I <kishon@ti.com>
1049 - *
1050 - * This program is free software: you can redistribute it and/or modify
1051 - * it under the terms of the GNU General Public License version 2 of
1052 - * the License as published by the Free Software Foundation.
1053 - *
1054 - * This program is distributed in the hope that it will be useful,
1055 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1056 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1057 - * GNU General Public License for more details.
1058 - *
1059 - * You should have received a copy of the GNU General Public License
1060 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
1061 */
1062
1063 #include <linux/of.h>
1064 @@ -30,7 +19,8 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep
1065 pci_epc_linkup(epc);
1066 }
1067
1068 -static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1069 +static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
1070 + int flags)
1071 {
1072 u32 reg;
1073
1074 @@ -38,10 +28,52 @@ static void dw_pcie_ep_reset_bar(struct
1075 dw_pcie_dbi_ro_wr_en(pci);
1076 dw_pcie_writel_dbi2(pci, reg, 0x0);
1077 dw_pcie_writel_dbi(pci, reg, 0x0);
1078 + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1079 + dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
1080 + dw_pcie_writel_dbi(pci, reg + 4, 0x0);
1081 + }
1082 dw_pcie_dbi_ro_wr_dis(pci);
1083 }
1084
1085 -static int dw_pcie_ep_write_header(struct pci_epc *epc,
1086 +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1087 +{
1088 + __dw_pcie_ep_reset_bar(pci, bar, 0);
1089 +}
1090 +
1091 +static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
1092 + u8 cap)
1093 +{
1094 + u8 cap_id, next_cap_ptr;
1095 + u16 reg;
1096 +
1097 + reg = dw_pcie_readw_dbi(pci, cap_ptr);
1098 + next_cap_ptr = (reg & 0xff00) >> 8;
1099 + cap_id = (reg & 0x00ff);
1100 +
1101 + if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
1102 + return 0;
1103 +
1104 + if (cap_id == cap)
1105 + return cap_ptr;
1106 +
1107 + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
1108 +}
1109 +
1110 +static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
1111 +{
1112 + u8 next_cap_ptr;
1113 + u16 reg;
1114 +
1115 + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
1116 + next_cap_ptr = (reg & 0x00ff);
1117 +
1118 + if (!next_cap_ptr)
1119 + return 0;
1120 +
1121 + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
1122 +}
1123 +
1124 +static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
1125 struct pci_epf_header *hdr)
1126 {
1127 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1128 @@ -74,8 +106,7 @@ static int dw_pcie_ep_inbound_atu(struct
1129 u32 free_win;
1130 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1131
1132 - free_win = find_first_zero_bit(&ep->ib_window_map,
1133 - sizeof(ep->ib_window_map));
1134 + free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
1135 if (free_win >= ep->num_ib_windows) {
1136 dev_err(pci->dev, "no free inbound window\n");
1137 return -EINVAL;
1138 @@ -89,7 +120,7 @@ static int dw_pcie_ep_inbound_atu(struct
1139 }
1140
1141 ep->bar_to_atu[bar] = free_win;
1142 - set_bit(free_win, &ep->ib_window_map);
1143 + set_bit(free_win, ep->ib_window_map);
1144
1145 return 0;
1146 }
1147 @@ -100,8 +131,7 @@ static int dw_pcie_ep_outbound_atu(struc
1148 u32 free_win;
1149 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1150
1151 - free_win = find_first_zero_bit(&ep->ob_window_map,
1152 - sizeof(ep->ob_window_map));
1153 + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
1154 if (free_win >= ep->num_ob_windows) {
1155 dev_err(pci->dev, "no free outbound window\n");
1156 return -EINVAL;
1157 @@ -110,30 +140,35 @@ static int dw_pcie_ep_outbound_atu(struc
1158 dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
1159 phys_addr, pci_addr, size);
1160
1161 - set_bit(free_win, &ep->ob_window_map);
1162 + set_bit(free_win, ep->ob_window_map);
1163 ep->outbound_addr[free_win] = phys_addr;
1164
1165 return 0;
1166 }
1167
1168 -static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
1169 +static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
1170 + struct pci_epf_bar *epf_bar)
1171 {
1172 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1173 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1174 + enum pci_barno bar = epf_bar->barno;
1175 u32 atu_index = ep->bar_to_atu[bar];
1176
1177 - dw_pcie_ep_reset_bar(pci, bar);
1178 + __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
1179
1180 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
1181 - clear_bit(atu_index, &ep->ib_window_map);
1182 + clear_bit(atu_index, ep->ib_window_map);
1183 }
1184
1185 -static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
1186 - dma_addr_t bar_phys, size_t size, int flags)
1187 +static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
1188 + struct pci_epf_bar *epf_bar)
1189 {
1190 int ret;
1191 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1192 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1193 + enum pci_barno bar = epf_bar->barno;
1194 + size_t size = epf_bar->size;
1195 + int flags = epf_bar->flags;
1196 enum dw_pcie_as_type as_type;
1197 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
1198
1199 @@ -142,13 +177,20 @@ static int dw_pcie_ep_set_bar(struct pci
1200 else
1201 as_type = DW_PCIE_AS_IO;
1202
1203 - ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
1204 + ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
1205 if (ret)
1206 return ret;
1207
1208 dw_pcie_dbi_ro_wr_en(pci);
1209 - dw_pcie_writel_dbi2(pci, reg, size - 1);
1210 +
1211 + dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
1212 dw_pcie_writel_dbi(pci, reg, flags);
1213 +
1214 + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1215 + dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
1216 + dw_pcie_writel_dbi(pci, reg + 4, 0);
1217 + }
1218 +
1219 dw_pcie_dbi_ro_wr_dis(pci);
1220
1221 return 0;
1222 @@ -169,7 +211,8 @@ static int dw_pcie_find_index(struct dw_
1223 return -EINVAL;
1224 }
1225
1226 -static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
1227 +static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
1228 + phys_addr_t addr)
1229 {
1230 int ret;
1231 u32 atu_index;
1232 @@ -181,10 +224,11 @@ static void dw_pcie_ep_unmap_addr(struct
1233 return;
1234
1235 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
1236 - clear_bit(atu_index, &ep->ob_window_map);
1237 + clear_bit(atu_index, ep->ob_window_map);
1238 }
1239
1240 -static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
1241 +static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
1242 + phys_addr_t addr,
1243 u64 pci_addr, size_t size)
1244 {
1245 int ret;
1246 @@ -200,45 +244,93 @@ static int dw_pcie_ep_map_addr(struct pc
1247 return 0;
1248 }
1249
1250 -static int dw_pcie_ep_get_msi(struct pci_epc *epc)
1251 +static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
1252 {
1253 - int val;
1254 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1255 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1256 + u32 val, reg;
1257 +
1258 + if (!ep->msi_cap)
1259 + return -EINVAL;
1260
1261 - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
1262 - if (!(val & MSI_CAP_MSI_EN_MASK))
1263 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1264 + val = dw_pcie_readw_dbi(pci, reg);
1265 + if (!(val & PCI_MSI_FLAGS_ENABLE))
1266 return -EINVAL;
1267
1268 - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
1269 + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
1270 +
1271 return val;
1272 }
1273
1274 -static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
1275 +static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
1276 {
1277 - int val;
1278 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1279 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1280 + u32 val, reg;
1281 +
1282 + if (!ep->msi_cap)
1283 + return -EINVAL;
1284
1285 - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
1286 - val &= ~MSI_CAP_MMC_MASK;
1287 - val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
1288 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1289 + val = dw_pcie_readw_dbi(pci, reg);
1290 + val &= ~PCI_MSI_FLAGS_QMASK;
1291 + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
1292 dw_pcie_dbi_ro_wr_en(pci);
1293 - dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
1294 + dw_pcie_writew_dbi(pci, reg, val);
1295 dw_pcie_dbi_ro_wr_dis(pci);
1296
1297 return 0;
1298 }
1299
1300 -static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
1301 - enum pci_epc_irq_type type, u8 interrupt_num)
1302 +static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
1303 +{
1304 + struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1305 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1306 + u32 val, reg;
1307 +
1308 + if (!ep->msix_cap)
1309 + return -EINVAL;
1310 +
1311 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
1312 + val = dw_pcie_readw_dbi(pci, reg);
1313 + if (!(val & PCI_MSIX_FLAGS_ENABLE))
1314 + return -EINVAL;
1315 +
1316 + val &= PCI_MSIX_FLAGS_QSIZE;
1317 +
1318 + return val;
1319 +}
1320 +
1321 +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
1322 +{
1323 + struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1324 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1325 + u32 val, reg;
1326 +
1327 + if (!ep->msix_cap)
1328 + return -EINVAL;
1329 +
1330 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
1331 + val = dw_pcie_readw_dbi(pci, reg);
1332 + val &= ~PCI_MSIX_FLAGS_QSIZE;
1333 + val |= interrupts;
1334 + dw_pcie_dbi_ro_wr_en(pci);
1335 + dw_pcie_writew_dbi(pci, reg, val);
1336 + dw_pcie_dbi_ro_wr_dis(pci);
1337 +
1338 + return 0;
1339 +}
1340 +
1341 +static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
1342 + enum pci_epc_irq_type type, u16 interrupt_num)
1343 {
1344 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1345
1346 if (!ep->ops->raise_irq)
1347 return -EINVAL;
1348
1349 - return ep->ops->raise_irq(ep, type, interrupt_num);
1350 + return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
1351 }
1352
1353 static void dw_pcie_ep_stop(struct pci_epc *epc)
1354 @@ -271,15 +363,130 @@ static const struct pci_epc_ops epc_ops
1355 .unmap_addr = dw_pcie_ep_unmap_addr,
1356 .set_msi = dw_pcie_ep_set_msi,
1357 .get_msi = dw_pcie_ep_get_msi,
1358 + .set_msix = dw_pcie_ep_set_msix,
1359 + .get_msix = dw_pcie_ep_get_msix,
1360 .raise_irq = dw_pcie_ep_raise_irq,
1361 .start = dw_pcie_ep_start,
1362 .stop = dw_pcie_ep_stop,
1363 };
1364
1365 +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
1366 +{
1367 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1368 + struct device *dev = pci->dev;
1369 +
1370 + dev_err(dev, "EP cannot trigger legacy IRQs\n");
1371 +
1372 + return -EINVAL;
1373 +}
1374 +
1375 +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1376 + u8 interrupt_num)
1377 +{
1378 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1379 + struct pci_epc *epc = ep->epc;
1380 + u16 msg_ctrl, msg_data;
1381 + u32 msg_addr_lower, msg_addr_upper, reg;
1382 + u64 msg_addr;
1383 + bool has_upper;
1384 + int ret;
1385 +
1386 + if (!ep->msi_cap)
1387 + return -EINVAL;
1388 +
1389 + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
1390 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1391 + msg_ctrl = dw_pcie_readw_dbi(pci, reg);
1392 + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
1393 + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
1394 + msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
1395 + if (has_upper) {
1396 + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
1397 + msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
1398 + reg = ep->msi_cap + PCI_MSI_DATA_64;
1399 + msg_data = dw_pcie_readw_dbi(pci, reg);
1400 + } else {
1401 + msg_addr_upper = 0;
1402 + reg = ep->msi_cap + PCI_MSI_DATA_32;
1403 + msg_data = dw_pcie_readw_dbi(pci, reg);
1404 + }
1405 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
1406 + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
1407 + epc->mem->page_size);
1408 + if (ret)
1409 + return ret;
1410 +
1411 + writel(msg_data | (interrupt_num - 1), ep->msi_mem);
1412 +
1413 + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
1414 +
1415 + return 0;
1416 +}
1417 +
1418 +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1419 + u16 interrupt_num)
1420 +{
1421 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1422 + struct pci_epc *epc = ep->epc;
1423 + u16 tbl_offset, bir;
1424 + u32 bar_addr_upper, bar_addr_lower;
1425 + u32 msg_addr_upper, msg_addr_lower;
1426 + u32 reg, msg_data, vec_ctrl;
1427 + u64 tbl_addr, msg_addr, reg_u64;
1428 + void __iomem *msix_tbl;
1429 + int ret;
1430 +
1431 + reg = ep->msix_cap + PCI_MSIX_TABLE;
1432 + tbl_offset = dw_pcie_readl_dbi(pci, reg);
1433 + bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
1434 + tbl_offset &= PCI_MSIX_TABLE_OFFSET;
1435 +
1436 + reg = PCI_BASE_ADDRESS_0 + (4 * bir);
1437 + bar_addr_upper = 0;
1438 + bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
1439 + reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
1440 + if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1441 + bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
1442 +
1443 + tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
1444 + tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
1445 + tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
1446 +
1447 + msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
1448 + PCI_MSIX_ENTRY_SIZE);
1449 + if (!msix_tbl)
1450 + return -EINVAL;
1451 +
1452 + msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
1453 + msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
1454 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
1455 + msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
1456 + vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
1457 +
1458 + iounmap(msix_tbl);
1459 +
1460 + if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
1461 + return -EPERM;
1462 +
1463 + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
1464 + epc->mem->page_size);
1465 + if (ret)
1466 + return ret;
1467 +
1468 + writel(msg_data, ep->msi_mem);
1469 +
1470 + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
1471 +
1472 + return 0;
1473 +}
1474 +
1475 void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
1476 {
1477 struct pci_epc *epc = ep->epc;
1478
1479 + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
1480 + epc->mem->page_size);
1481 +
1482 pci_epc_mem_exit(epc);
1483 }
1484
1485 @@ -293,7 +500,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1486 struct device_node *np = dev->of_node;
1487
1488 if (!pci->dbi_base || !pci->dbi_base2) {
1489 - dev_err(dev, "dbi_base/deb_base2 is not populated\n");
1490 + dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
1491 return -EINVAL;
1492 }
1493
1494 @@ -302,12 +509,32 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1495 dev_err(dev, "unable to read *num-ib-windows* property\n");
1496 return ret;
1497 }
1498 + if (ep->num_ib_windows > MAX_IATU_IN) {
1499 + dev_err(dev, "invalid *num-ib-windows*\n");
1500 + return -EINVAL;
1501 + }
1502
1503 ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
1504 if (ret < 0) {
1505 dev_err(dev, "unable to read *num-ob-windows* property\n");
1506 return ret;
1507 }
1508 + if (ep->num_ob_windows > MAX_IATU_OUT) {
1509 + dev_err(dev, "invalid *num-ob-windows*\n");
1510 + return -EINVAL;
1511 + }
1512 +
1513 + ep->ib_window_map = devm_kzalloc(dev, sizeof(long) *
1514 + BITS_TO_LONGS(ep->num_ib_windows),
1515 + GFP_KERNEL);
1516 + if (!ep->ib_window_map)
1517 + return -ENOMEM;
1518 +
1519 + ep->ob_window_map = devm_kzalloc(dev, sizeof(long) *
1520 + BITS_TO_LONGS(ep->num_ob_windows),
1521 + GFP_KERNEL);
1522 + if (!ep->ob_window_map)
1523 + return -ENOMEM;
1524
1525 addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
1526 GFP_KERNEL);
1527 @@ -315,15 +542,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1528 return -ENOMEM;
1529 ep->outbound_addr = addr;
1530
1531 - if (ep->ops->ep_init)
1532 - ep->ops->ep_init(ep);
1533 -
1534 epc = devm_pci_epc_create(dev, &epc_ops);
1535 if (IS_ERR(epc)) {
1536 dev_err(dev, "failed to create epc device\n");
1537 return PTR_ERR(epc);
1538 }
1539
1540 + ep->epc = epc;
1541 + epc_set_drvdata(epc, ep);
1542 +
1543 + if (ep->ops->ep_init)
1544 + ep->ops->ep_init(ep);
1545 +
1546 ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
1547 if (ret < 0)
1548 epc->max_functions = 1;
1549 @@ -335,8 +565,16 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1550 return ret;
1551 }
1552
1553 - ep->epc = epc;
1554 - epc_set_drvdata(epc, ep);
1555 + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
1556 + epc->mem->page_size);
1557 + if (!ep->msi_mem) {
1558 + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
1559 + return -ENOMEM;
1560 + }
1561 + ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
1562 +
1563 + ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
1564 +
1565 dw_pcie_setup(pci);
1566
1567 return 0;
1568 --- a/drivers/pci/dwc/pcie-designware-host.c
1569 +++ b/drivers/pci/dwc/pcie-designware-host.c
1570 @@ -1,3 +1,4 @@
1571 +// SPDX-License-Identifier: GPL-2.0
1572 /*
1573 * Synopsys DesignWare PCIe host controller driver
1574 *
1575 @@ -5,10 +6,6 @@
1576 * http://www.samsung.com
1577 *
1578 * Author: Jingoo Han <jg1.han@samsung.com>
1579 - *
1580 - * This program is free software; you can redistribute it and/or modify
1581 - * it under the terms of the GNU General Public License version 2 as
1582 - * published by the Free Software Foundation.
1583 */
1584
1585 #include <linux/irqdomain.h>
1586 --- a/drivers/pci/dwc/pcie-designware-plat.c
1587 +++ b/drivers/pci/dwc/pcie-designware-plat.c
1588 @@ -1,13 +1,10 @@
1589 +// SPDX-License-Identifier: GPL-2.0
1590 /*
1591 * PCIe RC driver for Synopsys DesignWare Core
1592 *
1593 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
1594 *
1595 * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
1596 - *
1597 - * This program is free software; you can redistribute it and/or modify
1598 - * it under the terms of the GNU General Public License version 2 as
1599 - * published by the Free Software Foundation.
1600 */
1601 #include <linux/clk.h>
1602 #include <linux/delay.h>
1603 @@ -15,19 +12,29 @@
1604 #include <linux/interrupt.h>
1605 #include <linux/kernel.h>
1606 #include <linux/init.h>
1607 +#include <linux/of_device.h>
1608 #include <linux/of_gpio.h>
1609 #include <linux/pci.h>
1610 #include <linux/platform_device.h>
1611 #include <linux/resource.h>
1612 #include <linux/signal.h>
1613 #include <linux/types.h>
1614 +#include <linux/regmap.h>
1615
1616 #include "pcie-designware.h"
1617
1618 struct dw_plat_pcie {
1619 - struct dw_pcie *pci;
1620 + struct dw_pcie *pci;
1621 + struct regmap *regmap;
1622 + enum dw_pcie_device_mode mode;
1623 +};
1624 +
1625 +struct dw_plat_pcie_of_data {
1626 + enum dw_pcie_device_mode mode;
1627 };
1628
1629 +static const struct of_device_id dw_plat_pcie_of_match[];
1630 +
1631 static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
1632 {
1633 struct pcie_port *pp = arg;
1634 @@ -52,9 +59,58 @@ static const struct dw_pcie_host_ops dw_
1635 .host_init = dw_plat_pcie_host_init,
1636 };
1637
1638 -static int dw_plat_add_pcie_port(struct pcie_port *pp,
1639 +static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
1640 +{
1641 + return 0;
1642 +}
1643 +
1644 +static const struct dw_pcie_ops dw_pcie_ops = {
1645 + .start_link = dw_plat_pcie_establish_link,
1646 +};
1647 +
1648 +static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
1649 +{
1650 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1651 + struct pci_epc *epc = ep->epc;
1652 + enum pci_barno bar;
1653 +
1654 + for (bar = BAR_0; bar <= BAR_5; bar++)
1655 + dw_pcie_ep_reset_bar(pci, bar);
1656 +
1657 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
1658 + epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
1659 +}
1660 +
1661 +static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1662 + enum pci_epc_irq_type type,
1663 + u16 interrupt_num)
1664 +{
1665 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1666 +
1667 + switch (type) {
1668 + case PCI_EPC_IRQ_LEGACY:
1669 + return dw_pcie_ep_raise_legacy_irq(ep, func_no);
1670 + case PCI_EPC_IRQ_MSI:
1671 + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
1672 + case PCI_EPC_IRQ_MSIX:
1673 + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
1674 + default:
1675 + dev_err(pci->dev, "UNKNOWN IRQ type\n");
1676 + }
1677 +
1678 + return 0;
1679 +}
1680 +
1681 +static struct dw_pcie_ep_ops pcie_ep_ops = {
1682 + .ep_init = dw_plat_pcie_ep_init,
1683 + .raise_irq = dw_plat_pcie_ep_raise_irq,
1684 +};
1685 +
1686 +static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
1687 struct platform_device *pdev)
1688 {
1689 + struct dw_pcie *pci = dw_plat_pcie->pci;
1690 + struct pcie_port *pp = &pci->pp;
1691 struct device *dev = &pdev->dev;
1692 int ret;
1693
1694 @@ -82,15 +138,44 @@ static int dw_plat_add_pcie_port(struct
1695
1696 ret = dw_pcie_host_init(pp);
1697 if (ret) {
1698 - dev_err(dev, "failed to initialize host\n");
1699 + dev_err(dev, "Failed to initialize host\n");
1700 return ret;
1701 }
1702
1703 return 0;
1704 }
1705
1706 -static const struct dw_pcie_ops dw_pcie_ops = {
1707 -};
1708 +static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
1709 + struct platform_device *pdev)
1710 +{
1711 + int ret;
1712 + struct dw_pcie_ep *ep;
1713 + struct resource *res;
1714 + struct device *dev = &pdev->dev;
1715 + struct dw_pcie *pci = dw_plat_pcie->pci;
1716 +
1717 + ep = &pci->ep;
1718 + ep->ops = &pcie_ep_ops;
1719 +
1720 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
1721 + pci->dbi_base2 = devm_ioremap_resource(dev, res);
1722 + if (IS_ERR(pci->dbi_base2))
1723 + return PTR_ERR(pci->dbi_base2);
1724 +
1725 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
1726 + if (!res)
1727 + return -EINVAL;
1728 +
1729 + ep->phys_base = res->start;
1730 + ep->addr_size = resource_size(res);
1731 +
1732 + ret = dw_pcie_ep_init(ep);
1733 + if (ret) {
1734 + dev_err(dev, "Failed to initialize endpoint\n");
1735 + return ret;
1736 + }
1737 + return 0;
1738 +}
1739
1740 static int dw_plat_pcie_probe(struct platform_device *pdev)
1741 {
1742 @@ -99,6 +184,16 @@ static int dw_plat_pcie_probe(struct pla
1743 struct dw_pcie *pci;
1744 struct resource *res; /* Resource from DT */
1745 int ret;
1746 + const struct of_device_id *match;
1747 + const struct dw_plat_pcie_of_data *data;
1748 + enum dw_pcie_device_mode mode;
1749 +
1750 + match = of_match_device(dw_plat_pcie_of_match, dev);
1751 + if (!match)
1752 + return -EINVAL;
1753 +
1754 + data = (struct dw_plat_pcie_of_data *)match->data;
1755 + mode = (enum dw_pcie_device_mode)data->mode;
1756
1757 dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
1758 if (!dw_plat_pcie)
1759 @@ -112,23 +207,59 @@ static int dw_plat_pcie_probe(struct pla
1760 pci->ops = &dw_pcie_ops;
1761
1762 dw_plat_pcie->pci = pci;
1763 + dw_plat_pcie->mode = mode;
1764 +
1765 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1766 + if (!res)
1767 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1768
1769 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1770 pci->dbi_base = devm_ioremap_resource(dev, res);
1771 if (IS_ERR(pci->dbi_base))
1772 return PTR_ERR(pci->dbi_base);
1773
1774 platform_set_drvdata(pdev, dw_plat_pcie);
1775
1776 - ret = dw_plat_add_pcie_port(&pci->pp, pdev);
1777 - if (ret < 0)
1778 - return ret;
1779 + switch (dw_plat_pcie->mode) {
1780 + case DW_PCIE_RC_TYPE:
1781 + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
1782 + return -ENODEV;
1783 +
1784 + ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
1785 + if (ret < 0)
1786 + return ret;
1787 + break;
1788 + case DW_PCIE_EP_TYPE:
1789 + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
1790 + return -ENODEV;
1791 +
1792 + ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
1793 + if (ret < 0)
1794 + return ret;
1795 + break;
1796 + default:
1797 + dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
1798 + }
1799
1800 return 0;
1801 }
1802
1803 +static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
1804 + .mode = DW_PCIE_RC_TYPE,
1805 +};
1806 +
1807 +static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
1808 + .mode = DW_PCIE_EP_TYPE,
1809 +};
1810 +
1811 static const struct of_device_id dw_plat_pcie_of_match[] = {
1812 - { .compatible = "snps,dw-pcie", },
1813 + {
1814 + .compatible = "snps,dw-pcie",
1815 + .data = &dw_plat_pcie_rc_of_data,
1816 + },
1817 + {
1818 + .compatible = "snps,dw-pcie-ep",
1819 + .data = &dw_plat_pcie_ep_of_data,
1820 + },
1821 {},
1822 };
1823
1824 --- a/drivers/pci/dwc/pcie-designware.c
1825 +++ b/drivers/pci/dwc/pcie-designware.c
1826 @@ -1,3 +1,4 @@
1827 +// SPDX-License-Identifier: GPL-2.0
1828 /*
1829 * Synopsys DesignWare PCIe host controller driver
1830 *
1831 @@ -5,10 +6,6 @@
1832 * http://www.samsung.com
1833 *
1834 * Author: Jingoo Han <jg1.han@samsung.com>
1835 - *
1836 - * This program is free software; you can redistribute it and/or modify
1837 - * it under the terms of the GNU General Public License version 2 as
1838 - * published by the Free Software Foundation.
1839 */
1840
1841 #include <linux/delay.h>
1842 --- a/drivers/pci/dwc/pcie-designware.h
1843 +++ b/drivers/pci/dwc/pcie-designware.h
1844 @@ -1,3 +1,4 @@
1845 +// SPDX-License-Identifier: GPL-2.0
1846 /*
1847 * Synopsys DesignWare PCIe host controller driver
1848 *
1849 @@ -5,10 +6,6 @@
1850 * http://www.samsung.com
1851 *
1852 * Author: Jingoo Han <jg1.han@samsung.com>
1853 - *
1854 - * This program is free software; you can redistribute it and/or modify
1855 - * it under the terms of the GNU General Public License version 2 as
1856 - * published by the Free Software Foundation.
1857 */
1858
1859 #ifndef _PCIE_DESIGNWARE_H
1860 @@ -97,15 +94,6 @@
1861 #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
1862 ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
1863
1864 -#define MSI_MESSAGE_CONTROL 0x52
1865 -#define MSI_CAP_MMC_SHIFT 1
1866 -#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
1867 -#define MSI_CAP_MME_SHIFT 4
1868 -#define MSI_CAP_MSI_EN_MASK 0x1
1869 -#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
1870 -#define MSI_MESSAGE_ADDR_L32 0x54
1871 -#define MSI_MESSAGE_ADDR_U32 0x58
1872 -
1873 /*
1874 * Maximum number of MSI IRQs can be 256 per controller. But keep
1875 * it 32 as of now. Probably we will never need more than 32. If needed,
1876 @@ -114,6 +102,10 @@
1877 #define MAX_MSI_IRQS 32
1878 #define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
1879
1880 +/* Maximum number of inbound/outbound iATUs */
1881 +#define MAX_IATU_IN 256
1882 +#define MAX_IATU_OUT 256
1883 +
1884 struct pcie_port;
1885 struct dw_pcie;
1886 struct dw_pcie_ep;
1887 @@ -181,8 +173,8 @@ enum dw_pcie_as_type {
1888
1889 struct dw_pcie_ep_ops {
1890 void (*ep_init)(struct dw_pcie_ep *ep);
1891 - int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
1892 - u8 interrupt_num);
1893 + int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
1894 + enum pci_epc_irq_type type, u16 interrupt_num);
1895 };
1896
1897 struct dw_pcie_ep {
1898 @@ -193,10 +185,14 @@ struct dw_pcie_ep {
1899 size_t page_size;
1900 u8 bar_to_atu[6];
1901 phys_addr_t *outbound_addr;
1902 - unsigned long ib_window_map;
1903 - unsigned long ob_window_map;
1904 + unsigned long *ib_window_map;
1905 + unsigned long *ob_window_map;
1906 u32 num_ib_windows;
1907 u32 num_ob_windows;
1908 + void __iomem *msi_mem;
1909 + phys_addr_t msi_mem_phys;
1910 + u8 msi_cap; /* MSI capability offset */
1911 + u8 msix_cap; /* MSI-X capability offset */
1912 };
1913
1914 struct dw_pcie_ops {
1915 @@ -335,6 +331,12 @@ static inline int dw_pcie_host_init(stru
1916 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
1917 int dw_pcie_ep_init(struct dw_pcie_ep *ep);
1918 void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
1919 +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
1920 +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1921 + u8 interrupt_num);
1922 +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1923 + u16 interrupt_num);
1924 +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
1925 #else
1926 static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
1927 {
1928 @@ -348,5 +350,26 @@ static inline int dw_pcie_ep_init(struct
1929 static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
1930 {
1931 }
1932 +
1933 +static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
1934 +{
1935 + return 0;
1936 +}
1937 +
1938 +static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1939 + u8 interrupt_num)
1940 +{
1941 + return 0;
1942 +}
1943 +
1944 +static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1945 + u16 interrupt_num)
1946 +{
1947 + return 0;
1948 +}
1949 +
1950 +static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1951 +{
1952 +}
1953 #endif
1954 #endif /* _PCIE_DESIGNWARE_H */
1955 --- a/drivers/pci/endpoint/Kconfig
1956 +++ b/drivers/pci/endpoint/Kconfig
1957 @@ -1,3 +1,4 @@
1958 +# SPDX-License-Identifier: GPL-2.0
1959 #
1960 # PCI Endpoint Support
1961 #
1962 --- a/drivers/pci/endpoint/Makefile
1963 +++ b/drivers/pci/endpoint/Makefile
1964 @@ -1,3 +1,4 @@
1965 +# SPDX-License-Identifier: GPL-2.0
1966 #
1967 # Makefile for PCI Endpoint Support
1968 #
1969 --- a/drivers/pci/endpoint/functions/Kconfig
1970 +++ b/drivers/pci/endpoint/functions/Kconfig
1971 @@ -1,3 +1,4 @@
1972 +# SPDX-License-Identifier: GPL-2.0
1973 #
1974 # PCI Endpoint Functions
1975 #
1976 --- a/drivers/pci/endpoint/functions/Makefile
1977 +++ b/drivers/pci/endpoint/functions/Makefile
1978 @@ -1,3 +1,4 @@
1979 +# SPDX-License-Identifier: GPL-2.0
1980 #
1981 # Makefile for PCI Endpoint Functions
1982 #
1983 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
1984 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
1985 @@ -1,20 +1,9 @@
1986 +// SPDX-License-Identifier: GPL-2.0
1987 /**
1988 * Test driver to test endpoint functionality
1989 *
1990 * Copyright (C) 2017 Texas Instruments
1991 * Author: Kishon Vijay Abraham I <kishon@ti.com>
1992 - *
1993 - * This program is free software: you can redistribute it and/or modify
1994 - * it under the terms of the GNU General Public License version 2 of
1995 - * the License as published by the Free Software Foundation.
1996 - *
1997 - * This program is distributed in the hope that it will be useful,
1998 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1999 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2000 - * GNU General Public License for more details.
2001 - *
2002 - * You should have received a copy of the GNU General Public License
2003 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2004 */
2005
2006 #include <linux/crc32.h>
2007 @@ -29,13 +18,16 @@
2008 #include <linux/pci-epf.h>
2009 #include <linux/pci_regs.h>
2010
2011 +#define IRQ_TYPE_LEGACY 0
2012 +#define IRQ_TYPE_MSI 1
2013 +#define IRQ_TYPE_MSIX 2
2014 +
2015 #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
2016 #define COMMAND_RAISE_MSI_IRQ BIT(1)
2017 -#define MSI_NUMBER_SHIFT 2
2018 -#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
2019 -#define COMMAND_READ BIT(8)
2020 -#define COMMAND_WRITE BIT(9)
2021 -#define COMMAND_COPY BIT(10)
2022 +#define COMMAND_RAISE_MSIX_IRQ BIT(2)
2023 +#define COMMAND_READ BIT(3)
2024 +#define COMMAND_WRITE BIT(4)
2025 +#define COMMAND_COPY BIT(5)
2026
2027 #define STATUS_READ_SUCCESS BIT(0)
2028 #define STATUS_READ_FAIL BIT(1)
2029 @@ -56,6 +48,7 @@ struct pci_epf_test {
2030 struct pci_epf *epf;
2031 enum pci_barno test_reg_bar;
2032 bool linkup_notifier;
2033 + bool msix_available;
2034 struct delayed_work cmd_handler;
2035 };
2036
2037 @@ -67,6 +60,8 @@ struct pci_epf_test_reg {
2038 u64 dst_addr;
2039 u32 size;
2040 u32 checksum;
2041 + u32 irq_type;
2042 + u32 irq_number;
2043 } __packed;
2044
2045 static struct pci_epf_header test_header = {
2046 @@ -81,7 +76,7 @@ struct pci_epf_test_data {
2047 bool linkup_notifier;
2048 };
2049
2050 -static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
2051 +static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
2052
2053 static int pci_epf_test_copy(struct pci_epf_test *epf_test)
2054 {
2055 @@ -98,43 +93,45 @@ static int pci_epf_test_copy(struct pci_
2056
2057 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
2058 if (!src_addr) {
2059 - dev_err(dev, "failed to allocate source address\n");
2060 + dev_err(dev, "Failed to allocate source address\n");
2061 reg->status = STATUS_SRC_ADDR_INVALID;
2062 ret = -ENOMEM;
2063 goto err;
2064 }
2065
2066 - ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
2067 + ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
2068 + reg->size);
2069 if (ret) {
2070 - dev_err(dev, "failed to map source address\n");
2071 + dev_err(dev, "Failed to map source address\n");
2072 reg->status = STATUS_SRC_ADDR_INVALID;
2073 goto err_src_addr;
2074 }
2075
2076 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
2077 if (!dst_addr) {
2078 - dev_err(dev, "failed to allocate destination address\n");
2079 + dev_err(dev, "Failed to allocate destination address\n");
2080 reg->status = STATUS_DST_ADDR_INVALID;
2081 ret = -ENOMEM;
2082 goto err_src_map_addr;
2083 }
2084
2085 - ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
2086 + ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
2087 + reg->size);
2088 if (ret) {
2089 - dev_err(dev, "failed to map destination address\n");
2090 + dev_err(dev, "Failed to map destination address\n");
2091 reg->status = STATUS_DST_ADDR_INVALID;
2092 goto err_dst_addr;
2093 }
2094
2095 memcpy(dst_addr, src_addr, reg->size);
2096
2097 - pci_epc_unmap_addr(epc, dst_phys_addr);
2098 + pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
2099
2100 err_dst_addr:
2101 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
2102
2103 err_src_map_addr:
2104 - pci_epc_unmap_addr(epc, src_phys_addr);
2105 + pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
2106
2107 err_src_addr:
2108 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
2109 @@ -158,15 +155,16 @@ static int pci_epf_test_read(struct pci_
2110
2111 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
2112 if (!src_addr) {
2113 - dev_err(dev, "failed to allocate address\n");
2114 + dev_err(dev, "Failed to allocate address\n");
2115 reg->status = STATUS_SRC_ADDR_INVALID;
2116 ret = -ENOMEM;
2117 goto err;
2118 }
2119
2120 - ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
2121 + ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
2122 + reg->size);
2123 if (ret) {
2124 - dev_err(dev, "failed to map address\n");
2125 + dev_err(dev, "Failed to map address\n");
2126 reg->status = STATUS_SRC_ADDR_INVALID;
2127 goto err_addr;
2128 }
2129 @@ -186,7 +184,7 @@ static int pci_epf_test_read(struct pci_
2130 kfree(buf);
2131
2132 err_map_addr:
2133 - pci_epc_unmap_addr(epc, phys_addr);
2134 + pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
2135
2136 err_addr:
2137 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
2138 @@ -209,15 +207,16 @@ static int pci_epf_test_write(struct pci
2139
2140 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
2141 if (!dst_addr) {
2142 - dev_err(dev, "failed to allocate address\n");
2143 + dev_err(dev, "Failed to allocate address\n");
2144 reg->status = STATUS_DST_ADDR_INVALID;
2145 ret = -ENOMEM;
2146 goto err;
2147 }
2148
2149 - ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
2150 + ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
2151 + reg->size);
2152 if (ret) {
2153 - dev_err(dev, "failed to map address\n");
2154 + dev_err(dev, "Failed to map address\n");
2155 reg->status = STATUS_DST_ADDR_INVALID;
2156 goto err_addr;
2157 }
2158 @@ -237,12 +236,12 @@ static int pci_epf_test_write(struct pci
2159 * wait 1ms inorder for the write to complete. Without this delay L3
2160 * error in observed in the host system.
2161 */
2162 - mdelay(1);
2163 + usleep_range(1000, 2000);
2164
2165 kfree(buf);
2166
2167 err_map_addr:
2168 - pci_epc_unmap_addr(epc, phys_addr);
2169 + pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
2170
2171 err_addr:
2172 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
2173 @@ -251,31 +250,42 @@ err:
2174 return ret;
2175 }
2176
2177 -static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
2178 +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
2179 + u16 irq)
2180 {
2181 - u8 msi_count;
2182 struct pci_epf *epf = epf_test->epf;
2183 + struct device *dev = &epf->dev;
2184 struct pci_epc *epc = epf->epc;
2185 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2186 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
2187
2188 reg->status |= STATUS_IRQ_RAISED;
2189 - msi_count = pci_epc_get_msi(epc);
2190 - if (irq > msi_count || msi_count <= 0)
2191 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
2192 - else
2193 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
2194 +
2195 + switch (irq_type) {
2196 + case IRQ_TYPE_LEGACY:
2197 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
2198 + break;
2199 + case IRQ_TYPE_MSI:
2200 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
2201 + break;
2202 + case IRQ_TYPE_MSIX:
2203 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
2204 + break;
2205 + default:
2206 + dev_err(dev, "Failed to raise IRQ, unknown type\n");
2207 + break;
2208 + }
2209 }
2210
2211 static void pci_epf_test_cmd_handler(struct work_struct *work)
2212 {
2213 int ret;
2214 - u8 irq;
2215 - u8 msi_count;
2216 + int count;
2217 u32 command;
2218 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
2219 cmd_handler.work);
2220 struct pci_epf *epf = epf_test->epf;
2221 + struct device *dev = &epf->dev;
2222 struct pci_epc *epc = epf->epc;
2223 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2224 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
2225 @@ -287,11 +297,14 @@ static void pci_epf_test_cmd_handler(str
2226 reg->command = 0;
2227 reg->status = 0;
2228
2229 - irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
2230 + if (reg->irq_type > IRQ_TYPE_MSIX) {
2231 + dev_err(dev, "Failed to detect IRQ type\n");
2232 + goto reset_handler;
2233 + }
2234
2235 if (command & COMMAND_RAISE_LEGACY_IRQ) {
2236 reg->status = STATUS_IRQ_RAISED;
2237 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
2238 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
2239 goto reset_handler;
2240 }
2241
2242 @@ -301,7 +314,8 @@ static void pci_epf_test_cmd_handler(str
2243 reg->status |= STATUS_WRITE_FAIL;
2244 else
2245 reg->status |= STATUS_WRITE_SUCCESS;
2246 - pci_epf_test_raise_irq(epf_test, irq);
2247 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2248 + reg->irq_number);
2249 goto reset_handler;
2250 }
2251
2252 @@ -311,7 +325,8 @@ static void pci_epf_test_cmd_handler(str
2253 reg->status |= STATUS_READ_SUCCESS;
2254 else
2255 reg->status |= STATUS_READ_FAIL;
2256 - pci_epf_test_raise_irq(epf_test, irq);
2257 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2258 + reg->irq_number);
2259 goto reset_handler;
2260 }
2261
2262 @@ -321,16 +336,28 @@ static void pci_epf_test_cmd_handler(str
2263 reg->status |= STATUS_COPY_SUCCESS;
2264 else
2265 reg->status |= STATUS_COPY_FAIL;
2266 - pci_epf_test_raise_irq(epf_test, irq);
2267 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2268 + reg->irq_number);
2269 goto reset_handler;
2270 }
2271
2272 if (command & COMMAND_RAISE_MSI_IRQ) {
2273 - msi_count = pci_epc_get_msi(epc);
2274 - if (irq > msi_count || msi_count <= 0)
2275 + count = pci_epc_get_msi(epc, epf->func_no);
2276 + if (reg->irq_number > count || count <= 0)
2277 + goto reset_handler;
2278 + reg->status = STATUS_IRQ_RAISED;
2279 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
2280 + reg->irq_number);
2281 + goto reset_handler;
2282 + }
2283 +
2284 + if (command & COMMAND_RAISE_MSIX_IRQ) {
2285 + count = pci_epc_get_msix(epc, epf->func_no);
2286 + if (reg->irq_number > count || count <= 0)
2287 goto reset_handler;
2288 reg->status = STATUS_IRQ_RAISED;
2289 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
2290 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
2291 + reg->irq_number);
2292 goto reset_handler;
2293 }
2294
2295 @@ -351,21 +378,23 @@ static void pci_epf_test_unbind(struct p
2296 {
2297 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
2298 struct pci_epc *epc = epf->epc;
2299 + struct pci_epf_bar *epf_bar;
2300 int bar;
2301
2302 cancel_delayed_work(&epf_test->cmd_handler);
2303 pci_epc_stop(epc);
2304 for (bar = BAR_0; bar <= BAR_5; bar++) {
2305 + epf_bar = &epf->bar[bar];
2306 +
2307 if (epf_test->reg[bar]) {
2308 pci_epf_free_space(epf, epf_test->reg[bar], bar);
2309 - pci_epc_clear_bar(epc, bar);
2310 + pci_epc_clear_bar(epc, epf->func_no, epf_bar);
2311 }
2312 }
2313 }
2314
2315 static int pci_epf_test_set_bar(struct pci_epf *epf)
2316 {
2317 - int flags;
2318 int bar;
2319 int ret;
2320 struct pci_epf_bar *epf_bar;
2321 @@ -374,20 +403,27 @@ static int pci_epf_test_set_bar(struct p
2322 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
2323 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2324
2325 - flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
2326 - if (sizeof(dma_addr_t) == 0x8)
2327 - flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
2328 -
2329 for (bar = BAR_0; bar <= BAR_5; bar++) {
2330 epf_bar = &epf->bar[bar];
2331 - ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
2332 - epf_bar->size, flags);
2333 +
2334 + epf_bar->flags |= upper_32_bits(epf_bar->size) ?
2335 + PCI_BASE_ADDRESS_MEM_TYPE_64 :
2336 + PCI_BASE_ADDRESS_MEM_TYPE_32;
2337 +
2338 + ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
2339 if (ret) {
2340 pci_epf_free_space(epf, epf_test->reg[bar], bar);
2341 - dev_err(dev, "failed to set BAR%d\n", bar);
2342 + dev_err(dev, "Failed to set BAR%d\n", bar);
2343 if (bar == test_reg_bar)
2344 return ret;
2345 }
2346 + /*
2347 + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
2348 + * if the specific implementation required a 64-bit BAR,
2349 + * even if we only requested a 32-bit BAR.
2350 + */
2351 + if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
2352 + bar++;
2353 }
2354
2355 return 0;
2356 @@ -404,7 +440,7 @@ static int pci_epf_test_alloc_space(stru
2357 base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
2358 test_reg_bar);
2359 if (!base) {
2360 - dev_err(dev, "failed to allocated register space\n");
2361 + dev_err(dev, "Failed to allocated register space\n");
2362 return -ENOMEM;
2363 }
2364 epf_test->reg[test_reg_bar] = base;
2365 @@ -414,7 +450,7 @@ static int pci_epf_test_alloc_space(stru
2366 continue;
2367 base = pci_epf_alloc_space(epf, bar_size[bar], bar);
2368 if (!base)
2369 - dev_err(dev, "failed to allocate space for BAR%d\n",
2370 + dev_err(dev, "Failed to allocate space for BAR%d\n",
2371 bar);
2372 epf_test->reg[bar] = base;
2373 }
2374 @@ -433,9 +469,18 @@ static int pci_epf_test_bind(struct pci_
2375 if (WARN_ON_ONCE(!epc))
2376 return -EINVAL;
2377
2378 - ret = pci_epc_write_header(epc, header);
2379 + if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
2380 + epf_test->linkup_notifier = false;
2381 + else
2382 + epf_test->linkup_notifier = true;
2383 +
2384 + epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
2385 +
2386 + epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
2387 +
2388 + ret = pci_epc_write_header(epc, epf->func_no, header);
2389 if (ret) {
2390 - dev_err(dev, "configuration header write failed\n");
2391 + dev_err(dev, "Configuration header write failed\n");
2392 return ret;
2393 }
2394
2395 @@ -447,9 +492,19 @@ static int pci_epf_test_bind(struct pci_
2396 if (ret)
2397 return ret;
2398
2399 - ret = pci_epc_set_msi(epc, epf->msi_interrupts);
2400 - if (ret)
2401 + ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
2402 + if (ret) {
2403 + dev_err(dev, "MSI configuration failed\n");
2404 return ret;
2405 + }
2406 +
2407 + if (epf_test->msix_available) {
2408 + ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
2409 + if (ret) {
2410 + dev_err(dev, "MSI-X configuration failed\n");
2411 + return ret;
2412 + }
2413 + }
2414
2415 if (!epf_test->linkup_notifier)
2416 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
2417 @@ -517,7 +572,7 @@ static int __init pci_epf_test_init(void
2418 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2419 ret = pci_epf_register_driver(&test_driver);
2420 if (ret) {
2421 - pr_err("failed to register pci epf test driver --> %d\n", ret);
2422 + pr_err("Failed to register pci epf test driver --> %d\n", ret);
2423 return ret;
2424 }
2425
2426 --- a/drivers/pci/endpoint/pci-ep-cfs.c
2427 +++ b/drivers/pci/endpoint/pci-ep-cfs.c
2428 @@ -1,35 +1,28 @@
2429 +// SPDX-License-Identifier: GPL-2.0
2430 /**
2431 * configfs to configure the PCI endpoint
2432 *
2433 * Copyright (C) 2017 Texas Instruments
2434 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2435 - *
2436 - * This program is free software: you can redistribute it and/or modify
2437 - * it under the terms of the GNU General Public License version 2 of
2438 - * the License as published by the Free Software Foundation.
2439 - *
2440 - * This program is distributed in the hope that it will be useful,
2441 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2442 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2443 - * GNU General Public License for more details.
2444 - *
2445 - * You should have received a copy of the GNU General Public License
2446 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2447 */
2448
2449 #include <linux/module.h>
2450 +#include <linux/idr.h>
2451 #include <linux/slab.h>
2452
2453 #include <linux/pci-epc.h>
2454 #include <linux/pci-epf.h>
2455 #include <linux/pci-ep-cfs.h>
2456
2457 +static DEFINE_IDR(functions_idr);
2458 +static DEFINE_MUTEX(functions_mutex);
2459 static struct config_group *functions_group;
2460 static struct config_group *controllers_group;
2461
2462 struct pci_epf_group {
2463 struct config_group group;
2464 struct pci_epf *epf;
2465 + int index;
2466 };
2467
2468 struct pci_epc_group {
2469 @@ -151,7 +144,7 @@ static struct configfs_item_operations p
2470 .drop_link = pci_epc_epf_unlink,
2471 };
2472
2473 -static struct config_item_type pci_epc_type = {
2474 +static const struct config_item_type pci_epc_type = {
2475 .ct_item_ops = &pci_epc_item_ops,
2476 .ct_attrs = pci_epc_attrs,
2477 .ct_owner = THIS_MODULE,
2478 @@ -293,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_sh
2479 to_pci_epf_group(item)->epf->msi_interrupts);
2480 }
2481
2482 +static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
2483 + const char *page, size_t len)
2484 +{
2485 + u16 val;
2486 + int ret;
2487 +
2488 + ret = kstrtou16(page, 0, &val);
2489 + if (ret)
2490 + return ret;
2491 +
2492 + to_pci_epf_group(item)->epf->msix_interrupts = val;
2493 +
2494 + return len;
2495 +}
2496 +
2497 +static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
2498 + char *page)
2499 +{
2500 + return sprintf(page, "%d\n",
2501 + to_pci_epf_group(item)->epf->msix_interrupts);
2502 +}
2503 +
2504 PCI_EPF_HEADER_R(vendorid)
2505 PCI_EPF_HEADER_W_u16(vendorid)
2506
2507 @@ -334,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id
2508 CONFIGFS_ATTR(pci_epf_, subsys_id);
2509 CONFIGFS_ATTR(pci_epf_, interrupt_pin);
2510 CONFIGFS_ATTR(pci_epf_, msi_interrupts);
2511 +CONFIGFS_ATTR(pci_epf_, msix_interrupts);
2512
2513 static struct configfs_attribute *pci_epf_attrs[] = {
2514 &pci_epf_attr_vendorid,
2515 @@ -347,6 +363,7 @@ static struct configfs_attribute *pci_ep
2516 &pci_epf_attr_subsys_id,
2517 &pci_epf_attr_interrupt_pin,
2518 &pci_epf_attr_msi_interrupts,
2519 + &pci_epf_attr_msix_interrupts,
2520 NULL,
2521 };
2522
2523 @@ -354,6 +371,9 @@ static void pci_epf_release(struct confi
2524 {
2525 struct pci_epf_group *epf_group = to_pci_epf_group(item);
2526
2527 + mutex_lock(&functions_mutex);
2528 + idr_remove(&functions_idr, epf_group->index);
2529 + mutex_unlock(&functions_mutex);
2530 pci_epf_destroy(epf_group->epf);
2531 kfree(epf_group);
2532 }
2533 @@ -362,7 +382,7 @@ static struct configfs_item_operations p
2534 .release = pci_epf_release,
2535 };
2536
2537 -static struct config_item_type pci_epf_type = {
2538 +static const struct config_item_type pci_epf_type = {
2539 .ct_item_ops = &pci_epf_ops,
2540 .ct_attrs = pci_epf_attrs,
2541 .ct_owner = THIS_MODULE,
2542 @@ -373,22 +393,57 @@ static struct config_group *pci_epf_make
2543 {
2544 struct pci_epf_group *epf_group;
2545 struct pci_epf *epf;
2546 + char *epf_name;
2547 + int index, err;
2548
2549 epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
2550 if (!epf_group)
2551 return ERR_PTR(-ENOMEM);
2552
2553 + mutex_lock(&functions_mutex);
2554 + index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
2555 + mutex_unlock(&functions_mutex);
2556 + if (index < 0) {
2557 + err = index;
2558 + goto free_group;
2559 + }
2560 +
2561 + epf_group->index = index;
2562 +
2563 config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
2564
2565 - epf = pci_epf_create(group->cg_item.ci_name);
2566 + epf_name = kasprintf(GFP_KERNEL, "%s.%d",
2567 + group->cg_item.ci_name, epf_group->index);
2568 + if (!epf_name) {
2569 + err = -ENOMEM;
2570 + goto remove_idr;
2571 + }
2572 +
2573 + epf = pci_epf_create(epf_name);
2574 if (IS_ERR(epf)) {
2575 pr_err("failed to create endpoint function device\n");
2576 - return ERR_PTR(-EINVAL);
2577 + err = -EINVAL;
2578 + goto free_name;
2579 }
2580
2581 epf_group->epf = epf;
2582
2583 + kfree(epf_name);
2584 +
2585 return &epf_group->group;
2586 +
2587 +free_name:
2588 + kfree(epf_name);
2589 +
2590 +remove_idr:
2591 + mutex_lock(&functions_mutex);
2592 + idr_remove(&functions_idr, epf_group->index);
2593 + mutex_unlock(&functions_mutex);
2594 +
2595 +free_group:
2596 + kfree(epf_group);
2597 +
2598 + return ERR_PTR(err);
2599 }
2600
2601 static void pci_epf_drop(struct config_group *group, struct config_item *item)
2602 @@ -401,7 +456,7 @@ static struct configfs_group_operations
2603 .drop_item = &pci_epf_drop,
2604 };
2605
2606 -static struct config_item_type pci_epf_group_type = {
2607 +static const struct config_item_type pci_epf_group_type = {
2608 .ct_group_ops = &pci_epf_group_ops,
2609 .ct_owner = THIS_MODULE,
2610 };
2611 @@ -429,15 +484,15 @@ void pci_ep_cfs_remove_epf_group(struct
2612 }
2613 EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
2614
2615 -static struct config_item_type pci_functions_type = {
2616 +static const struct config_item_type pci_functions_type = {
2617 .ct_owner = THIS_MODULE,
2618 };
2619
2620 -static struct config_item_type pci_controllers_type = {
2621 +static const struct config_item_type pci_controllers_type = {
2622 .ct_owner = THIS_MODULE,
2623 };
2624
2625 -static struct config_item_type pci_ep_type = {
2626 +static const struct config_item_type pci_ep_type = {
2627 .ct_owner = THIS_MODULE,
2628 };
2629
2630 --- a/drivers/pci/endpoint/pci-epc-core.c
2631 +++ b/drivers/pci/endpoint/pci-epc-core.c
2632 @@ -1,20 +1,9 @@
2633 +// SPDX-License-Identifier: GPL-2.0
2634 /**
2635 * PCI Endpoint *Controller* (EPC) library
2636 *
2637 * Copyright (C) 2017 Texas Instruments
2638 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2639 - *
2640 - * This program is free software: you can redistribute it and/or modify
2641 - * it under the terms of the GNU General Public License version 2 of
2642 - * the License as published by the Free Software Foundation.
2643 - *
2644 - * This program is distributed in the hope that it will be useful,
2645 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2646 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2647 - * GNU General Public License for more details.
2648 - *
2649 - * You should have received a copy of the GNU General Public License
2650 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2651 */
2652
2653 #include <linux/device.h>
2654 @@ -141,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
2655 /**
2656 * pci_epc_raise_irq() - interrupt the host system
2657 * @epc: the EPC device which has to interrupt the host
2658 - * @type: specify the type of interrupt; legacy or MSI
2659 - * @interrupt_num: the MSI interrupt number
2660 + * @func_no: the endpoint function number in the EPC device
2661 + * @type: specify the type of interrupt; legacy, MSI or MSI-X
2662 + * @interrupt_num: the MSI or MSI-X interrupt number
2663 *
2664 - * Invoke to raise an MSI or legacy interrupt
2665 + * Invoke to raise an legacy, MSI or MSI-X interrupt
2666 */
2667 -int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
2668 - u8 interrupt_num)
2669 +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
2670 + enum pci_epc_irq_type type, u16 interrupt_num)
2671 {
2672 int ret;
2673 unsigned long flags;
2674
2675 - if (IS_ERR(epc))
2676 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2677 return -EINVAL;
2678
2679 if (!epc->ops->raise_irq)
2680 return 0;
2681
2682 spin_lock_irqsave(&epc->lock, flags);
2683 - ret = epc->ops->raise_irq(epc, type, interrupt_num);
2684 + ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
2685 spin_unlock_irqrestore(&epc->lock, flags);
2686
2687 return ret;
2688 @@ -169,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
2689 /**
2690 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
2691 * @epc: the EPC device to which MSI interrupts was requested
2692 + * @func_no: the endpoint function number in the EPC device
2693 *
2694 * Invoke to get the number of MSI interrupts allocated by the RC
2695 */
2696 -int pci_epc_get_msi(struct pci_epc *epc)
2697 +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
2698 {
2699 int interrupt;
2700 unsigned long flags;
2701
2702 - if (IS_ERR(epc))
2703 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2704 return 0;
2705
2706 if (!epc->ops->get_msi)
2707 return 0;
2708
2709 spin_lock_irqsave(&epc->lock, flags);
2710 - interrupt = epc->ops->get_msi(epc);
2711 + interrupt = epc->ops->get_msi(epc, func_no);
2712 spin_unlock_irqrestore(&epc->lock, flags);
2713
2714 if (interrupt < 0)
2715 @@ -199,17 +190,19 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
2716 /**
2717 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
2718 * @epc: the EPC device on which MSI has to be configured
2719 + * @func_no: the endpoint function number in the EPC device
2720 * @interrupts: number of MSI interrupts required by the EPF
2721 *
2722 * Invoke to set the required number of MSI interrupts.
2723 */
2724 -int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
2725 +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
2726 {
2727 int ret;
2728 u8 encode_int;
2729 unsigned long flags;
2730
2731 - if (IS_ERR(epc))
2732 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2733 + interrupts > 32)
2734 return -EINVAL;
2735
2736 if (!epc->ops->set_msi)
2737 @@ -218,7 +211,7 @@ int pci_epc_set_msi(struct pci_epc *epc,
2738 encode_int = order_base_2(interrupts);
2739
2740 spin_lock_irqsave(&epc->lock, flags);
2741 - ret = epc->ops->set_msi(epc, encode_int);
2742 + ret = epc->ops->set_msi(epc, func_no, encode_int);
2743 spin_unlock_irqrestore(&epc->lock, flags);
2744
2745 return ret;
2746 @@ -226,24 +219,83 @@ int pci_epc_set_msi(struct pci_epc *epc,
2747 EXPORT_SYMBOL_GPL(pci_epc_set_msi);
2748
2749 /**
2750 + * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
2751 + * @epc: the EPC device to which MSI-X interrupts was requested
2752 + * @func_no: the endpoint function number in the EPC device
2753 + *
2754 + * Invoke to get the number of MSI-X interrupts allocated by the RC
2755 + */
2756 +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
2757 +{
2758 + int interrupt;
2759 + unsigned long flags;
2760 +
2761 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2762 + return 0;
2763 +
2764 + if (!epc->ops->get_msix)
2765 + return 0;
2766 +
2767 + spin_lock_irqsave(&epc->lock, flags);
2768 + interrupt = epc->ops->get_msix(epc, func_no);
2769 + spin_unlock_irqrestore(&epc->lock, flags);
2770 +
2771 + if (interrupt < 0)
2772 + return 0;
2773 +
2774 + return interrupt + 1;
2775 +}
2776 +EXPORT_SYMBOL_GPL(pci_epc_get_msix);
2777 +
2778 +/**
2779 + * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
2780 + * @epc: the EPC device on which MSI-X has to be configured
2781 + * @func_no: the endpoint function number in the EPC device
2782 + * @interrupts: number of MSI-X interrupts required by the EPF
2783 + *
2784 + * Invoke to set the required number of MSI-X interrupts.
2785 + */
2786 +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
2787 +{
2788 + int ret;
2789 + unsigned long flags;
2790 +
2791 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2792 + interrupts < 1 || interrupts > 2048)
2793 + return -EINVAL;
2794 +
2795 + if (!epc->ops->set_msix)
2796 + return 0;
2797 +
2798 + spin_lock_irqsave(&epc->lock, flags);
2799 + ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
2800 + spin_unlock_irqrestore(&epc->lock, flags);
2801 +
2802 + return ret;
2803 +}
2804 +EXPORT_SYMBOL_GPL(pci_epc_set_msix);
2805 +
2806 +/**
2807 * pci_epc_unmap_addr() - unmap CPU address from PCI address
2808 * @epc: the EPC device on which address is allocated
2809 + * @func_no: the endpoint function number in the EPC device
2810 * @phys_addr: physical address of the local system
2811 *
2812 * Invoke to unmap the CPU address from PCI address.
2813 */
2814 -void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
2815 +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
2816 + phys_addr_t phys_addr)
2817 {
2818 unsigned long flags;
2819
2820 - if (IS_ERR(epc))
2821 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2822 return;
2823
2824 if (!epc->ops->unmap_addr)
2825 return;
2826
2827 spin_lock_irqsave(&epc->lock, flags);
2828 - epc->ops->unmap_addr(epc, phys_addr);
2829 + epc->ops->unmap_addr(epc, func_no, phys_addr);
2830 spin_unlock_irqrestore(&epc->lock, flags);
2831 }
2832 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
2833 @@ -251,26 +303,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
2834 /**
2835 * pci_epc_map_addr() - map CPU address to PCI address
2836 * @epc: the EPC device on which address is allocated
2837 + * @func_no: the endpoint function number in the EPC device
2838 * @phys_addr: physical address of the local system
2839 * @pci_addr: PCI address to which the physical address should be mapped
2840 * @size: the size of the allocation
2841 *
2842 * Invoke to map CPU address with PCI address.
2843 */
2844 -int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
2845 - u64 pci_addr, size_t size)
2846 +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
2847 + phys_addr_t phys_addr, u64 pci_addr, size_t size)
2848 {
2849 int ret;
2850 unsigned long flags;
2851
2852 - if (IS_ERR(epc))
2853 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2854 return -EINVAL;
2855
2856 if (!epc->ops->map_addr)
2857 return 0;
2858
2859 spin_lock_irqsave(&epc->lock, flags);
2860 - ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
2861 + ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
2862 spin_unlock_irqrestore(&epc->lock, flags);
2863
2864 return ret;
2865 @@ -280,22 +333,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
2866 /**
2867 * pci_epc_clear_bar() - reset the BAR
2868 * @epc: the EPC device for which the BAR has to be cleared
2869 - * @bar: the BAR number that has to be reset
2870 + * @func_no: the endpoint function number in the EPC device
2871 + * @epf_bar: the struct epf_bar that contains the BAR information
2872 *
2873 * Invoke to reset the BAR of the endpoint device.
2874 */
2875 -void pci_epc_clear_bar(struct pci_epc *epc, int bar)
2876 +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
2877 + struct pci_epf_bar *epf_bar)
2878 {
2879 unsigned long flags;
2880
2881 - if (IS_ERR(epc))
2882 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2883 + (epf_bar->barno == BAR_5 &&
2884 + epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
2885 return;
2886
2887 if (!epc->ops->clear_bar)
2888 return;
2889
2890 spin_lock_irqsave(&epc->lock, flags);
2891 - epc->ops->clear_bar(epc, bar);
2892 + epc->ops->clear_bar(epc, func_no, epf_bar);
2893 spin_unlock_irqrestore(&epc->lock, flags);
2894 }
2895 EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
2896 @@ -303,26 +360,32 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
2897 /**
2898 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
2899 * @epc: the EPC device on which BAR has to be configured
2900 - * @bar: the BAR number that has to be configured
2901 - * @size: the size of the addr space
2902 - * @flags: specify memory allocation/io allocation/32bit address/64 bit address
2903 + * @func_no: the endpoint function number in the EPC device
2904 + * @epf_bar: the struct epf_bar that contains the BAR information
2905 *
2906 * Invoke to configure the BAR of the endpoint device.
2907 */
2908 -int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
2909 - dma_addr_t bar_phys, size_t size, int flags)
2910 +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
2911 + struct pci_epf_bar *epf_bar)
2912 {
2913 int ret;
2914 unsigned long irq_flags;
2915 + int flags = epf_bar->flags;
2916
2917 - if (IS_ERR(epc))
2918 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2919 + (epf_bar->barno == BAR_5 &&
2920 + flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
2921 + (flags & PCI_BASE_ADDRESS_SPACE_IO &&
2922 + flags & PCI_BASE_ADDRESS_IO_MASK) ||
2923 + (upper_32_bits(epf_bar->size) &&
2924 + !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
2925 return -EINVAL;
2926
2927 if (!epc->ops->set_bar)
2928 return 0;
2929
2930 spin_lock_irqsave(&epc->lock, irq_flags);
2931 - ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
2932 + ret = epc->ops->set_bar(epc, func_no, epf_bar);
2933 spin_unlock_irqrestore(&epc->lock, irq_flags);
2934
2935 return ret;
2936 @@ -332,6 +395,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
2937 /**
2938 * pci_epc_write_header() - write standard configuration header
2939 * @epc: the EPC device to which the configuration header should be written
2940 + * @func_no: the endpoint function number in the EPC device
2941 * @header: standard configuration header fields
2942 *
2943 * Invoke to write the configuration header to the endpoint controller. Every
2944 @@ -339,19 +403,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
2945 * configuration header would be written. The callback function should write
2946 * the header fields to this dedicated location.
2947 */
2948 -int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
2949 +int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
2950 + struct pci_epf_header *header)
2951 {
2952 int ret;
2953 unsigned long flags;
2954
2955 - if (IS_ERR(epc))
2956 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2957 return -EINVAL;
2958
2959 if (!epc->ops->write_header)
2960 return 0;
2961
2962 spin_lock_irqsave(&epc->lock, flags);
2963 - ret = epc->ops->write_header(epc, header);
2964 + ret = epc->ops->write_header(epc, func_no, header);
2965 spin_unlock_irqrestore(&epc->lock, flags);
2966
2967 return ret;
2968 --- a/drivers/pci/endpoint/pci-epc-mem.c
2969 +++ b/drivers/pci/endpoint/pci-epc-mem.c
2970 @@ -1,20 +1,9 @@
2971 +// SPDX-License-Identifier: GPL-2.0
2972 /**
2973 * PCI Endpoint *Controller* Address Space Management
2974 *
2975 * Copyright (C) 2017 Texas Instruments
2976 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2977 - *
2978 - * This program is free software: you can redistribute it and/or modify
2979 - * it under the terms of the GNU General Public License version 2 of
2980 - * the License as published by the Free Software Foundation.
2981 - *
2982 - * This program is distributed in the hope that it will be useful,
2983 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2984 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2985 - * GNU General Public License for more details.
2986 - *
2987 - * You should have received a copy of the GNU General Public License
2988 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2989 */
2990
2991 #include <linux/io.h>
2992 --- a/drivers/pci/endpoint/pci-epf-core.c
2993 +++ b/drivers/pci/endpoint/pci-epf-core.c
2994 @@ -1,20 +1,9 @@
2995 +// SPDX-License-Identifier: GPL-2.0
2996 /**
2997 * PCI Endpoint *Function* (EPF) library
2998 *
2999 * Copyright (C) 2017 Texas Instruments
3000 * Author: Kishon Vijay Abraham I <kishon@ti.com>
3001 - *
3002 - * This program is free software: you can redistribute it and/or modify
3003 - * it under the terms of the GNU General Public License version 2 of
3004 - * the License as published by the Free Software Foundation.
3005 - *
3006 - * This program is distributed in the hope that it will be useful,
3007 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
3008 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3009 - * GNU General Public License for more details.
3010 - *
3011 - * You should have received a copy of the GNU General Public License
3012 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
3013 */
3014
3015 #include <linux/device.h>
3016 @@ -26,6 +15,8 @@
3017 #include <linux/pci-epf.h>
3018 #include <linux/pci-ep-cfs.h>
3019
3020 +static DEFINE_MUTEX(pci_epf_mutex);
3021 +
3022 static struct bus_type pci_epf_bus_type;
3023 static const struct device_type pci_epf_type;
3024
3025 @@ -109,6 +100,8 @@ void pci_epf_free_space(struct pci_epf *
3026
3027 epf->bar[bar].phys_addr = 0;
3028 epf->bar[bar].size = 0;
3029 + epf->bar[bar].barno = 0;
3030 + epf->bar[bar].flags = 0;
3031 }
3032 EXPORT_SYMBOL_GPL(pci_epf_free_space);
3033
3034 @@ -137,11 +130,27 @@ void *pci_epf_alloc_space(struct pci_epf
3035
3036 epf->bar[bar].phys_addr = phys_addr;
3037 epf->bar[bar].size = size;
3038 + epf->bar[bar].barno = bar;
3039 + epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
3040
3041 return space;
3042 }
3043 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
3044
3045 +static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
3046 +{
3047 + struct config_group *group, *tmp;
3048 +
3049 + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
3050 + return;
3051 +
3052 + mutex_lock(&pci_epf_mutex);
3053 + list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
3054 + pci_ep_cfs_remove_epf_group(group);
3055 + list_del(&driver->epf_group);
3056 + mutex_unlock(&pci_epf_mutex);
3057 +}
3058 +
3059 /**
3060 * pci_epf_unregister_driver() - unregister the PCI EPF driver
3061 * @driver: the PCI EPF driver that has to be unregistered
3062 @@ -150,11 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
3063 */
3064 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
3065 {
3066 - pci_ep_cfs_remove_epf_group(driver->group);
3067 + pci_epf_remove_cfs(driver);
3068 driver_unregister(&driver->driver);
3069 }
3070 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
3071
3072 +static int pci_epf_add_cfs(struct pci_epf_driver *driver)
3073 +{
3074 + struct config_group *group;
3075 + const struct pci_epf_device_id *id;
3076 +
3077 + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
3078 + return 0;
3079 +
3080 + INIT_LIST_HEAD(&driver->epf_group);
3081 +
3082 + id = driver->id_table;
3083 + while (id->name[0]) {
3084 + group = pci_ep_cfs_add_epf_group(id->name);
3085 + if (IS_ERR(group)) {
3086 + pci_epf_remove_cfs(driver);
3087 + return PTR_ERR(group);
3088 + }
3089 +
3090 + mutex_lock(&pci_epf_mutex);
3091 + list_add_tail(&group->group_entry, &driver->epf_group);
3092 + mutex_unlock(&pci_epf_mutex);
3093 + id++;
3094 + }
3095 +
3096 + return 0;
3097 +}
3098 +
3099 /**
3100 * __pci_epf_register_driver() - register a new PCI EPF driver
3101 * @driver: structure representing PCI EPF driver
3102 @@ -180,7 +216,7 @@ int __pci_epf_register_driver(struct pci
3103 if (ret)
3104 return ret;
3105
3106 - driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
3107 + pci_epf_add_cfs(driver);
3108
3109 return 0;
3110 }
3111 @@ -211,29 +247,17 @@ struct pci_epf *pci_epf_create(const cha
3112 int ret;
3113 struct pci_epf *epf;
3114 struct device *dev;
3115 - char *func_name;
3116 - char *buf;
3117 + int len;
3118
3119 epf = kzalloc(sizeof(*epf), GFP_KERNEL);
3120 - if (!epf) {
3121 - ret = -ENOMEM;
3122 - goto err_ret;
3123 - }
3124 + if (!epf)
3125 + return ERR_PTR(-ENOMEM);
3126
3127 - buf = kstrdup(name, GFP_KERNEL);
3128 - if (!buf) {
3129 - ret = -ENOMEM;
3130 - goto free_epf;
3131 - }
3132 -
3133 - func_name = buf;
3134 - buf = strchrnul(buf, '.');
3135 - *buf = '\0';
3136 -
3137 - epf->name = kstrdup(func_name, GFP_KERNEL);
3138 + len = strchrnul(name, '.') - name;
3139 + epf->name = kstrndup(name, len, GFP_KERNEL);
3140 if (!epf->name) {
3141 - ret = -ENOMEM;
3142 - goto free_func_name;
3143 + kfree(epf);
3144 + return ERR_PTR(-ENOMEM);
3145 }
3146
3147 dev = &epf->dev;
3148 @@ -242,28 +266,18 @@ struct pci_epf *pci_epf_create(const cha
3149 dev->type = &pci_epf_type;
3150
3151 ret = dev_set_name(dev, "%s", name);
3152 - if (ret)
3153 - goto put_dev;
3154 + if (ret) {
3155 + put_device(dev);
3156 + return ERR_PTR(ret);
3157 + }
3158
3159 ret = device_add(dev);
3160 - if (ret)
3161 - goto put_dev;
3162 + if (ret) {
3163 + put_device(dev);
3164 + return ERR_PTR(ret);
3165 + }
3166
3167 - kfree(func_name);
3168 return epf;
3169 -
3170 -put_dev:
3171 - put_device(dev);
3172 - kfree(epf->name);
3173 -
3174 -free_func_name:
3175 - kfree(func_name);
3176 -
3177 -free_epf:
3178 - kfree(epf);
3179 -
3180 -err_ret:
3181 - return ERR_PTR(ret);
3182 }
3183 EXPORT_SYMBOL_GPL(pci_epf_create);
3184
3185 --- a/drivers/pci/host/pci-host-common.c
3186 +++ b/drivers/pci/host/pci-host-common.c
3187 @@ -113,9 +113,7 @@ err_out:
3188 int pci_host_common_probe(struct platform_device *pdev,
3189 struct pci_ecam_ops *ops)
3190 {
3191 - const char *type;
3192 struct device *dev = &pdev->dev;
3193 - struct device_node *np = dev->of_node;
3194 struct pci_bus *bus, *child;
3195 struct pci_host_bridge *bridge;
3196 struct pci_config_window *cfg;
3197 @@ -126,12 +124,6 @@ int pci_host_common_probe(struct platfor
3198 if (!bridge)
3199 return -ENOMEM;
3200
3201 - type = of_get_property(np, "device_type", NULL);
3202 - if (!type || strcmp(type, "pci")) {
3203 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3204 - return -EINVAL;
3205 - }
3206 -
3207 of_pci_check_probe_only();
3208
3209 /* Parse and map our Configuration Space windows */
3210 --- a/drivers/pci/host/pcie-xilinx-nwl.c
3211 +++ b/drivers/pci/host/pcie-xilinx-nwl.c
3212 @@ -779,16 +779,7 @@ static int nwl_pcie_parse_dt(struct nwl_
3213 struct platform_device *pdev)
3214 {
3215 struct device *dev = pcie->dev;
3216 - struct device_node *node = dev->of_node;
3217 struct resource *res;
3218 - const char *type;
3219 -
3220 - /* Check for device type */
3221 - type = of_get_property(node, "device_type", NULL);
3222 - if (!type || strcmp(type, "pci")) {
3223 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3224 - return -EINVAL;
3225 - }
3226
3227 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
3228 pcie->breg_base = devm_ioremap_resource(dev, res);
3229 --- a/drivers/pci/host/pcie-xilinx.c
3230 +++ b/drivers/pci/host/pcie-xilinx.c
3231 @@ -584,15 +584,8 @@ static int xilinx_pcie_parse_dt(struct x
3232 struct device *dev = port->dev;
3233 struct device_node *node = dev->of_node;
3234 struct resource regs;
3235 - const char *type;
3236 int err;
3237
3238 - type = of_get_property(node, "device_type", NULL);
3239 - if (!type || strcmp(type, "pci")) {
3240 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3241 - return -EINVAL;
3242 - }
3243 -
3244 err = of_address_to_resource(node, 0, &regs);
3245 if (err) {
3246 dev_err(dev, "missing \"reg\" property\n");
3247 --- /dev/null
3248 +++ b/drivers/pci/mobiveil/Kconfig
3249 @@ -0,0 +1,50 @@
3250 +# SPDX-License-Identifier: GPL-2.0
3251 +
3252 +menu "Mobiveil PCIe Core Support"
3253 + depends on PCI
3254 +
3255 +config PCIE_MOBIVEIL
3256 + bool
3257 +
3258 +config PCIE_MOBIVEIL_HOST
3259 + bool
3260 + depends on PCI_MSI_IRQ_DOMAIN
3261 + select PCIE_MOBIVEIL
3262 +
3263 +config PCIE_MOBIVEIL_EP
3264 + bool
3265 + depends on PCI_ENDPOINT
3266 + select PCIE_MOBIVEIL
3267 +
3268 +config PCIE_MOBIVEIL_PLAT
3269 + bool "Mobiveil AXI PCIe controller"
3270 + depends on ARCH_ZYNQMP || COMPILE_TEST
3271 + depends on OF
3272 + select PCIE_MOBIVEIL_HOST
3273 + help
3274 + Say Y here if you want to enable support for the Mobiveil AXI PCIe
3275 + Soft IP. It has up to 8 outbound and inbound windows
3276 + for address translation and it is a PCIe Gen4 IP.
3277 +
3278 +config PCI_LAYERSCAPE_GEN4
3279 +	bool "Freescale Layerscape PCIe Gen4 controller in RC mode"
3280 + depends on PCI
3281 + depends on OF && (ARM64 || ARCH_LAYERSCAPE)
3282 + depends on PCI_MSI_IRQ_DOMAIN
3283 + select PCIE_MOBIVEIL_HOST
3284 + help
3285 + Say Y here if you want PCIe Gen4 controller support on
3286 +	  Layerscape SoCs. The PCIe controller works in RC mode
3287 + by setting the RCW[HOST_AGT_PEX] to 0.
3288 +
3289 +config PCI_LAYERSCAPE_GEN4_EP
3290 +	bool "Freescale Layerscape PCIe Gen4 controller in EP mode"
3291 + depends on PCI
3292 + depends on OF && (ARM64 || ARCH_LAYERSCAPE)
3293 + depends on PCI_ENDPOINT
3294 + select PCIE_MOBIVEIL_EP
3295 + help
3296 + Say Y here if you want PCIe Gen4 controller support on
3297 +	  Layerscape SoCs. The PCIe controller works in EP mode
3298 + by setting the RCW[HOST_AGT_PEX] to 1.
3299 +endmenu
3300 --- /dev/null
3301 +++ b/drivers/pci/mobiveil/Makefile
3302 @@ -0,0 +1,7 @@
3303 +# SPDX-License-Identifier: GPL-2.0
3304 +obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
3305 +obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
3306 +obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
3307 +obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
3308 +obj-$(CONFIG_PCI_LAYERSCAPE_GEN4) += pci-layerscape-gen4.o
3309 +obj-$(CONFIG_PCI_LAYERSCAPE_GEN4_EP) += pci-layerscape-gen4-ep.o
3310 --- /dev/null
3311 +++ b/drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
3312 @@ -0,0 +1,178 @@
3313 +// SPDX-License-Identifier: GPL-2.0
3314 +/*
3315 + * PCIe controller EP driver for Freescale Layerscape SoCs
3316 + *
3317 + * Copyright (C) 2018 NXP Semiconductor.
3318 + *
3319 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
3320 + */
3321 +
3322 +#include <linux/kernel.h>
3323 +#include <linux/init.h>
3324 +#include <linux/of_pci.h>
3325 +#include <linux/of_platform.h>
3326 +#include <linux/of_address.h>
3327 +#include <linux/pci.h>
3328 +#include <linux/platform_device.h>
3329 +#include <linux/resource.h>
3330 +
3331 +#include "pcie-mobiveil.h"
3332 +
3333 +struct ls_pcie_g4_ep {
3334 + struct mobiveil_pcie *mv_pci;
3335 +};
3336 +
3337 +#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
3338 +
3339 +static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
3340 + { .compatible = "fsl,lx2160a-pcie-ep",},
3341 + { },
3342 +};
3343 +
3344 +static void ls_pcie_g4_get_bar_num(struct mobiveil_pcie_ep *ep)
3345 +{
3346 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3347 + u32 type, reg;
3348 + u8 bar;
3349 +
3350 + ep->bar_num = BAR_5 + 1;
3351 +
3352 + for (bar = BAR_0; bar <= BAR_5; bar++) {
3353 + reg = PCI_BASE_ADDRESS_0 + (4 * bar);
3354 + type = csr_readl(mv_pci, reg) &
3355 + PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3356 + if (type & PCI_BASE_ADDRESS_MEM_TYPE_64)
3357 + ep->bar_num--;
3358 + }
3359 +}
3360 +
3361 +static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
3362 +{
3363 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3364 + struct pci_epc *epc = ep->epc;
3365 + enum pci_barno bar;
3366 + int win_idx, val;
3367 +
3368 + /*
3369 +	 * Errata: on an unsupported request error for an inbound posted
3370 +	 * write transaction, the PCIe controller reports an advisory error
3371 +	 * to the RC instead of an uncorrectable error message.
3372 +	 * Workaround: set bit 20 (Unsupported_Request_Error_Severity) to 1
3373 +	 * in the Uncorrectable_Error_Severity_Register, making the
3374 +	 * unsupported request error generate a fatal error.
3375 + */
3376 + val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
3377 + val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
3378 + csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
3379 +
3380 + ls_pcie_g4_get_bar_num(ep);
3381 +
3382 + for (bar = BAR_0; bar < (ep->bar_num * ep->pf_num); bar++)
3383 + mobiveil_pcie_ep_reset_bar(mv_pci, bar);
3384 +
3385 + for (win_idx = 0; win_idx < MAX_IATU_OUT; win_idx++)
3386 + mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
3387 +
3388 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
3389 + epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
3390 +}
3391 +
3392 +static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
3393 + enum pci_epc_irq_type type,
3394 + u16 interrupt_num)
3395 +{
3396 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3397 +
3398 + switch (type) {
3399 + case PCI_EPC_IRQ_LEGACY:
3400 + return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
3401 + case PCI_EPC_IRQ_MSI:
3402 + return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
3403 + interrupt_num);
3404 + case PCI_EPC_IRQ_MSIX:
3405 + return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
3406 + interrupt_num);
3407 + default:
3408 + dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
3409 + }
3410 +
3411 + return 0;
3412 +}
3413 +
3414 +static struct mobiveil_pcie_ep_ops pcie_ep_ops = {
3415 + .ep_init = ls_pcie_g4_ep_init,
3416 + .raise_irq = ls_pcie_g4_ep_raise_irq,
3417 +};
3418 +
3419 +static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_pcie_g4_ep,
3420 + struct platform_device *pdev)
3421 +{
3422 + struct mobiveil_pcie *mv_pci = ls_pcie_g4_ep->mv_pci;
3423 + struct device *dev = &pdev->dev;
3424 + struct mobiveil_pcie_ep *ep;
3425 + struct resource *res;
3426 + int ret;
3427 + struct device_node *np = dev->of_node;
3428 +
3429 + ep = &mv_pci->ep;
3430 + ep->ops = &pcie_ep_ops;
3431 +
3432 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
3433 + if (!res)
3434 + return -EINVAL;
3435 +
3436 + ep->phys_base = res->start;
3437 + ep->addr_size = resource_size(res);
3438 +
3439 + ret = of_property_read_u32(np, "max-functions", &ep->pf_num);
3440 + if (ret < 0)
3441 + ep->pf_num = 1;
3442 +
3443 + ret = mobiveil_pcie_ep_init(ep);
3444 + if (ret) {
3445 + dev_err(dev, "failed to initialize endpoint\n");
3446 + return ret;
3447 + }
3448 +
3449 + return 0;
3450 +}
3451 +
3452 +static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
3453 +{
3454 + struct device *dev = &pdev->dev;
3455 + struct mobiveil_pcie *mv_pci;
3456 + struct ls_pcie_g4_ep *ls_pcie_g4_ep;
3457 + struct resource *res;
3458 + int ret;
3459 +
3460 + ls_pcie_g4_ep = devm_kzalloc(dev, sizeof(*ls_pcie_g4_ep), GFP_KERNEL);
3461 + if (!ls_pcie_g4_ep)
3462 + return -ENOMEM;
3463 +
3464 + mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
3465 + if (!mv_pci)
3466 + return -ENOMEM;
3467 +
3468 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
3469 + mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
3470 + if (IS_ERR(mv_pci->csr_axi_slave_base))
3471 + return PTR_ERR(mv_pci->csr_axi_slave_base);
3472 +
3473 + mv_pci->pdev = pdev;
3474 + ls_pcie_g4_ep->mv_pci = mv_pci;
3475 +
3476 + platform_set_drvdata(pdev, ls_pcie_g4_ep);
3477 +
3478 + ret = ls_pcie_gen4_add_pcie_ep(ls_pcie_g4_ep, pdev);
3479 +
3480 + return ret;
3481 +}
3482 +
3483 +static struct platform_driver ls_pcie_g4_ep_driver = {
3484 + .driver = {
3485 + .name = "layerscape-pcie-gen4-ep",
3486 + .of_match_table = ls_pcie_g4_ep_of_match,
3487 + .suppress_bind_attrs = true,
3488 + },
3489 +};
3490 +builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
3491 --- /dev/null
3492 +++ b/drivers/pci/mobiveil/pci-layerscape-gen4.c
3493 @@ -0,0 +1,292 @@
3494 +// SPDX-License-Identifier: GPL-2.0
3495 +/*
3496 + * PCIe host controller driver for NXP Layerscape SoCs
3497 + *
3498 + * Copyright 2018 NXP
3499 + *
3500 + * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
3501 + */
3502 +
3503 +#include <linux/kernel.h>
3504 +#include <linux/interrupt.h>
3505 +#include <linux/init.h>
3506 +#include <linux/of_pci.h>
3507 +#include <linux/of_platform.h>
3508 +#include <linux/of_irq.h>
3509 +#include <linux/of_address.h>
3510 +#include <linux/pci.h>
3511 +#include <linux/platform_device.h>
3512 +#include <linux/resource.h>
3513 +#include <linux/mfd/syscon.h>
3514 +#include <linux/regmap.h>
3515 +
3516 +#include "pcie-mobiveil.h"
3517 +
3518 +/* LUT and PF control registers */
3519 +#define PCIE_LUT_OFF (0x80000)
3520 +#define PCIE_LUT_GCR (0x28)
3521 +#define PCIE_LUT_GCR_RRE (0)
3522 +
3523 +#define PCIE_PF_OFF (0xc0000)
3524 +#define PCIE_PF_INT_STAT (0x18)
3525 +#define PF_INT_STAT_PABRST (31)
3526 +
3527 +#define PCIE_PF_DBG (0x7fc)
3528 +#define PF_DBG_LTSSM_MASK (0x3f)
3529 +#define PF_DBG_WE (31)
3530 +#define PF_DBG_PABR (27)
3531 +
3532 +#define LS_PCIE_G4_LTSSM_L0 0x2d /* L0 state */
3533 +
3534 +#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
3535 +
3536 +struct ls_pcie_g4 {
3537 + struct mobiveil_pcie *pci;
3538 + struct delayed_work dwork;
3539 + int irq;
3540 +};
3541 +
3542 +static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
3543 +{
3544 + return ioread32(pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
3545 +}
3546 +
3547 +static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
3548 + u32 off, u32 val)
3549 +{
3550 + iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
3551 +}
3552 +
3553 +static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
3554 +{
3555 + return ioread32(pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
3556 +}
3557 +
3558 +static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
3559 + u32 off, u32 val)
3560 +{
3561 + iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
3562 +}
3563 +
3564 +static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
3565 +{
3566 + struct mobiveil_pcie *mv_pci = pcie->pci;
3567 + u32 header_type;
3568 +
3569 + header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
3570 + header_type &= 0x7f;
3571 +
3572 + return header_type == PCI_HEADER_TYPE_BRIDGE;
3573 +}
3574 +
3575 +static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
3576 +{
3577 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
3578 + u32 state;
3579 +
3580 + state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3581 + state = state & PF_DBG_LTSSM_MASK;
3582 +
3583 + if (state == LS_PCIE_G4_LTSSM_L0)
3584 + return 1;
3585 +
3586 + return 0;
3587 +}
3588 +
3589 +static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
3590 +{
3591 + struct mobiveil_pcie *mv_pci = pcie->pci;
3592 + u32 val, act_stat;
3593 + int to = 100;
3594 +
3595 + /* Poll for pab_csb_reset to set and PAB activity to clear */
3596 + do {
3597 + usleep_range(10, 15);
3598 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
3599 + act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
3600 + } while (((val & 1 << PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
3601 + if (to < 0) {
3602 + dev_err(&mv_pci->pdev->dev, "poll PABRST&PABACT timeout\n");
3603 + return;
3604 + }
3605 +
3606 + /* clear PEX_RESET bit in PEX_PF0_DBG register */
3607 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3608 + val |= 1 << PF_DBG_WE;
3609 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3610 +
3611 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3612 + val |= 1 << PF_DBG_PABR;
3613 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3614 +
3615 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3616 + val &= ~(1 << PF_DBG_WE);
3617 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3618 +
3619 + mobiveil_host_init(mv_pci, true);
3620 +
3621 + to = 100;
3622 + while (!ls_pcie_g4_link_up(mv_pci) && to--)
3623 + usleep_range(200, 250);
3624 + if (to < 0)
3625 + dev_err(&mv_pci->pdev->dev, "PCIe link trainning timeout\n");
3626 +}
3627 +
3628 +static irqreturn_t ls_pcie_g4_handler(int irq, void *dev_id)
3629 +{
3630 + struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
3631 + struct mobiveil_pcie *mv_pci = pcie->pci;
3632 + u32 val;
3633 +
3634 + val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
3635 + if (!val)
3636 + return IRQ_NONE;
3637 +
3638 + if (val & PAB_INTP_RESET)
3639 + schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
3640 +
3641 + csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
3642 +
3643 + return IRQ_HANDLED;
3644 +}
3645 +
3646 +static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
3647 +{
3648 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
3649 + u32 val;
3650 + int ret;
3651 +
3652 + pcie->irq = platform_get_irq_byname(mv_pci->pdev, "intr");
3653 + if (pcie->irq < 0) {
3654 + dev_err(&mv_pci->pdev->dev, "Can't get 'intr' irq.\n");
3655 + return pcie->irq;
3656 + }
3657 + ret = devm_request_irq(&mv_pci->pdev->dev, pcie->irq,
3658 + ls_pcie_g4_handler, IRQF_SHARED,
3659 + mv_pci->pdev->name, pcie);
3660 + if (ret) {
3661 + dev_err(&mv_pci->pdev->dev, "Can't register PCIe IRQ.\n");
3662 + return ret;
3663 + }
3664 +
3665 + /* Enable interrupts */
3666 + val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
3667 + PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
3668 + csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
3669 +
3670 + return 0;
3671 +}
3672 +
3673 +static void ls_pcie_g4_reset(struct work_struct *work)
3674 +{
3675 + struct delayed_work *dwork = container_of(work, struct delayed_work,
3676 + work);
3677 + struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
3678 + struct mobiveil_pcie *mv_pci = pcie->pci;
3679 + u16 ctrl;
3680 +
3681 + ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
3682 + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3683 + csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
3684 + ls_pcie_g4_reinit_hw(pcie);
3685 +}
3686 +
3687 +static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
3688 + int where, int size, u32 *val)
3689 +{
3690 + struct mobiveil_pcie *pci = bus->sysdata;
3691 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
3692 + int ret;
3693 +
3694 + if (where == PCI_VENDOR_ID)
3695 + ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
3696 + 0 << PCIE_LUT_GCR_RRE);
3697 +
3698 + ret = pci_generic_config_read(bus, devfn, where, size, val);
3699 +
3700 + if (where == PCI_VENDOR_ID)
3701 + ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
3702 + 1 << PCIE_LUT_GCR_RRE);
3703 +
3704 + return ret;
3705 +}
3706 +
3707 +static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
3708 + .interrupt_init = ls_pcie_g4_interrupt_init,
3709 + .read_other_conf = ls_pcie_g4_read_other_conf,
3710 +};
3711 +
3712 +static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
3713 + .link_up = ls_pcie_g4_link_up,
3714 +};
3715 +
3716 +static void workaround_tkt381274(struct ls_pcie_g4 *pcie)
3717 +{
3718 + struct mobiveil_pcie *mv_pci = pcie->pci;
3719 + u32 val;
3720 +
3721 + /* Set ACK latency timeout */
3722 + val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
3723 + val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
3724 + val |= (4 << ACK_LAT_TO_VAL_SHIFT);
3725 + csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
3726 +}
3727 +
3728 +static int __init ls_pcie_g4_probe(struct platform_device *pdev)
3729 +{
3730 + struct device *dev = &pdev->dev;
3731 + struct mobiveil_pcie *mv_pci;
3732 + struct ls_pcie_g4 *pcie;
3733 + struct device_node *np = dev->of_node;
3734 + int ret;
3735 +
3736 + if (!of_parse_phandle(np, "msi-parent", 0)) {
3737 + dev_err(dev, "failed to find msi-parent\n");
3738 + return -EINVAL;
3739 + }
3740 +
3741 + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
3742 + if (!pcie)
3743 + return -ENOMEM;
3744 +
3745 + mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
3746 + if (!mv_pci)
3747 + return -ENOMEM;
3748 +
3749 + mv_pci->pdev = pdev;
3750 + mv_pci->ops = &ls_pcie_g4_pab_ops;
3751 + mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
3752 + pcie->pci = mv_pci;
3753 +
3754 + platform_set_drvdata(pdev, pcie);
3755 +
3756 + INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
3757 +
3758 + ret = mobiveil_pcie_host_probe(mv_pci);
3759 + if (ret) {
3760 + dev_err(dev, "fail to probe!\n");
3761 + return ret;
3762 + }
3763 +
3764 + if (!ls_pcie_g4_is_bridge(pcie))
3765 + return -ENODEV;
3766 +
3767 + workaround_tkt381274(pcie);
3768 +
3769 + return 0;
3770 +}
3771 +
3772 +static const struct of_device_id ls_pcie_g4_of_match[] = {
3773 + { .compatible = "fsl,lx2160a-pcie", },
3774 + { },
3775 +};
3776 +
3777 +static struct platform_driver ls_pcie_g4_driver = {
3778 + .driver = {
3779 + .name = "layerscape-pcie-gen4",
3780 + .of_match_table = ls_pcie_g4_of_match,
3781 + .suppress_bind_attrs = true,
3782 + },
3783 +};
3784 +
3785 +builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
3786 --- /dev/null
3787 +++ b/drivers/pci/mobiveil/pcie-mobiveil-ep.c
3788 @@ -0,0 +1,512 @@
3789 +// SPDX-License-Identifier: GPL-2.0
3790 +/**
3791 + * Mobiveil PCIe Endpoint controller driver
3792 + *
3793 + * Copyright (C) 2018 NXP Semiconductor.
3794 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
3795 + */
3796 +
3797 +#include <linux/of.h>
3798 +#include <linux/pci-epc.h>
3799 +#include <linux/pci-epf.h>
3800 +#include <linux/platform_device.h>
3801 +#include "pcie-mobiveil.h"
3802 +
3803 +void mobiveil_pcie_ep_linkup(struct mobiveil_pcie_ep *ep)
3804 +{
3805 + struct pci_epc *epc = ep->epc;
3806 +
3807 + pci_epc_linkup(epc);
3808 +}
3809 +
3810 +static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
3811 + enum pci_barno bar)
3812 +{
3813 + csr_writel(pcie, bar, GPEX_BAR_SELECT);
3814 + csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
3815 + csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
3816 +}
3817 +
3818 +void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
3819 + enum pci_barno bar)
3820 +{
3821 + __mobiveil_pcie_ep_reset_bar(pcie, bar);
3822 +}
3823 +
3824 +static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
3825 + u8 cap_ptr, u8 cap)
3826 +{
3827 + u8 cap_id, next_cap_ptr;
3828 + u16 reg;
3829 +
3830 + reg = csr_readw(pcie, cap_ptr);
3831 + next_cap_ptr = (reg & 0xff00) >> 8;
3832 + cap_id = (reg & 0x00ff);
3833 +
3834 + if (cap_id == cap)
3835 + return cap_ptr;
3836 +
3837 + if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
3838 + return 0;
3839 +
3840 + return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
3841 +}
3842 +
3843 +static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie *pcie,
3844 + u8 cap)
3845 +{
3846 + u8 next_cap_ptr;
3847 + u16 reg;
3848 +
3849 + reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
3850 + next_cap_ptr = (reg & 0x00ff);
3851 +
3852 + if (!next_cap_ptr)
3853 + return 0;
3854 +
3855 + return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
3856 +}
3857 +
3858 +static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
3859 + struct pci_epf_header *hdr)
3860 +{
3861 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3862 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3863 +
3864 + csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
3865 + csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
3866 + csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
3867 + csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
3868 + csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
3869 + PCI_CLASS_DEVICE);
3870 + csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
3871 + csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
3872 + csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
3873 + csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
3874 +
3875 + return 0;
3876 +}
3877 +
3878 +static int mobiveil_pcie_ep_inbound_atu(struct mobiveil_pcie_ep *ep,
3879 + u8 func_no, enum pci_barno bar,
3880 + dma_addr_t cpu_addr)
3881 +{
3882 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3883 +
3884 + program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
3885 +
3886 + return 0;
3887 +}
3888 +
3889 +static int mobiveil_pcie_ep_outbound_atu(struct mobiveil_pcie_ep *ep,
3890 + phys_addr_t phys_addr,
3891 + u64 pci_addr, u8 func_no,
3892 + size_t size)
3893 +{
3894 + int ret;
3895 + u32 free_win;
3896 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3897 +
3898 + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
3899 + if (free_win >= ep->num_ob_windows) {
3900 + dev_err(&pcie->pdev->dev, "No free outbound window\n");
3901 + return -EINVAL;
3902 + }
3903 +
3904 + ret = program_ob_windows_ep(pcie, free_win, MEM_WINDOW_TYPE,
3905 + phys_addr, pci_addr, func_no, size);
3906 + if (ret < 0) {
3907 + dev_err(&pcie->pdev->dev, "Failed to program IB window\n");
3908 + return ret;
3909 + }
3910 +
3911 + set_bit(free_win, ep->ob_window_map);
3912 + ep->outbound_addr[free_win] = phys_addr;
3913 +
3914 + return 0;
3915 +}
3916 +
3917 +static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
3918 + struct pci_epf_bar *epf_bar)
3919 +{
3920 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3921 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3922 + enum pci_barno bar = epf_bar->barno;
3923 +
3924 + if (bar < ep->bar_num) {
3925 + __mobiveil_pcie_ep_reset_bar(pcie,
3926 + func_no * ep->bar_num + bar);
3927 +
3928 + mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
3929 + }
3930 +}
3931 +
3932 +static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
3933 + struct pci_epf_bar *epf_bar)
3934 +{
3935 + int ret;
3936 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3937 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3938 + enum pci_barno bar = epf_bar->barno;
3939 + size_t size = epf_bar->size;
3940 +
3941 + if (bar < ep->bar_num) {
3942 + ret = mobiveil_pcie_ep_inbound_atu(ep, func_no, bar,
3943 + epf_bar->phys_addr);
3944 + if (ret)
3945 + return ret;
3946 +
3947 + csr_writel(pcie, func_no * ep->bar_num + bar,
3948 + GPEX_BAR_SELECT);
3949 + csr_writel(pcie, lower_32_bits(~(size - 1)),
3950 + GPEX_BAR_SIZE_LDW);
3951 + csr_writel(pcie, upper_32_bits(~(size - 1)),
3952 + GPEX_BAR_SIZE_UDW);
3953 + }
3954 +
3955 + return 0;
3956 +}
3957 +
3958 +static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
3959 + phys_addr_t addr,
3960 + u32 *atu_index)
3961 +{
3962 + u32 index;
3963 +
3964 + for (index = 0; index < ep->num_ob_windows; index++) {
3965 + if (ep->outbound_addr[index] != addr)
3966 + continue;
3967 + *atu_index = index;
3968 + return 0;
3969 + }
3970 +
3971 + return -EINVAL;
3972 +}
3973 +
3974 +static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
3975 + phys_addr_t addr)
3976 +{
3977 + int ret;
3978 + u32 atu_index;
3979 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3980 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3981 +
3982 + ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
3983 + if (ret < 0)
3984 + return;
3985 +
3986 + mobiveil_pcie_disable_ob_win(pcie, atu_index);
3987 + clear_bit(atu_index, ep->ob_window_map);
3988 +}
3989 +
3990 +static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
3991 + phys_addr_t addr,
3992 + u64 pci_addr, size_t size)
3993 +{
3994 + int ret;
3995 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3996 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3997 +
3998 + ret = mobiveil_pcie_ep_outbound_atu(ep, addr, pci_addr, func_no, size);
3999 + if (ret) {
4000 + dev_err(&pcie->pdev->dev, "Failed to enable address\n");
4001 + return ret;
4002 + }
4003 +
4004 + return 0;
4005 +}
4006 +
4007 +static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
4008 +{
4009 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4010 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4011 + u32 val, reg;
4012 +
4013 + if (!ep->msi_cap)
4014 + return -EINVAL;
4015 +
4016 + reg = ep->msi_cap + PCI_MSI_FLAGS;
4017 + val = csr_readw(pcie, reg);
4018 + if (!(val & PCI_MSI_FLAGS_ENABLE))
4019 + return -EINVAL;
4020 +
4021 + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
4022 +
4023 + return val;
4024 +}
4025 +
4026 +static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
4027 + u8 func_no, u8 interrupts)
4028 +{
4029 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4030 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4031 + u32 val, reg;
4032 +
4033 + if (!ep->msi_cap)
4034 + return -EINVAL;
4035 +
4036 + reg = ep->msi_cap + PCI_MSI_FLAGS;
4037 + val = csr_readw(pcie, reg);
4038 + val &= ~PCI_MSI_FLAGS_QMASK;
4039 + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
4040 + csr_writew(pcie, val, reg);
4041 +
4042 + return 0;
4043 +}
4044 +
4045 +static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
4046 +{
4047 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4048 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4049 + u32 val, reg;
4050 +
4051 + if (!ep->msix_cap)
4052 + return -EINVAL;
4053 +
4054 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
4055 + val = csr_readw(pcie, reg);
4056 + if (!(val & PCI_MSIX_FLAGS_ENABLE))
4057 + return -EINVAL;
4058 +
4059 + val &= PCI_MSIX_FLAGS_QSIZE;
4060 +
4061 + return val;
4062 +}
4063 +
4064 +static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
4065 + u16 interrupts)
4066 +{
4067 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4068 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4069 + u32 val, reg;
4070 +
4071 + if (!ep->msix_cap)
4072 + return -EINVAL;
4073 +
4074 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
4075 + val = csr_readw(pcie, reg);
4076 + val &= ~PCI_MSIX_FLAGS_QSIZE;
4077 + val |= interrupts;
4078 + csr_writew(pcie, val, reg);
4079 +
4080 + return 0;
4081 +}
4082 +
4083 +static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
4084 + enum pci_epc_irq_type type,
4085 + u16 interrupt_num)
4086 +{
4087 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4088 +
4089 + if (!ep->ops->raise_irq)
4090 + return -EINVAL;
4091 +
4092 + return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
4093 +}
4094 +
4095 +static const struct pci_epc_ops epc_ops = {
4096 + .write_header = mobiveil_pcie_ep_write_header,
4097 + .set_bar = mobiveil_pcie_ep_set_bar,
4098 + .clear_bar = mobiveil_pcie_ep_clear_bar,
4099 + .map_addr = mobiveil_pcie_ep_map_addr,
4100 + .unmap_addr = mobiveil_pcie_ep_unmap_addr,
4101 + .set_msi = mobiveil_pcie_ep_set_msi,
4102 + .get_msi = mobiveil_pcie_ep_get_msi,
4103 + .set_msix = mobiveil_pcie_ep_set_msix,
4104 + .get_msix = mobiveil_pcie_ep_get_msix,
4105 + .raise_irq = mobiveil_pcie_ep_raise_irq,
4106 +};
4107 +
4108 +int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
4109 +{
4110 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4111 +
4112 + dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
4113 +
4114 + return -EINVAL;
4115 +}
4116 +
4117 +int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
4118 + u8 interrupt_num)
4119 +{
4120 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4121 + struct pci_epc *epc = ep->epc;
4122 + u16 msg_ctrl, msg_data;
4123 + u32 msg_addr_lower, msg_addr_upper, reg;
4124 + u64 msg_addr;
4125 + u32 func_num;
4126 + bool has_upper;
4127 + int ret;
4128 +
4129 + if (!ep->msi_cap)
4130 + return -EINVAL;
4131 +
4132 + func_num = csr_readl(pcie, PAB_CTRL);
4133 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4134 + func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
4135 + csr_writel(pcie, func_num, PAB_CTRL);
4136 +
4137 + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
4138 + reg = ep->msi_cap + PCI_MSI_FLAGS;
4139 + msg_ctrl = csr_readw(pcie, reg);
4140 + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
4141 + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
4142 + msg_addr_lower = csr_readl(pcie, reg);
4143 + if (has_upper) {
4144 + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
4145 + msg_addr_upper = csr_readl(pcie, reg);
4146 + reg = ep->msi_cap + PCI_MSI_DATA_64;
4147 + msg_data = csr_readw(pcie, reg);
4148 + } else {
4149 + msg_addr_upper = 0;
4150 + reg = ep->msi_cap + PCI_MSI_DATA_32;
4151 + msg_data = csr_readw(pcie, reg);
4152 + }
4153 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
4154 +
4155 + func_num = csr_readl(pcie, PAB_CTRL);
4156 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4157 + csr_writel(pcie, func_num, PAB_CTRL);
4158 +
4159 + ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
4160 + msg_addr, epc->mem->page_size);
4161 + if (ret)
4162 + return ret;
4163 +
4164 + writel(msg_data | (interrupt_num - 1), ep->msi_mem);
4165 +
4166 + mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
4167 +
4168 + return 0;
4169 +}
4170 +
4171 +int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
4172 + u16 interrupt_num)
4173 +{
4174 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4175 + struct pci_epc *epc = ep->epc;
4176 + u32 msg_addr_upper, msg_addr_lower;
4177 + u32 msg_data;
4178 + u64 msg_addr;
4179 + u32 func_num;
4180 + int ret;
4181 +
4182 + func_num = csr_readl(pcie, PAB_CTRL);
4183 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4184 + func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
4185 + csr_writel(pcie, func_num, PAB_CTRL);
4186 +
4187 + msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
4188 + PCI_MSIX_ENTRY_LOWER_ADDR +
4189 + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
4190 + msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
4191 + PCI_MSIX_ENTRY_UPPER_ADDR +
4192 + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
4193 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
4194 + msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
4195 + PCI_MSIX_ENTRY_DATA +
4196 + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
4197 +
4198 + func_num = csr_readl(pcie, PAB_CTRL);
4199 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4200 + csr_writel(pcie, func_num, PAB_CTRL);
4201 +
4202 + ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
4203 + msg_addr, epc->mem->page_size);
4204 + if (ret)
4205 + return ret;
4206 +
4207 + writel(msg_data, ep->msi_mem);
4208 +
4209 + mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
4210 +
4211 + return 0;
4212 +}
4213 +
4214 +void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep)
4215 +{
4216 + struct pci_epc *epc = ep->epc;
4217 +
4218 + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
4219 + epc->mem->page_size);
4220 +
4221 + pci_epc_mem_exit(epc);
4222 +}
4223 +
4224 +int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep)
4225 +{
4226 + int ret;
4227 + void *addr;
4228 + struct pci_epc *epc;
4229 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4230 + struct device *dev = &pcie->pdev->dev;
4231 + struct device_node *np = dev->of_node;
4232 +
4233 + if (!pcie->csr_axi_slave_base) {
4234 + dev_err(dev, "csr_base is not populated\n");
4235 + return -EINVAL;
4236 + }
4237 +
4238 + ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
4239 + if (ret < 0) {
4240 + dev_err(dev, "Unable to read *num-ob-windows* property\n");
4241 + return ret;
4242 + }
4243 +
4244 + if (ep->num_ob_windows > MAX_IATU_OUT) {
4245 + dev_err(dev, "Invalid *num-ob-windows*\n");
4246 + return -EINVAL;
4247 + }
4248 + ep->ob_window_map = devm_kcalloc(dev,
4249 + BITS_TO_LONGS(ep->num_ob_windows),
4250 + sizeof(long),
4251 + GFP_KERNEL);
4252 + if (!ep->ob_window_map)
4253 + return -ENOMEM;
4254 +
4255 + addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
4256 + GFP_KERNEL);
4257 + if (!addr)
4258 + return -ENOMEM;
4259 + ep->outbound_addr = addr;
4260 +
4261 + mobiveil_pcie_enable_bridge_pio(pcie);
4262 + mobiveil_pcie_enable_engine_apio(pcie);
4263 + mobiveil_pcie_enable_engine_ppio(pcie);
4264 + mobiveil_pcie_enable_msi_ep(pcie);
4265 +
4266 + epc = devm_pci_epc_create(dev, &epc_ops);
4267 + if (IS_ERR(epc)) {
4268 + dev_err(dev, "Failed to create epc device\n");
4269 + return PTR_ERR(epc);
4270 + }
4271 +
4272 + ep->epc = epc;
4273 + epc_set_drvdata(epc, ep);
4274 +
4275 + ep->msi_cap = mobiveil_pcie_ep_find_capability(pcie, PCI_CAP_ID_MSI);
4276 +
4277 + ep->msix_cap = mobiveil_pcie_ep_find_capability(pcie,
4278 + PCI_CAP_ID_MSIX);
4279 +
4280 + if (ep->ops->ep_init)
4281 + ep->ops->ep_init(ep);
4282 +
4283 + epc->max_functions = ep->pf_num;
4284 +
4285 + ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
4286 + ep->page_size);
4287 + if (ret < 0) {
4288 + dev_err(dev, "Failed to initialize address space\n");
4289 + return ret;
4290 + }
4291 +
4292 + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
4293 + epc->mem->page_size);
4294 + if (!ep->msi_mem) {
4295 + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
4296 + return -ENOMEM;
4297 + }
4298 +
4299 + return 0;
4300 +}
4301 --- /dev/null
4302 +++ b/drivers/pci/mobiveil/pcie-mobiveil-host.c
4303 @@ -0,0 +1,640 @@
4304 +// SPDX-License-Identifier: GPL-2.0
4305 +/*
4306 + * PCIe host controller driver for Mobiveil PCIe Host controller
4307 + *
4308 + * Copyright (c) 2018 Mobiveil Inc.
4309 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
4310 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
4311 + */
4312 +
4313 +#include <linux/init.h>
4314 +#include <linux/interrupt.h>
4315 +#include <linux/irq.h>
4316 +#include <linux/irqchip/chained_irq.h>
4317 +#include <linux/irqdomain.h>
4318 +#include <linux/kernel.h>
4319 +#include <linux/module.h>
4320 +#include <linux/msi.h>
4321 +#include <linux/of_address.h>
4322 +#include <linux/of_irq.h>
4323 +#include <linux/of_platform.h>
4324 +#include <linux/of_pci.h>
4325 +#include <linux/pci.h>
4326 +#include <linux/platform_device.h>
4327 +#include <linux/slab.h>
4328 +
4329 +#include "pcie-mobiveil.h"
4330 +
4331 +static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
4332 +{
4333 + struct mobiveil_pcie *pcie = bus->sysdata;
4334 +
4335 + /* If there is no link, then there is no device */
4336 + if (bus->number > pcie->rp.root_bus_nr && !mobiveil_pcie_link_up(pcie))
4337 + return false;
4338 +
4339 + /* Only one device down on each root port */
4340 + if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
4341 + return false;
4342 +
4343 + /*
4344 + * Do not read more than one device on the bus directly
4345 + * attached to RC
4346 + */
4347 + if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
4348 + return false;
4349 +
4350 + return true;
4351 +}
4352 +
4353 +/*
4354 + * mobiveil_pcie_map_bus - routine to get the configuration base of either
4355 + * root port or endpoint
4356 + */
4357 +static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
4358 + unsigned int devfn, int where)
4359 +{
4360 + struct mobiveil_pcie *pcie = bus->sysdata;
4361 + u32 value;
4362 +
4363 + if (!mobiveil_pcie_valid_device(bus, devfn))
4364 + return NULL;
4365 +
4366 + /* RC config access */
4367 + if (bus->number == pcie->rp.root_bus_nr)
4368 + return pcie->csr_axi_slave_base + where;
4369 +
4370 + /*
4371 + * EP config access (in Config/APIO space)
4372 + * Program PEX Address base (31..16 bits) with appropriate value
4373 + * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
4374 + * Relies on pci_lock serialization
4375 + */
4376 + value = bus->number << PAB_BUS_SHIFT |
4377 + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
4378 + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
4379 +
4380 + csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
4381 +
4382 + return pcie->rp.config_axi_slave_base + where;
4383 +}
4384 +
4385 +static int mobiveil_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
4386 + int where, int size, u32 *val)
4387 +{
4388 + struct mobiveil_pcie *pcie = bus->sysdata;
4389 + struct root_port *rp = &pcie->rp;
4390 +
4391 + if (bus->number > rp->root_bus_nr && rp->ops->read_other_conf)
4392 + return rp->ops->read_other_conf(bus, devfn, where, size, val);
4393 +
4394 + return pci_generic_config_read(bus, devfn, where, size, val);
4395 +}
4396 +static struct pci_ops mobiveil_pcie_ops = {
4397 + .map_bus = mobiveil_pcie_map_bus,
4398 + .read = mobiveil_pcie_config_read,
4399 + .write = pci_generic_config_write,
4400 +};
4401 +
4402 +static void mobiveil_pcie_isr(struct irq_desc *desc)
4403 +{
4404 + struct irq_chip *chip = irq_desc_get_chip(desc);
4405 + struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
4406 + struct device *dev = &pcie->pdev->dev;
4407 + struct mobiveil_msi *msi = &pcie->rp.msi;
4408 + u32 msi_data, msi_addr_lo, msi_addr_hi;
4409 + u32 intr_status, msi_status;
4410 + unsigned long shifted_status;
4411 + u32 bit, virq, val, mask;
4412 +
4413 + /*
4414 + * The core provides a single interrupt for both INTx/MSI messages.
4415 + * So we'll read both INTx and MSI status
4416 + */
4417 +
4418 + chained_irq_enter(chip, desc);
4419 +
4420 + /* read INTx status */
4421 + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
4422 + mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
4423 + intr_status = val & mask;
4424 +
4425 + /* Handle INTx */
4426 + if (intr_status & PAB_INTP_INTX_MASK) {
4427 + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
4428 + shifted_status &= PAB_INTP_INTX_MASK;
4429 + shifted_status >>= PAB_INTX_START;
4430 + do {
4431 + for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
4432 + virq = irq_find_mapping(pcie->rp.intx_domain,
4433 + bit + 1);
4434 + if (virq)
4435 + generic_handle_irq(virq);
4436 + else
4437 + dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
4438 + bit);
4439 +
4440 + /* clear interrupt handled */
4441 + csr_writel(pcie, 1 << (PAB_INTX_START + bit),
4442 + PAB_INTP_AMBA_MISC_STAT);
4443 + }
4444 +
4445 + shifted_status = csr_readl(pcie,
4446 + PAB_INTP_AMBA_MISC_STAT);
4447 + shifted_status &= PAB_INTP_INTX_MASK;
4448 + shifted_status >>= PAB_INTX_START;
4449 + } while (shifted_status != 0);
4450 + }
4451 +
4452 + /* read extra MSI status register */
4453 + msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
4454 +
4455 + /* handle MSI interrupts */
4456 + while (msi_status & 1) {
4457 + msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
4458 +
4459 + /*
4460 + * MSI_STATUS_OFFSET register gets updated to zero
4461 + * once we pop not only the MSI data but also address
4462 + * from MSI hardware FIFO. So keeping these following
4463 + * two dummy reads.
4464 + */
4465 + msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
4466 + MSI_ADDR_L_OFFSET);
4467 + msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
4468 + MSI_ADDR_H_OFFSET);
4469 + dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
4470 + msi_data, msi_addr_hi, msi_addr_lo);
4471 +
4472 + virq = irq_find_mapping(msi->dev_domain, msi_data);
4473 + if (virq)
4474 + generic_handle_irq(virq);
4475 +
4476 + msi_status = readl_relaxed(pcie->apb_csr_base +
4477 + MSI_STATUS_OFFSET);
4478 + }
4479 +
4480 + /* Clear the interrupt status */
4481 + csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
4482 + chained_irq_exit(chip, desc);
4483 +}
4484 +
4485 +static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
4486 +{
4487 + struct device *dev = &pcie->pdev->dev;
4488 + struct platform_device *pdev = pcie->pdev;
4489 + struct device_node *node = dev->of_node;
4490 + struct resource *res;
4491 +
4492 + /* map config resource */
4493 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4494 + "config_axi_slave");
4495 + pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
4496 + if (IS_ERR(pcie->rp.config_axi_slave_base))
4497 + return PTR_ERR(pcie->rp.config_axi_slave_base);
4498 + pcie->rp.ob_io_res = res;
4499 +
4500 + /* map csr resource */
4501 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4502 + "csr_axi_slave");
4503 + pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
4504 + if (IS_ERR(pcie->csr_axi_slave_base))
4505 + return PTR_ERR(pcie->csr_axi_slave_base);
4506 + pcie->pcie_reg_base = res->start;
4507 +
4508 + /* read the number of windows requested */
4509 + if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
4510 + pcie->apio_wins = MAX_PIO_WINDOWS;
4511 +
4512 + if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
4513 + pcie->ppio_wins = MAX_PIO_WINDOWS;
4514 +
4515 + return 0;
4516 +}
4517 +
4518 +static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
4519 +{
4520 + phys_addr_t msg_addr = pcie->pcie_reg_base;
4521 + struct mobiveil_msi *msi = &pcie->rp.msi;
4522 +
4523 + msi->num_of_vectors = PCI_NUM_MSI;
4524 + msi->msi_pages_phys = (phys_addr_t)msg_addr;
4525 +
4526 + writel_relaxed(lower_32_bits(msg_addr),
4527 + pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
4528 + writel_relaxed(upper_32_bits(msg_addr),
4529 + pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
4530 + writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
4531 + writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
4532 +}
4533 +
4534 +int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
4535 +{
4536 + u32 value, pab_ctrl, type;
4537 + struct resource_entry *win;
4538 + int i;
4539 +
4540 + /* Disable all inbound/outbound windows */
4541 + for (i = 0; i < pcie->apio_wins; i++)
4542 + mobiveil_pcie_disable_ob_win(pcie, i);
4543 + for (i = 0; i < pcie->ppio_wins; i++)
4544 + mobiveil_pcie_disable_ib_win(pcie, i);
4545 +
4546 + pcie->ib_wins_configured = 0;
4547 + pcie->ob_wins_configured = 0;
4548 +
4549 + if (!reinit) {
4550 + /* setup bus numbers */
4551 + value = csr_readl(pcie, PCI_PRIMARY_BUS);
4552 + value &= 0xff000000;
4553 + value |= 0x00ff0100;
4554 + csr_writel(pcie, value, PCI_PRIMARY_BUS);
4555 + }
4556 +
4557 + /*
4558 + * program Bus Master Enable Bit in Command Register in PAB Config
4559 + * Space
4560 + */
4561 + value = csr_readl(pcie, PCI_COMMAND);
4562 + value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
4563 + csr_writel(pcie, value, PCI_COMMAND);
4564 +
4565 + /*
4566 + * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
4567 + * register
4568 + */
4569 + pab_ctrl = csr_readl(pcie, PAB_CTRL);
4570 + pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
4571 + csr_writel(pcie, pab_ctrl, PAB_CTRL);
4572 +
4573 + /*
4574 + * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
4575 + * PAB_AXI_PIO_CTRL Register
4576 + */
4577 + value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
4578 + value |= APIO_EN_MASK;
4579 + csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
4580 +
4581 + /* Enable PCIe PIO master */
4582 + value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
4583 + value |= 1 << PIO_ENABLE_SHIFT;
4584 + csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
4585 +
4586 + /*
4587 + * we'll program one outbound window for config reads and
4588 + * another default inbound window for all the upstream traffic
4589 + * rest of the outbound windows will be configured according to
4590 + * the "ranges" field defined in device tree
4591 + */
4592 +
4593 + /* config outbound translation window */
4594 + program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
4595 + CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
4596 +
4597 + /* memory inbound translation window */
4598 + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
4599 +
4600 + /* Get the I/O and memory ranges from DT */
4601 + resource_list_for_each_entry(win, pcie->resources) {
4602 + if (resource_type(win->res) == IORESOURCE_MEM) {
4603 + type = MEM_WINDOW_TYPE;
4604 + } else if (resource_type(win->res) == IORESOURCE_IO) {
4605 + type = IO_WINDOW_TYPE;
4606 + } else if (resource_type(win->res) == IORESOURCE_BUS) {
4607 + pcie->rp.root_bus_nr = win->res->start;
4608 + continue;
4609 + } else {
4610 + continue;
4611 + }
4612 +
4613 + /* configure outbound translation window */
4614 + program_ob_windows(pcie, pcie->ob_wins_configured,
4615 + win->res->start,
4616 + win->res->start - win->offset,
4617 + type, resource_size(win->res));
4618 + }
4619 +
4620 + /* fixup for PCIe class register */
4621 + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
4622 + value &= 0xff;
4623 + value |= (PCI_CLASS_BRIDGE_PCI << 16);
4624 + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
4625 +
4626 + return 0;
4627 +}
4628 +
4629 +static void mobiveil_mask_intx_irq(struct irq_data *data)
4630 +{
4631 + struct irq_desc *desc = irq_to_desc(data->irq);
4632 + struct mobiveil_pcie *pcie;
4633 + unsigned long flags;
4634 + u32 mask, shifted_val;
4635 +
4636 + pcie = irq_desc_get_chip_data(desc);
4637 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
4638 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
4639 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
4640 + shifted_val &= ~mask;
4641 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
4642 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
4643 +}
4644 +
4645 +static void mobiveil_unmask_intx_irq(struct irq_data *data)
4646 +{
4647 + struct irq_desc *desc = irq_to_desc(data->irq);
4648 + struct mobiveil_pcie *pcie;
4649 + unsigned long flags;
4650 + u32 shifted_val, mask;
4651 +
4652 + pcie = irq_desc_get_chip_data(desc);
4653 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
4654 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
4655 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
4656 + shifted_val |= mask;
4657 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
4658 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
4659 +}
4660 +
4661 +static struct irq_chip intx_irq_chip = {
4662 + .name = "mobiveil_pcie:intx",
4663 + .irq_enable = mobiveil_unmask_intx_irq,
4664 + .irq_disable = mobiveil_mask_intx_irq,
4665 + .irq_mask = mobiveil_mask_intx_irq,
4666 + .irq_unmask = mobiveil_unmask_intx_irq,
4667 +};
4668 +
4669 +/* routine to setup the INTx related data */
4670 +static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
4671 + irq_hw_number_t hwirq)
4672 +{
4673 + irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
4674 + irq_set_chip_data(irq, domain->host_data);
4675 +
4676 + return 0;
4677 +}
4678 +
4679 +/* INTx domain operations structure */
4680 +static const struct irq_domain_ops intx_domain_ops = {
4681 + .map = mobiveil_pcie_intx_map,
4682 +};
4683 +
4684 +static struct irq_chip mobiveil_msi_irq_chip = {
4685 + .name = "Mobiveil PCIe MSI",
4686 + .irq_mask = pci_msi_mask_irq,
4687 + .irq_unmask = pci_msi_unmask_irq,
4688 +};
4689 +
4690 +static struct msi_domain_info mobiveil_msi_domain_info = {
4691 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
4692 + MSI_FLAG_PCI_MSIX),
4693 + .chip = &mobiveil_msi_irq_chip,
4694 +};
4695 +
4696 +static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
4697 +{
4698 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
4699 + phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
4700 +
4701 + msg->address_lo = lower_32_bits(addr);
4702 + msg->address_hi = upper_32_bits(addr);
4703 + msg->data = data->hwirq;
4704 +
4705 + dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
4706 + (int)data->hwirq, msg->address_hi, msg->address_lo);
4707 +}
4708 +
4709 +static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
4710 + const struct cpumask *mask, bool force)
4711 +{
4712 + return -EINVAL;
4713 +}
4714 +
4715 +static struct irq_chip mobiveil_msi_bottom_irq_chip = {
4716 + .name = "Mobiveil MSI",
4717 + .irq_compose_msi_msg = mobiveil_compose_msi_msg,
4718 + .irq_set_affinity = mobiveil_msi_set_affinity,
4719 +};
4720 +
4721 +static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
4722 + unsigned int virq,
4723 + unsigned int nr_irqs, void *args)
4724 +{
4725 + struct mobiveil_pcie *pcie = domain->host_data;
4726 + struct mobiveil_msi *msi = &pcie->rp.msi;
4727 + unsigned long bit;
4728 +
4729 + WARN_ON(nr_irqs != 1);
4730 + mutex_lock(&msi->lock);
4731 +
4732 + bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
4733 + if (bit >= msi->num_of_vectors) {
4734 + mutex_unlock(&msi->lock);
4735 + return -ENOSPC;
4736 + }
4737 +
4738 + set_bit(bit, msi->msi_irq_in_use);
4739 +
4740 + mutex_unlock(&msi->lock);
4741 +
4742 + irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
4743 + domain->host_data, handle_level_irq, NULL, NULL);
4744 + return 0;
4745 +}
4746 +
4747 +static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
4748 + unsigned int virq,
4749 + unsigned int nr_irqs)
4750 +{
4751 + struct irq_data *d = irq_domain_get_irq_data(domain, virq);
4752 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
4753 + struct mobiveil_msi *msi = &pcie->rp.msi;
4754 +
4755 + mutex_lock(&msi->lock);
4756 +
4757 + if (!test_bit(d->hwirq, msi->msi_irq_in_use))
4758 + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
4759 + d->hwirq);
4760 + else
4761 + __clear_bit(d->hwirq, msi->msi_irq_in_use);
4762 +
4763 + mutex_unlock(&msi->lock);
4764 +}
4765 +static const struct irq_domain_ops msi_domain_ops = {
4766 + .alloc = mobiveil_irq_msi_domain_alloc,
4767 + .free = mobiveil_irq_msi_domain_free,
4768 +};
4769 +
4770 +static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
4771 +{
4772 + struct device *dev = &pcie->pdev->dev;
4773 + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
4774 + struct mobiveil_msi *msi = &pcie->rp.msi;
4775 +
4776 + mutex_init(&msi->lock);
4777 + msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
4778 + &msi_domain_ops, pcie);
4779 + if (!msi->dev_domain) {
4780 + dev_err(dev, "failed to create IRQ domain\n");
4781 + return -ENOMEM;
4782 + }
4783 +
4784 + msi->msi_domain = pci_msi_create_irq_domain(fwnode,
4785 + &mobiveil_msi_domain_info,
4786 + msi->dev_domain);
4787 + if (!msi->msi_domain) {
4788 + dev_err(dev, "failed to create MSI domain\n");
4789 + irq_domain_remove(msi->dev_domain);
4790 + return -ENOMEM;
4791 + }
4792 +
4793 + return 0;
4794 +}
4795 +
4796 +static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
4797 +{
4798 + struct device *dev = &pcie->pdev->dev;
4799 + struct device_node *node = dev->of_node;
4800 + int ret;
4801 +
4802 + /* setup INTx */
4803 + pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
4804 + &intx_domain_ops, pcie);
4805 +
4806 + if (!pcie->rp.intx_domain) {
4807 + dev_err(dev, "Failed to get a INTx IRQ domain\n");
4808 + return -ENOMEM;
4809 + }
4810 +
4811 + raw_spin_lock_init(&pcie->rp.intx_mask_lock);
4812 +
4813 + /* setup MSI */
4814 + ret = mobiveil_allocate_msi_domains(pcie);
4815 + if (ret)
4816 + return ret;
4817 +
4818 + return 0;
4819 +}
4820 +
4821 +static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
4822 +{
4823 + struct device *dev = &pcie->pdev->dev;
4824 + struct resource *res;
4825 + int ret;
4826 +
4827 + if (pcie->rp.ops->interrupt_init)
4828 + return pcie->rp.ops->interrupt_init(pcie);
4829 +
4830 + /* map MSI config resource */
4831 + res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
4832 + "apb_csr");
4833 + pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
4834 + if (IS_ERR(pcie->apb_csr_base))
4835 + return PTR_ERR(pcie->apb_csr_base);
4836 +
4837 + /* setup MSI hardware registers */
4838 + mobiveil_pcie_enable_msi(pcie);
4839 +
4840 + pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
4841 + if (pcie->rp.irq <= 0) {
4842 + dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
4843 + return -ENODEV;
4844 + }
4845 +
4846 + /* initialize the IRQ domains */
4847 + ret = mobiveil_pcie_init_irq_domain(pcie);
4848 + if (ret) {
4849 + dev_err(dev, "Failed creating IRQ Domain\n");
4850 + return ret;
4851 + }
4852 +
4853 + irq_set_chained_handler_and_data(pcie->rp.irq,
4854 + mobiveil_pcie_isr, pcie);
4855 +
4856 + /* Enable interrupts */
4857 + csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
4858 + PAB_INTP_AMBA_MISC_ENB);
4859 +
4860 + return 0;
4861 +}
4862 +
4863 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
4864 +{
4865 + struct pci_bus *bus;
4866 + struct pci_bus *child;
4867 + struct pci_host_bridge *bridge;
4868 + struct device *dev = &pcie->pdev->dev;
4869 + struct device_node *np = dev->of_node;
4870 + resource_size_t iobase;
4871 + int ret;
4872 +
4873 + ret = mobiveil_pcie_parse_dt(pcie);
4874 + if (ret) {
4875 + dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
4876 + return ret;
4877 + }
4878 +
4879 + /* allocate the PCIe port */
4880 + bridge = devm_pci_alloc_host_bridge(dev, 0);
4881 + if (!bridge)
4882 + return -ENOMEM;
4883 +
4884 + /* parse the host bridge base addresses from the device tree file */
4885 + ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
4886 + &bridge->windows, &iobase);
4887 + if (ret) {
4888 + dev_err(dev, "Getting bridge resources failed\n");
4889 + return ret;
4890 + }
4891 +
4892 + pcie->resources = &bridge->windows;
4893 +
4894 + /*
4895 + * configure all inbound and outbound windows and prepare the RC for
4896 + * config access
4897 + */
4898 + ret = mobiveil_host_init(pcie, false);
4899 + if (ret) {
4900 + dev_err(dev, "Failed to initialize host\n");
4901 + goto error;
4902 + }
4903 +
4904 + ret = mobiveil_pcie_interrupt_init(pcie);
4905 + if (ret) {
4906 + dev_err(dev, "Interrupt init failed\n");
4907 + goto error;
4908 + }
4909 +
4910 + ret = devm_request_pci_bus_resources(dev, pcie->resources);
4911 + if (ret)
4912 + goto error;
4913 +
4914 + /* Initialize bridge */
4915 + bridge->dev.parent = dev;
4916 + bridge->sysdata = pcie;
4917 + bridge->busnr = pcie->rp.root_bus_nr;
4918 + bridge->ops = &mobiveil_pcie_ops;
4919 + bridge->map_irq = of_irq_parse_and_map_pci;
4920 + bridge->swizzle_irq = pci_common_swizzle;
4921 +
4922 + ret = mobiveil_bringup_link(pcie);
4923 + if (ret) {
4924 + dev_info(dev, "link bring-up failed\n");
4925 + }
4926 +
4927 + /* setup the kernel resources for the newly added PCIe root bus */
4928 + ret = pci_scan_root_bus_bridge(bridge);
4929 + if (ret)
4930 + goto error;
4931 +
4932 + bus = bridge->bus;
4933 +
4934 + pci_assign_unassigned_bus_resources(bus);
4935 + list_for_each_entry(child, &bus->children, node)
4936 + pcie_bus_configure_settings(child);
4937 + pci_bus_add_devices(bus);
4938 +
4939 + return 0;
4940 +error:
4941 + pci_free_resource_list(pcie->resources);
4942 + return ret;
4943 +}
4944 --- /dev/null
4945 +++ b/drivers/pci/mobiveil/pcie-mobiveil-plat.c
4946 @@ -0,0 +1,54 @@
4947 +// SPDX-License-Identifier: GPL-2.0
4948 +/*
4949 + * PCIe host controller driver for Mobiveil PCIe Host controller
4950 + *
4951 + * Copyright (c) 2018 Mobiveil Inc.
4952 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
4953 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
4954 + */
4955 +
4956 +#include <linux/init.h>
4957 +#include <linux/kernel.h>
4958 +#include <linux/module.h>
4959 +#include <linux/of_pci.h>
4960 +#include <linux/pci.h>
4961 +#include <linux/platform_device.h>
4962 +#include <linux/slab.h>
4963 +
4964 +#include "pcie-mobiveil.h"
4965 +
4966 +static int mobiveil_pcie_probe(struct platform_device *pdev)
4967 +{
4968 + struct mobiveil_pcie *pcie;
4969 + struct device *dev = &pdev->dev;
4970 +
4971 + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
4972 + if (!pcie)
4973 + return -ENOMEM;
4974 +
4975 + pcie->pdev = pdev;
4976 +
4977 + return mobiveil_pcie_host_probe(pcie);
4978 +}
4979 +
4980 +static const struct of_device_id mobiveil_pcie_of_match[] = {
4981 + {.compatible = "mbvl,gpex40-pcie",},
4982 + {},
4983 +};
4984 +
4985 +MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
4986 +
4987 +static struct platform_driver mobiveil_pcie_driver = {
4988 + .probe = mobiveil_pcie_probe,
4989 + .driver = {
4990 + .name = "mobiveil-pcie",
4991 + .of_match_table = mobiveil_pcie_of_match,
4992 + .suppress_bind_attrs = true,
4993 + },
4994 +};
4995 +
4996 +builtin_platform_driver(mobiveil_pcie_driver);
4997 +
4998 +MODULE_LICENSE("GPL v2");
4999 +MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
5000 +MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
5001 --- /dev/null
5002 +++ b/drivers/pci/mobiveil/pcie-mobiveil.c
5003 @@ -0,0 +1,334 @@
5004 +// SPDX-License-Identifier: GPL-2.0
5005 +/*
5006 + * PCIe host controller driver for Mobiveil PCIe Host controller
5007 + *
5008 + * Copyright (c) 2018 Mobiveil Inc.
5009 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
5010 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
5011 + */
5012 +
5013 +#include <linux/delay.h>
5014 +#include <linux/init.h>
5015 +#include <linux/kernel.h>
5016 +#include <linux/pci.h>
5017 +#include <linux/platform_device.h>
5018 +
5019 +#include "pcie-mobiveil.h"
5020 +
5021 +/*
5022 + * mobiveil_pcie_sel_page - routine to access paged register
5023 + *
5024 + * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are paged,
5025 + * for this scheme to work extracted higher 6 bits of the offset will be
5026 + * written to pg_sel field of PAB_CTRL register and rest of the lower 10
5027 + * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
5028 + */
5029 +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
5030 +{
5031 + u32 val;
5032 +
5033 + val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
5034 + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
5035 + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
5036 +
5037 + writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
5038 +}
5039 +
5040 +static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
5041 +{
5042 + if (off < PAGED_ADDR_BNDRY) {
5043 + /* For directly accessed registers, clear the pg_sel field */
5044 + mobiveil_pcie_sel_page(pcie, 0);
5045 + return pcie->csr_axi_slave_base + off;
5046 + }
5047 +
5048 + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
5049 + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
5050 +}
5051 +
5052 +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
5053 +{
5054 + if ((uintptr_t)addr & (size - 1)) {
5055 + *val = 0;
5056 + return PCIBIOS_BAD_REGISTER_NUMBER;
5057 + }
5058 +
5059 + switch (size) {
5060 + case 4:
5061 + *val = readl(addr);
5062 + break;
5063 + case 2:
5064 + *val = readw(addr);
5065 + break;
5066 + case 1:
5067 + *val = readb(addr);
5068 + break;
5069 + default:
5070 + *val = 0;
5071 + return PCIBIOS_BAD_REGISTER_NUMBER;
5072 + }
5073 +
5074 + return PCIBIOS_SUCCESSFUL;
5075 +}
5076 +
5077 +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
5078 +{
5079 + if ((uintptr_t)addr & (size - 1))
5080 + return PCIBIOS_BAD_REGISTER_NUMBER;
5081 +
5082 + switch (size) {
5083 + case 4:
5084 + writel(val, addr);
5085 + break;
5086 + case 2:
5087 + writew(val, addr);
5088 + break;
5089 + case 1:
5090 + writeb(val, addr);
5091 + break;
5092 + default:
5093 + return PCIBIOS_BAD_REGISTER_NUMBER;
5094 + }
5095 +
5096 + return PCIBIOS_SUCCESSFUL;
5097 +}
5098 +
5099 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
5100 +{
5101 + void *addr;
5102 + u32 val;
5103 + int ret;
5104 +
5105 + addr = mobiveil_pcie_comp_addr(pcie, off);
5106 +
5107 + ret = mobiveil_pcie_read(addr, size, &val);
5108 + if (ret)
5109 + dev_err(&pcie->pdev->dev, "read CSR address failed\n");
5110 +
5111 + return val;
5112 +}
5113 +
5114 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
5115 +{
5116 + void *addr;
5117 + int ret;
5118 +
5119 + addr = mobiveil_pcie_comp_addr(pcie, off);
5120 +
5121 + ret = mobiveil_pcie_write(addr, size, val);
5122 + if (ret)
5123 + dev_err(&pcie->pdev->dev, "write CSR address failed\n");
5124 +}
5125 +
5126 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
5127 +{
5128 + if (pcie->ops->link_up)
5129 + return pcie->ops->link_up(pcie);
5130 +
5131 + return (csr_readl(pcie, LTSSM_STATUS) &
5132 + LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
5133 +}
5134 +
5135 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
5136 + u64 pci_addr, u32 type, u64 size)
5137 +{
5138 + u32 value;
5139 + u64 size64 = ~(size - 1);
5140 +
5141 + if (win_num >= pcie->ppio_wins) {
5142 + dev_err(&pcie->pdev->dev,
5143 + "ERROR: max inbound windows reached !\n");
5144 + return;
5145 + }
5146 +
5147 + value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
5148 + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT |
5149 + WIN_SIZE_MASK << WIN_SIZE_SHIFT);
5150 + value |= (type << AMAP_CTRL_TYPE_SHIFT) | (1 << AMAP_CTRL_EN_SHIFT) |
5151 + (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
5152 + csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
5153 +
5154 + csr_writel(pcie, upper_32_bits(size64),
5155 + PAB_EXT_PEX_AMAP_SIZEN(win_num));
5156 +
5157 + csr_writel(pcie, lower_32_bits(cpu_addr),
5158 + PAB_PEX_AMAP_AXI_WIN(win_num));
5159 + csr_writel(pcie, upper_32_bits(cpu_addr),
5160 + PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
5161 +
5162 + csr_writel(pcie, lower_32_bits(pci_addr),
5163 + PAB_PEX_AMAP_PEX_WIN_L(win_num));
5164 + csr_writel(pcie, upper_32_bits(pci_addr),
5165 + PAB_PEX_AMAP_PEX_WIN_H(win_num));
5166 +
5167 + pcie->ib_wins_configured++;
5168 +}
5169 +
5170 +/*
5171 + * routine to program the outbound windows
5172 + */
5173 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
5174 + u64 pci_addr, u32 type, u64 size)
5175 +{
5176 +
5177 + u32 value;
5178 + u64 size64 = ~(size - 1);
5179 +
5180 + if (win_num >= pcie->apio_wins) {
5181 + dev_err(&pcie->pdev->dev,
5182 + "ERROR: max outbound windows reached !\n");
5183 + return;
5184 + }
5185 +
5186 + /*
5187 + * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
5188 + * to 4 KB in PAB_AXI_AMAP_CTRL register
5189 + */
5190 + value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
5191 + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
5192 + WIN_SIZE_MASK << WIN_SIZE_SHIFT);
5193 + value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
5194 + (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
5195 + csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
5196 +
5197 + csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
5198 +
5199 + /*
5200 + * program AXI window base with appropriate value in
5201 + * PAB_AXI_AMAP_AXI_WIN0 register
5202 + */
5203 + csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
5204 + PAB_AXI_AMAP_AXI_WIN(win_num));
5205 + csr_writel(pcie, upper_32_bits(cpu_addr),
5206 + PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
5207 +
5208 + csr_writel(pcie, lower_32_bits(pci_addr),
5209 + PAB_AXI_AMAP_PEX_WIN_L(win_num));
5210 + csr_writel(pcie, upper_32_bits(pci_addr),
5211 + PAB_AXI_AMAP_PEX_WIN_H(win_num));
5212 +
5213 + pcie->ob_wins_configured++;
5214 +}
5215 +
5216 +int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
5217 + u64 phys, u64 bus_addr, u8 func, u64 size)
5218 +{
5219 + u32 val;
5220 + u32 size_h, size_l;
5221 +
5222 + if (size & (size - 1))
5223 + size = 1 << (1 + ilog2(size));
5224 +
5225 + size_h = upper_32_bits(~(size - 1));
5226 + size_l = lower_32_bits(~(size - 1));
5227 +
5228 + val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
5229 + val &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
5230 + WIN_SIZE_MASK << WIN_SIZE_SHIFT);
5231 + val |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
5232 + (size_l & (WIN_SIZE_MASK << WIN_SIZE_SHIFT));
5233 + csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
5234 +
5235 + csr_writel(pcie, func, PAB_AXI_AMAP_PCI_HDR_PARAM(win_num));
5236 + csr_writel(pcie, lower_32_bits(phys), PAB_AXI_AMAP_AXI_WIN(win_num));
5237 + csr_writel(pcie, upper_32_bits(phys),
5238 + PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
5239 + csr_writel(pcie, lower_32_bits(bus_addr),
5240 + PAB_AXI_AMAP_PEX_WIN_L(win_num));
5241 + csr_writel(pcie, upper_32_bits(bus_addr),
5242 + PAB_AXI_AMAP_PEX_WIN_H(win_num));
5243 + csr_writel(pcie, size_h, PAB_EXT_AXI_AMAP_SIZE(win_num));
5244 +
5245 + return 0;
5246 +}
5247 +
5248 +void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
5249 + int bar, u64 phys)
5250 +{
5251 + csr_writel(pcie, upper_32_bits(phys),
5252 + PAB_EXT_PEX_BAR_AMAP(func_no, bar));
5253 + csr_writel(pcie, lower_32_bits(phys) | PEX_BAR_AMAP_EN,
5254 + PAB_PEX_BAR_AMAP(func_no, bar));
5255 +}
5256 +
5257 +void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pcie,
5258 + u8 func_no, u8 bar)
5259 +{
5260 + u32 val;
5261 +
5262 + val = csr_readl(pcie, PAB_PEX_BAR_AMAP(func_no, bar));
5263 + val &= ~(1 << 0);
5264 + csr_writel(pcie, val, PAB_PEX_BAR_AMAP(func_no, bar));
5265 +}
5266 +
5267 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
5268 +{
5269 + int retries;
5270 +
5271 + /* check if the link is up or not */
5272 + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
5273 + if (mobiveil_pcie_link_up(pcie))
5274 + return 0;
5275 +
5276 + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
5277 + }
5278 +
5279 + dev_info(&pcie->pdev->dev, "link never came up\n");
5280 +
5281 + return -ETIMEDOUT;
5282 +}
5283 +
5284 +void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pcie, int win_num)
5285 +{
5286 + u32 val;
5287 +
5288 + val = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
5289 + val &= ~(1 << AMAP_CTRL_EN_SHIFT);
5290 + csr_writel(pcie, val, PAB_PEX_AMAP_CTRL(win_num));
5291 +}
5292 +
5293 +void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num)
5294 +{
5295 + u32 val;
5296 +
5297 + val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
5298 + val &= ~(1 << WIN_ENABLE_SHIFT);
5299 + csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
5300 +}
5301 +
5302 +void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pcie)
5303 +{
5304 + u32 val;
5305 +
5306 + val = csr_readl(pcie, PAB_CTRL);
5307 + val |= 1 << AMBA_PIO_ENABLE_SHIFT;
5308 + val |= 1 << PEX_PIO_ENABLE_SHIFT;
5309 + csr_writel(pcie, val, PAB_CTRL);
5310 +}
5311 +
5312 +void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pcie)
5313 +{
5314 + u32 val;
5315 +
5316 + val = csr_readl(pcie, PAB_AXI_PIO_CTRL);
5317 + val |= APIO_EN_MASK;
5318 + csr_writel(pcie, val, PAB_AXI_PIO_CTRL);
5319 +}
5320 +
5321 +void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pcie)
5322 +{
5323 + u32 val;
5324 +
5325 + val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
5326 + val |= 1 << PIO_ENABLE_SHIFT;
5327 + csr_writel(pcie, val, PAB_PEX_PIO_CTRL);
5328 +}
5329 +
5330 +void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pcie)
5331 +{
5332 + u32 val;
5333 +
5334 + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
5335 + val |= 1 << 0;
5336 + csr_writel(pcie, val, PAB_INTP_AMBA_MISC_ENB);
5337 +}
5338 --- /dev/null
5339 +++ b/drivers/pci/mobiveil/pcie-mobiveil.h
5340 @@ -0,0 +1,296 @@
5341 +/* SPDX-License-Identifier: GPL-2.0 */
5342 +/*
5343 + * PCIe host controller driver for Mobiveil PCIe Host controller
5344 + *
5345 + * Copyright (c) 2018 Mobiveil Inc.
5346 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
5347 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
5348 + */
5349 +
5350 +#ifndef _PCIE_MOBIVEIL_H
5351 +#define _PCIE_MOBIVEIL_H
5352 +
5353 +#include <linux/pci.h>
5354 +#include <linux/irq.h>
5355 +#include <linux/msi.h>
5356 +#include "../pci.h"
5357 +
5358 +#include <linux/pci-epc.h>
5359 +#include <linux/pci-epf.h>
5360 +
5361 +#define MAX_IATU_OUT 256
5362 +/* register offsets and bit positions */
5363 +
5364 +/*
5365 + * translation tables are grouped into windows, each window registers are
5366 + * grouped into blocks of 4 or 16 registers each
5367 + */
5368 +#define PAB_REG_BLOCK_SIZE 16
5369 +#define PAB_EXT_REG_BLOCK_SIZE 4
5370 +
5371 +#define PAB_REG_ADDR(offset, win) \
5372 + (offset + (win * PAB_REG_BLOCK_SIZE))
5373 +#define PAB_EXT_REG_ADDR(offset, win) \
5374 + (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
5375 +
5376 +#define LTSSM_STATUS 0x0404
5377 +#define LTSSM_STATUS_L0_MASK 0x3f
5378 +#define LTSSM_STATUS_L0 0x2d
5379 +
5380 +#define PAB_CTRL 0x0808
5381 +#define AMBA_PIO_ENABLE_SHIFT 0
5382 +#define PEX_PIO_ENABLE_SHIFT 1
5383 +#define PAGE_SEL_SHIFT 13
5384 +#define PAGE_SEL_MASK 0x3f
5385 +#define PAGE_LO_MASK 0x3ff
5386 +#define PAGE_SEL_OFFSET_SHIFT 10
5387 +#define FUNC_SEL_SHIFT 19
5388 +#define FUNC_SEL_MASK 0x1ff
5389 +#define MSI_SW_CTRL_EN (1 << 29)
5390 +
5391 +#define PAB_ACTIVITY_STAT 0x81c
5392 +
5393 +#define PAB_AXI_PIO_CTRL 0x0840
5394 +#define APIO_EN_MASK 0xf
5395 +
5396 +#define PAB_PEX_PIO_CTRL 0x08c0
5397 +#define PIO_ENABLE_SHIFT 0
5398 +
5399 +#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
5400 +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
5401 +#define PAB_INTP_RESET (0x1 << 1)
5402 +#define PAB_INTP_MSI (0x1 << 3)
5403 +#define PAB_INTP_INTA (0x1 << 5)
5404 +#define PAB_INTP_INTB (0x1 << 6)
5405 +#define PAB_INTP_INTC (0x1 << 7)
5406 +#define PAB_INTP_INTD (0x1 << 8)
5407 +#define PAB_INTP_PCIE_UE (0x1 << 9)
5408 +#define PAB_INTP_IE_PMREDI (0x1 << 29)
5409 +#define PAB_INTP_IE_EC (0x1 << 30)
5410 +#define PAB_INTP_MSI_MASK PAB_INTP_MSI
5411 +#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
5412 + PAB_INTP_INTC | PAB_INTP_INTD)
5413 +
5414 +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
5415 +#define WIN_ENABLE_SHIFT 0
5416 +#define WIN_TYPE_SHIFT 1
5417 +#define WIN_TYPE_MASK 0x3
5418 +#define WIN_SIZE_SHIFT 10
5419 +#define WIN_SIZE_MASK 0x3fffff
5420 +
5421 +#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
5422 +
5423 +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
5424 +#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
5425 +#define AXI_WINDOW_ALIGN_MASK 3
5426 +
5427 +#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
5428 +#define PAB_BUS_SHIFT 24
5429 +#define PAB_DEVICE_SHIFT 19
5430 +#define PAB_FUNCTION_SHIFT 16
5431 +
5432 +#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
5433 +#define PAB_INTP_AXI_PIO_CLASS 0x474
5434 +
5435 +#define GPEX_ACK_REPLAY_TO 0x438
5436 +#define ACK_LAT_TO_VAL_MASK 0x1fff
5437 +#define ACK_LAT_TO_VAL_SHIFT 0
5438 +
5439 +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
5440 +#define AMAP_CTRL_EN_SHIFT 0
5441 +#define AMAP_CTRL_TYPE_SHIFT 1
5442 +#define AMAP_CTRL_TYPE_MASK 3
5443 +
5444 +#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
5445 +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
5446 +#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
5447 +#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
5448 +#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
5449 +
5450 +/* PPIO WINs EP mode */
5451 +#define PAB_PEX_BAR_AMAP(func, bar) (0x1ba0 + 0x20 * func + 4 * bar)
5452 +#define PAB_EXT_PEX_BAR_AMAP(func, bar) (0x84a0 + 0x20 * func + 4 * bar)
5453 +#define PEX_BAR_AMAP_EN (1 << 0)
5454 +
5455 +#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x04 * idx)
5456 +#define PAB_MSIX_TABLE_PBA_ACCESS 0xD000
5457 +
5458 +#define GPEX_BAR_ENABLE 0x4D4
5459 +#define GPEX_BAR_SIZE_LDW 0x4D8
5460 +#define GPEX_BAR_SIZE_UDW 0x4DC
5461 +#define GPEX_BAR_SELECT 0x4E0
5462 +
5463 +#define CFG_UNCORRECTABLE_ERROR_SEVERITY 0x10c
5464 +#define UNSUPPORTED_REQUEST_ERROR_SHIFT 20
5465 +#define CFG_UNCORRECTABLE_ERROR_MASK 0x108
5466 +
5467 +/* starting offset of INTX bits in status register */
5468 +#define PAB_INTX_START 5
5469 +
5470 +/* supported number of MSI interrupts */
5471 +#define PCI_NUM_MSI 16
5472 +
5473 +/* MSI registers */
5474 +#define MSI_BASE_LO_OFFSET 0x04
5475 +#define MSI_BASE_HI_OFFSET 0x08
5476 +#define MSI_SIZE_OFFSET 0x0c
5477 +#define MSI_ENABLE_OFFSET 0x14
5478 +#define MSI_STATUS_OFFSET 0x18
5479 +#define MSI_DATA_OFFSET 0x20
5480 +#define MSI_ADDR_L_OFFSET 0x24
5481 +#define MSI_ADDR_H_OFFSET 0x28
5482 +
5483 +/* outbound and inbound window definitions */
5484 +#define WIN_NUM_0 0
5485 +#define WIN_NUM_1 1
5486 +#define CFG_WINDOW_TYPE 0
5487 +#define IO_WINDOW_TYPE 1
5488 +#define MEM_WINDOW_TYPE 2
5489 +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
5490 +#define MAX_PIO_WINDOWS 8
5491 +
5492 +/* Parameters for the waiting for link up routine */
5493 +#define LINK_WAIT_MAX_RETRIES 10
5494 +#define LINK_WAIT_MIN 90000
5495 +#define LINK_WAIT_MAX 100000
5496 +
5497 +#define PAGED_ADDR_BNDRY 0xc00
5498 +#define OFFSET_TO_PAGE_ADDR(off) \
5499 + ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
5500 +#define OFFSET_TO_PAGE_IDX(off) \
5501 + ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
5502 +
5503 +struct mobiveil_pcie;
5504 +struct mobiveil_pcie_ep;
5505 +
5506 +struct mobiveil_msi { /* MSI information */
5507 + struct mutex lock; /* protect bitmap variable */
5508 + struct irq_domain *msi_domain;
5509 + struct irq_domain *dev_domain;
5510 + phys_addr_t msi_pages_phys;
5511 + int num_of_vectors;
5512 + DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
5513 +};
5514 +
5515 +struct mobiveil_rp_ops {
5516 + int (*interrupt_init)(struct mobiveil_pcie *pcie);
5517 + int (*read_other_conf)(struct pci_bus *bus, unsigned int devfn,
5518 + int where, int size, u32 *val);
5519 +};
5520 +
5521 +struct root_port {
5522 + u8 root_bus_nr;
5523 + void __iomem *config_axi_slave_base; /* endpoint config base */
5524 + struct resource *ob_io_res;
5525 + struct mobiveil_rp_ops *ops;
5526 + int irq;
5527 + raw_spinlock_t intx_mask_lock;
5528 + struct irq_domain *intx_domain;
5529 + struct mobiveil_msi msi;
5530 +};
5531 +
5532 +struct mobiveil_pab_ops {
5533 + int (*link_up)(struct mobiveil_pcie *pcie);
5534 +};
5535 +
5536 +struct mobiveil_pcie_ep_ops {
5537 + void (*ep_init)(struct mobiveil_pcie_ep *ep);
5538 + int (*raise_irq)(struct mobiveil_pcie_ep *ep, u8 func_no,
5539 + enum pci_epc_irq_type type, u16 interrupt_num);
5540 +};
5541 +
5542 +struct mobiveil_pcie_ep {
5543 + struct pci_epc *epc;
5544 + struct mobiveil_pcie_ep_ops *ops;
5545 + phys_addr_t phys_base;
5546 + size_t addr_size;
5547 + size_t page_size;
5548 + phys_addr_t *outbound_addr;
5549 + unsigned long *ob_window_map;
5550 + u32 num_ob_windows;
5551 + void __iomem *msi_mem;
5552 + phys_addr_t msi_mem_phys;
5553 + u8 msi_cap; /* MSI capability offset */
5554 + u8 msix_cap; /* MSI-X capability offset */
5555 + u8 bar_num;
5556 + u32 pf_num;
5557 +};
5558 +
5559 +struct mobiveil_pcie {
5560 + struct platform_device *pdev;
5561 + struct list_head *resources;
5562 + void __iomem *csr_axi_slave_base; /* PAB registers base */
5563 + phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
5564 + void __iomem *apb_csr_base; /* MSI register base */
5565 + u32 apio_wins;
5566 + u32 ppio_wins;
5567 + u32 ob_wins_configured; /* configured outbound windows */
5568 + u32 ib_wins_configured; /* configured inbound windows */
5569 + const struct mobiveil_pab_ops *ops;
5570 + struct root_port rp;
5571 + struct mobiveil_pcie_ep ep;
5572 +};
5573 +#define to_mobiveil_pcie_from_ep(endpoint) \
5574 + container_of((endpoint), struct mobiveil_pcie, ep)
5575 +
5576 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
5577 +int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
5578 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
5579 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
5580 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
5581 + u64 pci_addr, u32 type, u64 size);
5582 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
5583 + u64 pci_addr, u32 type, u64 size);
5584 +void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pci, int win_num);
5585 +void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pci, int win_num);
5586 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
5587 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
5588 +
5589 +static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
5590 +{
5591 + return csr_read(pcie, off, 0x4);
5592 +}
5593 +
5594 +static inline u32 csr_readw(struct mobiveil_pcie *pcie, u32 off)
5595 +{
5596 + return csr_read(pcie, off, 0x2);
5597 +}
5598 +
5599 +static inline u32 csr_readb(struct mobiveil_pcie *pcie, u32 off)
5600 +{
5601 + return csr_read(pcie, off, 0x1);
5602 +}
5603 +
5604 +static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
5605 +{
5606 + csr_write(pcie, val, off, 0x4);
5607 +}
5608 +
5609 +static inline void csr_writew(struct mobiveil_pcie *pcie, u32 val, u32 off)
5610 +{
5611 + csr_write(pcie, val, off, 0x2);
5612 +}
5613 +
5614 +static inline void csr_writeb(struct mobiveil_pcie *pcie, u32 val, u32 off)
5615 +{
5616 + csr_write(pcie, val, off, 0x1);
5617 +}
5618 +
5619 +void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
5620 + int bar, u64 phys);
5621 +int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
5622 + u64 phys, u64 bus_addr, u8 func, u64 size);
5623 +void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pci,
5624 + u8 func_no, u8 bar);
5625 +int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep);
5626 +int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no);
5627 +int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
5628 + u8 interrupt_num);
5629 +int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
5630 + u16 interrupt_num);
5631 +void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pci, enum pci_barno bar);
5632 +void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pci);
5633 +void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pci);
5634 +void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pci);
5635 +void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pci);
5636 +#endif /* _PCIE_MOBIVEIL_H */
5637 --- a/drivers/pci/pcie/portdrv_core.c
5638 +++ b/drivers/pci/pcie/portdrv_core.c
5639 @@ -45,6 +45,20 @@ static void release_pcie_device(struct d
5640 }
5641
5642 /**
5643 + * pcibios_check_service_irqs - check irqs in the device tree
5644 + * @dev: PCI Express port to handle
5645 + * @irqs: Array of irqs to populate
5646 + * @mask: Bitmask of port capabilities returned by get_port_device_capability()
5647 + *
5648 + * Return value: 0 means no service irqs in the device tree
5649 + *
5650 + */
5651 +int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
5652 +{
5653 + return 0;
5654 +}
5655 +
5656 +/**
5657 * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
5658 * for given port
5659 * @dev: PCI Express port to handle
5660 @@ -185,10 +199,25 @@ out_free_irqs:
5661 static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
5662 {
5663 int ret, i;
5664 + int irq = -1;
5665
5666 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
5667 irqs[i] = -1;
5668
5669 +	/* Check if some platforms own independent irq pins for AER/PME etc.
5670 + * Some platforms may own independent AER/PME interrupts and set
5671 + * them in the device tree file.
5672 + */
5673 + ret = pcibios_check_service_irqs(dev, irqs, mask);
5674 + if (ret) {
5675 + if (dev->irq)
5676 + irq = dev->irq;
5677 + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
5678 + if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT)
5679 + irqs[i] = irq;
5680 + return 0;
5681 + }
5682 +
5683 /*
5684 * If we support PME or hotplug, but we can't use MSI/MSI-X for
5685 * them, we have to fall back to INTx or other interrupts, e.g., a
5686 --- a/drivers/pci/quirks.c
5687 +++ b/drivers/pci/quirks.c
5688 @@ -3394,6 +3394,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_A
5689 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
5690 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
5691
5692 +/*
5693 + * NXP (Freescale Vendor ID) LS1088 chips do not behave correctly after
5694 + * bus reset. The link state of the device does not come up, so the config
5695 + * space is never accessible again.
5696 + */
5697 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x80c0, quirk_no_bus_reset);
5698 +
5699 static void quirk_no_pm_reset(struct pci_dev *dev)
5700 {
5701 /*
5702 @@ -4878,3 +4885,11 @@ static void quirk_no_ats(struct pci_dev
5703 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
5704 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
5705 #endif /* CONFIG_PCI_ATS */
5706 +
5707 +/* Freescale PCIe doesn't support MSI in RC mode */
5708 +static void quirk_fsl_no_msi(struct pci_dev *pdev)
5709 +{
5710 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
5711 + pdev->no_msi = 1;
5712 +}
5713 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5714 --- a/include/linux/pci-ep-cfs.h
5715 +++ b/include/linux/pci-ep-cfs.h
5716 @@ -1,12 +1,9 @@
5717 +/* SPDX-License-Identifier: GPL-2.0+ */
5718 /**
5719 * PCI Endpoint ConfigFS header file
5720 *
5721 * Copyright (C) 2017 Texas Instruments
5722 * Author: Kishon Vijay Abraham I <kishon@ti.com>
5723 - *
5724 - * This program is free software: you can redistribute it and/or modify
5725 - * it under the terms of the GNU General Public License version 2 of
5726 - * the License as published by the Free Software Foundation.
5727 */
5728
5729 #ifndef __LINUX_PCI_EP_CFS_H
5730 --- a/include/linux/pci-epc.h
5731 +++ b/include/linux/pci-epc.h
5732 @@ -1,12 +1,9 @@
5733 +/* SPDX-License-Identifier: GPL-2.0+ */
5734 /**
5735 * PCI Endpoint *Controller* (EPC) header file
5736 *
5737 * Copyright (C) 2017 Texas Instruments
5738 * Author: Kishon Vijay Abraham I <kishon@ti.com>
5739 - *
5740 - * This program is free software: you can redistribute it and/or modify
5741 - * it under the terms of the GNU General Public License version 2 of
5742 - * the License as published by the Free Software Foundation.
5743 */
5744
5745 #ifndef __LINUX_PCI_EPC_H
5746 @@ -20,6 +17,7 @@ enum pci_epc_irq_type {
5747 PCI_EPC_IRQ_UNKNOWN,
5748 PCI_EPC_IRQ_LEGACY,
5749 PCI_EPC_IRQ_MSI,
5750 + PCI_EPC_IRQ_MSIX,
5751 };
5752
5753 /**
5754 @@ -33,24 +31,32 @@ enum pci_epc_irq_type {
5755 * capability register
5756 * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
5757 * the MSI capability register
5758 - * @raise_irq: ops to raise a legacy or MSI interrupt
5759 + * @set_msix: ops to set the requested number of MSI-X interrupts in the
5760 + * MSI-X capability register
5761 + * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
5762 + * from the MSI-X capability register
5763 + * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
5764 * @start: ops to start the PCI link
5765 * @stop: ops to stop the PCI link
5766 * @owner: the module owner containing the ops
5767 */
5768 struct pci_epc_ops {
5769 - int (*write_header)(struct pci_epc *pci_epc,
5770 + int (*write_header)(struct pci_epc *epc, u8 func_no,
5771 struct pci_epf_header *hdr);
5772 - int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
5773 - dma_addr_t bar_phys, size_t size, int flags);
5774 - void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
5775 - int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
5776 - u64 pci_addr, size_t size);
5777 - void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
5778 - int (*set_msi)(struct pci_epc *epc, u8 interrupts);
5779 - int (*get_msi)(struct pci_epc *epc);
5780 - int (*raise_irq)(struct pci_epc *pci_epc,
5781 - enum pci_epc_irq_type type, u8 interrupt_num);
5782 + int (*set_bar)(struct pci_epc *epc, u8 func_no,
5783 + struct pci_epf_bar *epf_bar);
5784 + void (*clear_bar)(struct pci_epc *epc, u8 func_no,
5785 + struct pci_epf_bar *epf_bar);
5786 + int (*map_addr)(struct pci_epc *epc, u8 func_no,
5787 + phys_addr_t addr, u64 pci_addr, size_t size);
5788 + void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
5789 + phys_addr_t addr);
5790 + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
5791 + int (*get_msi)(struct pci_epc *epc, u8 func_no);
5792 + int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
5793 + int (*get_msix)(struct pci_epc *epc, u8 func_no);
5794 + int (*raise_irq)(struct pci_epc *epc, u8 func_no,
5795 + enum pci_epc_irq_type type, u16 interrupt_num);
5796 int (*start)(struct pci_epc *epc);
5797 void (*stop)(struct pci_epc *epc);
5798 struct module *owner;
5799 @@ -91,8 +97,17 @@ struct pci_epc {
5800 struct config_group *group;
5801 /* spinlock to protect against concurrent access of EP controller */
5802 spinlock_t lock;
5803 + unsigned int features;
5804 };
5805
5806 +#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
5807 +#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
5808 +#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
5809 +#define EPC_FEATURE_SET_BAR(features, bar) \
5810 + (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
5811 +#define EPC_FEATURE_GET_BAR(features) \
5812 + ((features & EPC_FEATURE_BAR_MASK) >> 1)
5813 +
5814 #define to_pci_epc(device) container_of((device), struct pci_epc, dev)
5815
5816 #define pci_epc_create(dev, ops) \
5817 @@ -124,17 +139,23 @@ void pci_epc_destroy(struct pci_epc *epc
5818 int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
5819 void pci_epc_linkup(struct pci_epc *epc);
5820 void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
5821 -int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
5822 -int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
5823 - dma_addr_t bar_phys, size_t size, int flags);
5824 -void pci_epc_clear_bar(struct pci_epc *epc, int bar);
5825 -int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
5826 +int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
5827 + struct pci_epf_header *hdr);
5828 +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
5829 + struct pci_epf_bar *epf_bar);
5830 +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
5831 + struct pci_epf_bar *epf_bar);
5832 +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
5833 + phys_addr_t phys_addr,
5834 u64 pci_addr, size_t size);
5835 -void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
5836 -int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
5837 -int pci_epc_get_msi(struct pci_epc *epc);
5838 -int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
5839 - u8 interrupt_num);
5840 +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
5841 + phys_addr_t phys_addr);
5842 +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
5843 +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
5844 +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
5845 +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
5846 +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
5847 + enum pci_epc_irq_type type, u16 interrupt_num);
5848 int pci_epc_start(struct pci_epc *epc);
5849 void pci_epc_stop(struct pci_epc *epc);
5850 struct pci_epc *pci_epc_get(const char *epc_name);
5851 --- a/include/linux/pci-epf.h
5852 +++ b/include/linux/pci-epf.h
5853 @@ -1,12 +1,9 @@
5854 +/* SPDX-License-Identifier: GPL-2.0+ */
5855 /**
5856 * PCI Endpoint *Function* (EPF) header file
5857 *
5858 * Copyright (C) 2017 Texas Instruments
5859 * Author: Kishon Vijay Abraham I <kishon@ti.com>
5860 - *
5861 - * This program is free software: you can redistribute it and/or modify
5862 - * it under the terms of the GNU General Public License version 2 of
5863 - * the License as published by the Free Software Foundation.
5864 */
5865
5866 #ifndef __LINUX_PCI_EPF_H
5867 @@ -75,7 +72,7 @@ struct pci_epf_ops {
5868 * @driver: PCI EPF driver
5869 * @ops: set of function pointers for performing EPF operations
5870 * @owner: the owner of the module that registers the PCI EPF driver
5871 - * @group: configfs group corresponding to the PCI EPF driver
5872  + * @epf_group: list of configfs groups corresponding to the PCI EPF driver
5873 * @id_table: identifies EPF devices for probing
5874 */
5875 struct pci_epf_driver {
5876 @@ -85,7 +82,7 @@ struct pci_epf_driver {
5877 struct device_driver driver;
5878 struct pci_epf_ops *ops;
5879 struct module *owner;
5880 - struct config_group *group;
5881 + struct list_head epf_group;
5882 const struct pci_epf_device_id *id_table;
5883 };
5884
5885 @@ -100,6 +97,8 @@ struct pci_epf_driver {
5886 struct pci_epf_bar {
5887 dma_addr_t phys_addr;
5888 size_t size;
5889 + enum pci_barno barno;
5890 + int flags;
5891 };
5892
5893 /**
5894 @@ -120,6 +119,7 @@ struct pci_epf {
5895 struct pci_epf_header *header;
5896 struct pci_epf_bar bar[6];
5897 u8 msi_interrupts;
5898 + u16 msix_interrupts;
5899 u8 func_no;
5900
5901 struct pci_epc *epc;
5902 --- a/include/linux/pci.h
5903 +++ b/include/linux/pci.h
5904 @@ -1946,6 +1946,7 @@ void pcibios_release_device(struct pci_d
5905 void pcibios_penalize_isa_irq(int irq, int active);
5906 int pcibios_alloc_irq(struct pci_dev *dev);
5907 void pcibios_free_irq(struct pci_dev *dev);
5908 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
5909
5910 #ifdef CONFIG_HIBERNATE_CALLBACKS
5911 extern struct dev_pm_ops pcibios_pm_ops;
5912 --- a/include/uapi/linux/pcitest.h
5913 +++ b/include/uapi/linux/pcitest.h
5914 @@ -16,5 +16,8 @@
5915 #define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
5916 #define PCITEST_READ _IOW('P', 0x5, unsigned long)
5917 #define PCITEST_COPY _IOW('P', 0x6, unsigned long)
5918 +#define PCITEST_MSIX _IOW('P', 0x7, int)
5919 +#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
5920 +#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
5921
5922 #endif /* __UAPI_LINUX_PCITEST_H */
5923 --- a/tools/pci/pcitest.c
5924 +++ b/tools/pci/pcitest.c
5925 @@ -31,12 +31,17 @@
5926 #define BILLION 1E9
5927
5928 static char *result[] = { "NOT OKAY", "OKAY" };
5929 +static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
5930
5931 struct pci_test {
5932 char *device;
5933 char barnum;
5934 bool legacyirq;
5935 unsigned int msinum;
5936 + unsigned int msixnum;
5937 + int irqtype;
5938 + bool set_irqtype;
5939 + bool get_irqtype;
5940 bool read;
5941 bool write;
5942 bool copy;
5943 @@ -65,6 +70,24 @@ static int run_test(struct pci_test *tes
5944 fprintf(stdout, "%s\n", result[ret]);
5945 }
5946
5947 + if (test->set_irqtype) {
5948 + ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
5949 + fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
5950 + if (ret < 0)
5951 + fprintf(stdout, "FAILED\n");
5952 + else
5953 + fprintf(stdout, "%s\n", result[ret]);
5954 + }
5955 +
5956 + if (test->get_irqtype) {
5957 + ret = ioctl(fd, PCITEST_GET_IRQTYPE);
5958 + fprintf(stdout, "GET IRQ TYPE:\t\t");
5959 + if (ret < 0)
5960 + fprintf(stdout, "FAILED\n");
5961 + else
5962 + fprintf(stdout, "%s\n", irq[ret]);
5963 + }
5964 +
5965 if (test->legacyirq) {
5966 ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
5967 fprintf(stdout, "LEGACY IRQ:\t");
5968 @@ -83,6 +106,15 @@ static int run_test(struct pci_test *tes
5969 fprintf(stdout, "%s\n", result[ret]);
5970 }
5971
5972 + if (test->msixnum > 0 && test->msixnum <= 2048) {
5973 + ret = ioctl(fd, PCITEST_MSIX, test->msixnum);
5974 + fprintf(stdout, "MSI-X%d:\t\t", test->msixnum);
5975 + if (ret < 0)
5976 + fprintf(stdout, "TEST FAILED\n");
5977 + else
5978 + fprintf(stdout, "%s\n", result[ret]);
5979 + }
5980 +
5981 if (test->write) {
5982 ret = ioctl(fd, PCITEST_WRITE, test->size);
5983 fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
5984 @@ -133,7 +165,7 @@ int main(int argc, char **argv)
5985 /* set default endpoint device */
5986 test->device = "/dev/pci-endpoint-test.0";
5987
5988 - while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
5989 + while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF)
5990 switch (c) {
5991 case 'D':
5992 test->device = optarg;
5993 @@ -151,6 +183,20 @@ int main(int argc, char **argv)
5994 if (test->msinum < 1 || test->msinum > 32)
5995 goto usage;
5996 continue;
5997 + case 'x':
5998 + test->msixnum = atoi(optarg);
5999 + if (test->msixnum < 1 || test->msixnum > 2048)
6000 + goto usage;
6001 + continue;
6002 + case 'i':
6003 + test->irqtype = atoi(optarg);
6004 + if (test->irqtype < 0 || test->irqtype > 2)
6005 + goto usage;
6006 + test->set_irqtype = true;
6007 + continue;
6008 + case 'I':
6009 + test->get_irqtype = true;
6010 + continue;
6011 case 'r':
6012 test->read = true;
6013 continue;
6014 @@ -173,6 +219,9 @@ usage:
6015 "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
6016 "\t-b <bar num> BAR test (bar number between 0..5)\n"
6017 "\t-m <msi num> MSI test (msi number between 1..32)\n"
6018 + "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
6019 + "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
6020 + "\t-I Get current IRQ type configured\n"
6021 "\t-l Legacy IRQ test\n"
6022 "\t-r Read buffer test\n"
6023 "\t-w Write buffer test\n"
6024 --- a/tools/pci/pcitest.sh
6025 +++ b/tools/pci/pcitest.sh
6026 @@ -16,7 +16,10 @@ echo
6027 echo "Interrupt tests"
6028 echo
6029
6030 +pcitest -i 0
6031 pcitest -l
6032 +
6033 +pcitest -i 1
6034 msi=1
6035
6036 while [ $msi -lt 33 ]
6037 @@ -26,9 +29,21 @@ do
6038 done
6039 echo
6040
6041 +pcitest -i 2
6042 +msix=1
6043 +
6044 +while [ $msix -lt 2049 ]
6045 +do
6046 + pcitest -x $msix
6047 + msix=`expr $msix + 1`
6048 +done
6049 +echo
6050 +
6051 echo "Read Tests"
6052 echo
6053
6054 +pcitest -i 1
6055 +
6056 pcitest -r -s 1
6057 pcitest -r -s 1024
6058 pcitest -r -s 1025