f3f09989a7a446e4c5d621aafc12a4609977085e
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.14 / 816-pcie-support-layerscape.patch
1 From c54a010fe105281259b996d318ed85efc4103fee Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 6 May 2019 15:18:05 +0800
4 Subject: [PATCH] pcie: support layerscape
5
6 This is an integrated patch of pcie for layerscape
7
8 Signed-off-by: Bao Xiaowei <xiaowei.bao@nxp.com>
9 Signed-off-by: Bhumika Goyal <bhumirks@gmail.com>
10 Signed-off-by: Biwen Li <biwen.li@nxp.com>
11 Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
12 Signed-off-by: Christoph Hellwig <hch@lst.de>
13 Signed-off-by: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
14 Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
15 Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
16 Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
17 Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
18 Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
19 Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
20 Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
21 Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
22 Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
23 Signed-off-by: Po Liu <po.liu@nxp.com>
24 Signed-off-by: Rob Herring <robh@kernel.org>
25 Signed-off-by: Rolf Evers-Fischer <rolf.evers.fischer@aptiv.com>
26 Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
27 Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
28 Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
29 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
30 ---
31 arch/arm/kernel/bios32.c | 43 ++
32 arch/arm64/kernel/pci.c | 43 ++
33 drivers/misc/pci_endpoint_test.c | 332 ++++++++++---
34 drivers/pci/Kconfig | 1 +
35 drivers/pci/dwc/Kconfig | 39 +-
36 drivers/pci/dwc/Makefile | 2 +-
37 drivers/pci/dwc/pci-dra7xx.c | 9 -
38 drivers/pci/dwc/pci-layerscape-ep.c | 146 ++++++
39 drivers/pci/dwc/pci-layerscape.c | 12 +
40 drivers/pci/dwc/pcie-designware-ep.c | 338 ++++++++++++--
41 drivers/pci/dwc/pcie-designware-host.c | 5 +-
42 drivers/pci/dwc/pcie-designware-plat.c | 159 ++++++-
43 drivers/pci/dwc/pcie-designware.c | 5 +-
44 drivers/pci/dwc/pcie-designware.h | 57 ++-
45 drivers/pci/endpoint/Kconfig | 1 +
46 drivers/pci/endpoint/Makefile | 1 +
47 drivers/pci/endpoint/functions/Kconfig | 1 +
48 drivers/pci/endpoint/functions/Makefile | 1 +
49 drivers/pci/endpoint/functions/pci-epf-test.c | 191 +++++---
50 drivers/pci/endpoint/pci-ep-cfs.c | 95 +++-
51 drivers/pci/endpoint/pci-epc-core.c | 159 +++++--
52 drivers/pci/endpoint/pci-epc-mem.c | 13 +-
53 drivers/pci/endpoint/pci-epf-core.c | 116 +++--
54 drivers/pci/host/pci-host-common.c | 8 -
55 drivers/pci/host/pcie-xilinx-nwl.c | 9 -
56 drivers/pci/host/pcie-xilinx.c | 7 -
57 drivers/pci/mobiveil/Kconfig | 50 ++
58 drivers/pci/mobiveil/Makefile | 7 +
59 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c | 178 +++++++
60 drivers/pci/mobiveil/pci-layerscape-gen4.c | 292 ++++++++++++
61 drivers/pci/mobiveil/pcie-mobiveil-ep.c | 512 +++++++++++++++++++++
62 drivers/pci/mobiveil/pcie-mobiveil-host.c | 640 ++++++++++++++++++++++++++
63 drivers/pci/mobiveil/pcie-mobiveil-plat.c | 54 +++
64 drivers/pci/mobiveil/pcie-mobiveil.c | 334 ++++++++++++++
65 drivers/pci/mobiveil/pcie-mobiveil.h | 296 ++++++++++++
66 drivers/pci/pcie/portdrv_core.c | 29 ++
67 drivers/pci/quirks.c | 15 +
68 include/linux/pci-ep-cfs.h | 5 +-
69 include/linux/pci-epc.h | 73 +--
70 include/linux/pci-epf.h | 12 +-
71 include/linux/pci.h | 1 +
72 include/uapi/linux/pcitest.h | 3 +
73 tools/pci/pcitest.c | 51 +-
74 tools/pci/pcitest.sh | 15 +
75 44 files changed, 3917 insertions(+), 443 deletions(-)
76 create mode 100644 drivers/pci/dwc/pci-layerscape-ep.c
77 create mode 100644 drivers/pci/mobiveil/Kconfig
78 create mode 100644 drivers/pci/mobiveil/Makefile
79 create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
80 create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4.c
81 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-ep.c
82 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-host.c
83 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-plat.c
84 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.c
85 create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.h
86
87 --- a/arch/arm/kernel/bios32.c
88 +++ b/arch/arm/kernel/bios32.c
89 @@ -12,6 +12,8 @@
90 #include <linux/slab.h>
91 #include <linux/init.h>
92 #include <linux/io.h>
93 +#include <linux/of_irq.h>
94 +#include <linux/pcieport_if.h>
95
96 #include <asm/mach-types.h>
97 #include <asm/mach/map.h>
98 @@ -65,6 +67,47 @@ void pcibios_report_status(u_int status_
99 }
100
101 /*
102 + * Check device tree if the service interrupts are there
103 + */
104 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
105 +{
106 + int ret, count = 0;
107 + struct device_node *np = NULL;
108 +
109 + if (dev->bus->dev.of_node)
110 + np = dev->bus->dev.of_node;
111 +
112 + if (np == NULL)
113 + return 0;
114 +
115 + if (!IS_ENABLED(CONFIG_OF_IRQ))
116 + return 0;
117 +
118 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
119 + * request irq for aer
120 + */
121 + if (mask & PCIE_PORT_SERVICE_AER) {
122 + ret = of_irq_get_byname(np, "aer");
123 + if (ret > 0) {
124 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
125 + count++;
126 + }
127 + }
128 +
129 + if (mask & PCIE_PORT_SERVICE_PME) {
130 + ret = of_irq_get_byname(np, "pme");
131 + if (ret > 0) {
132 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
133 + count++;
134 + }
135 + }
136 +
137 + /* TODO: add more service interrupts if there it is in the device tree*/
138 +
139 + return count;
140 +}
141 +
142 +/*
143 * We don't use this to fix the device, but initialisation of it.
144 * It's not the correct use for this, but it works.
145 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
146 --- a/arch/arm64/kernel/pci.c
147 +++ b/arch/arm64/kernel/pci.c
148 @@ -17,6 +17,8 @@
149 #include <linux/mm.h>
150 #include <linux/of_pci.h>
151 #include <linux/of_platform.h>
152 +#include <linux/of_irq.h>
153 +#include <linux/pcieport_if.h>
154 #include <linux/pci.h>
155 #include <linux/pci-acpi.h>
156 #include <linux/pci-ecam.h>
157 @@ -36,6 +38,47 @@ int pcibios_alloc_irq(struct pci_dev *de
158 #endif
159
160 /*
161 + * Check device tree if the service interrupts are there
162 + */
163 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
164 +{
165 + int ret, count = 0;
166 + struct device_node *np = NULL;
167 +
168 + if (dev->bus->dev.of_node)
169 + np = dev->bus->dev.of_node;
170 +
171 + if (np == NULL)
172 + return 0;
173 +
174 + if (!IS_ENABLED(CONFIG_OF_IRQ))
175 + return 0;
176 +
177 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
178 + * request irq for aer
179 + */
180 + if (mask & PCIE_PORT_SERVICE_AER) {
181 + ret = of_irq_get_byname(np, "aer");
182 + if (ret > 0) {
183 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
184 + count++;
185 + }
186 + }
187 +
188 + if (mask & PCIE_PORT_SERVICE_PME) {
189 + ret = of_irq_get_byname(np, "pme");
190 + if (ret > 0) {
191 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
192 + count++;
193 + }
194 + }
195 +
196 + /* TODO: add more service interrupts if there it is in the device tree*/
197 +
198 + return count;
199 +}
200 +
201 +/*
202 * raw_pci_read/write - Platform-specific PCI config space access.
203 */
204 int raw_pci_read(unsigned int domain, unsigned int bus,
205 --- a/drivers/misc/pci_endpoint_test.c
206 +++ b/drivers/misc/pci_endpoint_test.c
207 @@ -35,38 +35,45 @@
208
209 #include <uapi/linux/pcitest.h>
210
211 -#define DRV_MODULE_NAME "pci-endpoint-test"
212 +#define DRV_MODULE_NAME "pci-endpoint-test"
213
214 -#define PCI_ENDPOINT_TEST_MAGIC 0x0
215 +#define IRQ_TYPE_UNDEFINED -1
216 +#define IRQ_TYPE_LEGACY 0
217 +#define IRQ_TYPE_MSI 1
218 +#define IRQ_TYPE_MSIX 2
219 +
220 +#define PCI_ENDPOINT_TEST_MAGIC 0x0
221 +
222 +#define PCI_ENDPOINT_TEST_COMMAND 0x4
223 +#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
224 +#define COMMAND_RAISE_MSI_IRQ BIT(1)
225 +#define COMMAND_RAISE_MSIX_IRQ BIT(2)
226 +#define COMMAND_READ BIT(3)
227 +#define COMMAND_WRITE BIT(4)
228 +#define COMMAND_COPY BIT(5)
229 +
230 +#define PCI_ENDPOINT_TEST_STATUS 0x8
231 +#define STATUS_READ_SUCCESS BIT(0)
232 +#define STATUS_READ_FAIL BIT(1)
233 +#define STATUS_WRITE_SUCCESS BIT(2)
234 +#define STATUS_WRITE_FAIL BIT(3)
235 +#define STATUS_COPY_SUCCESS BIT(4)
236 +#define STATUS_COPY_FAIL BIT(5)
237 +#define STATUS_IRQ_RAISED BIT(6)
238 +#define STATUS_SRC_ADDR_INVALID BIT(7)
239 +#define STATUS_DST_ADDR_INVALID BIT(8)
240
241 -#define PCI_ENDPOINT_TEST_COMMAND 0x4
242 -#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
243 -#define COMMAND_RAISE_MSI_IRQ BIT(1)
244 -#define MSI_NUMBER_SHIFT 2
245 -/* 6 bits for MSI number */
246 -#define COMMAND_READ BIT(8)
247 -#define COMMAND_WRITE BIT(9)
248 -#define COMMAND_COPY BIT(10)
249 -
250 -#define PCI_ENDPOINT_TEST_STATUS 0x8
251 -#define STATUS_READ_SUCCESS BIT(0)
252 -#define STATUS_READ_FAIL BIT(1)
253 -#define STATUS_WRITE_SUCCESS BIT(2)
254 -#define STATUS_WRITE_FAIL BIT(3)
255 -#define STATUS_COPY_SUCCESS BIT(4)
256 -#define STATUS_COPY_FAIL BIT(5)
257 -#define STATUS_IRQ_RAISED BIT(6)
258 -#define STATUS_SRC_ADDR_INVALID BIT(7)
259 -#define STATUS_DST_ADDR_INVALID BIT(8)
260 -
261 -#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
262 +#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
263 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
264
265 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
266 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
267
268 -#define PCI_ENDPOINT_TEST_SIZE 0x1c
269 -#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
270 +#define PCI_ENDPOINT_TEST_SIZE 0x1c
271 +#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
272 +
273 +#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
274 +#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
275
276 static DEFINE_IDA(pci_endpoint_test_ida);
277
278 @@ -77,6 +84,10 @@ static bool no_msi;
279 module_param(no_msi, bool, 0444);
280 MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
281
282 +static int irq_type = IRQ_TYPE_MSI;
283 +module_param(irq_type, int, 0444);
284 +MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
285 +
286 enum pci_barno {
287 BAR_0,
288 BAR_1,
289 @@ -103,7 +114,7 @@ struct pci_endpoint_test {
290 struct pci_endpoint_test_data {
291 enum pci_barno test_reg_bar;
292 size_t alignment;
293 - bool no_msi;
294 + int irq_type;
295 };
296
297 static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
298 @@ -147,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irq
299 return IRQ_HANDLED;
300 }
301
302 +static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
303 +{
304 + struct pci_dev *pdev = test->pdev;
305 +
306 + pci_free_irq_vectors(pdev);
307 +}
308 +
309 +static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
310 + int type)
311 +{
312 + int irq = -1;
313 + struct pci_dev *pdev = test->pdev;
314 + struct device *dev = &pdev->dev;
315 + bool res = true;
316 +
317 + switch (type) {
318 + case IRQ_TYPE_LEGACY:
319 + irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
320 + if (irq < 0)
321 + dev_err(dev, "Failed to get Legacy interrupt\n");
322 + break;
323 + case IRQ_TYPE_MSI:
324 + irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
325 + if (irq < 0)
326 + dev_err(dev, "Failed to get MSI interrupts\n");
327 + break;
328 + case IRQ_TYPE_MSIX:
329 + irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
330 + if (irq < 0)
331 + dev_err(dev, "Failed to get MSI-X interrupts\n");
332 + break;
333 + default:
334 + dev_err(dev, "Invalid IRQ type selected\n");
335 + }
336 +
337 + if (irq < 0) {
338 + irq = 0;
339 + res = false;
340 + }
341 + test->num_irqs = irq;
342 +
343 + return res;
344 +}
345 +
346 +static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
347 +{
348 + int i;
349 + struct pci_dev *pdev = test->pdev;
350 + struct device *dev = &pdev->dev;
351 +
352 + for (i = 0; i < test->num_irqs; i++)
353 + devm_free_irq(dev, pci_irq_vector(pdev, i), test);
354 +
355 + test->num_irqs = 0;
356 +}
357 +
358 +static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
359 +{
360 + int i;
361 + int err;
362 + struct pci_dev *pdev = test->pdev;
363 + struct device *dev = &pdev->dev;
364 +
365 + for (i = 0; i < test->num_irqs; i++) {
366 + err = devm_request_irq(dev, pci_irq_vector(pdev, i),
367 + pci_endpoint_test_irqhandler,
368 + IRQF_SHARED, DRV_MODULE_NAME, test);
369 + if (err)
370 + goto fail;
371 + }
372 +
373 + return true;
374 +
375 +fail:
376 + switch (irq_type) {
377 + case IRQ_TYPE_LEGACY:
378 + dev_err(dev, "Failed to request IRQ %d for Legacy\n",
379 + pci_irq_vector(pdev, i));
380 + break;
381 + case IRQ_TYPE_MSI:
382 + dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
383 + pci_irq_vector(pdev, i),
384 + i + 1);
385 + break;
386 + case IRQ_TYPE_MSIX:
387 + dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
388 + pci_irq_vector(pdev, i),
389 + i + 1);
390 + break;
391 + }
392 +
393 + return false;
394 +}
395 +
396 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
397 enum pci_barno barno)
398 {
399 @@ -179,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq
400 {
401 u32 val;
402
403 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
404 + IRQ_TYPE_LEGACY);
405 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
406 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
407 COMMAND_RAISE_LEGACY_IRQ);
408 val = wait_for_completion_timeout(&test->irq_raised,
409 @@ -190,20 +298,24 @@ static bool pci_endpoint_test_legacy_irq
410 }
411
412 static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
413 - u8 msi_num)
414 + u16 msi_num, bool msix)
415 {
416 u32 val;
417 struct pci_dev *pdev = test->pdev;
418
419 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
420 + msix == false ? IRQ_TYPE_MSI :
421 + IRQ_TYPE_MSIX);
422 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
423 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
424 - msi_num << MSI_NUMBER_SHIFT |
425 - COMMAND_RAISE_MSI_IRQ);
426 + msix == false ? COMMAND_RAISE_MSI_IRQ :
427 + COMMAND_RAISE_MSIX_IRQ);
428 val = wait_for_completion_timeout(&test->irq_raised,
429 msecs_to_jiffies(1000));
430 if (!val)
431 return false;
432
433 - if (test->last_irq - pdev->irq == msi_num - 1)
434 + if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
435 return true;
436
437 return false;
438 @@ -230,10 +342,18 @@ static bool pci_endpoint_test_copy(struc
439 if (size > SIZE_MAX - alignment)
440 goto err;
441
442 + if (size > SIZE_MAX - alignment)
443 + goto err;
444 +
445 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
446 + dev_err(dev, "Invalid IRQ type option\n");
447 + goto err;
448 + }
449 +
450 orig_src_addr = dma_alloc_coherent(dev, size + alignment,
451 &orig_src_phys_addr, GFP_KERNEL);
452 if (!orig_src_addr) {
453 - dev_err(dev, "failed to allocate source buffer\n");
454 + dev_err(dev, "Failed to allocate source buffer\n");
455 ret = false;
456 goto err;
457 }
458 @@ -259,7 +379,7 @@ static bool pci_endpoint_test_copy(struc
459 orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
460 &orig_dst_phys_addr, GFP_KERNEL);
461 if (!orig_dst_addr) {
462 - dev_err(dev, "failed to allocate destination address\n");
463 + dev_err(dev, "Failed to allocate destination address\n");
464 ret = false;
465 goto err_orig_src_addr;
466 }
467 @@ -281,8 +401,10 @@ static bool pci_endpoint_test_copy(struc
468 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
469 size);
470
471 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
472 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
473 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
474 - 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
475 + COMMAND_COPY);
476
477 wait_for_completion(&test->irq_raised);
478
479 @@ -318,10 +440,18 @@ static bool pci_endpoint_test_write(stru
480 if (size > SIZE_MAX - alignment)
481 goto err;
482
483 + if (size > SIZE_MAX - alignment)
484 + goto err;
485 +
486 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
487 + dev_err(dev, "Invalid IRQ type option\n");
488 + goto err;
489 + }
490 +
491 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
492 GFP_KERNEL);
493 if (!orig_addr) {
494 - dev_err(dev, "failed to allocate address\n");
495 + dev_err(dev, "Failed to allocate address\n");
496 ret = false;
497 goto err;
498 }
499 @@ -348,8 +478,10 @@ static bool pci_endpoint_test_write(stru
500
501 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
502
503 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
504 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
505 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
506 - 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
507 + COMMAND_READ);
508
509 wait_for_completion(&test->irq_raised);
510
511 @@ -379,10 +511,18 @@ static bool pci_endpoint_test_read(struc
512 if (size > SIZE_MAX - alignment)
513 goto err;
514
515 + if (size > SIZE_MAX - alignment)
516 + goto err;
517 +
518 + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
519 + dev_err(dev, "Invalid IRQ type option\n");
520 + goto err;
521 + }
522 +
523 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
524 GFP_KERNEL);
525 if (!orig_addr) {
526 - dev_err(dev, "failed to allocate destination address\n");
527 + dev_err(dev, "Failed to allocate destination address\n");
528 ret = false;
529 goto err;
530 }
531 @@ -403,8 +543,10 @@ static bool pci_endpoint_test_read(struc
532
533 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
534
535 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
536 + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
537 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
538 - 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
539 + COMMAND_WRITE);
540
541 wait_for_completion(&test->irq_raised);
542
543 @@ -417,6 +559,38 @@ err:
544 return ret;
545 }
546
547 +static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
548 + int req_irq_type)
549 +{
550 + struct pci_dev *pdev = test->pdev;
551 + struct device *dev = &pdev->dev;
552 +
553 + if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
554 + dev_err(dev, "Invalid IRQ type option\n");
555 + return false;
556 + }
557 +
558 + if (irq_type == req_irq_type)
559 + return true;
560 +
561 + pci_endpoint_test_release_irq(test);
562 + pci_endpoint_test_free_irq_vectors(test);
563 +
564 + if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
565 + goto err;
566 +
567 + if (!pci_endpoint_test_request_irq(test))
568 + goto err;
569 +
570 + irq_type = req_irq_type;
571 + return true;
572 +
573 +err:
574 + pci_endpoint_test_free_irq_vectors(test);
575 + irq_type = IRQ_TYPE_UNDEFINED;
576 + return false;
577 +}
578 +
579 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
580 unsigned long arg)
581 {
582 @@ -436,7 +610,8 @@ static long pci_endpoint_test_ioctl(stru
583 ret = pci_endpoint_test_legacy_irq(test);
584 break;
585 case PCITEST_MSI:
586 - ret = pci_endpoint_test_msi_irq(test, arg);
587 + case PCITEST_MSIX:
588 + ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
589 break;
590 case PCITEST_WRITE:
591 ret = pci_endpoint_test_write(test, arg);
592 @@ -447,6 +622,12 @@ static long pci_endpoint_test_ioctl(stru
593 case PCITEST_COPY:
594 ret = pci_endpoint_test_copy(test, arg);
595 break;
596 + case PCITEST_SET_IRQTYPE:
597 + ret = pci_endpoint_test_set_irq(test, arg);
598 + break;
599 + case PCITEST_GET_IRQTYPE:
600 + ret = irq_type;
601 + break;
602 }
603
604 ret:
605 @@ -462,9 +643,7 @@ static const struct file_operations pci_
606 static int pci_endpoint_test_probe(struct pci_dev *pdev,
607 const struct pci_device_id *ent)
608 {
609 - int i;
610 int err;
611 - int irq = 0;
612 int id;
613 char name[20];
614 enum pci_barno bar;
615 @@ -486,12 +665,15 @@ static int pci_endpoint_test_probe(struc
616 test->alignment = 0;
617 test->pdev = pdev;
618
619 + if (no_msi)
620 + irq_type = IRQ_TYPE_LEGACY;
621 +
622 data = (struct pci_endpoint_test_data *)ent->driver_data;
623 if (data) {
624 test_reg_bar = data->test_reg_bar;
625 test->test_reg_bar = test_reg_bar;
626 test->alignment = data->alignment;
627 - no_msi = data->no_msi;
628 + irq_type = data->irq_type;
629 }
630
631 init_completion(&test->irq_raised);
632 @@ -511,36 +693,21 @@ static int pci_endpoint_test_probe(struc
633
634 pci_set_master(pdev);
635
636 - if (!no_msi) {
637 - irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
638 - if (irq < 0)
639 - dev_err(dev, "failed to get MSI interrupts\n");
640 - test->num_irqs = irq;
641 - }
642 + if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
643 + goto err_disable_irq;
644
645 - err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
646 - IRQF_SHARED, DRV_MODULE_NAME, test);
647 - if (err) {
648 - dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
649 - goto err_disable_msi;
650 - }
651 -
652 - for (i = 1; i < irq; i++) {
653 - err = devm_request_irq(dev, pdev->irq + i,
654 - pci_endpoint_test_irqhandler,
655 - IRQF_SHARED, DRV_MODULE_NAME, test);
656 - if (err)
657 - dev_err(dev, "failed to request IRQ %d for MSI %d\n",
658 - pdev->irq + i, i + 1);
659 - }
660 + if (!pci_endpoint_test_request_irq(test))
661 + goto err_disable_irq;
662
663 for (bar = BAR_0; bar <= BAR_5; bar++) {
664 - base = pci_ioremap_bar(pdev, bar);
665 - if (!base) {
666 - dev_err(dev, "failed to read BAR%d\n", bar);
667 - WARN_ON(bar == test_reg_bar);
668 + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
669 + base = pci_ioremap_bar(pdev, bar);
670 + if (!base) {
671 + dev_err(dev, "Failed to read BAR%d\n", bar);
672 + WARN_ON(bar == test_reg_bar);
673 + }
674 + test->bar[bar] = base;
675 }
676 - test->bar[bar] = base;
677 }
678
679 test->base = test->bar[test_reg_bar];
680 @@ -556,24 +723,31 @@ static int pci_endpoint_test_probe(struc
681 id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
682 if (id < 0) {
683 err = id;
684 - dev_err(dev, "unable to get id\n");
685 + dev_err(dev, "Unable to get id\n");
686 goto err_iounmap;
687 }
688
689 snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
690 misc_device = &test->miscdev;
691 misc_device->minor = MISC_DYNAMIC_MINOR;
692 - misc_device->name = name;
693 + misc_device->name = kstrdup(name, GFP_KERNEL);
694 + if (!misc_device->name) {
695 + err = -ENOMEM;
696 + goto err_ida_remove;
697 + }
698 misc_device->fops = &pci_endpoint_test_fops,
699
700 err = misc_register(misc_device);
701 if (err) {
702 - dev_err(dev, "failed to register device\n");
703 - goto err_ida_remove;
704 + dev_err(dev, "Failed to register device\n");
705 + goto err_kfree_name;
706 }
707
708 return 0;
709
710 +err_kfree_name:
711 + kfree(misc_device->name);
712 +
713 err_ida_remove:
714 ida_simple_remove(&pci_endpoint_test_ida, id);
715
716 @@ -583,11 +757,13 @@ err_iounmap:
717 pci_iounmap(pdev, test->bar[bar]);
718 }
719
720 - for (i = 0; i < irq; i++)
721 - devm_free_irq(dev, pdev->irq + i, test);
722 + pci_endpoint_test_release_irq(test);
723
724 err_disable_msi:
725 pci_disable_msi(pdev);
726 +
727 +err_disable_irq:
728 + pci_endpoint_test_free_irq_vectors(test);
729 pci_release_regions(pdev);
730
731 err_disable_pdev:
732 @@ -610,14 +786,15 @@ static void pci_endpoint_test_remove(str
733 return;
734
735 misc_deregister(&test->miscdev);
736 + kfree(misc_device->name);
737 ida_simple_remove(&pci_endpoint_test_ida, id);
738 for (bar = BAR_0; bar <= BAR_5; bar++) {
739 if (test->bar[bar])
740 pci_iounmap(pdev, test->bar[bar]);
741 }
742 - for (i = 0; i < test->num_irqs; i++)
743 - devm_free_irq(&pdev->dev, pdev->irq + i, test);
744 - pci_disable_msi(pdev);
745 +
746 + pci_endpoint_test_release_irq(test);
747 + pci_endpoint_test_free_irq_vectors(test);
748 pci_release_regions(pdev);
749 pci_disable_device(pdev);
750 }
751 @@ -625,6 +802,7 @@ static void pci_endpoint_test_remove(str
752 static const struct pci_device_id pci_endpoint_test_tbl[] = {
753 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
754 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
755 + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID) },
756 { }
757 };
758 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
759 --- a/drivers/pci/Kconfig
760 +++ b/drivers/pci/Kconfig
761 @@ -142,6 +142,7 @@ config PCI_HYPERV
762
763 source "drivers/pci/hotplug/Kconfig"
764 source "drivers/pci/dwc/Kconfig"
765 +source "drivers/pci/mobiveil/Kconfig"
766 source "drivers/pci/host/Kconfig"
767 source "drivers/pci/endpoint/Kconfig"
768 source "drivers/pci/switch/Kconfig"
769 --- a/drivers/pci/dwc/Kconfig
770 +++ b/drivers/pci/dwc/Kconfig
771 @@ -50,17 +50,36 @@ config PCI_DRA7XX_EP
772 endif
773
774 config PCIE_DW_PLAT
775 - bool "Platform bus based DesignWare PCIe Controller"
776 - depends on PCI
777 - depends on PCI_MSI_IRQ_DOMAIN
778 - select PCIE_DW_HOST
779 - ---help---
780 - This selects the DesignWare PCIe controller support. Select this if
781 - you have a PCIe controller on Platform bus.
782 + bool
783
784 - If you have a controller with this interface, say Y or M here.
785 +config PCIE_DW_PLAT_HOST
786 + bool "Platform bus based DesignWare PCIe Controller - Host mode"
787 + depends on PCI && PCI_MSI_IRQ_DOMAIN
788 + select PCIE_DW_HOST
789 + select PCIE_DW_PLAT
790 + help
791 + Enables support for the PCIe controller in the Designware IP to
792 + work in host mode. There are two instances of PCIe controller in
793 + Designware IP.
794 + This controller can work either as EP or RC. In order to enable
795 + host-specific features PCIE_DW_PLAT_HOST must be selected and in
796 + order to enable device-specific features PCI_DW_PLAT_EP must be
797 + selected.
798
799 - If unsure, say N.
800 +config PCIE_DW_PLAT_EP
801 + bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
802 + depends on PCI && PCI_MSI_IRQ_DOMAIN
803 + depends on PCI_ENDPOINT
804 + select PCIE_DW_EP
805 + select PCIE_DW_PLAT
806 + help
807 + Enables support for the PCIe controller in the Designware IP to
808 + work in endpoint mode. There are two instances of PCIe controller
809 + in Designware IP.
810 + This controller can work either as EP or RC. In order to enable
811 + host-specific features PCIE_DW_PLAT_HOST must be selected and in
812 + order to enable device-specific features PCI_DW_PLAT_EP must be
813 + selected.
814
815 config PCI_EXYNOS
816 bool "Samsung Exynos PCIe controller"
817 --- a/drivers/pci/dwc/Makefile
818 +++ b/drivers/pci/dwc/Makefile
819 @@ -10,7 +10,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
820 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
821 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
822 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
823 -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
824 +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o
825 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
826 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
827 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
828 --- a/drivers/pci/dwc/pci-dra7xx.c
829 +++ b/drivers/pci/dwc/pci-dra7xx.c
830 @@ -339,15 +339,6 @@ static irqreturn_t dra7xx_pcie_irq_handl
831 return IRQ_HANDLED;
832 }
833
834 -static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
835 -{
836 - u32 reg;
837 -
838 - reg = PCI_BASE_ADDRESS_0 + (4 * bar);
839 - dw_pcie_writel_dbi2(pci, reg, 0x0);
840 - dw_pcie_writel_dbi(pci, reg, 0x0);
841 -}
842 -
843 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
844 {
845 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
846 --- /dev/null
847 +++ b/drivers/pci/dwc/pci-layerscape-ep.c
848 @@ -0,0 +1,146 @@
849 +// SPDX-License-Identifier: GPL-2.0
850 +/*
851 + * PCIe controller EP driver for Freescale Layerscape SoCs
852 + *
853 + * Copyright (C) 2018 NXP Semiconductor.
854 + *
855 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
856 + */
857 +
858 +#include <linux/kernel.h>
859 +#include <linux/init.h>
860 +#include <linux/of_pci.h>
861 +#include <linux/of_platform.h>
862 +#include <linux/of_address.h>
863 +#include <linux/pci.h>
864 +#include <linux/platform_device.h>
865 +#include <linux/resource.h>
866 +
867 +#include "pcie-designware.h"
868 +
869 +#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/
870 +
871 +struct ls_pcie_ep {
872 + struct dw_pcie *pci;
873 +};
874 +
875 +#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
876 +
877 +static int ls_pcie_establish_link(struct dw_pcie *pci)
878 +{
879 + return 0;
880 +}
881 +
882 +static const struct dw_pcie_ops ls_pcie_ep_ops = {
883 + .start_link = ls_pcie_establish_link,
884 +};
885 +
886 +static const struct of_device_id ls_pcie_ep_of_match[] = {
887 + { .compatible = "fsl,ls-pcie-ep",},
888 + { },
889 +};
890 +
891 +static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
892 +{
893 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
894 + struct pci_epc *epc = ep->epc;
895 + enum pci_barno bar;
896 +
897 + for (bar = BAR_0; bar <= BAR_5; bar++)
898 + dw_pcie_ep_reset_bar(pci, bar);
899 +
900 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
901 +}
902 +
903 +static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
904 + enum pci_epc_irq_type type, u16 interrupt_num)
905 +{
906 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
907 +
908 + switch (type) {
909 + case PCI_EPC_IRQ_LEGACY:
910 + return dw_pcie_ep_raise_legacy_irq(ep, func_no);
911 + case PCI_EPC_IRQ_MSI:
912 + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
913 + case PCI_EPC_IRQ_MSIX:
914 + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
915 + default:
916 + dev_err(pci->dev, "UNKNOWN IRQ type\n");
917 + return -EINVAL;
918 + }
919 +}
920 +
921 +static struct dw_pcie_ep_ops pcie_ep_ops = {
922 + .ep_init = ls_pcie_ep_init,
923 + .raise_irq = ls_pcie_ep_raise_irq,
924 +};
925 +
926 +static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
927 + struct platform_device *pdev)
928 +{
929 + struct dw_pcie *pci = pcie->pci;
930 + struct device *dev = pci->dev;
931 + struct dw_pcie_ep *ep;
932 + struct resource *res;
933 + int ret;
934 +
935 + ep = &pci->ep;
936 + ep->ops = &pcie_ep_ops;
937 +
938 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
939 + if (!res)
940 + return -EINVAL;
941 +
942 + ep->phys_base = res->start;
943 + ep->addr_size = resource_size(res);
944 +
945 + ret = dw_pcie_ep_init(ep);
946 + if (ret) {
947 + dev_err(dev, "failed to initialize endpoint\n");
948 + return ret;
949 + }
950 +
951 + return 0;
952 +}
953 +
954 +static int __init ls_pcie_ep_probe(struct platform_device *pdev)
955 +{
956 + struct device *dev = &pdev->dev;
957 + struct dw_pcie *pci;
958 + struct ls_pcie_ep *pcie;
959 + struct resource *dbi_base;
960 + int ret;
961 +
962 + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
963 + if (!pcie)
964 + return -ENOMEM;
965 +
966 + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
967 + if (!pci)
968 + return -ENOMEM;
969 +
970 + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
971 + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
972 + if (IS_ERR(pci->dbi_base))
973 + return PTR_ERR(pci->dbi_base);
974 +
975 + pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
976 + pci->dev = dev;
977 + pci->ops = &ls_pcie_ep_ops;
978 + pcie->pci = pci;
979 +
980 + platform_set_drvdata(pdev, pcie);
981 +
982 + ret = ls_add_pcie_ep(pcie, pdev);
983 +
984 + return ret;
985 +}
986 +
987 +static struct platform_driver ls_pcie_ep_driver = {
988 + .driver = {
989 + .name = "layerscape-pcie-ep",
990 + .of_match_table = ls_pcie_ep_of_match,
991 + .suppress_bind_attrs = true,
992 + },
993 +};
994 +builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
995 --- a/drivers/pci/dwc/pci-layerscape.c
996 +++ b/drivers/pci/dwc/pci-layerscape.c
997 @@ -33,6 +33,8 @@
998
999 /* PEX Internal Configuration Registers */
1000 #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
1001 +#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
1002 +#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
1003
1004 #define PCIE_IATU_NUM 6
1005
1006 @@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pci
1007 return 1;
1008 }
1009
1010 +/* Forward error response of outbound non-posted requests */
1011 +static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
1012 +{
1013 + struct dw_pcie *pci = pcie->pci;
1014 +
1015 + iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
1016 +}
1017 +
1018 static int ls_pcie_host_init(struct pcie_port *pp)
1019 {
1020 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1021 @@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie
1022 * dw_pcie_setup_rc() will reconfigure the outbound windows.
1023 */
1024 ls_pcie_disable_outbound_atus(pcie);
1025 + ls_pcie_fix_error_response(pcie);
1026
1027 dw_pcie_dbi_ro_wr_en(pci);
1028 ls_pcie_clear_multifunction(pcie);
1029 @@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drv
1030 };
1031
1032 static const struct of_device_id ls_pcie_of_match[] = {
1033 + { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
1034 { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
1035 { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
1036 { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
1037 --- a/drivers/pci/dwc/pcie-designware-ep.c
1038 +++ b/drivers/pci/dwc/pcie-designware-ep.c
1039 @@ -1,20 +1,9 @@
1040 +// SPDX-License-Identifier: GPL-2.0
1041 /**
1042 * Synopsys DesignWare PCIe Endpoint controller driver
1043 *
1044 * Copyright (C) 2017 Texas Instruments
1045 * Author: Kishon Vijay Abraham I <kishon@ti.com>
1046 - *
1047 - * This program is free software: you can redistribute it and/or modify
1048 - * it under the terms of the GNU General Public License version 2 of
1049 - * the License as published by the Free Software Foundation.
1050 - *
1051 - * This program is distributed in the hope that it will be useful,
1052 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1053 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1054 - * GNU General Public License for more details.
1055 - *
1056 - * You should have received a copy of the GNU General Public License
1057 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
1058 */
1059
1060 #include <linux/of.h>
1061 @@ -30,7 +19,8 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep
1062 pci_epc_linkup(epc);
1063 }
1064
1065 -static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1066 +static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
1067 + int flags)
1068 {
1069 u32 reg;
1070
1071 @@ -38,10 +28,52 @@ static void dw_pcie_ep_reset_bar(struct
1072 dw_pcie_dbi_ro_wr_en(pci);
1073 dw_pcie_writel_dbi2(pci, reg, 0x0);
1074 dw_pcie_writel_dbi(pci, reg, 0x0);
1075 + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1076 + dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
1077 + dw_pcie_writel_dbi(pci, reg + 4, 0x0);
1078 + }
1079 dw_pcie_dbi_ro_wr_dis(pci);
1080 }
1081
1082 -static int dw_pcie_ep_write_header(struct pci_epc *epc,
1083 +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1084 +{
1085 + __dw_pcie_ep_reset_bar(pci, bar, 0);
1086 +}
1087 +
1088 +static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
1089 + u8 cap)
1090 +{
1091 + u8 cap_id, next_cap_ptr;
1092 + u16 reg;
1093 +
1094 + reg = dw_pcie_readw_dbi(pci, cap_ptr);
1095 + next_cap_ptr = (reg & 0xff00) >> 8;
1096 + cap_id = (reg & 0x00ff);
1097 +
1098 + if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
1099 + return 0;
1100 +
1101 + if (cap_id == cap)
1102 + return cap_ptr;
1103 +
1104 + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
1105 +}
1106 +
1107 +static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
1108 +{
1109 + u8 next_cap_ptr;
1110 + u16 reg;
1111 +
1112 + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
1113 + next_cap_ptr = (reg & 0x00ff);
1114 +
1115 + if (!next_cap_ptr)
1116 + return 0;
1117 +
1118 + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
1119 +}
1120 +
1121 +static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
1122 struct pci_epf_header *hdr)
1123 {
1124 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1125 @@ -114,24 +146,29 @@ static int dw_pcie_ep_outbound_atu(struc
1126 return 0;
1127 }
1128
1129 -static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
1130 +static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
1131 + struct pci_epf_bar *epf_bar)
1132 {
1133 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1134 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1135 + enum pci_barno bar = epf_bar->barno;
1136 u32 atu_index = ep->bar_to_atu[bar];
1137
1138 - dw_pcie_ep_reset_bar(pci, bar);
1139 + __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
1140
1141 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
1142 clear_bit(atu_index, ep->ib_window_map);
1143 }
1144
1145 -static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
1146 - dma_addr_t bar_phys, size_t size, int flags)
1147 +static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
1148 + struct pci_epf_bar *epf_bar)
1149 {
1150 int ret;
1151 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1152 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1153 + enum pci_barno bar = epf_bar->barno;
1154 + size_t size = epf_bar->size;
1155 + int flags = epf_bar->flags;
1156 enum dw_pcie_as_type as_type;
1157 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
1158
1159 @@ -140,13 +177,20 @@ static int dw_pcie_ep_set_bar(struct pci
1160 else
1161 as_type = DW_PCIE_AS_IO;
1162
1163 - ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
1164 + ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
1165 if (ret)
1166 return ret;
1167
1168 dw_pcie_dbi_ro_wr_en(pci);
1169 - dw_pcie_writel_dbi2(pci, reg, size - 1);
1170 +
1171 + dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
1172 dw_pcie_writel_dbi(pci, reg, flags);
1173 +
1174 + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1175 + dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
1176 + dw_pcie_writel_dbi(pci, reg + 4, 0);
1177 + }
1178 +
1179 dw_pcie_dbi_ro_wr_dis(pci);
1180
1181 return 0;
1182 @@ -167,7 +211,8 @@ static int dw_pcie_find_index(struct dw_
1183 return -EINVAL;
1184 }
1185
1186 -static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
1187 +static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
1188 + phys_addr_t addr)
1189 {
1190 int ret;
1191 u32 atu_index;
1192 @@ -182,8 +227,9 @@ static void dw_pcie_ep_unmap_addr(struct
1193 clear_bit(atu_index, ep->ob_window_map);
1194 }
1195
1196 -static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
1197 - u64 pci_addr, size_t size)
1198 +static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
1199 + phys_addr_t addr,
1200 + u64 pci_addr, size_t size)
1201 {
1202 int ret;
1203 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1204 @@ -198,45 +244,93 @@ static int dw_pcie_ep_map_addr(struct pc
1205 return 0;
1206 }
1207
1208 -static int dw_pcie_ep_get_msi(struct pci_epc *epc)
1209 +static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
1210 +{
1211 + struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1212 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1213 + u32 val, reg;
1214 +
1215 + if (!ep->msi_cap)
1216 + return -EINVAL;
1217 +
1218 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1219 + val = dw_pcie_readw_dbi(pci, reg);
1220 + if (!(val & PCI_MSI_FLAGS_ENABLE))
1221 + return -EINVAL;
1222 +
1223 + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
1224 +
1225 + return val;
1226 +}
1227 +
1228 +static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
1229 +{
1230 + struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1231 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1232 + u32 val, reg;
1233 +
1234 + if (!ep->msi_cap)
1235 + return -EINVAL;
1236 +
1237 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1238 + val = dw_pcie_readw_dbi(pci, reg);
1239 + val &= ~PCI_MSI_FLAGS_QMASK;
1240 + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
1241 + dw_pcie_dbi_ro_wr_en(pci);
1242 + dw_pcie_writew_dbi(pci, reg, val);
1243 + dw_pcie_dbi_ro_wr_dis(pci);
1244 +
1245 + return 0;
1246 +}
1247 +
1248 +static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
1249 {
1250 - int val;
1251 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1252 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1253 + u32 val, reg;
1254 +
1255 + if (!ep->msix_cap)
1256 + return -EINVAL;
1257
1258 - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
1259 - if (!(val & MSI_CAP_MSI_EN_MASK))
1260 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
1261 + val = dw_pcie_readw_dbi(pci, reg);
1262 + if (!(val & PCI_MSIX_FLAGS_ENABLE))
1263 return -EINVAL;
1264
1265 - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
1266 + val &= PCI_MSIX_FLAGS_QSIZE;
1267 +
1268 return val;
1269 }
1270
1271 -static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
1272 +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
1273 {
1274 - int val;
1275 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1276 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1277 + u32 val, reg;
1278
1279 - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
1280 - val &= ~MSI_CAP_MMC_MASK;
1281 - val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
1282 + if (!ep->msix_cap)
1283 + return -EINVAL;
1284 +
1285 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
1286 + val = dw_pcie_readw_dbi(pci, reg);
1287 + val &= ~PCI_MSIX_FLAGS_QSIZE;
1288 + val |= interrupts;
1289 dw_pcie_dbi_ro_wr_en(pci);
1290 - dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
1291 + dw_pcie_writew_dbi(pci, reg, val);
1292 dw_pcie_dbi_ro_wr_dis(pci);
1293
1294 return 0;
1295 }
1296
1297 -static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
1298 - enum pci_epc_irq_type type, u8 interrupt_num)
1299 +static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
1300 + enum pci_epc_irq_type type, u16 interrupt_num)
1301 {
1302 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
1303
1304 if (!ep->ops->raise_irq)
1305 return -EINVAL;
1306
1307 - return ep->ops->raise_irq(ep, type, interrupt_num);
1308 + return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
1309 }
1310
1311 static void dw_pcie_ep_stop(struct pci_epc *epc)
1312 @@ -269,15 +363,130 @@ static const struct pci_epc_ops epc_ops
1313 .unmap_addr = dw_pcie_ep_unmap_addr,
1314 .set_msi = dw_pcie_ep_set_msi,
1315 .get_msi = dw_pcie_ep_get_msi,
1316 + .set_msix = dw_pcie_ep_set_msix,
1317 + .get_msix = dw_pcie_ep_get_msix,
1318 .raise_irq = dw_pcie_ep_raise_irq,
1319 .start = dw_pcie_ep_start,
1320 .stop = dw_pcie_ep_stop,
1321 };
1322
1323 +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
1324 +{
1325 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1326 + struct device *dev = pci->dev;
1327 +
1328 + dev_err(dev, "EP cannot trigger legacy IRQs\n");
1329 +
1330 + return -EINVAL;
1331 +}
1332 +
1333 +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1334 + u8 interrupt_num)
1335 +{
1336 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1337 + struct pci_epc *epc = ep->epc;
1338 + u16 msg_ctrl, msg_data;
1339 + u32 msg_addr_lower, msg_addr_upper, reg;
1340 + u64 msg_addr;
1341 + bool has_upper;
1342 + int ret;
1343 +
1344 + if (!ep->msi_cap)
1345 + return -EINVAL;
1346 +
1347 + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
1348 + reg = ep->msi_cap + PCI_MSI_FLAGS;
1349 + msg_ctrl = dw_pcie_readw_dbi(pci, reg);
1350 + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
1351 + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
1352 + msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
1353 + if (has_upper) {
1354 + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
1355 + msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
1356 + reg = ep->msi_cap + PCI_MSI_DATA_64;
1357 + msg_data = dw_pcie_readw_dbi(pci, reg);
1358 + } else {
1359 + msg_addr_upper = 0;
1360 + reg = ep->msi_cap + PCI_MSI_DATA_32;
1361 + msg_data = dw_pcie_readw_dbi(pci, reg);
1362 + }
1363 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
1364 + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
1365 + epc->mem->page_size);
1366 + if (ret)
1367 + return ret;
1368 +
1369 + writel(msg_data | (interrupt_num - 1), ep->msi_mem);
1370 +
1371 + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
1372 +
1373 + return 0;
1374 +}
1375 +
1376 +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1377 + u16 interrupt_num)
1378 +{
1379 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1380 + struct pci_epc *epc = ep->epc;
1381 + u16 tbl_offset, bir;
1382 + u32 bar_addr_upper, bar_addr_lower;
1383 + u32 msg_addr_upper, msg_addr_lower;
1384 + u32 reg, msg_data, vec_ctrl;
1385 + u64 tbl_addr, msg_addr, reg_u64;
1386 + void __iomem *msix_tbl;
1387 + int ret;
1388 +
1389 + reg = ep->msix_cap + PCI_MSIX_TABLE;
1390 + tbl_offset = dw_pcie_readl_dbi(pci, reg);
1391 + bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
1392 + tbl_offset &= PCI_MSIX_TABLE_OFFSET;
1393 +
1394 + reg = PCI_BASE_ADDRESS_0 + (4 * bir);
1395 + bar_addr_upper = 0;
1396 + bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
1397 + reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
1398 + if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1399 + bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
1400 +
1401 + tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
1402 + tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
1403 + tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
1404 +
1405 + msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
1406 + PCI_MSIX_ENTRY_SIZE);
1407 + if (!msix_tbl)
1408 + return -EINVAL;
1409 +
1410 + msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
1411 + msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
1412 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
1413 + msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
1414 + vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
1415 +
1416 + iounmap(msix_tbl);
1417 +
1418 + if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
1419 + return -EPERM;
1420 +
1421 + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
1422 + epc->mem->page_size);
1423 + if (ret)
1424 + return ret;
1425 +
1426 + writel(msg_data, ep->msi_mem);
1427 +
1428 + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
1429 +
1430 + return 0;
1431 +}
1432 +
1433 void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
1434 {
1435 struct pci_epc *epc = ep->epc;
1436
1437 + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
1438 + epc->mem->page_size);
1439 +
1440 pci_epc_mem_exit(epc);
1441 }
1442
1443 @@ -291,7 +500,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1444 struct device_node *np = dev->of_node;
1445
1446 if (!pci->dbi_base || !pci->dbi_base2) {
1447 - dev_err(dev, "dbi_base/deb_base2 is not populated\n");
1448 + dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
1449 return -EINVAL;
1450 }
1451
1452 @@ -333,15 +542,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1453 return -ENOMEM;
1454 ep->outbound_addr = addr;
1455
1456 - if (ep->ops->ep_init)
1457 - ep->ops->ep_init(ep);
1458 -
1459 epc = devm_pci_epc_create(dev, &epc_ops);
1460 if (IS_ERR(epc)) {
1461 dev_err(dev, "failed to create epc device\n");
1462 return PTR_ERR(epc);
1463 }
1464
1465 + ep->epc = epc;
1466 + epc_set_drvdata(epc, ep);
1467 +
1468 + if (ep->ops->ep_init)
1469 + ep->ops->ep_init(ep);
1470 +
1471 ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
1472 if (ret < 0)
1473 epc->max_functions = 1;
1474 @@ -353,8 +565,16 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
1475 return ret;
1476 }
1477
1478 - ep->epc = epc;
1479 - epc_set_drvdata(epc, ep);
1480 + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
1481 + epc->mem->page_size);
1482 + if (!ep->msi_mem) {
1483 + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
1484 + return -ENOMEM;
1485 + }
1486 + ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
1487 +
1488 + ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
1489 +
1490 dw_pcie_setup(pci);
1491
1492 return 0;
1493 --- a/drivers/pci/dwc/pcie-designware-host.c
1494 +++ b/drivers/pci/dwc/pcie-designware-host.c
1495 @@ -1,3 +1,4 @@
1496 +// SPDX-License-Identifier: GPL-2.0
1497 /*
1498 * Synopsys DesignWare PCIe host controller driver
1499 *
1500 @@ -5,10 +6,6 @@
1501 * http://www.samsung.com
1502 *
1503 * Author: Jingoo Han <jg1.han@samsung.com>
1504 - *
1505 - * This program is free software; you can redistribute it and/or modify
1506 - * it under the terms of the GNU General Public License version 2 as
1507 - * published by the Free Software Foundation.
1508 */
1509
1510 #include <linux/irqdomain.h>
1511 --- a/drivers/pci/dwc/pcie-designware-plat.c
1512 +++ b/drivers/pci/dwc/pcie-designware-plat.c
1513 @@ -1,13 +1,10 @@
1514 +// SPDX-License-Identifier: GPL-2.0
1515 /*
1516 * PCIe RC driver for Synopsys DesignWare Core
1517 *
1518 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
1519 *
1520 * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
1521 - *
1522 - * This program is free software; you can redistribute it and/or modify
1523 - * it under the terms of the GNU General Public License version 2 as
1524 - * published by the Free Software Foundation.
1525 */
1526 #include <linux/clk.h>
1527 #include <linux/delay.h>
1528 @@ -15,19 +12,29 @@
1529 #include <linux/interrupt.h>
1530 #include <linux/kernel.h>
1531 #include <linux/init.h>
1532 +#include <linux/of_device.h>
1533 #include <linux/of_gpio.h>
1534 #include <linux/pci.h>
1535 #include <linux/platform_device.h>
1536 #include <linux/resource.h>
1537 #include <linux/signal.h>
1538 #include <linux/types.h>
1539 +#include <linux/regmap.h>
1540
1541 #include "pcie-designware.h"
1542
1543 struct dw_plat_pcie {
1544 - struct dw_pcie *pci;
1545 + struct dw_pcie *pci;
1546 + struct regmap *regmap;
1547 + enum dw_pcie_device_mode mode;
1548 +};
1549 +
1550 +struct dw_plat_pcie_of_data {
1551 + enum dw_pcie_device_mode mode;
1552 };
1553
1554 +static const struct of_device_id dw_plat_pcie_of_match[];
1555 +
1556 static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
1557 {
1558 struct pcie_port *pp = arg;
1559 @@ -52,9 +59,58 @@ static const struct dw_pcie_host_ops dw_
1560 .host_init = dw_plat_pcie_host_init,
1561 };
1562
1563 -static int dw_plat_add_pcie_port(struct pcie_port *pp,
1564 +static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
1565 +{
1566 + return 0;
1567 +}
1568 +
1569 +static const struct dw_pcie_ops dw_pcie_ops = {
1570 + .start_link = dw_plat_pcie_establish_link,
1571 +};
1572 +
1573 +static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
1574 +{
1575 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1576 + struct pci_epc *epc = ep->epc;
1577 + enum pci_barno bar;
1578 +
1579 + for (bar = BAR_0; bar <= BAR_5; bar++)
1580 + dw_pcie_ep_reset_bar(pci, bar);
1581 +
1582 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
1583 + epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
1584 +}
1585 +
1586 +static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1587 + enum pci_epc_irq_type type,
1588 + u16 interrupt_num)
1589 +{
1590 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1591 +
1592 + switch (type) {
1593 + case PCI_EPC_IRQ_LEGACY:
1594 + return dw_pcie_ep_raise_legacy_irq(ep, func_no);
1595 + case PCI_EPC_IRQ_MSI:
1596 + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
1597 + case PCI_EPC_IRQ_MSIX:
1598 + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
1599 + default:
1600 + dev_err(pci->dev, "UNKNOWN IRQ type\n");
1601 + }
1602 +
1603 + return 0;
1604 +}
1605 +
1606 +static struct dw_pcie_ep_ops pcie_ep_ops = {
1607 + .ep_init = dw_plat_pcie_ep_init,
1608 + .raise_irq = dw_plat_pcie_ep_raise_irq,
1609 +};
1610 +
1611 +static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
1612 struct platform_device *pdev)
1613 {
1614 + struct dw_pcie *pci = dw_plat_pcie->pci;
1615 + struct pcie_port *pp = &pci->pp;
1616 struct device *dev = &pdev->dev;
1617 int ret;
1618
1619 @@ -82,15 +138,44 @@ static int dw_plat_add_pcie_port(struct
1620
1621 ret = dw_pcie_host_init(pp);
1622 if (ret) {
1623 - dev_err(dev, "failed to initialize host\n");
1624 + dev_err(dev, "Failed to initialize host\n");
1625 return ret;
1626 }
1627
1628 return 0;
1629 }
1630
1631 -static const struct dw_pcie_ops dw_pcie_ops = {
1632 -};
1633 +static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
1634 + struct platform_device *pdev)
1635 +{
1636 + int ret;
1637 + struct dw_pcie_ep *ep;
1638 + struct resource *res;
1639 + struct device *dev = &pdev->dev;
1640 + struct dw_pcie *pci = dw_plat_pcie->pci;
1641 +
1642 + ep = &pci->ep;
1643 + ep->ops = &pcie_ep_ops;
1644 +
1645 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
1646 + pci->dbi_base2 = devm_ioremap_resource(dev, res);
1647 + if (IS_ERR(pci->dbi_base2))
1648 + return PTR_ERR(pci->dbi_base2);
1649 +
1650 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
1651 + if (!res)
1652 + return -EINVAL;
1653 +
1654 + ep->phys_base = res->start;
1655 + ep->addr_size = resource_size(res);
1656 +
1657 + ret = dw_pcie_ep_init(ep);
1658 + if (ret) {
1659 + dev_err(dev, "Failed to initialize endpoint\n");
1660 + return ret;
1661 + }
1662 + return 0;
1663 +}
1664
1665 static int dw_plat_pcie_probe(struct platform_device *pdev)
1666 {
1667 @@ -99,6 +184,16 @@ static int dw_plat_pcie_probe(struct pla
1668 struct dw_pcie *pci;
1669 struct resource *res; /* Resource from DT */
1670 int ret;
1671 + const struct of_device_id *match;
1672 + const struct dw_plat_pcie_of_data *data;
1673 + enum dw_pcie_device_mode mode;
1674 +
1675 + match = of_match_device(dw_plat_pcie_of_match, dev);
1676 + if (!match)
1677 + return -EINVAL;
1678 +
1679 + data = (struct dw_plat_pcie_of_data *)match->data;
1680 + mode = (enum dw_pcie_device_mode)data->mode;
1681
1682 dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
1683 if (!dw_plat_pcie)
1684 @@ -112,23 +207,59 @@ static int dw_plat_pcie_probe(struct pla
1685 pci->ops = &dw_pcie_ops;
1686
1687 dw_plat_pcie->pci = pci;
1688 + dw_plat_pcie->mode = mode;
1689 +
1690 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1691 + if (!res)
1692 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1693
1694 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1695 pci->dbi_base = devm_ioremap_resource(dev, res);
1696 if (IS_ERR(pci->dbi_base))
1697 return PTR_ERR(pci->dbi_base);
1698
1699 platform_set_drvdata(pdev, dw_plat_pcie);
1700
1701 - ret = dw_plat_add_pcie_port(&pci->pp, pdev);
1702 - if (ret < 0)
1703 - return ret;
1704 + switch (dw_plat_pcie->mode) {
1705 + case DW_PCIE_RC_TYPE:
1706 + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
1707 + return -ENODEV;
1708 +
1709 + ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
1710 + if (ret < 0)
1711 + return ret;
1712 + break;
1713 + case DW_PCIE_EP_TYPE:
1714 + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
1715 + return -ENODEV;
1716 +
1717 + ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
1718 + if (ret < 0)
1719 + return ret;
1720 + break;
1721 + default:
1722 + dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
1723 + }
1724
1725 return 0;
1726 }
1727
1728 +static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
1729 + .mode = DW_PCIE_RC_TYPE,
1730 +};
1731 +
1732 +static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
1733 + .mode = DW_PCIE_EP_TYPE,
1734 +};
1735 +
1736 static const struct of_device_id dw_plat_pcie_of_match[] = {
1737 - { .compatible = "snps,dw-pcie", },
1738 + {
1739 + .compatible = "snps,dw-pcie",
1740 + .data = &dw_plat_pcie_rc_of_data,
1741 + },
1742 + {
1743 + .compatible = "snps,dw-pcie-ep",
1744 + .data = &dw_plat_pcie_ep_of_data,
1745 + },
1746 {},
1747 };
1748
1749 --- a/drivers/pci/dwc/pcie-designware.c
1750 +++ b/drivers/pci/dwc/pcie-designware.c
1751 @@ -1,3 +1,4 @@
1752 +// SPDX-License-Identifier: GPL-2.0
1753 /*
1754 * Synopsys DesignWare PCIe host controller driver
1755 *
1756 @@ -5,10 +6,6 @@
1757 * http://www.samsung.com
1758 *
1759 * Author: Jingoo Han <jg1.han@samsung.com>
1760 - *
1761 - * This program is free software; you can redistribute it and/or modify
1762 - * it under the terms of the GNU General Public License version 2 as
1763 - * published by the Free Software Foundation.
1764 */
1765
1766 #include <linux/delay.h>
1767 --- a/drivers/pci/dwc/pcie-designware.h
1768 +++ b/drivers/pci/dwc/pcie-designware.h
1769 @@ -1,3 +1,4 @@
1770 +// SPDX-License-Identifier: GPL-2.0
1771 /*
1772 * Synopsys DesignWare PCIe host controller driver
1773 *
1774 @@ -5,10 +6,6 @@
1775 * http://www.samsung.com
1776 *
1777 * Author: Jingoo Han <jg1.han@samsung.com>
1778 - *
1779 - * This program is free software; you can redistribute it and/or modify
1780 - * it under the terms of the GNU General Public License version 2 as
1781 - * published by the Free Software Foundation.
1782 */
1783
1784 #ifndef _PCIE_DESIGNWARE_H
1785 @@ -97,15 +94,6 @@
1786 #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
1787 ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
1788
1789 -#define MSI_MESSAGE_CONTROL 0x52
1790 -#define MSI_CAP_MMC_SHIFT 1
1791 -#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
1792 -#define MSI_CAP_MME_SHIFT 4
1793 -#define MSI_CAP_MSI_EN_MASK 0x1
1794 -#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
1795 -#define MSI_MESSAGE_ADDR_L32 0x54
1796 -#define MSI_MESSAGE_ADDR_U32 0x58
1797 -
1798 /*
1799 * Maximum number of MSI IRQs can be 256 per controller. But keep
1800 * it 32 as of now. Probably we will never need more than 32. If needed,
1801 @@ -118,6 +106,10 @@
1802 #define MAX_IATU_IN 256
1803 #define MAX_IATU_OUT 256
1804
1805 +/* Maximum number of inbound/outbound iATUs */
1806 +#define MAX_IATU_IN 256
1807 +#define MAX_IATU_OUT 256
1808 +
1809 struct pcie_port;
1810 struct dw_pcie;
1811 struct dw_pcie_ep;
1812 @@ -185,8 +177,8 @@ enum dw_pcie_as_type {
1813
1814 struct dw_pcie_ep_ops {
1815 void (*ep_init)(struct dw_pcie_ep *ep);
1816 - int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
1817 - u8 interrupt_num);
1818 + int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
1819 + enum pci_epc_irq_type type, u16 interrupt_num);
1820 };
1821
1822 struct dw_pcie_ep {
1823 @@ -201,6 +193,10 @@ struct dw_pcie_ep {
1824 unsigned long *ob_window_map;
1825 u32 num_ib_windows;
1826 u32 num_ob_windows;
1827 + void __iomem *msi_mem;
1828 + phys_addr_t msi_mem_phys;
1829 + u8 msi_cap; /* MSI capability offset */
1830 + u8 msix_cap; /* MSI-X capability offset */
1831 };
1832
1833 struct dw_pcie_ops {
1834 @@ -339,6 +335,12 @@ static inline int dw_pcie_host_init(stru
1835 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
1836 int dw_pcie_ep_init(struct dw_pcie_ep *ep);
1837 void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
1838 +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
1839 +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1840 + u8 interrupt_num);
1841 +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1842 + u16 interrupt_num);
1843 +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
1844 #else
1845 static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
1846 {
1847 @@ -352,5 +354,26 @@ static inline int dw_pcie_ep_init(struct
1848 static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
1849 {
1850 }
1851 +
1852 +static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
1853 +{
1854 + return 0;
1855 +}
1856 +
1857 +static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1858 + u8 interrupt_num)
1859 +{
1860 + return 0;
1861 +}
1862 +
1863 +static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
1864 + u16 interrupt_num)
1865 +{
1866 + return 0;
1867 +}
1868 +
1869 +static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
1870 +{
1871 +}
1872 #endif
1873 #endif /* _PCIE_DESIGNWARE_H */
1874 --- a/drivers/pci/endpoint/Kconfig
1875 +++ b/drivers/pci/endpoint/Kconfig
1876 @@ -1,3 +1,4 @@
1877 +# SPDX-License-Identifier: GPL-2.0
1878 #
1879 # PCI Endpoint Support
1880 #
1881 --- a/drivers/pci/endpoint/Makefile
1882 +++ b/drivers/pci/endpoint/Makefile
1883 @@ -1,3 +1,4 @@
1884 +# SPDX-License-Identifier: GPL-2.0
1885 #
1886 # Makefile for PCI Endpoint Support
1887 #
1888 --- a/drivers/pci/endpoint/functions/Kconfig
1889 +++ b/drivers/pci/endpoint/functions/Kconfig
1890 @@ -1,3 +1,4 @@
1891 +# SPDX-License-Identifier: GPL-2.0
1892 #
1893 # PCI Endpoint Functions
1894 #
1895 --- a/drivers/pci/endpoint/functions/Makefile
1896 +++ b/drivers/pci/endpoint/functions/Makefile
1897 @@ -1,3 +1,4 @@
1898 +# SPDX-License-Identifier: GPL-2.0
1899 #
1900 # Makefile for PCI Endpoint Functions
1901 #
1902 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
1903 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
1904 @@ -1,20 +1,9 @@
1905 +// SPDX-License-Identifier: GPL-2.0
1906 /**
1907 * Test driver to test endpoint functionality
1908 *
1909 * Copyright (C) 2017 Texas Instruments
1910 * Author: Kishon Vijay Abraham I <kishon@ti.com>
1911 - *
1912 - * This program is free software: you can redistribute it and/or modify
1913 - * it under the terms of the GNU General Public License version 2 of
1914 - * the License as published by the Free Software Foundation.
1915 - *
1916 - * This program is distributed in the hope that it will be useful,
1917 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1918 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1919 - * GNU General Public License for more details.
1920 - *
1921 - * You should have received a copy of the GNU General Public License
1922 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
1923 */
1924
1925 #include <linux/crc32.h>
1926 @@ -29,13 +18,16 @@
1927 #include <linux/pci-epf.h>
1928 #include <linux/pci_regs.h>
1929
1930 +#define IRQ_TYPE_LEGACY 0
1931 +#define IRQ_TYPE_MSI 1
1932 +#define IRQ_TYPE_MSIX 2
1933 +
1934 #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
1935 #define COMMAND_RAISE_MSI_IRQ BIT(1)
1936 -#define MSI_NUMBER_SHIFT 2
1937 -#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
1938 -#define COMMAND_READ BIT(8)
1939 -#define COMMAND_WRITE BIT(9)
1940 -#define COMMAND_COPY BIT(10)
1941 +#define COMMAND_RAISE_MSIX_IRQ BIT(2)
1942 +#define COMMAND_READ BIT(3)
1943 +#define COMMAND_WRITE BIT(4)
1944 +#define COMMAND_COPY BIT(5)
1945
1946 #define STATUS_READ_SUCCESS BIT(0)
1947 #define STATUS_READ_FAIL BIT(1)
1948 @@ -56,6 +48,7 @@ struct pci_epf_test {
1949 struct pci_epf *epf;
1950 enum pci_barno test_reg_bar;
1951 bool linkup_notifier;
1952 + bool msix_available;
1953 struct delayed_work cmd_handler;
1954 };
1955
1956 @@ -67,6 +60,8 @@ struct pci_epf_test_reg {
1957 u64 dst_addr;
1958 u32 size;
1959 u32 checksum;
1960 + u32 irq_type;
1961 + u32 irq_number;
1962 } __packed;
1963
1964 static struct pci_epf_header test_header = {
1965 @@ -81,7 +76,7 @@ struct pci_epf_test_data {
1966 bool linkup_notifier;
1967 };
1968
1969 -static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
1970 +static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
1971
1972 static int pci_epf_test_copy(struct pci_epf_test *epf_test)
1973 {
1974 @@ -98,43 +93,45 @@ static int pci_epf_test_copy(struct pci_
1975
1976 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
1977 if (!src_addr) {
1978 - dev_err(dev, "failed to allocate source address\n");
1979 + dev_err(dev, "Failed to allocate source address\n");
1980 reg->status = STATUS_SRC_ADDR_INVALID;
1981 ret = -ENOMEM;
1982 goto err;
1983 }
1984
1985 - ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
1986 + ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
1987 + reg->size);
1988 if (ret) {
1989 - dev_err(dev, "failed to map source address\n");
1990 + dev_err(dev, "Failed to map source address\n");
1991 reg->status = STATUS_SRC_ADDR_INVALID;
1992 goto err_src_addr;
1993 }
1994
1995 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
1996 if (!dst_addr) {
1997 - dev_err(dev, "failed to allocate destination address\n");
1998 + dev_err(dev, "Failed to allocate destination address\n");
1999 reg->status = STATUS_DST_ADDR_INVALID;
2000 ret = -ENOMEM;
2001 goto err_src_map_addr;
2002 }
2003
2004 - ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
2005 + ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
2006 + reg->size);
2007 if (ret) {
2008 - dev_err(dev, "failed to map destination address\n");
2009 + dev_err(dev, "Failed to map destination address\n");
2010 reg->status = STATUS_DST_ADDR_INVALID;
2011 goto err_dst_addr;
2012 }
2013
2014 memcpy(dst_addr, src_addr, reg->size);
2015
2016 - pci_epc_unmap_addr(epc, dst_phys_addr);
2017 + pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
2018
2019 err_dst_addr:
2020 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
2021
2022 err_src_map_addr:
2023 - pci_epc_unmap_addr(epc, src_phys_addr);
2024 + pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
2025
2026 err_src_addr:
2027 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
2028 @@ -158,15 +155,16 @@ static int pci_epf_test_read(struct pci_
2029
2030 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
2031 if (!src_addr) {
2032 - dev_err(dev, "failed to allocate address\n");
2033 + dev_err(dev, "Failed to allocate address\n");
2034 reg->status = STATUS_SRC_ADDR_INVALID;
2035 ret = -ENOMEM;
2036 goto err;
2037 }
2038
2039 - ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
2040 + ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
2041 + reg->size);
2042 if (ret) {
2043 - dev_err(dev, "failed to map address\n");
2044 + dev_err(dev, "Failed to map address\n");
2045 reg->status = STATUS_SRC_ADDR_INVALID;
2046 goto err_addr;
2047 }
2048 @@ -186,7 +184,7 @@ static int pci_epf_test_read(struct pci_
2049 kfree(buf);
2050
2051 err_map_addr:
2052 - pci_epc_unmap_addr(epc, phys_addr);
2053 + pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
2054
2055 err_addr:
2056 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
2057 @@ -209,15 +207,16 @@ static int pci_epf_test_write(struct pci
2058
2059 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
2060 if (!dst_addr) {
2061 - dev_err(dev, "failed to allocate address\n");
2062 + dev_err(dev, "Failed to allocate address\n");
2063 reg->status = STATUS_DST_ADDR_INVALID;
2064 ret = -ENOMEM;
2065 goto err;
2066 }
2067
2068 - ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
2069 + ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
2070 + reg->size);
2071 if (ret) {
2072 - dev_err(dev, "failed to map address\n");
2073 + dev_err(dev, "Failed to map address\n");
2074 reg->status = STATUS_DST_ADDR_INVALID;
2075 goto err_addr;
2076 }
2077 @@ -237,12 +236,12 @@ static int pci_epf_test_write(struct pci
2078 * wait 1ms inorder for the write to complete. Without this delay L3
2079 * error in observed in the host system.
2080 */
2081 - mdelay(1);
2082 + usleep_range(1000, 2000);
2083
2084 kfree(buf);
2085
2086 err_map_addr:
2087 - pci_epc_unmap_addr(epc, phys_addr);
2088 + pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
2089
2090 err_addr:
2091 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
2092 @@ -251,31 +250,42 @@ err:
2093 return ret;
2094 }
2095
2096 -static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
2097 +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
2098 + u16 irq)
2099 {
2100 - u8 msi_count;
2101 struct pci_epf *epf = epf_test->epf;
2102 + struct device *dev = &epf->dev;
2103 struct pci_epc *epc = epf->epc;
2104 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2105 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
2106
2107 reg->status |= STATUS_IRQ_RAISED;
2108 - msi_count = pci_epc_get_msi(epc);
2109 - if (irq > msi_count || msi_count <= 0)
2110 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
2111 - else
2112 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
2113 +
2114 + switch (irq_type) {
2115 + case IRQ_TYPE_LEGACY:
2116 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
2117 + break;
2118 + case IRQ_TYPE_MSI:
2119 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
2120 + break;
2121 + case IRQ_TYPE_MSIX:
2122 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
2123 + break;
2124 + default:
2125 + dev_err(dev, "Failed to raise IRQ, unknown type\n");
2126 + break;
2127 + }
2128 }
2129
2130 static void pci_epf_test_cmd_handler(struct work_struct *work)
2131 {
2132 int ret;
2133 - u8 irq;
2134 - u8 msi_count;
2135 + int count;
2136 u32 command;
2137 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
2138 cmd_handler.work);
2139 struct pci_epf *epf = epf_test->epf;
2140 + struct device *dev = &epf->dev;
2141 struct pci_epc *epc = epf->epc;
2142 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2143 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
2144 @@ -287,11 +297,14 @@ static void pci_epf_test_cmd_handler(str
2145 reg->command = 0;
2146 reg->status = 0;
2147
2148 - irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
2149 + if (reg->irq_type > IRQ_TYPE_MSIX) {
2150 + dev_err(dev, "Failed to detect IRQ type\n");
2151 + goto reset_handler;
2152 + }
2153
2154 if (command & COMMAND_RAISE_LEGACY_IRQ) {
2155 reg->status = STATUS_IRQ_RAISED;
2156 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
2157 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
2158 goto reset_handler;
2159 }
2160
2161 @@ -301,7 +314,8 @@ static void pci_epf_test_cmd_handler(str
2162 reg->status |= STATUS_WRITE_FAIL;
2163 else
2164 reg->status |= STATUS_WRITE_SUCCESS;
2165 - pci_epf_test_raise_irq(epf_test, irq);
2166 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2167 + reg->irq_number);
2168 goto reset_handler;
2169 }
2170
2171 @@ -311,7 +325,8 @@ static void pci_epf_test_cmd_handler(str
2172 reg->status |= STATUS_READ_SUCCESS;
2173 else
2174 reg->status |= STATUS_READ_FAIL;
2175 - pci_epf_test_raise_irq(epf_test, irq);
2176 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2177 + reg->irq_number);
2178 goto reset_handler;
2179 }
2180
2181 @@ -321,16 +336,28 @@ static void pci_epf_test_cmd_handler(str
2182 reg->status |= STATUS_COPY_SUCCESS;
2183 else
2184 reg->status |= STATUS_COPY_FAIL;
2185 - pci_epf_test_raise_irq(epf_test, irq);
2186 + pci_epf_test_raise_irq(epf_test, reg->irq_type,
2187 + reg->irq_number);
2188 goto reset_handler;
2189 }
2190
2191 if (command & COMMAND_RAISE_MSI_IRQ) {
2192 - msi_count = pci_epc_get_msi(epc);
2193 - if (irq > msi_count || msi_count <= 0)
2194 + count = pci_epc_get_msi(epc, epf->func_no);
2195 + if (reg->irq_number > count || count <= 0)
2196 + goto reset_handler;
2197 + reg->status = STATUS_IRQ_RAISED;
2198 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
2199 + reg->irq_number);
2200 + goto reset_handler;
2201 + }
2202 +
2203 + if (command & COMMAND_RAISE_MSIX_IRQ) {
2204 + count = pci_epc_get_msix(epc, epf->func_no);
2205 + if (reg->irq_number > count || count <= 0)
2206 goto reset_handler;
2207 reg->status = STATUS_IRQ_RAISED;
2208 - pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
2209 + pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
2210 + reg->irq_number);
2211 goto reset_handler;
2212 }
2213
2214 @@ -351,21 +378,23 @@ static void pci_epf_test_unbind(struct p
2215 {
2216 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
2217 struct pci_epc *epc = epf->epc;
2218 + struct pci_epf_bar *epf_bar;
2219 int bar;
2220
2221 cancel_delayed_work(&epf_test->cmd_handler);
2222 pci_epc_stop(epc);
2223 for (bar = BAR_0; bar <= BAR_5; bar++) {
2224 + epf_bar = &epf->bar[bar];
2225 +
2226 if (epf_test->reg[bar]) {
2227 pci_epf_free_space(epf, epf_test->reg[bar], bar);
2228 - pci_epc_clear_bar(epc, bar);
2229 + pci_epc_clear_bar(epc, epf->func_no, epf_bar);
2230 }
2231 }
2232 }
2233
2234 static int pci_epf_test_set_bar(struct pci_epf *epf)
2235 {
2236 - int flags;
2237 int bar;
2238 int ret;
2239 struct pci_epf_bar *epf_bar;
2240 @@ -374,20 +403,27 @@ static int pci_epf_test_set_bar(struct p
2241 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
2242 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
2243
2244 - flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
2245 - if (sizeof(dma_addr_t) == 0x8)
2246 - flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
2247 -
2248 for (bar = BAR_0; bar <= BAR_5; bar++) {
2249 epf_bar = &epf->bar[bar];
2250 - ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
2251 - epf_bar->size, flags);
2252 +
2253 + epf_bar->flags |= upper_32_bits(epf_bar->size) ?
2254 + PCI_BASE_ADDRESS_MEM_TYPE_64 :
2255 + PCI_BASE_ADDRESS_MEM_TYPE_32;
2256 +
2257 + ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
2258 if (ret) {
2259 pci_epf_free_space(epf, epf_test->reg[bar], bar);
2260 - dev_err(dev, "failed to set BAR%d\n", bar);
2261 + dev_err(dev, "Failed to set BAR%d\n", bar);
2262 if (bar == test_reg_bar)
2263 return ret;
2264 }
2265 + /*
2266 + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
2267 + * if the specific implementation required a 64-bit BAR,
2268 + * even if we only requested a 32-bit BAR.
2269 + */
2270 + if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
2271 + bar++;
2272 }
2273
2274 return 0;
2275 @@ -404,7 +440,7 @@ static int pci_epf_test_alloc_space(stru
2276 base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
2277 test_reg_bar);
2278 if (!base) {
2279 - dev_err(dev, "failed to allocated register space\n");
2280 + dev_err(dev, "Failed to allocated register space\n");
2281 return -ENOMEM;
2282 }
2283 epf_test->reg[test_reg_bar] = base;
2284 @@ -414,7 +450,7 @@ static int pci_epf_test_alloc_space(stru
2285 continue;
2286 base = pci_epf_alloc_space(epf, bar_size[bar], bar);
2287 if (!base)
2288 - dev_err(dev, "failed to allocate space for BAR%d\n",
2289 + dev_err(dev, "Failed to allocate space for BAR%d\n",
2290 bar);
2291 epf_test->reg[bar] = base;
2292 }
2293 @@ -433,9 +469,18 @@ static int pci_epf_test_bind(struct pci_
2294 if (WARN_ON_ONCE(!epc))
2295 return -EINVAL;
2296
2297 - ret = pci_epc_write_header(epc, header);
2298 + if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
2299 + epf_test->linkup_notifier = false;
2300 + else
2301 + epf_test->linkup_notifier = true;
2302 +
2303 + epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
2304 +
2305 + epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
2306 +
2307 + ret = pci_epc_write_header(epc, epf->func_no, header);
2308 if (ret) {
2309 - dev_err(dev, "configuration header write failed\n");
2310 + dev_err(dev, "Configuration header write failed\n");
2311 return ret;
2312 }
2313
2314 @@ -447,9 +492,19 @@ static int pci_epf_test_bind(struct pci_
2315 if (ret)
2316 return ret;
2317
2318 - ret = pci_epc_set_msi(epc, epf->msi_interrupts);
2319 - if (ret)
2320 + ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
2321 + if (ret) {
2322 + dev_err(dev, "MSI configuration failed\n");
2323 return ret;
2324 + }
2325 +
2326 + if (epf_test->msix_available) {
2327 + ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
2328 + if (ret) {
2329 + dev_err(dev, "MSI-X configuration failed\n");
2330 + return ret;
2331 + }
2332 + }
2333
2334 if (!epf_test->linkup_notifier)
2335 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
2336 @@ -517,7 +572,7 @@ static int __init pci_epf_test_init(void
2337 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2338 ret = pci_epf_register_driver(&test_driver);
2339 if (ret) {
2340 - pr_err("failed to register pci epf test driver --> %d\n", ret);
2341 + pr_err("Failed to register pci epf test driver --> %d\n", ret);
2342 return ret;
2343 }
2344
2345 --- a/drivers/pci/endpoint/pci-ep-cfs.c
2346 +++ b/drivers/pci/endpoint/pci-ep-cfs.c
2347 @@ -1,35 +1,28 @@
2348 +// SPDX-License-Identifier: GPL-2.0
2349 /**
2350 * configfs to configure the PCI endpoint
2351 *
2352 * Copyright (C) 2017 Texas Instruments
2353 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2354 - *
2355 - * This program is free software: you can redistribute it and/or modify
2356 - * it under the terms of the GNU General Public License version 2 of
2357 - * the License as published by the Free Software Foundation.
2358 - *
2359 - * This program is distributed in the hope that it will be useful,
2360 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2361 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2362 - * GNU General Public License for more details.
2363 - *
2364 - * You should have received a copy of the GNU General Public License
2365 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2366 */
2367
2368 #include <linux/module.h>
2369 +#include <linux/idr.h>
2370 #include <linux/slab.h>
2371
2372 #include <linux/pci-epc.h>
2373 #include <linux/pci-epf.h>
2374 #include <linux/pci-ep-cfs.h>
2375
2376 +static DEFINE_IDR(functions_idr);
2377 +static DEFINE_MUTEX(functions_mutex);
2378 static struct config_group *functions_group;
2379 static struct config_group *controllers_group;
2380
2381 struct pci_epf_group {
2382 struct config_group group;
2383 struct pci_epf *epf;
2384 + int index;
2385 };
2386
2387 struct pci_epc_group {
2388 @@ -151,7 +144,7 @@ static struct configfs_item_operations p
2389 .drop_link = pci_epc_epf_unlink,
2390 };
2391
2392 -static struct config_item_type pci_epc_type = {
2393 +static const struct config_item_type pci_epc_type = {
2394 .ct_item_ops = &pci_epc_item_ops,
2395 .ct_attrs = pci_epc_attrs,
2396 .ct_owner = THIS_MODULE,
2397 @@ -293,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_sh
2398 to_pci_epf_group(item)->epf->msi_interrupts);
2399 }
2400
2401 +static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
2402 + const char *page, size_t len)
2403 +{
2404 + u16 val;
2405 + int ret;
2406 +
2407 + ret = kstrtou16(page, 0, &val);
2408 + if (ret)
2409 + return ret;
2410 +
2411 + to_pci_epf_group(item)->epf->msix_interrupts = val;
2412 +
2413 + return len;
2414 +}
2415 +
2416 +static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
2417 + char *page)
2418 +{
2419 + return sprintf(page, "%d\n",
2420 + to_pci_epf_group(item)->epf->msix_interrupts);
2421 +}
2422 +
2423 PCI_EPF_HEADER_R(vendorid)
2424 PCI_EPF_HEADER_W_u16(vendorid)
2425
2426 @@ -334,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id
2427 CONFIGFS_ATTR(pci_epf_, subsys_id);
2428 CONFIGFS_ATTR(pci_epf_, interrupt_pin);
2429 CONFIGFS_ATTR(pci_epf_, msi_interrupts);
2430 +CONFIGFS_ATTR(pci_epf_, msix_interrupts);
2431
2432 static struct configfs_attribute *pci_epf_attrs[] = {
2433 &pci_epf_attr_vendorid,
2434 @@ -347,6 +363,7 @@ static struct configfs_attribute *pci_ep
2435 &pci_epf_attr_subsys_id,
2436 &pci_epf_attr_interrupt_pin,
2437 &pci_epf_attr_msi_interrupts,
2438 + &pci_epf_attr_msix_interrupts,
2439 NULL,
2440 };
2441
2442 @@ -354,6 +371,9 @@ static void pci_epf_release(struct confi
2443 {
2444 struct pci_epf_group *epf_group = to_pci_epf_group(item);
2445
2446 + mutex_lock(&functions_mutex);
2447 + idr_remove(&functions_idr, epf_group->index);
2448 + mutex_unlock(&functions_mutex);
2449 pci_epf_destroy(epf_group->epf);
2450 kfree(epf_group);
2451 }
2452 @@ -362,7 +382,7 @@ static struct configfs_item_operations p
2453 .release = pci_epf_release,
2454 };
2455
2456 -static struct config_item_type pci_epf_type = {
2457 +static const struct config_item_type pci_epf_type = {
2458 .ct_item_ops = &pci_epf_ops,
2459 .ct_attrs = pci_epf_attrs,
2460 .ct_owner = THIS_MODULE,
2461 @@ -373,22 +393,57 @@ static struct config_group *pci_epf_make
2462 {
2463 struct pci_epf_group *epf_group;
2464 struct pci_epf *epf;
2465 + char *epf_name;
2466 + int index, err;
2467
2468 epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
2469 if (!epf_group)
2470 return ERR_PTR(-ENOMEM);
2471
2472 + mutex_lock(&functions_mutex);
2473 + index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
2474 + mutex_unlock(&functions_mutex);
2475 + if (index < 0) {
2476 + err = index;
2477 + goto free_group;
2478 + }
2479 +
2480 + epf_group->index = index;
2481 +
2482 config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
2483
2484 - epf = pci_epf_create(group->cg_item.ci_name);
2485 + epf_name = kasprintf(GFP_KERNEL, "%s.%d",
2486 + group->cg_item.ci_name, epf_group->index);
2487 + if (!epf_name) {
2488 + err = -ENOMEM;
2489 + goto remove_idr;
2490 + }
2491 +
2492 + epf = pci_epf_create(epf_name);
2493 if (IS_ERR(epf)) {
2494 pr_err("failed to create endpoint function device\n");
2495 - return ERR_PTR(-EINVAL);
2496 + err = -EINVAL;
2497 + goto free_name;
2498 }
2499
2500 epf_group->epf = epf;
2501
2502 + kfree(epf_name);
2503 +
2504 return &epf_group->group;
2505 +
2506 +free_name:
2507 + kfree(epf_name);
2508 +
2509 +remove_idr:
2510 + mutex_lock(&functions_mutex);
2511 + idr_remove(&functions_idr, epf_group->index);
2512 + mutex_unlock(&functions_mutex);
2513 +
2514 +free_group:
2515 + kfree(epf_group);
2516 +
2517 + return ERR_PTR(err);
2518 }
2519
2520 static void pci_epf_drop(struct config_group *group, struct config_item *item)
2521 @@ -401,7 +456,7 @@ static struct configfs_group_operations
2522 .drop_item = &pci_epf_drop,
2523 };
2524
2525 -static struct config_item_type pci_epf_group_type = {
2526 +static const struct config_item_type pci_epf_group_type = {
2527 .ct_group_ops = &pci_epf_group_ops,
2528 .ct_owner = THIS_MODULE,
2529 };
2530 @@ -429,15 +484,15 @@ void pci_ep_cfs_remove_epf_group(struct
2531 }
2532 EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
2533
2534 -static struct config_item_type pci_functions_type = {
2535 +static const struct config_item_type pci_functions_type = {
2536 .ct_owner = THIS_MODULE,
2537 };
2538
2539 -static struct config_item_type pci_controllers_type = {
2540 +static const struct config_item_type pci_controllers_type = {
2541 .ct_owner = THIS_MODULE,
2542 };
2543
2544 -static struct config_item_type pci_ep_type = {
2545 +static const struct config_item_type pci_ep_type = {
2546 .ct_owner = THIS_MODULE,
2547 };
2548
2549 --- a/drivers/pci/endpoint/pci-epc-core.c
2550 +++ b/drivers/pci/endpoint/pci-epc-core.c
2551 @@ -1,20 +1,9 @@
2552 +// SPDX-License-Identifier: GPL-2.0
2553 /**
2554 * PCI Endpoint *Controller* (EPC) library
2555 *
2556 * Copyright (C) 2017 Texas Instruments
2557 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2558 - *
2559 - * This program is free software: you can redistribute it and/or modify
2560 - * it under the terms of the GNU General Public License version 2 of
2561 - * the License as published by the Free Software Foundation.
2562 - *
2563 - * This program is distributed in the hope that it will be useful,
2564 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2565 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2566 - * GNU General Public License for more details.
2567 - *
2568 - * You should have received a copy of the GNU General Public License
2569 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2570 */
2571
2572 #include <linux/device.h>
2573 @@ -141,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
2574 /**
2575 * pci_epc_raise_irq() - interrupt the host system
2576 * @epc: the EPC device which has to interrupt the host
2577 - * @type: specify the type of interrupt; legacy or MSI
2578 - * @interrupt_num: the MSI interrupt number
2579 + * @func_no: the endpoint function number in the EPC device
2580 + * @type: specify the type of interrupt; legacy, MSI or MSI-X
2581 + * @interrupt_num: the MSI or MSI-X interrupt number
2582 *
2583 - * Invoke to raise an MSI or legacy interrupt
2584 + * Invoke to raise an legacy, MSI or MSI-X interrupt
2585 */
2586 -int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
2587 - u8 interrupt_num)
2588 +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
2589 + enum pci_epc_irq_type type, u16 interrupt_num)
2590 {
2591 int ret;
2592 unsigned long flags;
2593
2594 - if (IS_ERR(epc))
2595 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2596 return -EINVAL;
2597
2598 if (!epc->ops->raise_irq)
2599 return 0;
2600
2601 spin_lock_irqsave(&epc->lock, flags);
2602 - ret = epc->ops->raise_irq(epc, type, interrupt_num);
2603 + ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
2604 spin_unlock_irqrestore(&epc->lock, flags);
2605
2606 return ret;
2607 @@ -169,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
2608 /**
2609 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
2610 * @epc: the EPC device to which MSI interrupts was requested
2611 + * @func_no: the endpoint function number in the EPC device
2612 *
2613 * Invoke to get the number of MSI interrupts allocated by the RC
2614 */
2615 -int pci_epc_get_msi(struct pci_epc *epc)
2616 +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
2617 {
2618 int interrupt;
2619 unsigned long flags;
2620
2621 - if (IS_ERR(epc))
2622 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2623 return 0;
2624
2625 if (!epc->ops->get_msi)
2626 return 0;
2627
2628 spin_lock_irqsave(&epc->lock, flags);
2629 - interrupt = epc->ops->get_msi(epc);
2630 + interrupt = epc->ops->get_msi(epc, func_no);
2631 spin_unlock_irqrestore(&epc->lock, flags);
2632
2633 if (interrupt < 0)
2634 @@ -199,17 +190,19 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
2635 /**
2636 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
2637 * @epc: the EPC device on which MSI has to be configured
2638 + * @func_no: the endpoint function number in the EPC device
2639 * @interrupts: number of MSI interrupts required by the EPF
2640 *
2641 * Invoke to set the required number of MSI interrupts.
2642 */
2643 -int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
2644 +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
2645 {
2646 int ret;
2647 u8 encode_int;
2648 unsigned long flags;
2649
2650 - if (IS_ERR(epc))
2651 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2652 + interrupts > 32)
2653 return -EINVAL;
2654
2655 if (!epc->ops->set_msi)
2656 @@ -218,7 +211,7 @@ int pci_epc_set_msi(struct pci_epc *epc,
2657 encode_int = order_base_2(interrupts);
2658
2659 spin_lock_irqsave(&epc->lock, flags);
2660 - ret = epc->ops->set_msi(epc, encode_int);
2661 + ret = epc->ops->set_msi(epc, func_no, encode_int);
2662 spin_unlock_irqrestore(&epc->lock, flags);
2663
2664 return ret;
2665 @@ -226,24 +219,83 @@ int pci_epc_set_msi(struct pci_epc *epc,
2666 EXPORT_SYMBOL_GPL(pci_epc_set_msi);
2667
2668 /**
2669 + * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
2670 + * @epc: the EPC device to which MSI-X interrupts was requested
2671 + * @func_no: the endpoint function number in the EPC device
2672 + *
2673 + * Invoke to get the number of MSI-X interrupts allocated by the RC
2674 + */
2675 +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
2676 +{
2677 + int interrupt;
2678 + unsigned long flags;
2679 +
2680 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2681 + return 0;
2682 +
2683 + if (!epc->ops->get_msix)
2684 + return 0;
2685 +
2686 + spin_lock_irqsave(&epc->lock, flags);
2687 + interrupt = epc->ops->get_msix(epc, func_no);
2688 + spin_unlock_irqrestore(&epc->lock, flags);
2689 +
2690 + if (interrupt < 0)
2691 + return 0;
2692 +
2693 + return interrupt + 1;
2694 +}
2695 +EXPORT_SYMBOL_GPL(pci_epc_get_msix);
2696 +
2697 +/**
2698 + * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
2699 + * @epc: the EPC device on which MSI-X has to be configured
2700 + * @func_no: the endpoint function number in the EPC device
2701 + * @interrupts: number of MSI-X interrupts required by the EPF
2702 + *
2703 + * Invoke to set the required number of MSI-X interrupts.
2704 + */
2705 +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
2706 +{
2707 + int ret;
2708 + unsigned long flags;
2709 +
2710 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2711 + interrupts < 1 || interrupts > 2048)
2712 + return -EINVAL;
2713 +
2714 + if (!epc->ops->set_msix)
2715 + return 0;
2716 +
2717 + spin_lock_irqsave(&epc->lock, flags);
2718 + ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
2719 + spin_unlock_irqrestore(&epc->lock, flags);
2720 +
2721 + return ret;
2722 +}
2723 +EXPORT_SYMBOL_GPL(pci_epc_set_msix);
2724 +
2725 +/**
2726 * pci_epc_unmap_addr() - unmap CPU address from PCI address
2727 * @epc: the EPC device on which address is allocated
2728 + * @func_no: the endpoint function number in the EPC device
2729 * @phys_addr: physical address of the local system
2730 *
2731 * Invoke to unmap the CPU address from PCI address.
2732 */
2733 -void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
2734 +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
2735 + phys_addr_t phys_addr)
2736 {
2737 unsigned long flags;
2738
2739 - if (IS_ERR(epc))
2740 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2741 return;
2742
2743 if (!epc->ops->unmap_addr)
2744 return;
2745
2746 spin_lock_irqsave(&epc->lock, flags);
2747 - epc->ops->unmap_addr(epc, phys_addr);
2748 + epc->ops->unmap_addr(epc, func_no, phys_addr);
2749 spin_unlock_irqrestore(&epc->lock, flags);
2750 }
2751 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
2752 @@ -251,26 +303,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
2753 /**
2754 * pci_epc_map_addr() - map CPU address to PCI address
2755 * @epc: the EPC device on which address is allocated
2756 + * @func_no: the endpoint function number in the EPC device
2757 * @phys_addr: physical address of the local system
2758 * @pci_addr: PCI address to which the physical address should be mapped
2759 * @size: the size of the allocation
2760 *
2761 * Invoke to map CPU address with PCI address.
2762 */
2763 -int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
2764 - u64 pci_addr, size_t size)
2765 +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
2766 + phys_addr_t phys_addr, u64 pci_addr, size_t size)
2767 {
2768 int ret;
2769 unsigned long flags;
2770
2771 - if (IS_ERR(epc))
2772 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2773 return -EINVAL;
2774
2775 if (!epc->ops->map_addr)
2776 return 0;
2777
2778 spin_lock_irqsave(&epc->lock, flags);
2779 - ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
2780 + ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
2781 spin_unlock_irqrestore(&epc->lock, flags);
2782
2783 return ret;
2784 @@ -280,22 +333,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
2785 /**
2786 * pci_epc_clear_bar() - reset the BAR
2787 * @epc: the EPC device for which the BAR has to be cleared
2788 - * @bar: the BAR number that has to be reset
2789 + * @func_no: the endpoint function number in the EPC device
2790 + * @epf_bar: the struct epf_bar that contains the BAR information
2791 *
2792 * Invoke to reset the BAR of the endpoint device.
2793 */
2794 -void pci_epc_clear_bar(struct pci_epc *epc, int bar)
2795 +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
2796 + struct pci_epf_bar *epf_bar)
2797 {
2798 unsigned long flags;
2799
2800 - if (IS_ERR(epc))
2801 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2802 + (epf_bar->barno == BAR_5 &&
2803 + epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
2804 return;
2805
2806 if (!epc->ops->clear_bar)
2807 return;
2808
2809 spin_lock_irqsave(&epc->lock, flags);
2810 - epc->ops->clear_bar(epc, bar);
2811 + epc->ops->clear_bar(epc, func_no, epf_bar);
2812 spin_unlock_irqrestore(&epc->lock, flags);
2813 }
2814 EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
2815 @@ -303,26 +360,32 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
2816 /**
2817 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
2818 * @epc: the EPC device on which BAR has to be configured
2819 - * @bar: the BAR number that has to be configured
2820 - * @size: the size of the addr space
2821 - * @flags: specify memory allocation/io allocation/32bit address/64 bit address
2822 + * @func_no: the endpoint function number in the EPC device
2823 + * @epf_bar: the struct epf_bar that contains the BAR information
2824 *
2825 * Invoke to configure the BAR of the endpoint device.
2826 */
2827 -int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
2828 - dma_addr_t bar_phys, size_t size, int flags)
2829 +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
2830 + struct pci_epf_bar *epf_bar)
2831 {
2832 int ret;
2833 unsigned long irq_flags;
2834 + int flags = epf_bar->flags;
2835
2836 - if (IS_ERR(epc))
2837 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
2838 + (epf_bar->barno == BAR_5 &&
2839 + flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
2840 + (flags & PCI_BASE_ADDRESS_SPACE_IO &&
2841 + flags & PCI_BASE_ADDRESS_IO_MASK) ||
2842 + (upper_32_bits(epf_bar->size) &&
2843 + !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
2844 return -EINVAL;
2845
2846 if (!epc->ops->set_bar)
2847 return 0;
2848
2849 spin_lock_irqsave(&epc->lock, irq_flags);
2850 - ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
2851 + ret = epc->ops->set_bar(epc, func_no, epf_bar);
2852 spin_unlock_irqrestore(&epc->lock, irq_flags);
2853
2854 return ret;
2855 @@ -332,6 +395,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
2856 /**
2857 * pci_epc_write_header() - write standard configuration header
2858 * @epc: the EPC device to which the configuration header should be written
2859 + * @func_no: the endpoint function number in the EPC device
2860 * @header: standard configuration header fields
2861 *
2862 * Invoke to write the configuration header to the endpoint controller. Every
2863 @@ -339,19 +403,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
2864 * configuration header would be written. The callback function should write
2865 * the header fields to this dedicated location.
2866 */
2867 -int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
2868 +int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
2869 + struct pci_epf_header *header)
2870 {
2871 int ret;
2872 unsigned long flags;
2873
2874 - if (IS_ERR(epc))
2875 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
2876 return -EINVAL;
2877
2878 if (!epc->ops->write_header)
2879 return 0;
2880
2881 spin_lock_irqsave(&epc->lock, flags);
2882 - ret = epc->ops->write_header(epc, header);
2883 + ret = epc->ops->write_header(epc, func_no, header);
2884 spin_unlock_irqrestore(&epc->lock, flags);
2885
2886 return ret;
2887 --- a/drivers/pci/endpoint/pci-epc-mem.c
2888 +++ b/drivers/pci/endpoint/pci-epc-mem.c
2889 @@ -1,20 +1,9 @@
2890 +// SPDX-License-Identifier: GPL-2.0
2891 /**
2892 * PCI Endpoint *Controller* Address Space Management
2893 *
2894 * Copyright (C) 2017 Texas Instruments
2895 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2896 - *
2897 - * This program is free software: you can redistribute it and/or modify
2898 - * it under the terms of the GNU General Public License version 2 of
2899 - * the License as published by the Free Software Foundation.
2900 - *
2901 - * This program is distributed in the hope that it will be useful,
2902 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2903 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2904 - * GNU General Public License for more details.
2905 - *
2906 - * You should have received a copy of the GNU General Public License
2907 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2908 */
2909
2910 #include <linux/io.h>
2911 --- a/drivers/pci/endpoint/pci-epf-core.c
2912 +++ b/drivers/pci/endpoint/pci-epf-core.c
2913 @@ -1,20 +1,9 @@
2914 +// SPDX-License-Identifier: GPL-2.0
2915 /**
2916 * PCI Endpoint *Function* (EPF) library
2917 *
2918 * Copyright (C) 2017 Texas Instruments
2919 * Author: Kishon Vijay Abraham I <kishon@ti.com>
2920 - *
2921 - * This program is free software: you can redistribute it and/or modify
2922 - * it under the terms of the GNU General Public License version 2 of
2923 - * the License as published by the Free Software Foundation.
2924 - *
2925 - * This program is distributed in the hope that it will be useful,
2926 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2927 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2928 - * GNU General Public License for more details.
2929 - *
2930 - * You should have received a copy of the GNU General Public License
2931 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
2932 */
2933
2934 #include <linux/device.h>
2935 @@ -26,6 +15,8 @@
2936 #include <linux/pci-epf.h>
2937 #include <linux/pci-ep-cfs.h>
2938
2939 +static DEFINE_MUTEX(pci_epf_mutex);
2940 +
2941 static struct bus_type pci_epf_bus_type;
2942 static const struct device_type pci_epf_type;
2943
2944 @@ -109,6 +100,8 @@ void pci_epf_free_space(struct pci_epf *
2945
2946 epf->bar[bar].phys_addr = 0;
2947 epf->bar[bar].size = 0;
2948 + epf->bar[bar].barno = 0;
2949 + epf->bar[bar].flags = 0;
2950 }
2951 EXPORT_SYMBOL_GPL(pci_epf_free_space);
2952
2953 @@ -137,11 +130,27 @@ void *pci_epf_alloc_space(struct pci_epf
2954
2955 epf->bar[bar].phys_addr = phys_addr;
2956 epf->bar[bar].size = size;
2957 + epf->bar[bar].barno = bar;
2958 + epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
2959
2960 return space;
2961 }
2962 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
2963
2964 +static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
2965 +{
2966 + struct config_group *group, *tmp;
2967 +
2968 + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
2969 + return;
2970 +
2971 + mutex_lock(&pci_epf_mutex);
2972 + list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
2973 + pci_ep_cfs_remove_epf_group(group);
2974 + list_del(&driver->epf_group);
2975 + mutex_unlock(&pci_epf_mutex);
2976 +}
2977 +
2978 /**
2979 * pci_epf_unregister_driver() - unregister the PCI EPF driver
2980 * @driver: the PCI EPF driver that has to be unregistered
2981 @@ -150,11 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
2982 */
2983 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
2984 {
2985 - pci_ep_cfs_remove_epf_group(driver->group);
2986 + pci_epf_remove_cfs(driver);
2987 driver_unregister(&driver->driver);
2988 }
2989 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
2990
2991 +static int pci_epf_add_cfs(struct pci_epf_driver *driver)
2992 +{
2993 + struct config_group *group;
2994 + const struct pci_epf_device_id *id;
2995 +
2996 + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
2997 + return 0;
2998 +
2999 + INIT_LIST_HEAD(&driver->epf_group);
3000 +
3001 + id = driver->id_table;
3002 + while (id->name[0]) {
3003 + group = pci_ep_cfs_add_epf_group(id->name);
3004 + if (IS_ERR(group)) {
3005 + pci_epf_remove_cfs(driver);
3006 + return PTR_ERR(group);
3007 + }
3008 +
3009 + mutex_lock(&pci_epf_mutex);
3010 + list_add_tail(&group->group_entry, &driver->epf_group);
3011 + mutex_unlock(&pci_epf_mutex);
3012 + id++;
3013 + }
3014 +
3015 + return 0;
3016 +}
3017 +
3018 /**
3019 * __pci_epf_register_driver() - register a new PCI EPF driver
3020 * @driver: structure representing PCI EPF driver
3021 @@ -180,7 +216,7 @@ int __pci_epf_register_driver(struct pci
3022 if (ret)
3023 return ret;
3024
3025 - driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
3026 + pci_epf_add_cfs(driver);
3027
3028 return 0;
3029 }
3030 @@ -211,29 +247,17 @@ struct pci_epf *pci_epf_create(const cha
3031 int ret;
3032 struct pci_epf *epf;
3033 struct device *dev;
3034 - char *func_name;
3035 - char *buf;
3036 + int len;
3037
3038 epf = kzalloc(sizeof(*epf), GFP_KERNEL);
3039 - if (!epf) {
3040 - ret = -ENOMEM;
3041 - goto err_ret;
3042 - }
3043 + if (!epf)
3044 + return ERR_PTR(-ENOMEM);
3045
3046 - buf = kstrdup(name, GFP_KERNEL);
3047 - if (!buf) {
3048 - ret = -ENOMEM;
3049 - goto free_epf;
3050 - }
3051 -
3052 - func_name = buf;
3053 - buf = strchrnul(buf, '.');
3054 - *buf = '\0';
3055 -
3056 - epf->name = kstrdup(func_name, GFP_KERNEL);
3057 + len = strchrnul(name, '.') - name;
3058 + epf->name = kstrndup(name, len, GFP_KERNEL);
3059 if (!epf->name) {
3060 - ret = -ENOMEM;
3061 - goto free_func_name;
3062 + kfree(epf);
3063 + return ERR_PTR(-ENOMEM);
3064 }
3065
3066 dev = &epf->dev;
3067 @@ -242,28 +266,18 @@ struct pci_epf *pci_epf_create(const cha
3068 dev->type = &pci_epf_type;
3069
3070 ret = dev_set_name(dev, "%s", name);
3071 - if (ret)
3072 - goto put_dev;
3073 + if (ret) {
3074 + put_device(dev);
3075 + return ERR_PTR(ret);
3076 + }
3077
3078 ret = device_add(dev);
3079 - if (ret)
3080 - goto put_dev;
3081 + if (ret) {
3082 + put_device(dev);
3083 + return ERR_PTR(ret);
3084 + }
3085
3086 - kfree(func_name);
3087 return epf;
3088 -
3089 -put_dev:
3090 - put_device(dev);
3091 - kfree(epf->name);
3092 -
3093 -free_func_name:
3094 - kfree(func_name);
3095 -
3096 -free_epf:
3097 - kfree(epf);
3098 -
3099 -err_ret:
3100 - return ERR_PTR(ret);
3101 }
3102 EXPORT_SYMBOL_GPL(pci_epf_create);
3103
3104 --- a/drivers/pci/host/pci-host-common.c
3105 +++ b/drivers/pci/host/pci-host-common.c
3106 @@ -113,9 +113,7 @@ err_out:
3107 int pci_host_common_probe(struct platform_device *pdev,
3108 struct pci_ecam_ops *ops)
3109 {
3110 - const char *type;
3111 struct device *dev = &pdev->dev;
3112 - struct device_node *np = dev->of_node;
3113 struct pci_bus *bus, *child;
3114 struct pci_host_bridge *bridge;
3115 struct pci_config_window *cfg;
3116 @@ -126,12 +124,6 @@ int pci_host_common_probe(struct platfor
3117 if (!bridge)
3118 return -ENOMEM;
3119
3120 - type = of_get_property(np, "device_type", NULL);
3121 - if (!type || strcmp(type, "pci")) {
3122 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3123 - return -EINVAL;
3124 - }
3125 -
3126 of_pci_check_probe_only();
3127
3128 /* Parse and map our Configuration Space windows */
3129 --- a/drivers/pci/host/pcie-xilinx-nwl.c
3130 +++ b/drivers/pci/host/pcie-xilinx-nwl.c
3131 @@ -778,16 +778,7 @@ static int nwl_pcie_parse_dt(struct nwl_
3132 struct platform_device *pdev)
3133 {
3134 struct device *dev = pcie->dev;
3135 - struct device_node *node = dev->of_node;
3136 struct resource *res;
3137 - const char *type;
3138 -
3139 - /* Check for device type */
3140 - type = of_get_property(node, "device_type", NULL);
3141 - if (!type || strcmp(type, "pci")) {
3142 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3143 - return -EINVAL;
3144 - }
3145
3146 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
3147 pcie->breg_base = devm_ioremap_resource(dev, res);
3148 --- a/drivers/pci/host/pcie-xilinx.c
3149 +++ b/drivers/pci/host/pcie-xilinx.c
3150 @@ -584,15 +584,8 @@ static int xilinx_pcie_parse_dt(struct x
3151 struct device *dev = port->dev;
3152 struct device_node *node = dev->of_node;
3153 struct resource regs;
3154 - const char *type;
3155 int err;
3156
3157 - type = of_get_property(node, "device_type", NULL);
3158 - if (!type || strcmp(type, "pci")) {
3159 - dev_err(dev, "invalid \"device_type\" %s\n", type);
3160 - return -EINVAL;
3161 - }
3162 -
3163 err = of_address_to_resource(node, 0, &regs);
3164 if (err) {
3165 dev_err(dev, "missing \"reg\" property\n");
3166 --- /dev/null
3167 +++ b/drivers/pci/mobiveil/Kconfig
3168 @@ -0,0 +1,50 @@
3169 +# SPDX-License-Identifier: GPL-2.0
3170 +
3171 +menu "Mobiveil PCIe Core Support"
3172 + depends on PCI
3173 +
3174 +config PCIE_MOBIVEIL
3175 + bool
3176 +
3177 +config PCIE_MOBIVEIL_HOST
3178 + bool
3179 + depends on PCI_MSI_IRQ_DOMAIN
3180 + select PCIE_MOBIVEIL
3181 +
3182 +config PCIE_MOBIVEIL_EP
3183 + bool
3184 + depends on PCI_ENDPOINT
3185 + select PCIE_MOBIVEIL
3186 +
3187 +config PCIE_MOBIVEIL_PLAT
3188 + bool "Mobiveil AXI PCIe controller"
3189 + depends on ARCH_ZYNQMP || COMPILE_TEST
3190 + depends on OF
3191 + select PCIE_MOBIVEIL_HOST
3192 + help
3193 + Say Y here if you want to enable support for the Mobiveil AXI PCIe
3194 + Soft IP. It has up to 8 outbound and inbound windows
3195 + for address translation and it is a PCIe Gen4 IP.
3196 +
3197 +config PCI_LAYERSCAPE_GEN4
3198 + bool "Freescale Layerscape PCIe Gen4 controller in RC mode"
3199 + depends on PCI
3200 + depends on OF && (ARM64 || ARCH_LAYERSCAPE)
3201 + depends on PCI_MSI_IRQ_DOMAIN
3202 + select PCIE_MOBIVEIL_HOST
3203 + help
3204 + Say Y here if you want PCIe Gen4 controller support on
3205 + Layerscape SoCs. And the PCIe controller work in RC mode
3206 + by setting the RCW[HOST_AGT_PEX] to 0.
3207 +
3208 +config PCI_LAYERSCAPE_GEN4_EP
3209 + bool "Freescale Layerscpe PCIe Gen4 controller in EP mode"
3210 + depends on PCI
3211 + depends on OF && (ARM64 || ARCH_LAYERSCAPE)
3212 + depends on PCI_ENDPOINT
3213 + select PCIE_MOBIVEIL_EP
3214 + help
3215 + Say Y here if you want PCIe Gen4 controller support on
3216 + Layerscape SoCs. And the PCIe controller work in EP mode
3217 + by setting the RCW[HOST_AGT_PEX] to 1.
3218 +endmenu
3219 --- /dev/null
3220 +++ b/drivers/pci/mobiveil/Makefile
3221 @@ -0,0 +1,7 @@
3222 +# SPDX-License-Identifier: GPL-2.0
3223 +obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
3224 +obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
3225 +obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
3226 +obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
3227 +obj-$(CONFIG_PCI_LAYERSCAPE_GEN4) += pci-layerscape-gen4.o
3228 +obj-$(CONFIG_PCI_LAYERSCAPE_GEN4_EP) += pci-layerscape-gen4-ep.o
3229 --- /dev/null
3230 +++ b/drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
3231 @@ -0,0 +1,178 @@
3232 +// SPDX-License-Identifier: GPL-2.0
3233 +/*
3234 + * PCIe controller EP driver for Freescale Layerscape SoCs
3235 + *
3236 + * Copyright (C) 2018 NXP Semiconductor.
3237 + *
3238 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
3239 + */
3240 +
3241 +#include <linux/kernel.h>
3242 +#include <linux/init.h>
3243 +#include <linux/of_pci.h>
3244 +#include <linux/of_platform.h>
3245 +#include <linux/of_address.h>
3246 +#include <linux/pci.h>
3247 +#include <linux/platform_device.h>
3248 +#include <linux/resource.h>
3249 +
3250 +#include "pcie-mobiveil.h"
3251 +
3252 +struct ls_pcie_g4_ep {
3253 + struct mobiveil_pcie *mv_pci;
3254 +};
3255 +
3256 +#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
3257 +
3258 +static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
3259 + { .compatible = "fsl,lx2160a-pcie-ep",},
3260 + { },
3261 +};
3262 +
3263 +static void ls_pcie_g4_get_bar_num(struct mobiveil_pcie_ep *ep)
3264 +{
3265 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3266 + u32 type, reg;
3267 + u8 bar;
3268 +
3269 + ep->bar_num = BAR_5 + 1;
3270 +
3271 + for (bar = BAR_0; bar <= BAR_5; bar++) {
3272 + reg = PCI_BASE_ADDRESS_0 + (4 * bar);
3273 + type = csr_readl(mv_pci, reg) &
3274 + PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3275 + if (type & PCI_BASE_ADDRESS_MEM_TYPE_64)
3276 + ep->bar_num--;
3277 + }
3278 +}
3279 +
3280 +static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
3281 +{
3282 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3283 + struct pci_epc *epc = ep->epc;
3284 + enum pci_barno bar;
3285 + int win_idx, val;
3286 +
3287 + /*
3288 + * Errata: unsupported request error on inbound posted write
3289 + * transaction, PCIe controller reports advisory error instead
3290 + * of uncorrectable error message to RC.
3291 + * workaround: set the bit20(unsupported_request_Error_severity) with
3292 + * value 1 in uncorrectable_Error_Severity_Register, make the
3293 + * unsupported request error generate the fatal error.
3294 + */
3295 + val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
3296 + val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
3297 + csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
3298 +
3299 + ls_pcie_g4_get_bar_num(ep);
3300 +
3301 + for (bar = BAR_0; bar < (ep->bar_num * ep->pf_num); bar++)
3302 + mobiveil_pcie_ep_reset_bar(mv_pci, bar);
3303 +
3304 + for (win_idx = 0; win_idx < MAX_IATU_OUT; win_idx++)
3305 + mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
3306 +
3307 + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
3308 + epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
3309 +}
3310 +
3311 +static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
3312 + enum pci_epc_irq_type type,
3313 + u16 interrupt_num)
3314 +{
3315 + struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
3316 +
3317 + switch (type) {
3318 + case PCI_EPC_IRQ_LEGACY:
3319 + return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
3320 + case PCI_EPC_IRQ_MSI:
3321 + return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
3322 + interrupt_num);
3323 + case PCI_EPC_IRQ_MSIX:
3324 + return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
3325 + interrupt_num);
3326 + default:
3327 + dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
3328 + }
3329 +
3330 + return 0;
3331 +}
3332 +
3333 +static struct mobiveil_pcie_ep_ops pcie_ep_ops = {
3334 + .ep_init = ls_pcie_g4_ep_init,
3335 + .raise_irq = ls_pcie_g4_ep_raise_irq,
3336 +};
3337 +
3338 +static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_pcie_g4_ep,
3339 + struct platform_device *pdev)
3340 +{
3341 + struct mobiveil_pcie *mv_pci = ls_pcie_g4_ep->mv_pci;
3342 + struct device *dev = &pdev->dev;
3343 + struct mobiveil_pcie_ep *ep;
3344 + struct resource *res;
3345 + int ret;
3346 + struct device_node *np = dev->of_node;
3347 +
3348 + ep = &mv_pci->ep;
3349 + ep->ops = &pcie_ep_ops;
3350 +
3351 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
3352 + if (!res)
3353 + return -EINVAL;
3354 +
3355 + ep->phys_base = res->start;
3356 + ep->addr_size = resource_size(res);
3357 +
3358 + ret = of_property_read_u32(np, "max-functions", &ep->pf_num);
3359 + if (ret < 0)
3360 + ep->pf_num = 1;
3361 +
3362 + ret = mobiveil_pcie_ep_init(ep);
3363 + if (ret) {
3364 + dev_err(dev, "failed to initialize endpoint\n");
3365 + return ret;
3366 + }
3367 +
3368 + return 0;
3369 +}
3370 +
3371 +static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
3372 +{
3373 + struct device *dev = &pdev->dev;
3374 + struct mobiveil_pcie *mv_pci;
3375 + struct ls_pcie_g4_ep *ls_pcie_g4_ep;
3376 + struct resource *res;
3377 + int ret;
3378 +
3379 + ls_pcie_g4_ep = devm_kzalloc(dev, sizeof(*ls_pcie_g4_ep), GFP_KERNEL);
3380 + if (!ls_pcie_g4_ep)
3381 + return -ENOMEM;
3382 +
3383 + mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
3384 + if (!mv_pci)
3385 + return -ENOMEM;
3386 +
3387 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
3388 + mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
3389 + if (IS_ERR(mv_pci->csr_axi_slave_base))
3390 + return PTR_ERR(mv_pci->csr_axi_slave_base);
3391 +
3392 + mv_pci->pdev = pdev;
3393 + ls_pcie_g4_ep->mv_pci = mv_pci;
3394 +
3395 + platform_set_drvdata(pdev, ls_pcie_g4_ep);
3396 +
3397 + ret = ls_pcie_gen4_add_pcie_ep(ls_pcie_g4_ep, pdev);
3398 +
3399 + return ret;
3400 +}
3401 +
3402 +static struct platform_driver ls_pcie_g4_ep_driver = {
3403 + .driver = {
3404 + .name = "layerscape-pcie-gen4-ep",
3405 + .of_match_table = ls_pcie_g4_ep_of_match,
3406 + .suppress_bind_attrs = true,
3407 + },
3408 +};
3409 +builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
3410 --- /dev/null
3411 +++ b/drivers/pci/mobiveil/pci-layerscape-gen4.c
3412 @@ -0,0 +1,292 @@
3413 +// SPDX-License-Identifier: GPL-2.0
3414 +/*
3415 + * PCIe host controller driver for NXP Layerscape SoCs
3416 + *
3417 + * Copyright 2018 NXP
3418 + *
3419 + * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
3420 + */
3421 +
3422 +#include <linux/kernel.h>
3423 +#include <linux/interrupt.h>
3424 +#include <linux/init.h>
3425 +#include <linux/of_pci.h>
3426 +#include <linux/of_platform.h>
3427 +#include <linux/of_irq.h>
3428 +#include <linux/of_address.h>
3429 +#include <linux/pci.h>
3430 +#include <linux/platform_device.h>
3431 +#include <linux/resource.h>
3432 +#include <linux/mfd/syscon.h>
3433 +#include <linux/regmap.h>
3434 +
3435 +#include "pcie-mobiveil.h"
3436 +
3437 +/* LUT and PF control registers */
3438 +#define PCIE_LUT_OFF (0x80000)
3439 +#define PCIE_LUT_GCR (0x28)
3440 +#define PCIE_LUT_GCR_RRE (0)
3441 +
3442 +#define PCIE_PF_OFF (0xc0000)
3443 +#define PCIE_PF_INT_STAT (0x18)
3444 +#define PF_INT_STAT_PABRST (31)
3445 +
3446 +#define PCIE_PF_DBG (0x7fc)
3447 +#define PF_DBG_LTSSM_MASK (0x3f)
3448 +#define PF_DBG_WE (31)
3449 +#define PF_DBG_PABR (27)
3450 +
3451 +#define LS_PCIE_G4_LTSSM_L0 0x2d /* L0 state */
3452 +
3453 +#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
3454 +
3455 +struct ls_pcie_g4 {
3456 + struct mobiveil_pcie *pci;
3457 + struct delayed_work dwork;
3458 + int irq;
3459 +};
3460 +
3461 +static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
3462 +{
3463 + return ioread32(pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
3464 +}
3465 +
3466 +static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
3467 + u32 off, u32 val)
3468 +{
3469 + iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
3470 +}
3471 +
3472 +static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
3473 +{
3474 + return ioread32(pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
3475 +}
3476 +
3477 +static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
3478 + u32 off, u32 val)
3479 +{
3480 + iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
3481 +}
3482 +
3483 +static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
3484 +{
3485 + struct mobiveil_pcie *mv_pci = pcie->pci;
3486 + u32 header_type;
3487 +
3488 + header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
3489 + header_type &= 0x7f;
3490 +
3491 + return header_type == PCI_HEADER_TYPE_BRIDGE;
3492 +}
3493 +
3494 +static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
3495 +{
3496 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
3497 + u32 state;
3498 +
3499 + state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3500 + state = state & PF_DBG_LTSSM_MASK;
3501 +
3502 + if (state == LS_PCIE_G4_LTSSM_L0)
3503 + return 1;
3504 +
3505 + return 0;
3506 +}
3507 +
3508 +static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
3509 +{
3510 + struct mobiveil_pcie *mv_pci = pcie->pci;
3511 + u32 val, act_stat;
3512 + int to = 100;
3513 +
3514 + /* Poll for pab_csb_reset to set and PAB activity to clear */
3515 + do {
3516 + usleep_range(10, 15);
3517 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
3518 + act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
3519 + } while (((val & 1 << PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
3520 + if (to < 0) {
3521 + dev_err(&mv_pci->pdev->dev, "poll PABRST&PABACT timeout\n");
3522 + return;
3523 + }
3524 +
3525 + /* clear PEX_RESET bit in PEX_PF0_DBG register */
3526 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3527 + val |= 1 << PF_DBG_WE;
3528 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3529 +
3530 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3531 + val |= 1 << PF_DBG_PABR;
3532 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3533 +
3534 + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
3535 + val &= ~(1 << PF_DBG_WE);
3536 + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
3537 +
3538 + mobiveil_host_init(mv_pci, true);
3539 +
3540 + to = 100;
3541 + while (!ls_pcie_g4_link_up(mv_pci) && to--)
3542 + usleep_range(200, 250);
3543 + if (to < 0)
3544 + dev_err(&mv_pci->pdev->dev, "PCIe link trainning timeout\n");
3545 +}
3546 +
3547 +static irqreturn_t ls_pcie_g4_handler(int irq, void *dev_id)
3548 +{
3549 + struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
3550 + struct mobiveil_pcie *mv_pci = pcie->pci;
3551 + u32 val;
3552 +
3553 + val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
3554 + if (!val)
3555 + return IRQ_NONE;
3556 +
3557 + if (val & PAB_INTP_RESET)
3558 + schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
3559 +
3560 + csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
3561 +
3562 + return IRQ_HANDLED;
3563 +}
3564 +
3565 +static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
3566 +{
3567 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
3568 + u32 val;
3569 + int ret;
3570 +
3571 + pcie->irq = platform_get_irq_byname(mv_pci->pdev, "intr");
3572 + if (pcie->irq < 0) {
3573 + dev_err(&mv_pci->pdev->dev, "Can't get 'intr' irq.\n");
3574 + return pcie->irq;
3575 + }
3576 + ret = devm_request_irq(&mv_pci->pdev->dev, pcie->irq,
3577 + ls_pcie_g4_handler, IRQF_SHARED,
3578 + mv_pci->pdev->name, pcie);
3579 + if (ret) {
3580 + dev_err(&mv_pci->pdev->dev, "Can't register PCIe IRQ.\n");
3581 + return ret;
3582 + }
3583 +
3584 + /* Enable interrupts */
3585 + val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
3586 + PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
3587 + csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
3588 +
3589 + return 0;
3590 +}
3591 +
3592 +static void ls_pcie_g4_reset(struct work_struct *work)
3593 +{
3594 + struct delayed_work *dwork = container_of(work, struct delayed_work,
3595 + work);
3596 + struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
3597 + struct mobiveil_pcie *mv_pci = pcie->pci;
3598 + u16 ctrl;
3599 +
3600 + ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
3601 + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3602 + csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
3603 + ls_pcie_g4_reinit_hw(pcie);
3604 +}
3605 +
3606 +static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
3607 + int where, int size, u32 *val)
3608 +{
3609 + struct mobiveil_pcie *pci = bus->sysdata;
3610 + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
3611 + int ret;
3612 +
3613 + if (where == PCI_VENDOR_ID)
3614 + ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
3615 + 0 << PCIE_LUT_GCR_RRE);
3616 +
3617 + ret = pci_generic_config_read(bus, devfn, where, size, val);
3618 +
3619 + if (where == PCI_VENDOR_ID)
3620 + ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
3621 + 1 << PCIE_LUT_GCR_RRE);
3622 +
3623 + return ret;
3624 +}
3625 +
3626 +static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
3627 + .interrupt_init = ls_pcie_g4_interrupt_init,
3628 + .read_other_conf = ls_pcie_g4_read_other_conf,
3629 +};
3630 +
3631 +static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
3632 + .link_up = ls_pcie_g4_link_up,
3633 +};
3634 +
3635 +static void workaround_tkt381274(struct ls_pcie_g4 *pcie)
3636 +{
3637 + struct mobiveil_pcie *mv_pci = pcie->pci;
3638 + u32 val;
3639 +
3640 + /* Set ACK latency timeout */
3641 + val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
3642 + val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
3643 + val |= (4 << ACK_LAT_TO_VAL_SHIFT);
3644 + csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
3645 +}
3646 +
3647 +static int __init ls_pcie_g4_probe(struct platform_device *pdev)
3648 +{
3649 + struct device *dev = &pdev->dev;
3650 + struct mobiveil_pcie *mv_pci;
3651 + struct ls_pcie_g4 *pcie;
3652 + struct device_node *np = dev->of_node;
3653 + int ret;
3654 +
3655 + if (!of_parse_phandle(np, "msi-parent", 0)) {
3656 + dev_err(dev, "failed to find msi-parent\n");
3657 + return -EINVAL;
3658 + }
3659 +
3660 + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
3661 + if (!pcie)
3662 + return -ENOMEM;
3663 +
3664 + mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
3665 + if (!mv_pci)
3666 + return -ENOMEM;
3667 +
3668 + mv_pci->pdev = pdev;
3669 + mv_pci->ops = &ls_pcie_g4_pab_ops;
3670 + mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
3671 + pcie->pci = mv_pci;
3672 +
3673 + platform_set_drvdata(pdev, pcie);
3674 +
3675 + INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
3676 +
3677 + ret = mobiveil_pcie_host_probe(mv_pci);
3678 + if (ret) {
3679 + dev_err(dev, "fail to probe!\n");
3680 + return ret;
3681 + }
3682 +
3683 + if (!ls_pcie_g4_is_bridge(pcie))
3684 + return -ENODEV;
3685 +
3686 + workaround_tkt381274(pcie);
3687 +
3688 + return 0;
3689 +}
3690 +
3691 +static const struct of_device_id ls_pcie_g4_of_match[] = {
3692 + { .compatible = "fsl,lx2160a-pcie", },
3693 + { },
3694 +};
3695 +
3696 +static struct platform_driver ls_pcie_g4_driver = {
3697 + .driver = {
3698 + .name = "layerscape-pcie-gen4",
3699 + .of_match_table = ls_pcie_g4_of_match,
3700 + .suppress_bind_attrs = true,
3701 + },
3702 +};
3703 +
3704 +builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
3705 --- /dev/null
3706 +++ b/drivers/pci/mobiveil/pcie-mobiveil-ep.c
3707 @@ -0,0 +1,512 @@
3708 +// SPDX-License-Identifier: GPL-2.0
3709 +/**
3710 + * Mobiveil PCIe Endpoint controller driver
3711 + *
3712 + * Copyright (C) 2018 NXP Semiconductor.
3713 + * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
3714 + */
3715 +
3716 +#include <linux/of.h>
3717 +#include <linux/pci-epc.h>
3718 +#include <linux/pci-epf.h>
3719 +#include <linux/platform_device.h>
3720 +#include "pcie-mobiveil.h"
3721 +
3722 +void mobiveil_pcie_ep_linkup(struct mobiveil_pcie_ep *ep)
3723 +{
3724 + struct pci_epc *epc = ep->epc;
3725 +
3726 + pci_epc_linkup(epc);
3727 +}
3728 +
3729 +static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
3730 + enum pci_barno bar)
3731 +{
3732 + csr_writel(pcie, bar, GPEX_BAR_SELECT);
3733 + csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
3734 + csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
3735 +}
3736 +
3737 +void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
3738 + enum pci_barno bar)
3739 +{
3740 + __mobiveil_pcie_ep_reset_bar(pcie, bar);
3741 +}
3742 +
3743 +static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
3744 + u8 cap_ptr, u8 cap)
3745 +{
3746 + u8 cap_id, next_cap_ptr;
3747 + u16 reg;
3748 +
3749 + reg = csr_readw(pcie, cap_ptr);
3750 + next_cap_ptr = (reg & 0xff00) >> 8;
3751 + cap_id = (reg & 0x00ff);
3752 +
3753 + if (cap_id == cap)
3754 + return cap_ptr;
3755 +
3756 + if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
3757 + return 0;
3758 +
3759 + return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
3760 +}
3761 +
3762 +static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie *pcie,
3763 + u8 cap)
3764 +{
3765 + u8 next_cap_ptr;
3766 + u16 reg;
3767 +
3768 + reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
3769 + next_cap_ptr = (reg & 0x00ff);
3770 +
3771 + if (!next_cap_ptr)
3772 + return 0;
3773 +
3774 + return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
3775 +}
3776 +
3777 +static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
3778 + struct pci_epf_header *hdr)
3779 +{
3780 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3781 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3782 +
3783 + csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
3784 + csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
3785 + csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
3786 + csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
3787 + csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
3788 + PCI_CLASS_DEVICE);
3789 + csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
3790 + csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
3791 + csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
3792 + csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
3793 +
3794 + return 0;
3795 +}
3796 +
3797 +static int mobiveil_pcie_ep_inbound_atu(struct mobiveil_pcie_ep *ep,
3798 + u8 func_no, enum pci_barno bar,
3799 + dma_addr_t cpu_addr)
3800 +{
3801 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3802 +
3803 + program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
3804 +
3805 + return 0;
3806 +}
3807 +
3808 +static int mobiveil_pcie_ep_outbound_atu(struct mobiveil_pcie_ep *ep,
3809 + phys_addr_t phys_addr,
3810 + u64 pci_addr, u8 func_no,
3811 + size_t size)
3812 +{
3813 + int ret;
3814 + u32 free_win;
3815 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3816 +
3817 + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
3818 + if (free_win >= ep->num_ob_windows) {
3819 + dev_err(&pcie->pdev->dev, "No free outbound window\n");
3820 + return -EINVAL;
3821 + }
3822 +
3823 + ret = program_ob_windows_ep(pcie, free_win, MEM_WINDOW_TYPE,
3824 + phys_addr, pci_addr, func_no, size);
3825 + if (ret < 0) {
3826 + dev_err(&pcie->pdev->dev, "Failed to program IB window\n");
3827 + return ret;
3828 + }
3829 +
3830 + set_bit(free_win, ep->ob_window_map);
3831 + ep->outbound_addr[free_win] = phys_addr;
3832 +
3833 + return 0;
3834 +}
3835 +
3836 +static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
3837 + struct pci_epf_bar *epf_bar)
3838 +{
3839 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3840 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3841 + enum pci_barno bar = epf_bar->barno;
3842 +
3843 + if (bar < ep->bar_num) {
3844 + __mobiveil_pcie_ep_reset_bar(pcie,
3845 + func_no * ep->bar_num + bar);
3846 +
3847 + mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
3848 + }
3849 +}
3850 +
3851 +static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
3852 + struct pci_epf_bar *epf_bar)
3853 +{
3854 + int ret;
3855 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3856 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3857 + enum pci_barno bar = epf_bar->barno;
3858 + size_t size = epf_bar->size;
3859 +
3860 + if (bar < ep->bar_num) {
3861 + ret = mobiveil_pcie_ep_inbound_atu(ep, func_no, bar,
3862 + epf_bar->phys_addr);
3863 + if (ret)
3864 + return ret;
3865 +
3866 + csr_writel(pcie, func_no * ep->bar_num + bar,
3867 + GPEX_BAR_SELECT);
3868 + csr_writel(pcie, lower_32_bits(~(size - 1)),
3869 + GPEX_BAR_SIZE_LDW);
3870 + csr_writel(pcie, upper_32_bits(~(size - 1)),
3871 + GPEX_BAR_SIZE_UDW);
3872 + }
3873 +
3874 + return 0;
3875 +}
3876 +
3877 +static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
3878 + phys_addr_t addr,
3879 + u32 *atu_index)
3880 +{
3881 + u32 index;
3882 +
3883 + for (index = 0; index < ep->num_ob_windows; index++) {
3884 + if (ep->outbound_addr[index] != addr)
3885 + continue;
3886 + *atu_index = index;
3887 + return 0;
3888 + }
3889 +
3890 + return -EINVAL;
3891 +}
3892 +
3893 +static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
3894 + phys_addr_t addr)
3895 +{
3896 + int ret;
3897 + u32 atu_index;
3898 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3899 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3900 +
3901 + ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
3902 + if (ret < 0)
3903 + return;
3904 +
3905 + mobiveil_pcie_disable_ob_win(pcie, atu_index);
3906 + clear_bit(atu_index, ep->ob_window_map);
3907 +}
3908 +
3909 +static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
3910 + phys_addr_t addr,
3911 + u64 pci_addr, size_t size)
3912 +{
3913 + int ret;
3914 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3915 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3916 +
3917 + ret = mobiveil_pcie_ep_outbound_atu(ep, addr, pci_addr, func_no, size);
3918 + if (ret) {
3919 + dev_err(&pcie->pdev->dev, "Failed to enable address\n");
3920 + return ret;
3921 + }
3922 +
3923 + return 0;
3924 +}
3925 +
3926 +static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
3927 +{
3928 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3929 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3930 + u32 val, reg;
3931 +
3932 + if (!ep->msi_cap)
3933 + return -EINVAL;
3934 +
3935 + reg = ep->msi_cap + PCI_MSI_FLAGS;
3936 + val = csr_readw(pcie, reg);
3937 + if (!(val & PCI_MSI_FLAGS_ENABLE))
3938 + return -EINVAL;
3939 +
3940 + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
3941 +
3942 + return val;
3943 +}
3944 +
3945 +static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
3946 + u8 func_no, u8 interrupts)
3947 +{
3948 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3949 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3950 + u32 val, reg;
3951 +
3952 + if (!ep->msi_cap)
3953 + return -EINVAL;
3954 +
3955 + reg = ep->msi_cap + PCI_MSI_FLAGS;
3956 + val = csr_readw(pcie, reg);
3957 + val &= ~PCI_MSI_FLAGS_QMASK;
3958 + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
3959 + csr_writew(pcie, val, reg);
3960 +
3961 + return 0;
3962 +}
3963 +
3964 +static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
3965 +{
3966 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3967 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3968 + u32 val, reg;
3969 +
3970 + if (!ep->msix_cap)
3971 + return -EINVAL;
3972 +
3973 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
3974 + val = csr_readw(pcie, reg);
3975 + if (!(val & PCI_MSIX_FLAGS_ENABLE))
3976 + return -EINVAL;
3977 +
3978 + val &= PCI_MSIX_FLAGS_QSIZE;
3979 +
3980 + return val;
3981 +}
3982 +
3983 +static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
3984 + u16 interrupts)
3985 +{
3986 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
3987 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
3988 + u32 val, reg;
3989 +
3990 + if (!ep->msix_cap)
3991 + return -EINVAL;
3992 +
3993 + reg = ep->msix_cap + PCI_MSIX_FLAGS;
3994 + val = csr_readw(pcie, reg);
3995 + val &= ~PCI_MSIX_FLAGS_QSIZE;
3996 + val |= interrupts;
3997 + csr_writew(pcie, val, reg);
3998 +
3999 + return 0;
4000 +}
4001 +
4002 +static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
4003 + enum pci_epc_irq_type type,
4004 + u16 interrupt_num)
4005 +{
4006 + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
4007 +
4008 + if (!ep->ops->raise_irq)
4009 + return -EINVAL;
4010 +
4011 + return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
4012 +}
4013 +
4014 +static const struct pci_epc_ops epc_ops = {
4015 + .write_header = mobiveil_pcie_ep_write_header,
4016 + .set_bar = mobiveil_pcie_ep_set_bar,
4017 + .clear_bar = mobiveil_pcie_ep_clear_bar,
4018 + .map_addr = mobiveil_pcie_ep_map_addr,
4019 + .unmap_addr = mobiveil_pcie_ep_unmap_addr,
4020 + .set_msi = mobiveil_pcie_ep_set_msi,
4021 + .get_msi = mobiveil_pcie_ep_get_msi,
4022 + .set_msix = mobiveil_pcie_ep_set_msix,
4023 + .get_msix = mobiveil_pcie_ep_get_msix,
4024 + .raise_irq = mobiveil_pcie_ep_raise_irq,
4025 +};
4026 +
4027 +int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
4028 +{
4029 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4030 +
4031 + dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
4032 +
4033 + return -EINVAL;
4034 +}
4035 +
4036 +int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
4037 + u8 interrupt_num)
4038 +{
4039 + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
4040 + struct pci_epc *epc = ep->epc;
4041 + u16 msg_ctrl, msg_data;
4042 + u32 msg_addr_lower, msg_addr_upper, reg;
4043 + u64 msg_addr;
4044 + u32 func_num;
4045 + bool has_upper;
4046 + int ret;
4047 +
4048 + if (!ep->msi_cap)
4049 + return -EINVAL;
4050 +
4051 + func_num = csr_readl(pcie, PAB_CTRL);
4052 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4053 + func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
4054 + csr_writel(pcie, func_num, PAB_CTRL);
4055 +
4056 + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
4057 + reg = ep->msi_cap + PCI_MSI_FLAGS;
4058 + msg_ctrl = csr_readw(pcie, reg);
4059 + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
4060 + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
4061 + msg_addr_lower = csr_readl(pcie, reg);
4062 + if (has_upper) {
4063 + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
4064 + msg_addr_upper = csr_readl(pcie, reg);
4065 + reg = ep->msi_cap + PCI_MSI_DATA_64;
4066 + msg_data = csr_readw(pcie, reg);
4067 + } else {
4068 + msg_addr_upper = 0;
4069 + reg = ep->msi_cap + PCI_MSI_DATA_32;
4070 + msg_data = csr_readw(pcie, reg);
4071 + }
4072 + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
4073 +
4074 + func_num = csr_readl(pcie, PAB_CTRL);
4075 + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
4076 + csr_writel(pcie, func_num, PAB_CTRL);
4077 +
4078 + ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
4079 + msg_addr, epc->mem->page_size);
4080 + if (ret)
4081 + return ret;
4082 +
4083 + writel(msg_data | (interrupt_num - 1), ep->msi_mem);
4084 +
4085 + mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
4086 +
4087 + return 0;
4088 +}
4089 +