++ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
++}
++
++static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_header *hdr)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+@@ -114,24 +146,29 @@ static int dw_pcie_ep_outbound_atu(struc
+ return 0;
+ }
+
+-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
++static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+- dw_pcie_ep_reset_bar(pci, bar);
++ __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ clear_bit(atu_index, ep->ib_window_map);
+ }
+
+-static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags)
++static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++ size_t size = epf_bar->size;
++ int flags = epf_bar->flags;
+ enum dw_pcie_as_type as_type;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+@@ -140,13 +177,20 @@ static int dw_pcie_ep_set_bar(struct pci
+ else
+ as_type = DW_PCIE_AS_IO;
+
+- ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
++ ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+- dw_pcie_writel_dbi2(pci, reg, size - 1);
++
++ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg, flags);
++
++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
++ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
++ dw_pcie_writel_dbi(pci, reg + 4, 0);
++ }
++
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+@@ -167,7 +211,8 @@ static int dw_pcie_find_index(struct dw_
+ return -EINVAL;
+ }
+
+-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
++static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr)
+ {
+ int ret;
+ u32 atu_index;
+@@ -182,8 +227,9 @@ static void dw_pcie_ep_unmap_addr(struct
+ clear_bit(atu_index, ep->ob_window_map);
+ }
+
+-static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+- u64 pci_addr, size_t size)
++static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr,
++ u64 pci_addr, size_t size)
+ {
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+@@ -198,45 +244,93 @@ static int dw_pcie_ep_map_addr(struct pc
+ return 0;
+ }
+
+-static int dw_pcie_ep_get_msi(struct pci_epc *epc)
++static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
++{
++ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ if (!(val & PCI_MSI_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
++
++ return val;
++}
++
++static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
++{
++ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ val &= ~PCI_MSI_FLAGS_QMASK;
++ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
++ dw_pcie_dbi_ro_wr_en(pci);
++ dw_pcie_writew_dbi(pci, reg, val);
++ dw_pcie_dbi_ro_wr_dis(pci);
++
++ return 0;
++}
++
++static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+ {
+- int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
+
+- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+- if (!(val & MSI_CAP_MSI_EN_MASK))
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+- val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
++ val &= PCI_MSIX_FLAGS_QSIZE;
++
+ return val;
+ }
+
+-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
++static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+ {
+- int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
+
+- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+- val &= ~MSI_CAP_MMC_MASK;
+- val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ val &= ~PCI_MSIX_FLAGS_QSIZE;
++ val |= interrupts;
+ dw_pcie_dbi_ro_wr_en(pci);
+- dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
++ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+ }
+
+-static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+- enum pci_epc_irq_type type, u8 interrupt_num)
++static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+- return ep->ops->raise_irq(ep, type, interrupt_num);
++ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+ }
+
+ static void dw_pcie_ep_stop(struct pci_epc *epc)
+@@ -269,15 +363,130 @@ static const struct pci_epc_ops epc_ops
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
++ .set_msix = dw_pcie_ep_set_msix,
++ .get_msix = dw_pcie_ep_get_msix,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+ };
+
++int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct device *dev = pci->dev;
++
++ dev_err(dev, "EP cannot trigger legacy IRQs\n");
++
++ return -EINVAL;
++}
++
++int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 msg_ctrl, msg_data;
++ u32 msg_addr_lower, msg_addr_upper, reg;
++ u64 msg_addr;
++ bool has_upper;
++ int ret;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
++ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
++ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
++ if (has_upper) {
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
++ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
++ reg = ep->msi_cap + PCI_MSI_DATA_64;
++ msg_data = dw_pcie_readw_dbi(pci, reg);
++ } else {
++ msg_addr_upper = 0;
++ reg = ep->msi_cap + PCI_MSI_DATA_32;
++ msg_data = dw_pcie_readw_dbi(pci, reg);
++ }
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
++ epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
++
++ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 tbl_offset, bir;
++ u32 bar_addr_upper, bar_addr_lower;
++ u32 msg_addr_upper, msg_addr_lower;
++ u32 reg, msg_data, vec_ctrl;
++ u64 tbl_addr, msg_addr, reg_u64;
++ void __iomem *msix_tbl;
++ int ret;
++
++ reg = ep->msix_cap + PCI_MSIX_TABLE;
++ tbl_offset = dw_pcie_readl_dbi(pci, reg);
++ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
++ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
++
++ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
++ bar_addr_upper = 0;
++ bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
++ reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
++ if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
++ bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
++
++ tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
++ tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
++ tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
++
++ msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
++ PCI_MSIX_ENTRY_SIZE);
++ if (!msix_tbl)
++ return -EINVAL;
++
++ msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
++ msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
++ vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
++
++ iounmap(msix_tbl);
++
++ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
++ return -EPERM;
++
++ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
++ epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data, ep->msi_mem);
++
++ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
+ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+ {
+ struct pci_epc *epc = ep->epc;
+
++ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
++ epc->mem->page_size);
++
+ pci_epc_mem_exit(epc);
+ }
+
+@@ -291,7 +500,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ struct device_node *np = dev->of_node;
+
+ if (!pci->dbi_base || !pci->dbi_base2) {
+- dev_err(dev, "dbi_base/deb_base2 is not populated\n");
++ dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
+ return -EINVAL;
+ }
+
+@@ -333,15 +542,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+- if (ep->ops->ep_init)
+- ep->ops->ep_init(ep);
+-
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
++ ep->epc = epc;
++ epc_set_drvdata(epc, ep);
++
++ if (ep->ops->ep_init)
++ ep->ops->ep_init(ep);
++
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+@@ -353,8 +565,16 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ return ret;
+ }
+
+- ep->epc = epc;
+- epc_set_drvdata(epc, ep);
++ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
++ epc->mem->page_size);
++ if (!ep->msi_mem) {
++ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
++ return -ENOMEM;
++ }
++ ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
++
++ ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
++
+ dw_pcie_setup(pci);
+
+ return 0;
+--- a/drivers/pci/dwc/pcie-designware-host.c
++++ b/drivers/pci/dwc/pcie-designware-host.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #include <linux/irqdomain.h>
+--- a/drivers/pci/dwc/pcie-designware-plat.c
++++ b/drivers/pci/dwc/pcie-designware-plat.c
+@@ -1,13 +1,10 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -15,19 +12,29 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/of_device.h>
+ #include <linux/of_gpio.h>
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/resource.h>
+ #include <linux/signal.h>
+ #include <linux/types.h>
++#include <linux/regmap.h>
+
+ #include "pcie-designware.h"
+
+ struct dw_plat_pcie {
+- struct dw_pcie *pci;
++ struct dw_pcie *pci;
++ struct regmap *regmap;
++ enum dw_pcie_device_mode mode;
++};
++
++struct dw_plat_pcie_of_data {
++ enum dw_pcie_device_mode mode;
+ };
+
++static const struct of_device_id dw_plat_pcie_of_match[];
++
+ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
+ {
+ struct pcie_port *pp = arg;
+@@ -52,9 +59,58 @@ static const struct dw_pcie_host_ops dw_
+ .host_init = dw_plat_pcie_host_init,
+ };
+
+-static int dw_plat_add_pcie_port(struct pcie_port *pp,
++static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
++{
++ return 0;
++}
++
++static const struct dw_pcie_ops dw_pcie_ops = {
++ .start_link = dw_plat_pcie_establish_link,
++};
++
++static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ enum pci_barno bar;
++
++ for (bar = BAR_0; bar <= BAR_5; bar++)
++ dw_pcie_ep_reset_bar(pci, bar);
++
++ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
++ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
++}
++
++static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++
++ switch (type) {
++ case PCI_EPC_IRQ_LEGACY:
++ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
++ case PCI_EPC_IRQ_MSI:
++ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
++ case PCI_EPC_IRQ_MSIX:
++ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
++ default:
++ dev_err(pci->dev, "UNKNOWN IRQ type\n");
++ }
++
++ return 0;
++}
++
++static struct dw_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = dw_plat_pcie_ep_init,
++ .raise_irq = dw_plat_pcie_ep_raise_irq,
++};
++
++static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+ {
++ struct dw_pcie *pci = dw_plat_pcie->pci;
++ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+@@ -82,15 +138,44 @@ static int dw_plat_add_pcie_port(struct
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+- dev_err(dev, "failed to initialize host\n");
++ dev_err(dev, "Failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+-static const struct dw_pcie_ops dw_pcie_ops = {
+-};
++static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
++ struct platform_device *pdev)
++{
++ int ret;
++ struct dw_pcie_ep *ep;
++ struct resource *res;
++ struct device *dev = &pdev->dev;
++ struct dw_pcie *pci = dw_plat_pcie->pci;
++
++ ep = &pci->ep;
++ ep->ops = &pcie_ep_ops;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
++ pci->dbi_base2 = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pci->dbi_base2))
++ return PTR_ERR(pci->dbi_base2);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
++ if (!res)
++ return -EINVAL;
++
++ ep->phys_base = res->start;
++ ep->addr_size = resource_size(res);
++
++ ret = dw_pcie_ep_init(ep);
++ if (ret) {
++ dev_err(dev, "Failed to initialize endpoint\n");
++ return ret;
++ }
++ return 0;
++}
+
+ static int dw_plat_pcie_probe(struct platform_device *pdev)
+ {
+@@ -99,6 +184,16 @@ static int dw_plat_pcie_probe(struct pla
+ struct dw_pcie *pci;
+ struct resource *res; /* Resource from DT */
+ int ret;
++ const struct of_device_id *match;
++ const struct dw_plat_pcie_of_data *data;
++ enum dw_pcie_device_mode mode;
++
++ match = of_match_device(dw_plat_pcie_of_match, dev);
++ if (!match)
++ return -EINVAL;
++
++ data = (struct dw_plat_pcie_of_data *)match->data;
++ mode = (enum dw_pcie_device_mode)data->mode;
+
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+ if (!dw_plat_pcie)
+@@ -112,23 +207,59 @@ static int dw_plat_pcie_probe(struct pla
+ pci->ops = &dw_pcie_ops;
+
+ dw_plat_pcie->pci = pci;
++ dw_plat_pcie->mode = mode;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
++ if (!res)
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ platform_set_drvdata(pdev, dw_plat_pcie);
+
+- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
+- if (ret < 0)
+- return ret;
++ switch (dw_plat_pcie->mode) {
++ case DW_PCIE_RC_TYPE:
++ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
++ return -ENODEV;
++
++ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
++ if (ret < 0)
++ return ret;
++ break;
++ case DW_PCIE_EP_TYPE:
++ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
++ return -ENODEV;
++
++ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
++ if (ret < 0)
++ return ret;
++ break;
++ default:
++ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
++ }
+
+ return 0;
+ }
+
++static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
++ .mode = DW_PCIE_RC_TYPE,
++};
++
++static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
++ .mode = DW_PCIE_EP_TYPE,
++};
++
+ static const struct of_device_id dw_plat_pcie_of_match[] = {
+- { .compatible = "snps,dw-pcie", },
++ {
++ .compatible = "snps,dw-pcie",
++ .data = &dw_plat_pcie_rc_of_data,
++ },
++ {
++ .compatible = "snps,dw-pcie-ep",
++ .data = &dw_plat_pcie_ep_of_data,
++ },
+ {},
+ };
+
+--- a/drivers/pci/dwc/pcie-designware.c
++++ b/drivers/pci/dwc/pcie-designware.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #include <linux/delay.h>
+--- a/drivers/pci/dwc/pcie-designware.h
++++ b/drivers/pci/dwc/pcie-designware.h
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #ifndef _PCIE_DESIGNWARE_H
+@@ -97,15 +94,6 @@
+ #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+
+-#define MSI_MESSAGE_CONTROL 0x52
+-#define MSI_CAP_MMC_SHIFT 1
+-#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
+-#define MSI_CAP_MME_SHIFT 4
+-#define MSI_CAP_MSI_EN_MASK 0x1
+-#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
+-#define MSI_MESSAGE_ADDR_L32 0x54
+-#define MSI_MESSAGE_ADDR_U32 0x58
+-
+ /*
+ * Maximum number of MSI IRQs can be 256 per controller. But keep
+ * it 32 as of now. Probably we will never need more than 32. If needed,
+@@ -118,6 +106,7 @@
++/* Maximum number of inbound/outbound iATUs */
+ #define MAX_IATU_IN 256
+ #define MAX_IATU_OUT 256
+ 
+ struct pcie_port;
+ struct dw_pcie;
+ struct dw_pcie_ep;
+@@ -185,8 +177,8 @@ enum dw_pcie_as_type {
+
+ struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+- int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
+- u8 interrupt_num);
++ int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num);
+ };
+
+ struct dw_pcie_ep {
+@@ -201,6 +193,10 @@ struct dw_pcie_ep {
+ unsigned long *ob_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
++ void __iomem *msi_mem;
++ phys_addr_t msi_mem_phys;
++ u8 msi_cap; /* MSI capability offset */
++ u8 msix_cap; /* MSI-X capability offset */
+ };
+
+ struct dw_pcie_ops {
+@@ -339,6 +335,12 @@ static inline int dw_pcie_host_init(stru
+ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+ int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+ void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
++int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
++int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num);
++int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num);
++void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+ #else
+ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+ {
+@@ -352,5 +354,26 @@ static inline int dw_pcie_ep_init(struct
+ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+ {
+ }
++
++static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
++{
++ return 0;
++}
++
++static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ return 0;
++}
++
++static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ return 0;
++}
++
++static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
++{
++}
+ #endif
+ #endif /* _PCIE_DESIGNWARE_H */
+--- a/drivers/pci/endpoint/Kconfig
++++ b/drivers/pci/endpoint/Kconfig
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # PCI Endpoint Support
+ #
+--- a/drivers/pci/endpoint/Makefile
++++ b/drivers/pci/endpoint/Makefile
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Makefile for PCI Endpoint Support
+ #
+--- a/drivers/pci/endpoint/functions/Kconfig
++++ b/drivers/pci/endpoint/functions/Kconfig
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # PCI Endpoint Functions
+ #
+--- a/drivers/pci/endpoint/functions/Makefile
++++ b/drivers/pci/endpoint/functions/Makefile
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Makefile for PCI Endpoint Functions
+ #
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/crc32.h>
+@@ -29,13 +18,16 @@
+ #include <linux/pci-epf.h>
+ #include <linux/pci_regs.h>
+
++#define IRQ_TYPE_LEGACY 0
++#define IRQ_TYPE_MSI 1
++#define IRQ_TYPE_MSIX 2
++
+ #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+ #define COMMAND_RAISE_MSI_IRQ BIT(1)
+-#define MSI_NUMBER_SHIFT 2
+-#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
+-#define COMMAND_READ BIT(8)
+-#define COMMAND_WRITE BIT(9)
+-#define COMMAND_COPY BIT(10)
++#define COMMAND_RAISE_MSIX_IRQ BIT(2)
++#define COMMAND_READ BIT(3)
++#define COMMAND_WRITE BIT(4)
++#define COMMAND_COPY BIT(5)
+
+ #define STATUS_READ_SUCCESS BIT(0)
+ #define STATUS_READ_FAIL BIT(1)
+@@ -56,6 +48,7 @@ struct pci_epf_test {
+ struct pci_epf *epf;
+ enum pci_barno test_reg_bar;
+ bool linkup_notifier;
++ bool msix_available;
+ struct delayed_work cmd_handler;
+ };
+
+@@ -67,6 +60,8 @@ struct pci_epf_test_reg {
+ u64 dst_addr;
+ u32 size;
+ u32 checksum;
++ u32 irq_type;
++ u32 irq_number;
+ } __packed;
+
+ static struct pci_epf_header test_header = {
+@@ -81,7 +76,7 @@ struct pci_epf_test_data {
+ bool linkup_notifier;
+ };
+
+-static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
++static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+
+ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+ {
+@@ -98,43 +93,45 @@ static int pci_epf_test_copy(struct pci_
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+ if (!src_addr) {
+- dev_err(dev, "failed to allocate source address\n");
++ dev_err(dev, "Failed to allocate source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map source address\n");
++ dev_err(dev, "Failed to map source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_src_addr;
+ }
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+ if (!dst_addr) {
+- dev_err(dev, "failed to allocate destination address\n");
++ dev_err(dev, "Failed to allocate destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err_src_map_addr;
+ }
+
+- ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map destination address\n");
++ dev_err(dev, "Failed to map destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_dst_addr;
+ }
+
+ memcpy(dst_addr, src_addr, reg->size);
+
+- pci_epc_unmap_addr(epc, dst_phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+
+ err_dst_addr:
+ pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+ err_src_map_addr:
+- pci_epc_unmap_addr(epc, src_phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+
+ err_src_addr:
+ pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+@@ -158,15 +155,16 @@ static int pci_epf_test_read(struct pci_
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!src_addr) {
+- dev_err(dev, "failed to allocate address\n");
++ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map address\n");
++ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_addr;
+ }
+@@ -186,7 +184,7 @@ static int pci_epf_test_read(struct pci_
+ kfree(buf);
+
+ err_map_addr:
+- pci_epc_unmap_addr(epc, phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+
+ err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+@@ -209,15 +207,16 @@ static int pci_epf_test_write(struct pci
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!dst_addr) {
+- dev_err(dev, "failed to allocate address\n");
++ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map address\n");
++ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_addr;
+ }
+@@ -237,12 +236,12 @@ static int pci_epf_test_write(struct pci
+ * wait 1ms inorder for the write to complete. Without this delay L3
+ * error in observed in the host system.
+ */
+- mdelay(1);
++ usleep_range(1000, 2000);
+
+ kfree(buf);
+
+ err_map_addr:
+- pci_epc_unmap_addr(epc, phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+
+ err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+@@ -251,31 +250,42 @@ err:
+ return ret;
+ }
+
+-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
++static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
++ u16 irq)
+ {
+- u8 msi_count;
+ struct pci_epf *epf = epf_test->epf;
++ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+
+ reg->status |= STATUS_IRQ_RAISED;
+- msi_count = pci_epc_get_msi(epc);
+- if (irq > msi_count || msi_count <= 0)
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+- else
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
++
++ switch (irq_type) {
++ case IRQ_TYPE_LEGACY:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
++ break;
++ case IRQ_TYPE_MSI:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
++ break;
++ case IRQ_TYPE_MSIX:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
++ break;
++ default:
++ dev_err(dev, "Failed to raise IRQ, unknown type\n");
++ break;
++ }
+ }
+
+ static void pci_epf_test_cmd_handler(struct work_struct *work)
+ {
+ int ret;
+- u8 irq;
+- u8 msi_count;
++ int count;
+ u32 command;
+ struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+ cmd_handler.work);
+ struct pci_epf *epf = epf_test->epf;
++ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+@@ -287,11 +297,14 @@ static void pci_epf_test_cmd_handler(str
+ reg->command = 0;
+ reg->status = 0;
+
+- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
++ if (reg->irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Failed to detect IRQ type\n");
++ goto reset_handler;
++ }
+
+ if (command & COMMAND_RAISE_LEGACY_IRQ) {
+ reg->status = STATUS_IRQ_RAISED;
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ goto reset_handler;
+ }
+
+@@ -301,7 +314,8 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_WRITE_FAIL;
+ else
+ reg->status |= STATUS_WRITE_SUCCESS;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -311,7 +325,8 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_READ_SUCCESS;
+ else
+ reg->status |= STATUS_READ_FAIL;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -321,16 +336,28 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_COPY_SUCCESS;
+ else
+ reg->status |= STATUS_COPY_FAIL;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+ if (command & COMMAND_RAISE_MSI_IRQ) {
+- msi_count = pci_epc_get_msi(epc);
+- if (irq > msi_count || msi_count <= 0)
++ count = pci_epc_get_msi(epc, epf->func_no);
++ if (reg->irq_number > count || count <= 0)
++ goto reset_handler;
++ reg->status = STATUS_IRQ_RAISED;
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
++ reg->irq_number);
++ goto reset_handler;
++ }
++
++ if (command & COMMAND_RAISE_MSIX_IRQ) {
++ count = pci_epc_get_msix(epc, epf->func_no);
++ if (reg->irq_number > count || count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -351,21 +378,23 @@ static void pci_epf_test_unbind(struct p
+ {
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
++ struct pci_epf_bar *epf_bar;
+ int bar;
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epc_stop(epc);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
++ epf_bar = &epf->bar[bar];
++
+ if (epf_test->reg[bar]) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+- pci_epc_clear_bar(epc, bar);
++ pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ }
+ }
+ }
+
+ static int pci_epf_test_set_bar(struct pci_epf *epf)
+ {
+- int flags;
+ int bar;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+@@ -374,20 +403,27 @@ static int pci_epf_test_set_bar(struct p
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+
+- flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+- if (sizeof(dma_addr_t) == 0x8)
+- flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+-
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
+- ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+- epf_bar->size, flags);
++
++ epf_bar->flags |= upper_32_bits(epf_bar->size) ?
++ PCI_BASE_ADDRESS_MEM_TYPE_64 :
++ PCI_BASE_ADDRESS_MEM_TYPE_32;
++
++ ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ if (ret) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+- dev_err(dev, "failed to set BAR%d\n", bar);
++ dev_err(dev, "Failed to set BAR%d\n", bar);
+ if (bar == test_reg_bar)
+ return ret;
+ }
++ /*
++ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
++ * if the specific implementation required a 64-bit BAR,
++ * even if we only requested a 32-bit BAR.
++ */
++ if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ bar++;
+ }
+
+ return 0;
+@@ -404,7 +440,7 @@ static int pci_epf_test_alloc_space(stru
+ base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+ test_reg_bar);
+ if (!base) {
+- dev_err(dev, "failed to allocated register space\n");
++ dev_err(dev, "Failed to allocated register space\n");
+ return -ENOMEM;
+ }
+ epf_test->reg[test_reg_bar] = base;
+@@ -414,7 +450,7 @@ static int pci_epf_test_alloc_space(stru
+ continue;
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar);
+ if (!base)
+- dev_err(dev, "failed to allocate space for BAR%d\n",
++ dev_err(dev, "Failed to allocate space for BAR%d\n",
+ bar);
+ epf_test->reg[bar] = base;
+ }
+@@ -433,9 +469,18 @@ static int pci_epf_test_bind(struct pci_
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+- ret = pci_epc_write_header(epc, header);
++ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
++ epf_test->linkup_notifier = false;
++ else
++ epf_test->linkup_notifier = true;
++
++ epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
++
++ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
++
++ ret = pci_epc_write_header(epc, epf->func_no, header);
+ if (ret) {
+- dev_err(dev, "configuration header write failed\n");
++ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+
+@@ -447,9 +492,19 @@ static int pci_epf_test_bind(struct pci_
+ if (ret)
+ return ret;
+
+- ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+- if (ret)
++ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
++ if (ret) {
++ dev_err(dev, "MSI configuration failed\n");
+ return ret;
++ }
++
++ if (epf_test->msix_available) {
++ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
++ if (ret) {
++ dev_err(dev, "MSI-X configuration failed\n");
++ return ret;
++ }
++ }
+
+ if (!epf_test->linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+@@ -517,7 +572,7 @@ static int __init pci_epf_test_init(void
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&test_driver);
+ if (ret) {
+- pr_err("failed to register pci epf test driver --> %d\n", ret);
++ pr_err("Failed to register pci epf test driver --> %d\n", ret);
+ return ret;
+ }
+
+--- a/drivers/pci/endpoint/pci-ep-cfs.c
++++ b/drivers/pci/endpoint/pci-ep-cfs.c
+@@ -1,35 +1,28 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/module.h>
++#include <linux/idr.h>
+ #include <linux/slab.h>
+
+ #include <linux/pci-epc.h>
+ #include <linux/pci-epf.h>
+ #include <linux/pci-ep-cfs.h>
+
++static DEFINE_IDR(functions_idr);
++static DEFINE_MUTEX(functions_mutex);
+ static struct config_group *functions_group;
+ static struct config_group *controllers_group;
+
+ struct pci_epf_group {
+ struct config_group group;
+ struct pci_epf *epf;
++ int index;
+ };
+
+ struct pci_epc_group {
+@@ -151,7 +144,7 @@ static struct configfs_item_operations p
+ .drop_link = pci_epc_epf_unlink,
+ };
+
+-static struct config_item_type pci_epc_type = {
++static const struct config_item_type pci_epc_type = {
+ .ct_item_ops = &pci_epc_item_ops,
+ .ct_attrs = pci_epc_attrs,
+ .ct_owner = THIS_MODULE,
+@@ -293,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_sh
+ to_pci_epf_group(item)->epf->msi_interrupts);
+ }
+
++static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
++ const char *page, size_t len)
++{
++ u16 val;
++ int ret;
++
++ ret = kstrtou16(page, 0, &val);
++ if (ret)
++ return ret;
++
++ to_pci_epf_group(item)->epf->msix_interrupts = val;
++
++ return len;
++}
++
++static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
++ char *page)
++{
++ return sprintf(page, "%d\n",
++ to_pci_epf_group(item)->epf->msix_interrupts);
++}
++
+ PCI_EPF_HEADER_R(vendorid)
+ PCI_EPF_HEADER_W_u16(vendorid)
+
+@@ -334,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id
+ CONFIGFS_ATTR(pci_epf_, subsys_id);
+ CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+ CONFIGFS_ATTR(pci_epf_, msi_interrupts);
++CONFIGFS_ATTR(pci_epf_, msix_interrupts);
+
+ static struct configfs_attribute *pci_epf_attrs[] = {
+ &pci_epf_attr_vendorid,
+@@ -347,6 +363,7 @@ static struct configfs_attribute *pci_ep
+ &pci_epf_attr_subsys_id,
+ &pci_epf_attr_interrupt_pin,
+ &pci_epf_attr_msi_interrupts,
++ &pci_epf_attr_msix_interrupts,
+ NULL,
+ };
+
+@@ -354,6 +371,9 @@ static void pci_epf_release(struct confi
+ {
+ struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
++ mutex_lock(&functions_mutex);
++ idr_remove(&functions_idr, epf_group->index);
++ mutex_unlock(&functions_mutex);
+ pci_epf_destroy(epf_group->epf);
+ kfree(epf_group);
+ }
+@@ -362,7 +382,7 @@ static struct configfs_item_operations p
+ .release = pci_epf_release,
+ };
+
+-static struct config_item_type pci_epf_type = {
++static const struct config_item_type pci_epf_type = {
+ .ct_item_ops = &pci_epf_ops,
+ .ct_attrs = pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+@@ -373,22 +393,57 @@ static struct config_group *pci_epf_make
+ {
+ struct pci_epf_group *epf_group;
+ struct pci_epf *epf;
++ char *epf_name;
++ int index, err;
+
+ epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+ if (!epf_group)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_lock(&functions_mutex);
++ index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
++ mutex_unlock(&functions_mutex);
++ if (index < 0) {
++ err = index;
++ goto free_group;
++ }
++
++ epf_group->index = index;
++
+ config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+- epf = pci_epf_create(group->cg_item.ci_name);
++ epf_name = kasprintf(GFP_KERNEL, "%s.%d",
++ group->cg_item.ci_name, epf_group->index);
++ if (!epf_name) {
++ err = -ENOMEM;
++ goto remove_idr;
++ }
++
++ epf = pci_epf_create(epf_name);
+ if (IS_ERR(epf)) {
+ pr_err("failed to create endpoint function device\n");
+- return ERR_PTR(-EINVAL);
++ err = -EINVAL;
++ goto free_name;
+ }
+
+ epf_group->epf = epf;
+
++ kfree(epf_name);
++
+ return &epf_group->group;
++
++free_name:
++ kfree(epf_name);
++
++remove_idr:
++ mutex_lock(&functions_mutex);
++ idr_remove(&functions_idr, epf_group->index);
++ mutex_unlock(&functions_mutex);
++
++free_group:
++ kfree(epf_group);
++
++ return ERR_PTR(err);
+ }
+
+ static void pci_epf_drop(struct config_group *group, struct config_item *item)
+@@ -401,7 +456,7 @@ static struct configfs_group_operations
+ .drop_item = &pci_epf_drop,
+ };
+
+-static struct config_item_type pci_epf_group_type = {
++static const struct config_item_type pci_epf_group_type = {
+ .ct_group_ops = &pci_epf_group_ops,
+ .ct_owner = THIS_MODULE,
+ };
+@@ -429,15 +484,15 @@ void pci_ep_cfs_remove_epf_group(struct
+ }
+ EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+-static struct config_item_type pci_functions_type = {
++static const struct config_item_type pci_functions_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+-static struct config_item_type pci_controllers_type = {
++static const struct config_item_type pci_controllers_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+-static struct config_item_type pci_ep_type = {
++static const struct config_item_type pci_ep_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/device.h>
+@@ -141,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
+ /**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+- * @type: specify the type of interrupt; legacy or MSI
+- * @interrupt_num: the MSI interrupt number
++ * @func_no: the endpoint function number in the EPC device
++ * @type: specify the type of interrupt; legacy, MSI or MSI-X
++ * @interrupt_num: the MSI or MSI-X interrupt number
+ *
+- * Invoke to raise an MSI or legacy interrupt
++ * Invoke to raise a legacy, MSI or MSI-X interrupt
+ */
+-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+- u8 interrupt_num)
++int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->raise_irq(epc, type, interrupt_num);
++ ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -169,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
+ /**
+ * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
+ * @epc: the EPC device to which MSI interrupts was requested
++ * @func_no: the endpoint function number in the EPC device
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+-int pci_epc_get_msi(struct pci_epc *epc)
++int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+ {
+ int interrupt;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (!epc->ops->get_msi)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- interrupt = epc->ops->get_msi(epc);
++ interrupt = epc->ops->get_msi(epc, func_no);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+@@ -199,17 +190,19 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+ /**
+ * pci_epc_set_msi() - set the number of MSI interrupt numbers required
+ * @epc: the EPC device on which MSI has to be configured
++ * @func_no: the endpoint function number in the EPC device
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+-int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
++int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+ {
+ int ret;
+ u8 encode_int;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ interrupts > 32)
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+@@ -218,7 +211,7 @@ int pci_epc_set_msi(struct pci_epc *epc,
+ encode_int = order_base_2(interrupts);
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->set_msi(epc, encode_int);
++ ret = epc->ops->set_msi(epc, func_no, encode_int);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -226,24 +219,83 @@ int pci_epc_set_msi(struct pci_epc *epc,
+ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
+
+ /**
++ * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
++ * @epc: the EPC device to which MSI-X interrupts were requested
++ * @func_no: the endpoint function number in the EPC device
++ *
++ * Invoke to get the number of MSI-X interrupts allocated by the RC
++ */
++int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
++{
++ int interrupt;
++ unsigned long flags;
++
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
++ return 0;
++
++ if (!epc->ops->get_msix)
++ return 0;
++
++ spin_lock_irqsave(&epc->lock, flags);
++ interrupt = epc->ops->get_msix(epc, func_no);
++ spin_unlock_irqrestore(&epc->lock, flags);
++
++ if (interrupt < 0)
++ return 0;
++
++ return interrupt + 1;
++}
++EXPORT_SYMBOL_GPL(pci_epc_get_msix);
++
++/**
++ * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
++ * @epc: the EPC device on which MSI-X has to be configured
++ * @func_no: the endpoint function number in the EPC device
++ * @interrupts: number of MSI-X interrupts required by the EPF
++ *
++ * Invoke to set the required number of MSI-X interrupts.
++ */
++int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
++{
++ int ret;
++ unsigned long flags;
++
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ interrupts < 1 || interrupts > 2048)
++ return -EINVAL;
++
++ if (!epc->ops->set_msix)
++ return 0;
++
++ spin_lock_irqsave(&epc->lock, flags);
++ ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
++ spin_unlock_irqrestore(&epc->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(pci_epc_set_msix);
++
++/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
++ * @func_no: the endpoint function number in the EPC device
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+-void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
++void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr)
+ {
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return;
+
+ if (!epc->ops->unmap_addr)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- epc->ops->unmap_addr(epc, phys_addr);
++ epc->ops->unmap_addr(epc, func_no, phys_addr);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+@@ -251,26 +303,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+ /**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
++ * @func_no: the endpoint function number in the EPC device
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+-int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+- u64 pci_addr, size_t size)
++int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr, u64 pci_addr, size_t size)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
++ ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -280,22 +333,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+ /**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+- * @bar: the BAR number that has to be reset
++ * @func_no: the endpoint function number in the EPC device
++ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+-void pci_epc_clear_bar(struct pci_epc *epc, int bar)
++void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ (epf_bar->barno == BAR_5 &&
++ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ return;
+
+ if (!epc->ops->clear_bar)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- epc->ops->clear_bar(epc, bar);
++ epc->ops->clear_bar(epc, func_no, epf_bar);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+@@ -303,26 +360,32 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+ /**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+- * @bar: the BAR number that has to be configured
+- * @size: the size of the addr space
+- * @flags: specify memory allocation/io allocation/32bit address/64 bit address
++ * @func_no: the endpoint function number in the EPC device
++ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+-int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags)
++int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ int ret;
+ unsigned long irq_flags;
++ int flags = epf_bar->flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ (epf_bar->barno == BAR_5 &&
++ flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
++ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
++ flags & PCI_BASE_ADDRESS_IO_MASK) ||
++ (upper_32_bits(epf_bar->size) &&
++ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
+ return -EINVAL;
+
+ if (!epc->ops->set_bar)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, irq_flags);
+- ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
++ ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ spin_unlock_irqrestore(&epc->lock, irq_flags);
+
+ return ret;
+@@ -332,6 +395,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+ /**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
++ * @func_no: the endpoint function number in the EPC device
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+@@ -339,19 +403,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+-int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
++int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_header *header)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->write_header)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->write_header(epc, header);
++ ret = epc->ops->write_header(epc, func_no, header);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+--- a/drivers/pci/endpoint/pci-epc-mem.c
++++ b/drivers/pci/endpoint/pci-epc-mem.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/io.h>
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/device.h>
+@@ -26,6 +15,8 @@
+ #include <linux/pci-epf.h>
+ #include <linux/pci-ep-cfs.h>
+
++static DEFINE_MUTEX(pci_epf_mutex);
++
+ static struct bus_type pci_epf_bus_type;
+ static const struct device_type pci_epf_type;
+
+@@ -109,6 +100,8 @@ void pci_epf_free_space(struct pci_epf *
+
+ epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].size = 0;
++ epf->bar[bar].barno = 0;
++ epf->bar[bar].flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+@@ -137,11 +130,27 @@ void *pci_epf_alloc_space(struct pci_epf
+
+ epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].size = size;
++ epf->bar[bar].barno = bar;
++ epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ return space;
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+
++static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
++{
++ struct config_group *group, *tmp;
++
++ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
++ return;
++
++ mutex_lock(&pci_epf_mutex);
++ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
++ pci_ep_cfs_remove_epf_group(group);
++ list_del(&driver->epf_group);
++ mutex_unlock(&pci_epf_mutex);
++}
++
+ /**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+@@ -150,11 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+ */
+ void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+ {
+- pci_ep_cfs_remove_epf_group(driver->group);
++ pci_epf_remove_cfs(driver);
+ driver_unregister(&driver->driver);
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
++static int pci_epf_add_cfs(struct pci_epf_driver *driver)
++{
++ struct config_group *group;
++ const struct pci_epf_device_id *id;
++
++ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
++ return 0;
++
++ INIT_LIST_HEAD(&driver->epf_group);
++
++ id = driver->id_table;
++ while (id->name[0]) {
++ group = pci_ep_cfs_add_epf_group(id->name);
++ if (IS_ERR(group)) {
++ pci_epf_remove_cfs(driver);
++ return PTR_ERR(group);
++ }
++
++ mutex_lock(&pci_epf_mutex);
++ list_add_tail(&group->group_entry, &driver->epf_group);
++ mutex_unlock(&pci_epf_mutex);
++ id++;
++ }
++
++ return 0;
++}
++
+ /**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+@@ -180,7 +216,7 @@ int __pci_epf_register_driver(struct pci
+ if (ret)
+ return ret;
+
+- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
++ pci_epf_add_cfs(driver);
+
+ return 0;
+ }
+@@ -211,29 +247,17 @@ struct pci_epf *pci_epf_create(const cha
+ int ret;
+ struct pci_epf *epf;
+ struct device *dev;
+- char *func_name;
+- char *buf;
++ int len;
+
+ epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+- if (!epf) {
+- ret = -ENOMEM;
+- goto err_ret;
+- }
++ if (!epf)
++ return ERR_PTR(-ENOMEM);
+
+- buf = kstrdup(name, GFP_KERNEL);
+- if (!buf) {
+- ret = -ENOMEM;
+- goto free_epf;
+- }
+-
+- func_name = buf;
+- buf = strchrnul(buf, '.');
+- *buf = '\0';
+-
+- epf->name = kstrdup(func_name, GFP_KERNEL);
++ len = strchrnul(name, '.') - name;
++ epf->name = kstrndup(name, len, GFP_KERNEL);
+ if (!epf->name) {
+- ret = -ENOMEM;
+- goto free_func_name;
++ kfree(epf);
++ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &epf->dev;
+@@ -242,28 +266,18 @@ struct pci_epf *pci_epf_create(const cha
+ dev->type = &pci_epf_type;
+
+ ret = dev_set_name(dev, "%s", name);
+- if (ret)
+- goto put_dev;
++ if (ret) {
++ put_device(dev);
++ return ERR_PTR(ret);
++ }
+
+ ret = device_add(dev);
+- if (ret)
+- goto put_dev;
++ if (ret) {
++ put_device(dev);
++ return ERR_PTR(ret);
++ }
+
+- kfree(func_name);
+ return epf;
+-
+-put_dev:
+- put_device(dev);
+- kfree(epf->name);
+-
+-free_func_name:
+- kfree(func_name);
+-
+-free_epf:
+- kfree(epf);
+-
+-err_ret:
+- return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_create);
+
+--- a/drivers/pci/host/pci-host-common.c
++++ b/drivers/pci/host/pci-host-common.c
+@@ -113,9 +113,7 @@ err_out:
+ int pci_host_common_probe(struct platform_device *pdev,
+ struct pci_ecam_ops *ops)
+ {
+- const char *type;
+ struct device *dev = &pdev->dev;
+- struct device_node *np = dev->of_node;
+ struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+@@ -126,12 +124,6 @@ int pci_host_common_probe(struct platfor
+ if (!bridge)
+ return -ENOMEM;
+
+- type = of_get_property(np, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+-
+ of_pci_check_probe_only();
+
+ /* Parse and map our Configuration Space windows */
+--- a/drivers/pci/host/pcie-xilinx-nwl.c
++++ b/drivers/pci/host/pcie-xilinx-nwl.c
+@@ -778,16 +778,7 @@ static int nwl_pcie_parse_dt(struct nwl_
+ struct platform_device *pdev)
+ {
+ struct device *dev = pcie->dev;
+- struct device_node *node = dev->of_node;
+ struct resource *res;
+- const char *type;
+-
+- /* Check for device type */
+- type = of_get_property(node, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ pcie->breg_base = devm_ioremap_resource(dev, res);
+--- a/drivers/pci/host/pcie-xilinx.c
++++ b/drivers/pci/host/pcie-xilinx.c
+@@ -584,15 +584,8 @@ static int xilinx_pcie_parse_dt(struct x
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+- const char *type;
+ int err;
+
+- type = of_get_property(node, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+-
+ err = of_address_to_resource(node, 0, ®s);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+--- /dev/null
++++ b/drivers/pci/mobiveil/Kconfig
+@@ -0,0 +1,50 @@
++# SPDX-License-Identifier: GPL-2.0
++
++menu "Mobiveil PCIe Core Support"
++ depends on PCI
++
++config PCIE_MOBIVEIL
++ bool
++
++config PCIE_MOBIVEIL_HOST
++ bool
++ depends on PCI_MSI_IRQ_DOMAIN
++ select PCIE_MOBIVEIL
++
++config PCIE_MOBIVEIL_EP
++ bool
++ depends on PCI_ENDPOINT
++ select PCIE_MOBIVEIL
++
++config PCIE_MOBIVEIL_PLAT
++ bool "Mobiveil AXI PCIe controller"
++ depends on ARCH_ZYNQMP || COMPILE_TEST
++ depends on OF
++ select PCIE_MOBIVEIL_HOST
++ help
++ Say Y here if you want to enable support for the Mobiveil AXI PCIe
++ Soft IP. It has up to 8 outbound and inbound windows
++ for address translation and it is a PCIe Gen4 IP.
++
++config PCI_LAYERSCAPE_GEN4
++ bool "Freescale Layerscape PCIe Gen4 controller in RC mode"
++ depends on PCI
++ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
++ depends on PCI_MSI_IRQ_DOMAIN
++ select PCIE_MOBIVEIL_HOST
++ help
++ Say Y here if you want PCIe Gen4 controller support on
++ Layerscape SoCs. And the PCIe controller work in RC mode
++ by setting the RCW[HOST_AGT_PEX] to 0.
++
++config PCI_LAYERSCAPE_GEN4_EP
++ bool "Freescale Layerscape PCIe Gen4 controller in EP mode"
++ depends on PCI
++ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
++ depends on PCI_ENDPOINT
++ select PCIE_MOBIVEIL_EP
++ help
++ Say Y here if you want PCIe Gen4 controller support on
++ Layerscape SoCs. And the PCIe controller work in EP mode
++ by setting the RCW[HOST_AGT_PEX] to 1.
++endmenu
+--- /dev/null
++++ b/drivers/pci/mobiveil/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
++obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
++obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
++obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
++obj-$(CONFIG_PCI_LAYERSCAPE_GEN4) += pci-layerscape-gen4.o
++obj-$(CONFIG_PCI_LAYERSCAPE_GEN4_EP) += pci-layerscape-gen4-ep.o
+--- /dev/null
++++ b/drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
+@@ -0,0 +1,178 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe controller EP driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2018 NXP Semiconductor.
++ *
++ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++
++#include "pcie-mobiveil.h"
++
++struct ls_pcie_g4_ep {
++ struct mobiveil_pcie *mv_pci;
++};
++
++#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
++
++static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
++ { .compatible = "fsl,lx2160a-pcie-ep",},
++ { },
++};
++
++static void ls_pcie_g4_get_bar_num(struct mobiveil_pcie_ep *ep)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++ u32 type, reg;
++ u8 bar;
++
++ ep->bar_num = BAR_5 + 1;
++
++ for (bar = BAR_0; bar <= BAR_5; bar++) {
++ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
++ type = csr_readl(mv_pci, reg) &
++ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
++ if (type & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ ep->bar_num--;
++ }
++}
++
++static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ enum pci_barno bar;
++ int win_idx, val;
++
++ /*
++ * Errata: on an unsupported request error for an inbound posted
++ * write transaction, the PCIe controller reports an advisory error
++ * instead of an uncorrectable error message to the RC.
++ * Workaround: set bit 20 (Unsupported Request Error Severity) to 1
++ * in the Uncorrectable Error Severity Register so that an
++ * unsupported request generates a fatal error.
++ */
++ val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
++ val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
++ csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
++
++ ls_pcie_g4_get_bar_num(ep);
++
++ for (bar = BAR_0; bar < (ep->bar_num * ep->pf_num); bar++)
++ mobiveil_pcie_ep_reset_bar(mv_pci, bar);
++
++ for (win_idx = 0; win_idx < MAX_IATU_OUT; win_idx++)
++ mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
++
++ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
++ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
++}
++
++static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++
++ switch (type) {
++ case PCI_EPC_IRQ_LEGACY:
++ return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
++ case PCI_EPC_IRQ_MSI:
++ return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
++ interrupt_num);
++ case PCI_EPC_IRQ_MSIX:
++ return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
++ interrupt_num);
++ default:
++ dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
++ }
++
++ return 0;
++}
++
++static struct mobiveil_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = ls_pcie_g4_ep_init,
++ .raise_irq = ls_pcie_g4_ep_raise_irq,
++};
++
++static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_pcie_g4_ep,
++ struct platform_device *pdev)
++{
++ struct mobiveil_pcie *mv_pci = ls_pcie_g4_ep->mv_pci;
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie_ep *ep;
++ struct resource *res;
++ int ret;
++ struct device_node *np = dev->of_node;
++
++ ep = &mv_pci->ep;
++ ep->ops = &pcie_ep_ops;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
++ if (!res)
++ return -EINVAL;
++
++ ep->phys_base = res->start;
++ ep->addr_size = resource_size(res);
++
++ ret = of_property_read_u32(np, "max-functions", &ep->pf_num);
++ if (ret < 0)
++ ep->pf_num = 1;
++
++ ret = mobiveil_pcie_ep_init(ep);
++ if (ret) {
++ dev_err(dev, "failed to initialize endpoint\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie *mv_pci;
++ struct ls_pcie_g4_ep *ls_pcie_g4_ep;
++ struct resource *res;
++ int ret;
++
++ ls_pcie_g4_ep = devm_kzalloc(dev, sizeof(*ls_pcie_g4_ep), GFP_KERNEL);
++ if (!ls_pcie_g4_ep)
++ return -ENOMEM;
++
++ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
++ if (!mv_pci)
++ return -ENOMEM;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
++ mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(mv_pci->csr_axi_slave_base))
++ return PTR_ERR(mv_pci->csr_axi_slave_base);
++
++ mv_pci->pdev = pdev;
++ ls_pcie_g4_ep->mv_pci = mv_pci;
++
++ platform_set_drvdata(pdev, ls_pcie_g4_ep);
++
++ ret = ls_pcie_gen4_add_pcie_ep(ls_pcie_g4_ep, pdev);
++
++ return ret;
++}
++
++static struct platform_driver ls_pcie_g4_ep_driver = {
++ .driver = {
++ .name = "layerscape-pcie-gen4-ep",
++ .of_match_table = ls_pcie_g4_ep_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
+--- /dev/null
++++ b/drivers/pci/mobiveil/pci-layerscape-gen4.c
+@@ -0,0 +1,292 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe Gen4 host controller driver for NXP Layerscape SoCs
++ *
++ * Copyright 2018 NXP
++ *
++ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++
++#include "pcie-mobiveil.h"
++
++/* LUT and PF control registers */
++#define PCIE_LUT_OFF (0x80000)
++#define PCIE_LUT_GCR (0x28)
++#define PCIE_LUT_GCR_RRE (0)
++
++#define PCIE_PF_OFF (0xc0000)
++#define PCIE_PF_INT_STAT (0x18)
++#define PF_INT_STAT_PABRST (31)
++
++#define PCIE_PF_DBG (0x7fc)
++#define PF_DBG_LTSSM_MASK (0x3f)
++#define PF_DBG_WE (31)
++#define PF_DBG_PABR (27)
++
++#define LS_PCIE_G4_LTSSM_L0 0x2d /* L0 state */
++
++#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
++
++struct ls_pcie_g4 {
++ struct mobiveil_pcie *pci;
++ struct delayed_work dwork;
++ int irq;
++};
++
++static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
++{
++ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
++}
++
++static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
++ u32 off, u32 val)
++{
++ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
++}
++
++static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
++{
++ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
++}
++
++static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
++ u32 off, u32 val)
++{
++ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
++}
++
++static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 header_type;
++
++ header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
++ header_type &= 0x7f;
++
++ return header_type == PCI_HEADER_TYPE_BRIDGE;
++}
++
++static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
++{
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
++ u32 state;
++
++ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ state = state & PF_DBG_LTSSM_MASK;
++
++ if (state == LS_PCIE_G4_LTSSM_L0)
++ return 1;
++
++ return 0;
++}
++
++static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val, act_stat;
++ int to = 100;
++
++ /* Poll for pab_csb_reset to set and PAB activity to clear */
++ do {
++ usleep_range(10, 15);
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
++ act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
++ } while (((val & 1 << PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
++ if (to < 0) {
++ dev_err(&mv_pci->pdev->dev, "poll PABRST&PABACT timeout\n");
++ return;
++ }
++
++ /* clear PEX_RESET bit in PEX_PF0_DBG register */
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val |= 1 << PF_DBG_WE;
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val |= 1 << PF_DBG_PABR;
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val &= ~(1 << PF_DBG_WE);
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ mobiveil_host_init(mv_pci, true);
++
++ to = 100;
++ while (!ls_pcie_g4_link_up(mv_pci) && to--)
++ usleep_range(200, 250);
++ if (to < 0)
++ dev_err(&mv_pci->pdev->dev, "PCIe link training timeout\n");
++}
++
++static irqreturn_t ls_pcie_g4_handler(int irq, void *dev_id)
++{
++ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val;
++
++ val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
++ if (!val)
++ return IRQ_NONE;
++
++ if (val & PAB_INTP_RESET)
++ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
++
++ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
++
++ return IRQ_HANDLED;
++}
++
++static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
++{
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
++ u32 val;
++ int ret;
++
++ pcie->irq = platform_get_irq_byname(mv_pci->pdev, "intr");
++ if (pcie->irq < 0) {
++ dev_err(&mv_pci->pdev->dev, "Can't get 'intr' irq.\n");
++ return pcie->irq;
++ }
++ ret = devm_request_irq(&mv_pci->pdev->dev, pcie->irq,
++ ls_pcie_g4_handler, IRQF_SHARED,
++ mv_pci->pdev->name, pcie);
++ if (ret) {
++ dev_err(&mv_pci->pdev->dev, "Can't register PCIe IRQ.\n");
++ return ret;
++ }
++
++ /* Enable interrupts */
++ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
++ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
++ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
++
++ return 0;
++}
++
++static void ls_pcie_g4_reset(struct work_struct *work)
++{
++ struct delayed_work *dwork = container_of(work, struct delayed_work,
++ work);
++ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u16 ctrl;
++
++ ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
++ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
++ csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
++ ls_pcie_g4_reinit_hw(pcie);
++}
++
++static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct mobiveil_pcie *pci = bus->sysdata;
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
++ int ret;
++
++ if (where == PCI_VENDOR_ID)
++ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
++ 0 << PCIE_LUT_GCR_RRE);
++
++ ret = pci_generic_config_read(bus, devfn, where, size, val);
++
++ if (where == PCI_VENDOR_ID)
++ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
++ 1 << PCIE_LUT_GCR_RRE);
++
++ return ret;
++}
++
++static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
++ .interrupt_init = ls_pcie_g4_interrupt_init,
++ .read_other_conf = ls_pcie_g4_read_other_conf,
++};
++
++static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
++ .link_up = ls_pcie_g4_link_up,
++};
++
++static void workaround_tkt381274(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val;
++
++ /* Set ACK latency timeout */
++ val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
++ val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
++ val |= (4 << ACK_LAT_TO_VAL_SHIFT);
++ csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
++}
++
++static int __init ls_pcie_g4_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie *mv_pci;
++ struct ls_pcie_g4 *pcie;
++ struct device_node *np = dev->of_node;
++ int ret;
++
++ if (!of_parse_phandle(np, "msi-parent", 0)) {
++ dev_err(dev, "failed to find msi-parent\n");
++ return -EINVAL;
++ }
++
++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
++ if (!mv_pci)
++ return -ENOMEM;
++
++ mv_pci->pdev = pdev;
++ mv_pci->ops = &ls_pcie_g4_pab_ops;
++ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
++ pcie->pci = mv_pci;
++
++ platform_set_drvdata(pdev, pcie);
++
++ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
++
++ ret = mobiveil_pcie_host_probe(mv_pci);
++ if (ret) {
++ dev_err(dev, "fail to probe!\n");
++ return ret;
++ }
++
++ if (!ls_pcie_g4_is_bridge(pcie))
++ return -ENODEV;
++
++ workaround_tkt381274(pcie);
++
++ return 0;
++}
++
++static const struct of_device_id ls_pcie_g4_of_match[] = {
++ { .compatible = "fsl,lx2160a-pcie", },
++ { },
++};
++
++static struct platform_driver ls_pcie_g4_driver = {
++ .driver = {
++ .name = "layerscape-pcie-gen4",
++ .of_match_table = ls_pcie_g4_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-ep.c
+@@ -0,0 +1,512 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Mobiveil PCIe Endpoint controller driver
++ *
++ * Copyright (C) 2018 NXP Semiconductor.
++ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
++ */
++
++#include <linux/of.h>
++#include <linux/pci-epc.h>
++#include <linux/pci-epf.h>
++#include <linux/platform_device.h>
++#include "pcie-mobiveil.h"
++
++void mobiveil_pcie_ep_linkup(struct mobiveil_pcie_ep *ep)
++{
++ struct pci_epc *epc = ep->epc;
++
++ pci_epc_linkup(epc);
++}
++
++static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
++ enum pci_barno bar)
++{
++ csr_writel(pcie, bar, GPEX_BAR_SELECT);
++ csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
++ csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
++}
++
++void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
++ enum pci_barno bar)
++{
++ __mobiveil_pcie_ep_reset_bar(pcie, bar);
++}
++
++static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
++ u8 cap_ptr, u8 cap)
++{
++ u8 cap_id, next_cap_ptr;
++ u16 reg;
++
++ reg = csr_readw(pcie, cap_ptr);
++ next_cap_ptr = (reg & 0xff00) >> 8;
++ cap_id = (reg & 0x00ff);
++
++ if (cap_id == cap)
++ return cap_ptr;
++
++ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
++ return 0;
++
++ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
++}
++
++static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie *pcie,
++ u8 cap)
++{
++ u8 next_cap_ptr;
++ u16 reg;
++
++ reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
++ next_cap_ptr = (reg & 0x00ff);
++
++ if (!next_cap_ptr)
++ return 0;
++
++ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
++}
++
++static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_header *hdr)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
++ csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
++ csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
++ csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
++ csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
++ PCI_CLASS_DEVICE);
++ csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
++ csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
++ csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
++ csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_inbound_atu(struct mobiveil_pcie_ep *ep,
++ u8 func_no, enum pci_barno bar,
++ dma_addr_t cpu_addr)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_outbound_atu(struct mobiveil_pcie_ep *ep,
++ phys_addr_t phys_addr,
++ u64 pci_addr, u8 func_no,
++ size_t size)
++{
++ int ret;
++ u32 free_win;
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
++ if (free_win >= ep->num_ob_windows) {
++ dev_err(&pcie->pdev->dev, "No free outbound window\n");
++ return -EINVAL;
++ }
++
++ ret = program_ob_windows_ep(pcie, free_win, MEM_WINDOW_TYPE,
++ phys_addr, pci_addr, func_no, size);
++ if (ret < 0) {
++ dev_err(&pcie->pdev->dev, "Failed to program OB window\n");
++ return ret;
++ }
++
++ set_bit(free_win, ep->ob_window_map);
++ ep->outbound_addr[free_win] = phys_addr;
++
++ return 0;
++}
++
++static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++
++ if (bar < ep->bar_num) {
++ __mobiveil_pcie_ep_reset_bar(pcie,
++ func_no * ep->bar_num + bar);
++
++ mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
++ }
++}
++
++static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
++{
++ int ret;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++ size_t size = epf_bar->size;
++
++ if (bar < ep->bar_num) {
++ ret = mobiveil_pcie_ep_inbound_atu(ep, func_no, bar,
++ epf_bar->phys_addr);
++ if (ret)
++ return ret;
++
++ csr_writel(pcie, func_no * ep->bar_num + bar,
++ GPEX_BAR_SELECT);
++ csr_writel(pcie, lower_32_bits(~(size - 1)),
++ GPEX_BAR_SIZE_LDW);
++ csr_writel(pcie, upper_32_bits(~(size - 1)),
++ GPEX_BAR_SIZE_UDW);
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
++ phys_addr_t addr,
++ u32 *atu_index)
++{
++ u32 index;
++
++ for (index = 0; index < ep->num_ob_windows; index++) {
++ if (ep->outbound_addr[index] != addr)
++ continue;
++ *atu_index = index;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr)
++{
++ int ret;
++ u32 atu_index;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
++ if (ret < 0)
++ return;
++
++ mobiveil_pcie_disable_ob_win(pcie, atu_index);
++ clear_bit(atu_index, ep->ob_window_map);
++}
++
++static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr,
++ u64 pci_addr, size_t size)
++{
++ int ret;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ ret = mobiveil_pcie_ep_outbound_atu(ep, addr, pci_addr, func_no, size);
++ if (ret) {
++ dev_err(&pcie->pdev->dev, "Failed to enable address\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = csr_readw(pcie, reg);
++ if (!(val & PCI_MSI_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
++
++ return val;
++}
++
++static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
++ u8 func_no, u8 interrupts)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = csr_readw(pcie, reg);
++ val &= ~PCI_MSI_FLAGS_QMASK;
++ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
++ csr_writew(pcie, val, reg);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = csr_readw(pcie, reg);
++ if (!(val & PCI_MSIX_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val &= PCI_MSIX_FLAGS_QSIZE;
++
++ return val;
++}
++
++static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
++ u16 interrupts)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = csr_readw(pcie, reg);
++ val &= ~PCI_MSIX_FLAGS_QSIZE;
++ val |= interrupts;
++ csr_writew(pcie, val, reg);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++
++ if (!ep->ops->raise_irq)
++ return -EINVAL;
++
++ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
++}
++
++static const struct pci_epc_ops epc_ops = {
++ .write_header = mobiveil_pcie_ep_write_header,
++ .set_bar = mobiveil_pcie_ep_set_bar,
++ .clear_bar = mobiveil_pcie_ep_clear_bar,
++ .map_addr = mobiveil_pcie_ep_map_addr,
++ .unmap_addr = mobiveil_pcie_ep_unmap_addr,
++ .set_msi = mobiveil_pcie_ep_set_msi,
++ .get_msi = mobiveil_pcie_ep_get_msi,
++ .set_msix = mobiveil_pcie_ep_set_msix,
++ .get_msix = mobiveil_pcie_ep_get_msix,
++ .raise_irq = mobiveil_pcie_ep_raise_irq,
++};
++
++int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
++
++ return -EINVAL;
++}
++
++int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 msg_ctrl, msg_data;
++ u32 msg_addr_lower, msg_addr_upper, reg;
++ u64 msg_addr;
++ u32 func_num;
++ bool has_upper;
++ int ret;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ msg_ctrl = csr_readw(pcie, reg);
++ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
++ msg_addr_lower = csr_readl(pcie, reg);
++ if (has_upper) {
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
++ msg_addr_upper = csr_readl(pcie, reg);
++ reg = ep->msi_cap + PCI_MSI_DATA_64;
++ msg_data = csr_readw(pcie, reg);
++ } else {
++ msg_addr_upper = 0;
++ reg = ep->msi_cap + PCI_MSI_DATA_32;
++ msg_data = csr_readw(pcie, reg);
++ }
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
++ msg_addr, epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
++
++ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u32 msg_addr_upper, msg_addr_lower;
++ u32 msg_data;
++ u64 msg_addr;
++ u32 func_num;
++ int ret;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_LOWER_ADDR +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++ msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_UPPER_ADDR +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_DATA +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
++ msg_addr, epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data, ep->msi_mem);
++
++ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep)
++{
++ struct pci_epc *epc = ep->epc;
++
++ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
++ epc->mem->page_size);
++
++ pci_epc_mem_exit(epc);
++}
++
++int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep)
++{
++ int ret;
++ void *addr;
++ struct pci_epc *epc;
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *np = dev->of_node;
++
++ if (!pcie->csr_axi_slave_base) {
++ dev_err(dev, "csr_base is not populated\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
++ if (ret < 0) {
++ dev_err(dev, "Unable to read *num-ob-windows* property\n");
++ return ret;
++ }
++
++ if (ep->num_ob_windows > MAX_IATU_OUT) {
++ dev_err(dev, "Invalid *num-ob-windows*\n");
++ return -EINVAL;
++ }
++ ep->ob_window_map = devm_kcalloc(dev,
++ BITS_TO_LONGS(ep->num_ob_windows),
++ sizeof(long),
++ GFP_KERNEL);
++ if (!ep->ob_window_map)
++ return -ENOMEM;
++
++ addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
++ GFP_KERNEL);
++ if (!addr)
++ return -ENOMEM;
++ ep->outbound_addr = addr;
++
++ mobiveil_pcie_enable_bridge_pio(pcie);
++ mobiveil_pcie_enable_engine_apio(pcie);
++ mobiveil_pcie_enable_engine_ppio(pcie);
++ mobiveil_pcie_enable_msi_ep(pcie);
++
++ epc = devm_pci_epc_create(dev, &epc_ops);
++ if (IS_ERR(epc)) {
++ dev_err(dev, "Failed to create epc device\n");
++ return PTR_ERR(epc);
++ }
++
++ ep->epc = epc;
++ epc_set_drvdata(epc, ep);
++
++ ep->msi_cap = mobiveil_pcie_ep_find_capability(pcie, PCI_CAP_ID_MSI);
++
++ ep->msix_cap = mobiveil_pcie_ep_find_capability(pcie,
++ PCI_CAP_ID_MSIX);
++
++ if (ep->ops->ep_init)
++ ep->ops->ep_init(ep);
++
++ epc->max_functions = ep->pf_num;
++
++ ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
++ ep->page_size);
++ if (ret < 0) {
++ dev_err(dev, "Failed to initialize address space\n");
++ return ret;
++ }
++
++ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
++ epc->mem->page_size);
++ if (!ep->msi_mem) {
++ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-host.c
+@@ -0,0 +1,640 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/msi.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_platform.h>
++#include <linux/of_pci.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++
++#include "pcie-mobiveil.h"
++
++static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++
++ /* If there is no link, then there is no device */
++ if (bus->number > pcie->rp.root_bus_nr && !mobiveil_pcie_link_up(pcie))
++ return false;
++
++ /* Only one device down on each root port */
++ if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
++ return false;
++
++ /*
++ * Do not read more than one device on the bus directly
++ * attached to RC
++ */
++ if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
++ return false;
++
++ return true;
++}
++
++/*
++ * mobiveil_pcie_map_bus - routine to get the configuration base of either
++ * root port or endpoint
++ */
++static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
++ unsigned int devfn, int where)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++ u32 value;
++
++ if (!mobiveil_pcie_valid_device(bus, devfn))
++ return NULL;
++
++ /* RC config access */
++ if (bus->number == pcie->rp.root_bus_nr)
++ return pcie->csr_axi_slave_base + where;
++
++ /*
++ * EP config access (in Config/APIO space)
++ * Program PEX Address base (31..16 bits) with appropriate value
++ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
++ * Relies on pci_lock serialization
++ */
++ value = bus->number << PAB_BUS_SHIFT |
++ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
++ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
++
++ csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
++
++ return pcie->rp.config_axi_slave_base + where;
++}
++
++static int mobiveil_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++ struct root_port *rp = &pcie->rp;
++
++ if (bus->number > rp->root_bus_nr && rp->ops->read_other_conf)
++ return rp->ops->read_other_conf(bus, devfn, where, size, val);
++
++ return pci_generic_config_read(bus, devfn, where, size, val);
++}
++static struct pci_ops mobiveil_pcie_ops = {
++ .map_bus = mobiveil_pcie_map_bus,
++ .read = mobiveil_pcie_config_read,
++ .write = pci_generic_config_write,
++};
++
++static void mobiveil_pcie_isr(struct irq_desc *desc)
++{
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
++ struct device *dev = &pcie->pdev->dev;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++ u32 msi_data, msi_addr_lo, msi_addr_hi;
++ u32 intr_status, msi_status;
++ unsigned long shifted_status;
++ u32 bit, virq, val, mask;
++
++ /*
++ * The core provides a single interrupt for both INTx/MSI messages.
++ * So we'll read both INTx and MSI status
++ */
++
++ chained_irq_enter(chip, desc);
++
++ /* read INTx status */
++ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
++ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ intr_status = val & mask;
++
++ /* Handle INTx */
++ if (intr_status & PAB_INTP_INTX_MASK) {
++ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
++ shifted_status &= PAB_INTP_INTX_MASK;
++ shifted_status >>= PAB_INTX_START;
++ do {
++ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
++ virq = irq_find_mapping(pcie->rp.intx_domain,
++ bit + 1);
++ if (virq)
++ generic_handle_irq(virq);
++ else
++ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
++ bit);
++
++ /* clear interrupt handled */
++ csr_writel(pcie, 1 << (PAB_INTX_START + bit),
++ PAB_INTP_AMBA_MISC_STAT);
++ }
++
++ shifted_status = csr_readl(pcie,
++ PAB_INTP_AMBA_MISC_STAT);
++ shifted_status &= PAB_INTP_INTX_MASK;
++ shifted_status >>= PAB_INTX_START;
++ } while (shifted_status != 0);
++ }
++
++ /* read extra MSI status register */
++ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
++
++ /* handle MSI interrupts */
++ while (msi_status & 1) {
++ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
++
++ /*
++ * MSI_STATUS_OFFSET register gets updated to zero
++ * once we pop not only the MSI data but also address
++ * from MSI hardware FIFO. So keeping these following
++ * two dummy reads.
++ */
++ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
++ MSI_ADDR_L_OFFSET);
++ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
++ MSI_ADDR_H_OFFSET);
++ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
++ msi_data, msi_addr_hi, msi_addr_lo);
++
++ virq = irq_find_mapping(msi->dev_domain, msi_data);
++ if (virq)
++ generic_handle_irq(virq);
++
++ msi_status = readl_relaxed(pcie->apb_csr_base +
++ MSI_STATUS_OFFSET);
++ }
++
++ /* Clear the interrupt status */
++ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
++ chained_irq_exit(chip, desc);
++}
++
++static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct platform_device *pdev = pcie->pdev;
++ struct device_node *node = dev->of_node;
++ struct resource *res;
++
++ /* map config resource */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "config_axi_slave");
++ pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->rp.config_axi_slave_base))
++ return PTR_ERR(pcie->rp.config_axi_slave_base);
++ pcie->rp.ob_io_res = res;
++
++ /* map csr resource */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "csr_axi_slave");
++ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->csr_axi_slave_base))
++ return PTR_ERR(pcie->csr_axi_slave_base);
++ pcie->pcie_reg_base = res->start;
++
++ /* read the number of windows requested */
++ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
++ pcie->apio_wins = MAX_PIO_WINDOWS;
++
++ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
++ pcie->ppio_wins = MAX_PIO_WINDOWS;
++
++ return 0;
++}
++
++static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
++{
++ phys_addr_t msg_addr = pcie->pcie_reg_base;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ msi->num_of_vectors = PCI_NUM_MSI;
++ msi->msi_pages_phys = (phys_addr_t)msg_addr;
++
++ writel_relaxed(lower_32_bits(msg_addr),
++ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
++ writel_relaxed(upper_32_bits(msg_addr),
++ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
++ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
++ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
++}
++
++int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
++{
++ u32 value, pab_ctrl, type;
++ struct resource_entry *win;
++ int i;
++
++ /* Disable all inbound/outbound windows */
++ for (i = 0; i < pcie->apio_wins; i++)
++ mobiveil_pcie_disable_ob_win(pcie, i);
++ for (i = 0; i < pcie->ppio_wins; i++)
++ mobiveil_pcie_disable_ib_win(pcie, i);
++
++ pcie->ib_wins_configured = 0;
++ pcie->ob_wins_configured = 0;
++
++ if (!reinit) {
++ /* setup bus numbers */
++ value = csr_readl(pcie, PCI_PRIMARY_BUS);
++ value &= 0xff000000;
++ value |= 0x00ff0100;
++ csr_writel(pcie, value, PCI_PRIMARY_BUS);
++ }
++
++ /*
++ * Enable I/O space, memory space and bus mastering in the
++ * Command register of the PAB Config Space
++ */
++ value = csr_readl(pcie, PCI_COMMAND);
++ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
++ csr_writel(pcie, value, PCI_COMMAND);
++
++ /*
++ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
++ * register
++ */
++ pab_ctrl = csr_readl(pcie, PAB_CTRL);
++ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
++ csr_writel(pcie, pab_ctrl, PAB_CTRL);
++
++ /*
++ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
++ * PAB_AXI_PIO_CTRL Register
++ */
++ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
++ value |= APIO_EN_MASK;
++ csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
++
++ /* Enable PCIe PIO master */
++ value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
++ value |= 1 << PIO_ENABLE_SHIFT;
++ csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
++
++ /*
++ * We'll program one outbound window for config reads and
++ * another default inbound window for all the upstream traffic;
++ * the rest of the outbound windows will be configured according
++ * to the "ranges" property defined in the device tree.
++ */
++
++ /* config outbound translation window */
++ program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
++ CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
++
++ /* memory inbound translation window */
++ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
++
++ /* Get the I/O and memory ranges from DT */
++ resource_list_for_each_entry(win, pcie->resources) {
++ if (resource_type(win->res) == IORESOURCE_MEM) {
++ type = MEM_WINDOW_TYPE;
++ } else if (resource_type(win->res) == IORESOURCE_IO) {
++ type = IO_WINDOW_TYPE;
++ } else if (resource_type(win->res) == IORESOURCE_BUS) {
++ pcie->rp.root_bus_nr = win->res->start;
++ continue;
++ } else {
++ continue;
++ }
++
++ /* configure outbound translation window */
++ program_ob_windows(pcie, pcie->ob_wins_configured,
++ win->res->start,
++ win->res->start - win->offset,
++ type, resource_size(win->res));
++ }
++
++ /* fixup for PCIe class register */
++ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
++ value &= 0xff;
++ value |= (PCI_CLASS_BRIDGE_PCI << 16);
++ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
++
++ return 0;
++}
++
++static void mobiveil_mask_intx_irq(struct irq_data *data)
++{
++ struct irq_desc *desc = irq_to_desc(data->irq);
++ struct mobiveil_pcie *pcie;
++ unsigned long flags;
++ u32 mask, shifted_val;
++
++ pcie = irq_desc_get_chip_data(desc);
++ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
++ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
++ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ shifted_val &= ~mask;
++ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
++ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
++}
++
++static void mobiveil_unmask_intx_irq(struct irq_data *data)
++{
++ struct irq_desc *desc = irq_to_desc(data->irq);
++ struct mobiveil_pcie *pcie;
++ unsigned long flags;
++ u32 shifted_val, mask;
++
++ pcie = irq_desc_get_chip_data(desc);
++ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
++ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
++ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ shifted_val |= mask;
++ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
++ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
++}
++
++static struct irq_chip intx_irq_chip = {
++ .name = "mobiveil_pcie:intx",
++ .irq_enable = mobiveil_unmask_intx_irq,
++ .irq_disable = mobiveil_mask_intx_irq,
++ .irq_mask = mobiveil_mask_intx_irq,
++ .irq_unmask = mobiveil_unmask_intx_irq,
++};
++
++ /* routine to set up the INTx-related IRQ data */
++static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
++ irq_hw_number_t hwirq)
++{
++ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
++ irq_set_chip_data(irq, domain->host_data);
++
++ return 0;
++}
++
++/* INTx domain operations structure */
++static const struct irq_domain_ops intx_domain_ops = {
++ .map = mobiveil_pcie_intx_map,
++};
++
++static struct irq_chip mobiveil_msi_irq_chip = {
++ .name = "Mobiveil PCIe MSI",
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++};
++
++static struct msi_domain_info mobiveil_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
++ MSI_FLAG_PCI_MSIX),
++ .chip = &mobiveil_msi_irq_chip,
++};
++
++static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
++{
++ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
++ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
++
++ msg->address_lo = lower_32_bits(addr);
++ msg->address_hi = upper_32_bits(addr);
++ msg->data = data->hwirq;
++
++ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
++ (int)data->hwirq, msg->address_hi, msg->address_lo);
++}
++
++static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
++ const struct cpumask *mask, bool force)
++{
++ return -EINVAL;
++}
++
++static struct irq_chip mobiveil_msi_bottom_irq_chip = {
++ .name = "Mobiveil MSI",
++ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
++ .irq_set_affinity = mobiveil_msi_set_affinity,
++};
++
++static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
++ unsigned int virq,
++ unsigned int nr_irqs, void *args)
++{
++ struct mobiveil_pcie *pcie = domain->host_data;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++ unsigned long bit;
++
++ WARN_ON(nr_irqs != 1);
++ mutex_lock(&msi->lock);
++
++ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
++ if (bit >= msi->num_of_vectors) {
++ mutex_unlock(&msi->lock);
++ return -ENOSPC;
++ }
++
++ set_bit(bit, msi->msi_irq_in_use);
++
++ mutex_unlock(&msi->lock);
++
++ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
++ domain->host_data, handle_level_irq, NULL, NULL);
++ return 0;
++}
++
++static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
++ unsigned int virq,
++ unsigned int nr_irqs)
++{
++ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
++ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ mutex_lock(&msi->lock);
++
++ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
++ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
++ d->hwirq);
++ else
++ __clear_bit(d->hwirq, msi->msi_irq_in_use);
++
++ mutex_unlock(&msi->lock);
++}
++static const struct irq_domain_ops msi_domain_ops = {
++ .alloc = mobiveil_irq_msi_domain_alloc,
++ .free = mobiveil_irq_msi_domain_free,
++};
++
++static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ mutex_init(&msi->lock);
++ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
++ &msi_domain_ops, pcie);
++ if (!msi->dev_domain) {
++ dev_err(dev, "failed to create IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
++ &mobiveil_msi_domain_info,
++ msi->dev_domain);
++ if (!msi->msi_domain) {
++ dev_err(dev, "failed to create MSI domain\n");
++ irq_domain_remove(msi->dev_domain);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *node = dev->of_node;
++ int ret;
++
++ /* setup INTx */
++ pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
++ &intx_domain_ops, pcie);
++
++ if (!pcie->rp.intx_domain) {
++ dev_err(dev, "Failed to get a INTx IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ raw_spin_lock_init(&pcie->rp.intx_mask_lock);
++
++ /* setup MSI */
++ ret = mobiveil_allocate_msi_domains(pcie);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct resource *res;
++ int ret;
++
++ if (pcie->rp.ops->interrupt_init)
++ return pcie->rp.ops->interrupt_init(pcie);
++
++ /* map MSI config resource */
++ res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
++ "apb_csr");
++ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->apb_csr_base))
++ return PTR_ERR(pcie->apb_csr_base);
++
++ /* setup MSI hardware registers */
++ mobiveil_pcie_enable_msi(pcie);
++
++ pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
++ if (pcie->rp.irq <= 0) {
++ dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
++ return -ENODEV;
++ }
++
++ /* initialize the IRQ domains */
++ ret = mobiveil_pcie_init_irq_domain(pcie);
++ if (ret) {
++ dev_err(dev, "Failed creating IRQ Domain\n");
++ return ret;
++ }
++
++ irq_set_chained_handler_and_data(pcie->rp.irq,
++ mobiveil_pcie_isr, pcie);
++
++ /* Enable interrupts */
++ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
++ PAB_INTP_AMBA_MISC_ENB);
++
++ return 0;
++}
++
++int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
++{
++ struct pci_bus *bus;
++ struct pci_bus *child;
++ struct pci_host_bridge *bridge;
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *np = dev->of_node;
++ resource_size_t iobase;
++ int ret;
++
++ ret = mobiveil_pcie_parse_dt(pcie);
++ if (ret) {
++ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
++ return ret;
++ }
++
++ /* allocate the PCIe port */
++ bridge = devm_pci_alloc_host_bridge(dev, 0);
++ if (!bridge)
++ return -ENOMEM;
++
++ /* parse the host bridge base addresses from the device tree file */
++ ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
++ &bridge->windows, &iobase);
++ if (ret) {
++ dev_err(dev, "Getting bridge resources failed\n");
++ return ret;
++ }
++
++ pcie->resources = &bridge->windows;
++
++ /*
++ * configure all inbound and outbound windows and prepare the RC for
++ * config access
++ */
++ ret = mobiveil_host_init(pcie, false);
++ if (ret) {
++ dev_err(dev, "Failed to initialize host\n");
++ goto error;
++ }
++
++ ret = mobiveil_pcie_interrupt_init(pcie);
++ if (ret) {
++ dev_err(dev, "Interrupt init failed\n");
++ goto error;
++ }
++
++ ret = devm_request_pci_bus_resources(dev, pcie->resources);
++ if (ret)
++ goto error;
++
++ /* Initialize bridge */
++ bridge->dev.parent = dev;
++ bridge->sysdata = pcie;
++ bridge->busnr = pcie->rp.root_bus_nr;
++ bridge->ops = &mobiveil_pcie_ops;
++ bridge->map_irq = of_irq_parse_and_map_pci;
++ bridge->swizzle_irq = pci_common_swizzle;
++
++ ret = mobiveil_bringup_link(pcie);
++ if (ret) {
++ dev_info(dev, "link bring-up failed\n");
++ }
++
++ /* setup the kernel resources for the newly added PCIe root bus */
++ ret = pci_scan_root_bus_bridge(bridge);
++ if (ret)
++ goto error;
++
++ bus = bridge->bus;
++
++ pci_assign_unassigned_bus_resources(bus);
++ list_for_each_entry(child, &bus->children, node)
++ pcie_bus_configure_settings(child);
++ pci_bus_add_devices(bus);
++
++ return 0;
++error:
++ pci_free_resource_list(pcie->resources);
++ return ret;
++}
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-plat.c
+@@ -0,0 +1,54 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_pci.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++
++#include "pcie-mobiveil.h"
++
++static int mobiveil_pcie_probe(struct platform_device *pdev)
++{
++ struct mobiveil_pcie *pcie;
++ struct device *dev = &pdev->dev;
++
++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ pcie->pdev = pdev;
++
++ return mobiveil_pcie_host_probe(pcie);
++}
++
++static const struct of_device_id mobiveil_pcie_of_match[] = {
++ {.compatible = "mbvl,gpex40-pcie",},
++ {},
++};
++
++MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
++
++static struct platform_driver mobiveil_pcie_driver = {
++ .probe = mobiveil_pcie_probe,
++ .driver = {
++ .name = "mobiveil-pcie",
++ .of_match_table = mobiveil_pcie_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++builtin_platform_driver(mobiveil_pcie_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
++MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil.c
+@@ -0,0 +1,334 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */