kernel: bump 4.9 to 4.9.130
target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch
1 From b2ee6e29bad31facbbf5ac1ce98235ac163d9fa9 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 5 Jul 2018 16:26:47 +0800
4 Subject: [PATCH 08/32] pci: support layerscape
5
6 This is an integrated patch for Layerscape PCIe support.
7
8 Signed-off-by: Po Liu <po.liu@nxp.com>
9 Signed-off-by: Liu Gang <Gang.Liu@nxp.com>
10 Signed-off-by: Minghuan Lian <Minghuan.Lian@freescale.com>
11 Signed-off-by: hongbo.wang <hongbo.wang@nxp.com>
12 Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
13 Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
14 Signed-off-by: Mingkai Hu <mingkai.hu@nxp.com>
15 Signed-off-by: Christoph Hellwig <hch@lst.de>
16 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
17 ---
18 drivers/irqchip/irq-ls-scfg-msi.c | 257 ++++++-
19 drivers/pci/host/Makefile | 2 +-
20 drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++
21 drivers/pci/host/pci-layerscape-ep.c | 309 ++++++++
22 drivers/pci/host/pci-layerscape-ep.h | 115 +++
23 drivers/pci/host/pci-layerscape.c | 48 +-
24 drivers/pci/host/pcie-designware.c | 6 +
25 drivers/pci/host/pcie-designware.h | 1 +
26 drivers/pci/pci.c | 2 +-
27 drivers/pci/pcie/portdrv_core.c | 181 ++---
28 drivers/pci/quirks.c | 15 +
29 include/linux/pci.h | 1 +
30 12 files changed, 1546 insertions(+), 149 deletions(-)
31 create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c
32 create mode 100644 drivers/pci/host/pci-layerscape-ep.c
33 create mode 100644 drivers/pci/host/pci-layerscape-ep.h
34
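[Editor's note, not part of the patch: a quick orientation on the hwirq numbering scheme the MSI rework below introduces. Each hardware IRQ number is the interrupt bit select (ibs) shifted left by a per-SoC ibs_shift, OR'ed with the shared-interrupt-register select (srs); the MSI message data carries that hwirq, plus the target CPU when affinity steering is enabled. The constants below mirror the ls1046 configuration and the sketch is illustrative only.]

```c
/* Stand-alone sketch of the hwirq encoding used by the reworked
 * irq-ls-scfg-msi.c; values mirror ls1046_msi_cfg (ibs_shift = 2). */
#include <stdio.h>

#define IBS_SHIFT 2                      /* ls1046_msi_cfg.ibs_shift */
#define SRS_MASK  ((1u << IBS_SHIFT) - 1)

static unsigned int hwirq_encode(unsigned int ibs, unsigned int srs)
{
	return (ibs << IBS_SHIFT) | srs;
}

int main(void)
{
	unsigned int hwirq = hwirq_encode(5, 1);

	/* msg->data is just the hwirq (plus the first CPU of the
	 * affinity mask when msi_affinity_flag is set). */
	printf("hwirq=%u ibs=%u srs=%u\n",
	       hwirq, hwirq >> IBS_SHIFT, hwirq & SRS_MASK);
	return 0;
}
```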
35 --- a/drivers/irqchip/irq-ls-scfg-msi.c
36 +++ b/drivers/irqchip/irq-ls-scfg-msi.c
37 @@ -17,13 +17,32 @@
38 #include <linux/irq.h>
39 #include <linux/irqchip/chained_irq.h>
40 #include <linux/irqdomain.h>
41 +#include <linux/of_irq.h>
42 #include <linux/of_pci.h>
43 #include <linux/of_platform.h>
44 #include <linux/spinlock.h>
45
46 -#define MSI_MAX_IRQS 32
47 -#define MSI_IBS_SHIFT 3
48 -#define MSIR 4
49 +#define MSI_IRQS_PER_MSIR 32
50 +#define MSI_MSIR_OFFSET 4
51 +
52 +#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
53 +#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
54 +
55 +struct ls_scfg_msi_cfg {
56 + u32 ibs_shift; /* Shift of interrupt bit select */
57 + u32 msir_irqs; /* The irq number per MSIR */
58 + u32 msir_base; /* The base address of MSIR */
59 +};
60 +
61 +struct ls_scfg_msir {
62 + struct ls_scfg_msi *msi_data;
63 + unsigned int index;
64 + unsigned int gic_irq;
65 + unsigned int bit_start;
66 + unsigned int bit_end;
67 + unsigned int srs; /* Shared interrupt register select */
68 + void __iomem *reg;
69 +};
70
71 struct ls_scfg_msi {
72 spinlock_t lock;
73 @@ -32,8 +51,11 @@ struct ls_scfg_msi {
74 struct irq_domain *msi_domain;
75 void __iomem *regs;
76 phys_addr_t msiir_addr;
77 - int irq;
78 - DECLARE_BITMAP(used, MSI_MAX_IRQS);
79 + struct ls_scfg_msi_cfg *cfg;
80 + u32 msir_num;
81 + struct ls_scfg_msir *msir;
82 + u32 irqs_num;
83 + unsigned long *used;
84 };
85
86 static struct irq_chip ls_scfg_msi_irq_chip = {
87 @@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_ms
88 .chip = &ls_scfg_msi_irq_chip,
89 };
90
91 +static int msi_affinity_flag = 1;
92 +
93 +static int __init early_parse_ls_scfg_msi(char *p)
94 +{
95 + if (p && strncmp(p, "no-affinity", 11) == 0)
96 + msi_affinity_flag = 0;
97 + else
98 + msi_affinity_flag = 1;
99 +
100 + return 0;
101 +}
102 +early_param("lsmsi", early_parse_ls_scfg_msi);
103 +
104 static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
105 {
106 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
107
108 msg->address_hi = upper_32_bits(msi_data->msiir_addr);
109 msg->address_lo = lower_32_bits(msi_data->msiir_addr);
110 - msg->data = data->hwirq << MSI_IBS_SHIFT;
111 + msg->data = data->hwirq;
112 +
113 + if (msi_affinity_flag)
114 + msg->data |= cpumask_first(data->common->affinity);
115 }
116
117 static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
118 const struct cpumask *mask, bool force)
119 {
120 - return -EINVAL;
121 + struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
122 + u32 cpu;
123 +
124 + if (!msi_affinity_flag)
125 + return -EINVAL;
126 +
127 + if (!force)
128 + cpu = cpumask_any_and(mask, cpu_online_mask);
129 + else
130 + cpu = cpumask_first(mask);
131 +
132 + if (cpu >= msi_data->msir_num)
133 + return -EINVAL;
134 +
135 + if (msi_data->msir[cpu].gic_irq <= 0) {
136 + pr_warn("cannot bind the irq to cpu%d\n", cpu);
137 + return -EINVAL;
138 + }
139 +
140 + cpumask_copy(irq_data->common->affinity, mask);
141 +
142 + return IRQ_SET_MASK_OK;
143 }
144
145 static struct irq_chip ls_scfg_msi_parent_chip = {
146 @@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(
147 WARN_ON(nr_irqs != 1);
148
149 spin_lock(&msi_data->lock);
150 - pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
151 - if (pos < MSI_MAX_IRQS)
152 + pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
153 + if (pos < msi_data->irqs_num)
154 __set_bit(pos, msi_data->used);
155 else
156 err = -ENOSPC;
157 @@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(
158 int pos;
159
160 pos = d->hwirq;
161 - if (pos < 0 || pos >= MSI_MAX_IRQS) {
162 + if (pos < 0 || pos >= msi_data->irqs_num) {
163 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
164 return;
165 }
166 @@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_sc
167
168 static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
169 {
170 - struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
171 + struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
172 + struct ls_scfg_msi *msi_data = msir->msi_data;
173 unsigned long val;
174 - int pos, virq;
175 + int pos, size, virq, hwirq;
176
177 chained_irq_enter(irq_desc_get_chip(desc), desc);
178
179 - val = ioread32be(msi_data->regs + MSIR);
180 - for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
181 - virq = irq_find_mapping(msi_data->parent, (31 - pos));
182 + val = ioread32be(msir->reg);
183 +
184 + pos = msir->bit_start;
185 + size = msir->bit_end + 1;
186 +
187 + for_each_set_bit_from(pos, &val, size) {
188 + hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
189 + msir->srs;
190 + virq = irq_find_mapping(msi_data->parent, hwirq);
191 if (virq)
192 generic_handle_irq(virq);
193 }
194 @@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(stru
195 {
196 /* Initialize MSI domain parent */
197 msi_data->parent = irq_domain_add_linear(NULL,
198 - MSI_MAX_IRQS,
199 + msi_data->irqs_num,
200 &ls_scfg_msi_domain_ops,
201 msi_data);
202 if (!msi_data->parent) {
203 @@ -164,16 +230,118 @@ static int ls_scfg_msi_domains_init(stru
204 return 0;
205 }
206
207 +static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
208 +{
209 + struct ls_scfg_msir *msir;
210 + int virq, i, hwirq;
211 +
212 + virq = platform_get_irq(msi_data->pdev, index);
213 + if (virq <= 0)
214 + return -ENODEV;
215 +
216 + msir = &msi_data->msir[index];
217 + msir->index = index;
218 + msir->msi_data = msi_data;
219 + msir->gic_irq = virq;
220 + msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
221 +
222 + if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
223 + msir->bit_start = 32 - ((msir->index + 1) *
224 + MSI_LS1043V1_1_IRQS_PER_MSIR);
225 + msir->bit_end = msir->bit_start +
226 + MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
227 + } else {
228 + msir->bit_start = 0;
229 + msir->bit_end = msi_data->cfg->msir_irqs - 1;
230 + }
231 +
232 + irq_set_chained_handler_and_data(msir->gic_irq,
233 + ls_scfg_msi_irq_handler,
234 + msir);
235 +
236 + if (msi_affinity_flag) {
237 + /* Associate MSIR interrupt to the cpu */
238 + irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
239 + msir->srs = 0; /* This value is determined by the CPU */
240 + } else
241 + msir->srs = index;
242 +
243 + /* Release the hwirqs corresponding to this MSIR */
244 + if (!msi_affinity_flag || msir->index == 0) {
245 + for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
246 + hwirq = i << msi_data->cfg->ibs_shift | msir->index;
247 + bitmap_clear(msi_data->used, hwirq, 1);
248 + }
249 + }
250 +
251 + return 0;
252 +}
253 +
254 +static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
255 +{
256 + struct ls_scfg_msi *msi_data = msir->msi_data;
257 + int i, hwirq;
258 +
259 + if (msir->gic_irq > 0)
260 + irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
261 +
262 + for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
263 + hwirq = i << msi_data->cfg->ibs_shift | msir->index;
264 + bitmap_set(msi_data->used, hwirq, 1);
265 + }
266 +
267 + return 0;
268 +}
269 +
270 +static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
271 + .ibs_shift = 3,
272 + .msir_irqs = MSI_IRQS_PER_MSIR,
273 + .msir_base = MSI_MSIR_OFFSET,
274 +};
275 +
276 +static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
277 + .ibs_shift = 2,
278 + .msir_irqs = MSI_IRQS_PER_MSIR,
279 + .msir_base = MSI_MSIR_OFFSET,
280 +};
281 +
282 +static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
283 + .ibs_shift = 2,
284 + .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
285 + .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
286 +};
287 +
288 +static const struct of_device_id ls_scfg_msi_id[] = {
289 + /* The following two misspelled compatibles are obsolete */
290 + { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
291 + { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
292 +
293 + { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
294 + { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
295 + { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
296 + { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
297 + { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
298 + {},
299 +};
300 +MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
301 +
302 static int ls_scfg_msi_probe(struct platform_device *pdev)
303 {
304 + const struct of_device_id *match;
305 struct ls_scfg_msi *msi_data;
306 struct resource *res;
307 - int ret;
308 + int i, ret;
309 +
310 + match = of_match_device(ls_scfg_msi_id, &pdev->dev);
311 + if (!match)
312 + return -ENODEV;
313
314 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
315 if (!msi_data)
316 return -ENOMEM;
317
318 + msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
319 +
320 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
321 msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
322 if (IS_ERR(msi_data->regs)) {
323 @@ -182,23 +350,48 @@ static int ls_scfg_msi_probe(struct plat
324 }
325 msi_data->msiir_addr = res->start;
326
327 - msi_data->irq = platform_get_irq(pdev, 0);
328 - if (msi_data->irq <= 0) {
329 - dev_err(&pdev->dev, "failed to get MSI irq\n");
330 - return -ENODEV;
331 - }
332 -
333 msi_data->pdev = pdev;
334 spin_lock_init(&msi_data->lock);
335
336 + msi_data->irqs_num = MSI_IRQS_PER_MSIR *
337 + (1 << msi_data->cfg->ibs_shift);
338 + msi_data->used = devm_kcalloc(&pdev->dev,
339 + BITS_TO_LONGS(msi_data->irqs_num),
340 + sizeof(*msi_data->used),
341 + GFP_KERNEL);
342 + if (!msi_data->used)
343 + return -ENOMEM;
344 + /*
345 + * Reserve all the hwirqs
346 + * The available hwirqs will be released in ls_scfg_msi_setup_hwirq()
347 + */
348 + bitmap_set(msi_data->used, 0, msi_data->irqs_num);
349 +
350 + msi_data->msir_num = of_irq_count(pdev->dev.of_node);
351 +
352 + if (msi_affinity_flag) {
353 + u32 cpu_num;
354 +
355 + cpu_num = num_possible_cpus();
356 + if (msi_data->msir_num >= cpu_num)
357 + msi_data->msir_num = cpu_num;
358 + else
359 + msi_affinity_flag = 0;
360 + }
361 +
362 + msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
363 + sizeof(*msi_data->msir),
364 + GFP_KERNEL);
365 + if (!msi_data->msir)
366 + return -ENOMEM;
367 +
368 + for (i = 0; i < msi_data->msir_num; i++)
369 + ls_scfg_msi_setup_hwirq(msi_data, i);
370 +
371 ret = ls_scfg_msi_domains_init(msi_data);
372 if (ret)
373 return ret;
374
375 - irq_set_chained_handler_and_data(msi_data->irq,
376 - ls_scfg_msi_irq_handler,
377 - msi_data);
378 -
379 platform_set_drvdata(pdev, msi_data);
380
381 return 0;
382 @@ -207,8 +400,10 @@ static int ls_scfg_msi_probe(struct plat
383 static int ls_scfg_msi_remove(struct platform_device *pdev)
384 {
385 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
386 + int i;
387
388 - irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
389 + for (i = 0; i < msi_data->msir_num; i++)
390 + ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
391
392 irq_domain_remove(msi_data->msi_domain);
393 irq_domain_remove(msi_data->parent);
394 @@ -218,12 +413,6 @@ static int ls_scfg_msi_remove(struct pla
395 return 0;
396 }
397
398 -static const struct of_device_id ls_scfg_msi_id[] = {
399 - { .compatible = "fsl,1s1021a-msi", },
400 - { .compatible = "fsl,1s1043a-msi", },
401 - {},
402 -};
403 -
404 static struct platform_driver ls_scfg_msi_driver = {
405 .driver = {
406 .name = "ls-scfg-msi",
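[Editor's note, not part of the patch: a compact user-space model of the bitmap discipline in the rework above may clarify the flow. probe() reserves every hwirq up front, ls_scfg_msi_setup_hwirq() releases only the hwirqs an MSIR can actually deliver, and the domain alloc hands out free bits under a lock. Sizes below are hypothetical, assuming ibs_shift = 2.]

```c
/* Stand-alone sketch of the reserve-all-then-release-per-MSIR bitmap
 * discipline; a plain bool array stands in for the kernel bitmap. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define IRQS_NUM 128                /* MSI_IRQS_PER_MSIR << ibs_shift */
static bool used[IRQS_NUM];

static void reserve_all(void)
{
	memset(used, true, sizeof(used));    /* bitmap_set() in probe */
}

static void release_msir(int index, int msir_irqs, int ibs_shift)
{
	/* Mirrors ls_scfg_msi_setup_hwirq(): free only the hwirqs this
	 * MSIR can deliver. */
	for (int i = 0; i < msir_irqs; i++)
		used[(i << ibs_shift) | index] = false;
}

static int alloc_hwirq(void)
{
	for (int pos = 0; pos < IRQS_NUM; pos++)
		if (!used[pos]) {
			used[pos] = true;
			return pos;
		}
	return -1;                          /* -ENOSPC in the driver */
}

int main(void)
{
	reserve_all();
	release_msir(0, 32, 2);
	printf("first hwirq: %d\n", alloc_hwirq());   /* prints 0 */
	return 0;
}
```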
407 --- a/drivers/pci/host/Makefile
408 +++ b/drivers/pci/host/Makefile
409 @@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx
410 obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
411 obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
412 obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
413 -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
414 +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o pci-layerscape-ep-debugfs.o
415 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
416 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
417 obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
418 --- /dev/null
419 +++ b/drivers/pci/host/pci-layerscape-ep-debugfs.c
420 @@ -0,0 +1,758 @@
421 +/*
422 + * PCIe Endpoint driver for Freescale Layerscape SoCs
423 + *
424 + * Copyright (C) 2015 Freescale Semiconductor.
425 + *
426 + * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
427 + *
428 + * This program is free software; you can redistribute it and/or modify
429 + * it under the terms of the GNU General Public License version 2 as
430 + * published by the Free Software Foundation.
431 + */
432 +
433 +#include <linux/kernel.h>
434 +#include <linux/module.h>
435 +#include <linux/debugfs.h>
436 +#include <linux/time.h>
437 +#include <linux/uaccess.h>
438 +#include <linux/kthread.h>
439 +#include <linux/slab.h>
440 +#include <linux/dmaengine.h>
441 +#include <linux/dma-mapping.h>
442 +#include <linux/freezer.h>
443 +
444 +#include <linux/completion.h>
445 +
446 +#include "pci-layerscape-ep.h"
447 +
448 +#define PCIE_ATU_INDEX3 (0x3 << 0)
449 +#define PCIE_ATU_INDEX2 (0x2 << 0)
450 +#define PCIE_ATU_INDEX1 (0x1 << 0)
451 +#define PCIE_ATU_INDEX0 (0x0 << 0)
452 +
453 +#define PCIE_BAR0_SIZE (4 * 1024) /* 4K */
454 +#define PCIE_BAR1_SIZE (8 * 1024) /* 8K for MSIX */
455 +#define PCIE_BAR2_SIZE (4 * 1024) /* 4K */
456 +#define PCIE_BAR4_SIZE (1 * 1024 * 1024) /* 1M */
457 +#define PCIE_MSI_OB_SIZE (4 * 1024) /* 4K */
458 +
459 +#define PCIE_MSI_MSG_ADDR_OFF 0x54
460 +#define PCIE_MSI_MSG_DATA_OFF 0x5c
461 +
462 +enum test_type {
463 + TEST_TYPE_DMA,
464 + TEST_TYPE_MEMCPY
465 +};
466 +
467 +enum test_dirt {
468 + TEST_DIRT_READ,
469 + TEST_DIRT_WRITE
470 +};
471 +
472 +enum test_status {
473 + TEST_IDLE,
474 + TEST_BUSY
475 +};
476 +
477 +struct ls_ep_test {
478 + struct ls_ep_dev *ep;
479 + void __iomem *cfg;
480 + void __iomem *buf;
481 + void __iomem *out;
482 + void __iomem *msi;
483 + dma_addr_t cfg_addr;
484 + dma_addr_t buf_addr;
485 + dma_addr_t out_addr;
486 + dma_addr_t bus_addr;
487 + dma_addr_t msi_addr;
488 + u64 msi_msg_addr;
489 + u16 msi_msg_data;
490 + struct task_struct *thread;
491 + spinlock_t lock;
492 + struct completion done;
493 + u32 len;
494 + int loop;
495 + char data;
496 + enum test_dirt dirt;
497 + enum test_type type;
498 + enum test_status status;
499 + u64 result; /* Mbps */
500 + char cmd[256];
501 +};
502 +
503 +static int ls_pcie_ep_trigger_msi(struct ls_ep_test *test)
504 +{
505 + if (!test->msi)
506 + return -EINVAL;
507 +
508 + iowrite32(test->msi_msg_data, test->msi);
509 +
510 + return 0;
511 +}
512 +
513 +static int ls_pcie_ep_test_try_run(struct ls_ep_test *test)
514 +{
515 + int ret;
516 +
517 + spin_lock(&test->lock);
518 + if (test->status == TEST_IDLE) {
519 + test->status = TEST_BUSY;
520 + ret = 0;
521 + } else
522 + ret = -EBUSY;
523 + spin_unlock(&test->lock);
524 +
525 + return ret;
526 +}
527 +
528 +static void ls_pcie_ep_test_done(struct ls_ep_test *test)
529 +{
530 + spin_lock(&test->lock);
531 + test->status = TEST_IDLE;
532 + spin_unlock(&test->lock);
533 +}
534 +
535 +static void ls_pcie_ep_test_dma_cb(void *arg)
536 +{
537 + struct ls_ep_test *test = arg;
538 +
539 + complete(&test->done);
540 +}
541 +
542 +static int ls_pcie_ep_test_dma(struct ls_ep_test *test)
543 +{
544 + dma_cap_mask_t mask;
545 + struct dma_chan *chan;
546 + struct dma_device *dma_dev;
547 + dma_addr_t src, dst;
548 + enum dma_data_direction direction;
549 + enum dma_ctrl_flags dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
550 + struct timespec start, end, period;
551 + int i = 0;
552 +
553 + dma_cap_zero(mask);
554 + dma_cap_set(DMA_MEMCPY, mask);
555 +
556 + chan = dma_request_channel(mask, NULL, test);
557 + if (!chan) {
558 + pr_err("failed to request dma channel\n");
559 + return -EINVAL;
560 + }
561 +
562 + memset(test->buf, test->data, test->len);
563 +
564 + if (test->dirt == TEST_DIRT_WRITE) {
565 + src = test->buf_addr;
566 + dst = test->out_addr;
567 + direction = DMA_TO_DEVICE;
568 + } else {
569 + src = test->out_addr;
570 + dst = test->buf_addr;
571 + direction = DMA_FROM_DEVICE;
572 + }
573 +
574 + dma_dev = chan->device;
575 + dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
576 +
577 + dma_sync_single_for_device(&test->ep->dev, test->buf_addr,
578 + test->len, direction);
579 +
580 + set_freezable();
581 +
582 + getrawmonotonic(&start);
583 + while (!kthread_should_stop() && (i < test->loop)) {
584 + struct dma_async_tx_descriptor *dma_desc;
585 + dma_cookie_t dma_cookie = {0};
586 + unsigned long tmo;
587 + int status;
588 +
589 + init_completion(&test->done);
590 +
591 + dma_desc = dma_dev->device_prep_dma_memcpy(chan,
592 + dst, src,
593 + test->len,
594 + dma_flags);
595 + if (!dma_desc) {
596 + pr_err("DMA desc construction failed\n");
597 + goto _err;
598 + }
599 +
600 + dma_desc->callback = ls_pcie_ep_test_dma_cb;
601 + dma_desc->callback_param = test;
602 + dma_cookie = dmaengine_submit(dma_desc);
603 +
604 + if (dma_submit_error(dma_cookie)) {
605 + pr_err("DMA submit error....\n");
606 + goto _err;
607 + }
608 +
609 + /* Trigger the transaction */
610 + dma_async_issue_pending(chan);
611 +
612 + tmo = wait_for_completion_timeout(&test->done,
613 + msecs_to_jiffies(5 * test->len));
614 + if (tmo == 0) {
615 + pr_err("Self-test copy timed out, disabling\n");
616 + goto _err;
617 + }
618 +
619 + status = dma_async_is_tx_complete(chan, dma_cookie,
620 + NULL, NULL);
621 + if (status != DMA_COMPLETE) {
622 + pr_err("got completion callback, but status is %s\n",
623 + status == DMA_ERROR ? "error" : "in progress");
624 + goto _err;
625 + }
626 +
627 + i++;
628 + }
629 +
630 + getrawmonotonic(&end);
631 + period = timespec_sub(end, start);
632 + test->result = test->len * 8ULL * i * 1000;
633 + do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
634 + dma_release_channel(chan);
635 +
636 + return 0;
637 +
638 +_err:
639 + dma_release_channel(chan);
640 + test->result = 0;
641 + return -EINVAL;
642 +}
643 +
644 +static int ls_pcie_ep_test_cpy(struct ls_ep_test *test)
645 +{
646 + void *dst, *src;
647 + struct timespec start, end, period;
648 + int i = 0;
649 +
650 + memset(test->buf, test->data, test->len);
651 +
652 + if (test->dirt == TEST_DIRT_WRITE) {
653 + dst = test->out;
654 + src = test->buf;
655 + } else {
656 + dst = test->buf;
657 + src = test->out;
658 + }
659 +
660 + getrawmonotonic(&start);
661 + while (!kthread_should_stop() && i < test->loop) {
662 + memcpy(dst, src, test->len);
663 + i++;
664 + }
665 + getrawmonotonic(&end);
666 +
667 + period = timespec_sub(end, start);
668 + test->result = test->len * 8ULL * i * 1000;
669 + do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
670 +
671 + return 0;
672 +}
673 +
674 +int ls_pcie_ep_test_thread(void *arg)
675 +{
676 + int ret;
677 +
678 + struct ls_ep_test *test = arg;
679 +
680 + if (test->type == TEST_TYPE_DMA)
681 + ret = ls_pcie_ep_test_dma(test);
682 + else
683 + ret = ls_pcie_ep_test_cpy(test);
684 +
685 + if (ret) {
686 + pr_err("\n%s \ttest failed\n",
687 + test->cmd);
688 + test->result = 0;
689 + } else
690 + pr_err("\n%s \tthroughput:%lluMbps\n",
691 + test->cmd, test->result);
692 +
693 + ls_pcie_ep_test_done(test);
694 +
695 + ls_pcie_ep_trigger_msi(test);
696 +
697 + do_exit(0);
698 +}
699 +
700 +static int ls_pcie_ep_free_test(struct ls_ep_dev *ep)
701 +{
702 + struct ls_ep_test *test = ep->driver_data;
703 +
704 + if (!test)
705 + return 0;
706 +
707 + if (test->status == TEST_BUSY) {
708 + kthread_stop(test->thread);
709 + dev_info(&ep->dev,
710 + "test is running; please wait and run again\n");
711 + return -EBUSY;
712 + }
713 +
714 + if (test->buf)
715 + dma_free_coherent(ep->pcie->dev, PCIE_BAR4_SIZE,
716 + test->buf, test->buf_addr);
717 +
718 + if (test->cfg)
719 + free_pages((unsigned long)test->cfg,
720 + get_order(PCIE_BAR2_SIZE));
721 +
722 + if (test->out)
723 + iounmap(test->out);
724 +
725 + kfree(test);
726 + ep->driver_data = NULL;
727 +
728 + return 0;
729 +}
730 +
731 +static int ls_pcie_ep_init_test(struct ls_ep_dev *ep, u64 bus_addr)
732 +{
733 + struct ls_pcie *pcie = ep->pcie;
734 + struct ls_ep_test *test = ep->driver_data;
735 + int err;
736 +
737 + if (test) {
738 + dev_info(&ep->dev,
739 + "Please use 'free' to remove the existing test\n");
740 + return -EBUSY;
741 + }
742 +
743 + test = kzalloc(sizeof(*test), GFP_KERNEL);
744 + if (!test)
745 + return -ENOMEM;
746 + ep->driver_data = test;
747 + test->ep = ep;
748 + spin_lock_init(&test->lock);
749 + test->status = TEST_IDLE;
750 +
751 + test->buf = dma_alloc_coherent(pcie->dev, PCIE_BAR4_SIZE,
752 + &test->buf_addr,
753 + GFP_KERNEL);
754 + if (!test->buf) {
755 + dev_info(&ep->dev, "failed to get mem for bar4\n");
756 + err = -ENOMEM;
757 + goto _err;
758 + }
759 +
760 + test->cfg = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
761 + get_order(PCIE_BAR2_SIZE));
762 + if (!test->cfg) {
763 + dev_info(&ep->dev, "failed to get mem for bar2\n");
764 + err = -ENOMEM;
765 + goto _err;
766 + }
767 + test->cfg_addr = virt_to_phys(test->cfg);
768 +
769 + test->out_addr = pcie->out_base;
770 + test->out = ioremap(test->out_addr, PCIE_BAR4_SIZE);
771 + if (!test->out) {
772 + dev_info(&ep->dev, "failed to map out\n");
773 + err = -ENOMEM;
774 + goto _err;
775 + }
776 +
777 + test->bus_addr = bus_addr;
778 +
779 + test->msi_addr = test->out_addr + PCIE_BAR4_SIZE;
780 + test->msi = ioremap(test->msi_addr, PCIE_MSI_OB_SIZE);
781 + if (!test->msi)
782 + dev_info(&ep->dev, "failed to map MSI outbound region\n");
783 +
784 + test->msi_msg_addr = ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF) |
785 + (((u64)ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF + 4)) << 32);
786 + test->msi_msg_data = ioread16(pcie->dbi + PCIE_MSI_MSG_DATA_OFF);
787 +
788 + ls_pcie_ep_dev_cfg_enable(ep);
789 +
790 + /* outbound iATU for memory */
791 + ls_pcie_iatu_outbound_set(pcie, 0, PCIE_ATU_TYPE_MEM,
792 + test->out_addr, bus_addr, PCIE_BAR4_SIZE);
793 + /* outbound iATU for MSI */
794 + ls_pcie_iatu_outbound_set(pcie, 1, PCIE_ATU_TYPE_MEM,
795 + test->msi_addr, test->msi_msg_addr,
796 + PCIE_MSI_OB_SIZE);
797 +
798 + /* ATU 0 : INBOUND : map BAR0 */
799 + ls_pcie_iatu_inbound_set(pcie, 0, 0, test->cfg_addr);
800 + /* ATU 2 : INBOUND : map BAR2 */
801 + ls_pcie_iatu_inbound_set(pcie, 2, 2, test->cfg_addr);
802 + /* ATU 3 : INBOUND : map BAR4 */
803 + ls_pcie_iatu_inbound_set(pcie, 3, 4, test->buf_addr);
804 +
805 + return 0;
806 +
807 +_err:
808 + ls_pcie_ep_free_test(ep);
809 + return err;
810 +}
811 +
812 +static int ls_pcie_ep_start_test(struct ls_ep_dev *ep, char *cmd)
813 +{
814 + struct ls_ep_test *test = ep->driver_data;
815 + enum test_type type;
816 + enum test_dirt dirt;
817 + u32 cnt, len, loop;
818 + unsigned int data;
819 + char dirt_str[2];
820 + int ret = 0;
821 +
822 + if (strncmp(cmd, "dma", 3) == 0)
823 + type = TEST_TYPE_DMA;
824 + else
825 + type = TEST_TYPE_MEMCPY;
826 +
827 + cnt = sscanf(&cmd[4], "%1s %u %u %x", dirt_str, &len, &loop, &data);
828 + if (cnt != 4) {
829 + dev_info(&ep->dev, "format error %s\n", cmd);
830 + dev_info(&ep->dev, "dma/cpy <r/w> <packet_size> <loop> <data>\n");
831 + return -EINVAL;
832 + }
833 +
834 + if (strncmp(dirt_str, "r", 1) == 0)
835 + dirt = TEST_DIRT_READ;
836 + else
837 + dirt = TEST_DIRT_WRITE;
838 +
839 + if (len > PCIE_BAR4_SIZE) {
840 + dev_err(&ep->dev, "max len is %d", PCIE_BAR4_SIZE);
841 + return -EINVAL;
842 + }
843 +
844 + if (!test) {
845 + dev_err(&ep->dev, "Please first run init command\n");
846 + return -EINVAL;
847 + }
848 +
849 + if (ls_pcie_ep_test_try_run(test)) {
850 + dev_err(&ep->dev, "There is already a test running\n");
851 + return -EINVAL;
852 + }
853 +
854 + test->len = len;
855 + test->loop = loop;
856 + test->type = type;
857 + test->data = (char)data;
858 + test->dirt = dirt;
859 + strcpy(test->cmd, cmd);
860 + test->thread = kthread_run(ls_pcie_ep_test_thread, test,
861 + "pcie ep test");
862 + if (IS_ERR(test->thread)) {
863 + dev_err(&ep->dev, "fork failed for pcie ep test\n");
864 + ls_pcie_ep_test_done(test);
865 + ret = PTR_ERR(test->thread);
866 + }
867 +
868 + return ret;
869 +}
870 +
871 +
872 +/**
873 + * ls_pcie_reg_ops_read - read for regs data
874 + * @filp: the opened file
875 + * @buffer: where to write the data for the user to read
876 + * @count: the size of the user's buffer
877 + * @ppos: file position offset
878 + **/
879 +static ssize_t ls_pcie_ep_dbg_regs_read(struct file *filp, char __user *buffer,
880 + size_t count, loff_t *ppos)
881 +{
882 + struct ls_ep_dev *ep = filp->private_data;
883 + struct ls_pcie *pcie = ep->pcie;
884 + char *buf;
885 + int desc = 0, i, len;
886 +
887 + buf = kmalloc(4 * 1024, GFP_KERNEL);
888 + if (!buf)
889 + return -ENOMEM;
890 +
891 + ls_pcie_ep_dev_cfg_enable(ep);
892 +
893 + desc += sprintf(buf + desc, "%s", "reg info:");
894 + for (i = 0; i < 0x200; i += 4) {
895 + if (i % 16 == 0)
896 + desc += sprintf(buf + desc, "\n%08x:", i);
897 + desc += sprintf(buf + desc, " %08x", readl(pcie->dbi + i));
898 + }
899 +
900 + desc += sprintf(buf + desc, "\n%s", "outbound iATU info:\n");
901 + for (i = 0; i < 6; i++) {
902 + writel(PCIE_ATU_REGION_OUTBOUND | i,
903 + pcie->dbi + PCIE_ATU_VIEWPORT);
904 + desc += sprintf(buf + desc, "iATU%d", i);
905 + desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
906 + readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
907 + desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
908 + readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
909 + desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
910 + readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
911 + desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
912 + readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
913 + desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
914 + readl(pcie->dbi + PCIE_ATU_LIMIT));
915 + desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
916 + readl(pcie->dbi + PCIE_ATU_CR1));
917 + desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
918 + readl(pcie->dbi + PCIE_ATU_CR2));
919 + }
920 +
921 + desc += sprintf(buf + desc, "\n%s", "inbound iATU info:\n");
922 + for (i = 0; i < 6; i++) {
923 + writel(PCIE_ATU_REGION_INBOUND | i,
924 + pcie->dbi + PCIE_ATU_VIEWPORT);
925 + desc += sprintf(buf + desc, "iATU%d", i);
926 + desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
927 + readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
928 + desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
929 + readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
930 + desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
931 + readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
932 + desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
933 + readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
934 + desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
935 + readl(pcie->dbi + PCIE_ATU_LIMIT));
936 + desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
937 + readl(pcie->dbi + PCIE_ATU_CR1));
938 + desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
939 + readl(pcie->dbi + PCIE_ATU_CR2));
940 + }
941 +
942 + len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
943 + kfree(buf);
944 +
945 + return len;
946 +}
947 +
948 +/**
949 + * ls_pcie_ep_dbg_regs_write - write into regs datum
950 + * @filp: the opened file
951 + * @buffer: where to find the user's data
952 + * @count: the length of the user's data
953 + * @ppos: file position offset
954 + **/
955 +static ssize_t ls_pcie_ep_dbg_regs_write(struct file *filp,
956 + const char __user *buffer,
957 + size_t count, loff_t *ppos)
958 +{
959 + struct ls_ep_dev *ep = filp->private_data;
960 + struct ls_pcie *pcie = ep->pcie;
961 + char buf[256];
962 +
963 + if (count >= sizeof(buf))
964 + return -ENOSPC;
965 +
966 + memset(buf, 0, sizeof(buf));
967 +
968 + if (copy_from_user(buf, buffer, count))
969 + return -EFAULT;
970 +
971 + ls_pcie_ep_dev_cfg_enable(ep);
972 +
973 + if (strncmp(buf, "reg", 3) == 0) {
974 + u32 reg, value;
975 + int cnt;
976 +
977 + cnt = sscanf(&buf[3], "%x %x", &reg, &value);
978 + if (cnt == 2) {
979 + writel(value, pcie->dbi + reg);
980 + value = readl(pcie->dbi + reg);
981 + dev_info(&ep->dev, "reg 0x%08x: 0x%08x\n",
982 + reg, value);
983 + } else {
984 + dev_info(&ep->dev, "reg <reg> <value>\n");
985 + }
986 + } else if (strncmp(buf, "atu", 3) == 0) {
987 + /* to do */
988 + dev_info(&ep->dev, "atu command is not supported\n");
989 + } else {
990 + dev_info(&ep->dev, "Unknown command %s\n", buf);
991 + dev_info(&ep->dev, "Available commands:\n");
992 + dev_info(&ep->dev, " reg <reg> <value>\n");
993 + }
994 +
995 + return count;
996 +}
997 +
998 +static const struct file_operations ls_pcie_ep_dbg_regs_fops = {
999 + .owner = THIS_MODULE,
1000 + .open = simple_open,
1001 + .read = ls_pcie_ep_dbg_regs_read,
1002 + .write = ls_pcie_ep_dbg_regs_write,
1003 +};
1004 +
1005 +static ssize_t ls_pcie_ep_dbg_test_read(struct file *filp,
1006 + char __user *buffer,
1007 + size_t count, loff_t *ppos)
1008 +{
1009 + struct ls_ep_dev *ep = filp->private_data;
1010 + struct ls_ep_test *test = ep->driver_data;
1011 + char buf[512];
1012 + int desc = 0, len;
1013 +
1014 + if (!test) {
1015 + dev_info(&ep->dev, "there is no test\n");
1016 + return 0;
1017 + }
1018 +
1019 + if (test->status != TEST_IDLE) {
1020 + dev_info(&ep->dev, "test %s is running\n", test->cmd);
1021 + return 0;
1022 + }
1023 +
1024 + desc = sprintf(buf, "MSI ADDR:0x%llx MSI DATA:0x%x\n",
1025 + test->msi_msg_addr, test->msi_msg_data);
1026 +
1027 + desc += sprintf(buf + desc, "%s throughput:%lluMbps\n",
1028 + test->cmd, test->result);
1029 +
1030 + len = simple_read_from_buffer(buffer, count, ppos,
1031 + buf, desc);
1032 +
1033 + return len;
1034 +}
1035 +
1036 +static ssize_t ls_pcie_ep_dbg_test_write(struct file *filp,
1037 + const char __user *buffer,
1038 + size_t count, loff_t *ppos)
1039 +{
1040 + struct ls_ep_dev *ep = filp->private_data;
1041 + char buf[256];
1042 +
1043 + if (count >= sizeof(buf))
1044 + return -ENOSPC;
1045 +
1046 + memset(buf, 0, sizeof(buf));
1047 +
1048 + if (copy_from_user(buf, buffer, count))
1049 + return -EFAULT;
1050 +
1051 + if (strncmp(buf, "init", 4) == 0) {
1052 + int i = 4;
1053 + u64 bus_addr;
1054 +
1055 + while (buf[i] == ' ')
1056 + i++;
1057 +
1058 + if (kstrtou64(&buf[i], 0, &bus_addr))
1059 + dev_info(&ep->dev, "command: init <bus_addr>\n");
1060 + else {
1061 + if (ls_pcie_ep_init_test(ep, bus_addr))
1062 + dev_info(&ep->dev, "failed to init test\n");
1063 + }
1064 + } else if (strncmp(buf, "free", 4) == 0)
1065 + ls_pcie_ep_free_test(ep);
1066 + else if (strncmp(buf, "dma", 3) == 0 ||
1067 + strncmp(buf, "cpy", 3) == 0)
1068 + ls_pcie_ep_start_test(ep, buf);
1069 + else {
1070 + dev_info(&ep->dev, "Unknown command: %s\n", buf);
1071 + dev_info(&ep->dev, "Available commands:\n");
1072 + dev_info(&ep->dev, "\tinit <bus_addr>\n");
1073 + dev_info(&ep->dev, "\t<dma/cpy> <r/w> <packet_size> <loop> <data>\n");
1074 + dev_info(&ep->dev, "\tfree\n");
1075 + }
1076 +
1077 + return count;
1078 +}
1079 +
1080 +static const struct file_operations ls_pcie_ep_dbg_test_fops = {
1081 + .owner = THIS_MODULE,
1082 + .open = simple_open,
1083 + .read = ls_pcie_ep_dbg_test_read,
1084 + .write = ls_pcie_ep_dbg_test_write,
1085 +};
1086 +
1087 +static ssize_t ls_pcie_ep_dbg_dump_read(struct file *filp,
1088 + char __user *buffer,
1089 + size_t count, loff_t *ppos)
1090 +{
1091 + struct ls_ep_dev *ep = filp->private_data;
1092 + struct ls_ep_test *test = ep->driver_data;
1093 + char *buf;
1094 + int desc = 0, i, len;
1095 +
1096 + buf = kmalloc(4 * 1024, GFP_KERNEL);
1097 + if (!buf)
1098 + return -ENOMEM;
1099 +
1100 + if (!test) {
1101 + dev_info(&ep->dev, "there is no test\n");
1102 + kfree(buf);
1103 + return 0;
1104 + }
1105 +
1106 + desc += sprintf(buf + desc, "%s", "dump info:");
1107 + for (i = 0; i < 256; i += 4) {
1108 + if (i % 16 == 0)
1109 + desc += sprintf(buf + desc, "\n%08x:", i);
1110 + desc += sprintf(buf + desc, " %08x", readl(test->buf + i));
1111 + }
1112 +
1113 + desc += sprintf(buf + desc, "\n");
1114 + len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
1115 +
1116 + kfree(buf);
1117 +
1118 + return len;
1119 +}
1120 +
1121 +static const struct file_operations ls_pcie_ep_dbg_dump_fops = {
1122 + .owner = THIS_MODULE,
1123 + .open = simple_open,
1124 + .read = ls_pcie_ep_dbg_dump_read,
1125 +};
1126 +
1127 +static int ls_pcie_ep_dev_dbgfs_init(struct ls_ep_dev *ep)
1128 +{
1129 + struct ls_pcie *pcie = ep->pcie;
1130 + struct dentry *pfile;
1131 +
1132 + ls_pcie_ep_dev_cfg_enable(ep);
1133 +
1134 + ep->dir = debugfs_create_dir(dev_name(&ep->dev), pcie->dir);
1135 + if (!ep->dir)
1136 + return -ENOMEM;
1137 +
1138 + pfile = debugfs_create_file("regs", 0600, ep->dir, ep,
1139 + &ls_pcie_ep_dbg_regs_fops);
1140 + if (!pfile)
1141 + dev_info(&ep->dev, "failed to create debugfs regs file\n");
1142 +
1143 + pfile = debugfs_create_file("test", 0600, ep->dir, ep,
1144 + &ls_pcie_ep_dbg_test_fops);
1145 + if (!pfile)
1146 + dev_info(&ep->dev, "failed to create debugfs test file\n");
1147 +
1148 + pfile = debugfs_create_file("dump", 0600, ep->dir, ep,
1149 + &ls_pcie_ep_dbg_dump_fops);
1150 + if (!pfile)
1151 + dev_info(&ep->dev, "failed to create debugfs dump file\n");
1152 +
1153 + return 0;
1154 +}
1155 +
1156 +int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie)
1157 +{
1158 + struct ls_ep_dev *ep;
1159 +
1160 + pcie->dir = debugfs_create_dir(dev_name(pcie->dev), NULL);
1161 + if (!pcie->dir)
1162 + return -ENOMEM;
1163 +
1164 + list_for_each_entry(ep, &pcie->ep_list, node)
1165 + ls_pcie_ep_dev_dbgfs_init(ep);
1166 +
1167 + return 0;
1168 +}
1169 +
1170 +int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie)
1171 +{
1172 + debugfs_remove_recursive(pcie->dir);
1173 + return 0;
1174 +}
1175 +
1176 +MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
1177 +MODULE_DESCRIPTION("Freescale Layerscape PCIe EP controller driver");
1178 +MODULE_LICENSE("GPL v2");
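[Editor's note, not part of the patch: the debugfs interface above is driven with plain writes and reads. Below is a hypothetical user-space exerciser; the debugfs path depends on the controller's device name and is an assumption, as is the bus address passed to init.]

```c
/* Hypothetical driver of the "test" debugfs file created above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TEST_FILE "/sys/kernel/debug/3400000.pcie/pf0/test"  /* assumed */

static int send_cmd(const char *cmd)
{
	int fd = open(TEST_FILE, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[512];
	int fd, n;

	/* Map the BAR windows against a host-visible bus address, then
	 * run a DMA write test: 1024-byte packets, 100 loops, fill 0xaa. */
	if (send_cmd("init 0x80000000") || send_cmd("dma w 1024 100 aa"))
		return 1;

	/* Reading back reports MSI address/data and throughput; while
	 * the kthread is still running it reports the test as busy, so
	 * a real tool would poll until the test finishes. */
	fd = open(TEST_FILE, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
```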
1179 --- /dev/null
1180 +++ b/drivers/pci/host/pci-layerscape-ep.c
1181 @@ -0,0 +1,309 @@
1182 +/*
1183 + * PCIe Endpoint driver for Freescale Layerscape SoCs
1184 + *
1185 + * Copyright (C) 2015 Freescale Semiconductor.
1186 + *
1187 + * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
1188 + *
1189 + * This program is free software; you can redistribute it and/or modify
1190 + * it under the terms of the GNU General Public License version 2 as
1191 + * published by the Free Software Foundation.
1192 + */
1193 +
1194 +#include <linux/kernel.h>
1195 +#include <linux/delay.h>
1196 +#include <linux/interrupt.h>
1197 +#include <linux/module.h>
1198 +#include <linux/of_pci.h>
1199 +#include <linux/of_platform.h>
1200 +#include <linux/of_irq.h>
1201 +#include <linux/of_address.h>
1202 +#include <linux/pci.h>
1203 +#include <linux/platform_device.h>
1204 +#include <linux/resource.h>
1205 +#include <linux/debugfs.h>
1206 +#include <linux/time.h>
1207 +#include <linux/uaccess.h>
1208 +
1209 +#include "pci-layerscape-ep.h"
1210 +
1211 +struct ls_ep_dev *
1212 +ls_pci_ep_find(struct ls_pcie *pcie, int dev_id)
1213 +{
1214 + struct ls_ep_dev *ep;
1215 +
1216 + list_for_each_entry(ep, &pcie->ep_list, node) {
1217 + if (ep->dev_id == dev_id)
1218 + return ep;
1219 + }
1220 +
1221 + return NULL;
1222 +}
1223 +
1224 +static void ls_pcie_try_cfg2(struct ls_pcie *pcie, int pf, int vf)
1225 +{
1226 + if (pcie->sriov)
1227 + writel(PCIE_LCTRL0_VAL(pf, vf),
1228 + pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_LCTRL0);
1229 +}
1230 +
1231 +static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
1232 +{
1233 + u32 header_type = 0;
1234 +
1235 + header_type = readl(pcie->dbi + (PCI_HEADER_TYPE & ~0x3));
1236 + header_type = (header_type >> 16) & 0x7f;
1237 +
1238 + return header_type == PCI_HEADER_TYPE_BRIDGE;
1239 +}
1240 +
1241 +void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
1242 + u64 cpu_addr, u64 pci_addr, u32 size)
1243 +{
1244 + writel(PCIE_ATU_REGION_OUTBOUND | idx,
1245 + pcie->dbi + PCIE_ATU_VIEWPORT);
1246 + writel(lower_32_bits(cpu_addr),
1247 + pcie->dbi + PCIE_ATU_LOWER_BASE);
1248 + writel(upper_32_bits(cpu_addr),
1249 + pcie->dbi + PCIE_ATU_UPPER_BASE);
1250 + writel(lower_32_bits(cpu_addr + size - 1),
1251 + pcie->dbi + PCIE_ATU_LIMIT);
1252 + writel(lower_32_bits(pci_addr),
1253 + pcie->dbi + PCIE_ATU_LOWER_TARGET);
1254 + writel(upper_32_bits(pci_addr),
1255 + pcie->dbi + PCIE_ATU_UPPER_TARGET);
1256 + writel(type, pcie->dbi + PCIE_ATU_CR1);
1257 + writel(PCIE_ATU_ENABLE, pcie->dbi + PCIE_ATU_CR2);
1258 +}
1259 +
1260 +/* Use bar match mode and MEM type as default */
1261 +void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
1262 + int bar, u64 phys)
1263 +{
1264 + writel(PCIE_ATU_REGION_INBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT);
1265 + writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_TARGET);
1266 + writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET);
1267 + writel(PCIE_ATU_TYPE_MEM, pcie->dbi + PCIE_ATU_CR1);
1268 + writel(PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
1269 + PCIE_ATU_BAR_NUM(bar), pcie->dbi + PCIE_ATU_CR2);
1270 +}
1271 +
1272 +void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep)
1273 +{
1274 + ls_pcie_try_cfg2(ep->pcie, ep->pf_idx, ep->vf_idx);
1275 +}
1276 +
1277 +void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
1278 +{
1279 + if (size < 4 * 1024)
1280 + return;
1281 +
1282 + switch (bar) {
1283 + case 0:
1284 + writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
1285 + break;
1286 + case 1:
1287 + writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
1288 + break;
1289 + case 2:
1290 + writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
1291 + writel(0, bar_base + PCI_BASE_ADDRESS_3);
1292 + break;
1293 + case 4:
1294 + writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
1295 + writel(0, bar_base + PCI_BASE_ADDRESS_5);
1296 + break;
1297 + default:
1298 + break;
1299 + }
1300 +}
1301 +
1302 +void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size)
1303 +{
1304 + struct ls_pcie *pcie = ep->pcie;
1305 + void *bar_base;
1306 +
1307 + if (size < 4 * 1024)
1308 + return;
1309 +
1310 + if (pcie->sriov)
1311 + bar_base = pcie->dbi;
1312 + else
1313 + bar_base = pcie->dbi + PCIE_NO_SRIOV_BAR_BASE;
1314 +
1315 + ls_pcie_ep_dev_cfg_enable(ep);
1316 + ls_pcie_ep_setup_bar(bar_base, bar, size);
1317 +}
1318 +
1319 +static int ls_pcie_ep_dev_init(struct ls_pcie *pcie, int pf_idx, int vf_idx)
1320 +{
1321 + struct ls_ep_dev *ep;
1322 +
1323 + ep = devm_kzalloc(pcie->dev, sizeof(*ep), GFP_KERNEL);
1324 + if (!ep)
1325 + return -ENOMEM;
1326 +
1327 + ep->pcie = pcie;
1328 + ep->pf_idx = pf_idx;
1329 + ep->vf_idx = vf_idx;
1330 + if (vf_idx)
1331 + ep->dev_id = pf_idx + 4 + 4 * (vf_idx - 1);
1332 + else
1333 + ep->dev_id = pf_idx;
1334 +
1335 + if (ep->vf_idx)
1336 + dev_set_name(&ep->dev, "pf%d-vf%d",
1337 + ep->pf_idx,
1338 + ep->vf_idx);
1339 + else
1340 + dev_set_name(&ep->dev, "pf%d",
1341 + ep->pf_idx);
1342 +
1343 + list_add_tail(&ep->node, &pcie->ep_list);
1344 +
1345 + return 0;
1346 +}
1347 +
1348 +static int ls_pcie_ep_init(struct ls_pcie *pcie)
1349 +{
1350 + u32 sriov_header;
1351 + int pf, vf, i, j;
1352 +
1353 + sriov_header = readl(pcie->dbi + PCIE_SRIOV_POS);
1354 +
1355 + if (PCI_EXT_CAP_ID(sriov_header) == PCI_EXT_CAP_ID_SRIOV) {
1356 + pcie->sriov = PCIE_SRIOV_POS;
1357 + pf = PCIE_PF_NUM;
1358 + vf = PCIE_VF_NUM;
1359 + } else {
1360 + pcie->sriov = 0;
1361 + pf = 1;
1362 + vf = 0;
1363 + }
1364 +
1365 + for (i = 0; i < pf; i++) {
1366 + for (j = 0; j <= vf; j++)
1367 + ls_pcie_ep_dev_init(pcie, i, j);
1368 + }
1369 +
1370 + return 0;
1371 +}
1372 +
1373 +static struct ls_pcie_ep_drvdata ls1043_drvdata = {
1374 + .lut_offset = 0x10000,
1375 + .ltssm_shift = 24,
1376 + .lut_dbg = 0x7fc,
1377 +};
1378 +
1379 +static struct ls_pcie_ep_drvdata ls1046_drvdata = {
1380 + .lut_offset = 0x80000,
1381 + .ltssm_shift = 24,
1382 + .lut_dbg = 0x407fc,
1383 +};
1384 +
1385 +static struct ls_pcie_ep_drvdata ls2080_drvdata = {
1386 + .lut_offset = 0x80000,
1387 + .ltssm_shift = 0,
1388 + .lut_dbg = 0x7fc,
1389 +};
1390 +
1391 +static const struct of_device_id ls_pcie_ep_of_match[] = {
1392 + { .compatible = "fsl,ls1021a-pcie", },
1393 + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
1394 + { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
1395 + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
1396 + { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
1397 + { },
1398 +};
1399 +MODULE_DEVICE_TABLE(of, ls_pcie_ep_of_match);
1400 +
1401 +static int ls_pcie_ep_probe(struct platform_device *pdev)
1402 +{
1403 + struct ls_pcie *pcie;
1404 + struct resource *dbi_base, *cfg_res;
1405 + const struct of_device_id *match;
1406 + int ret;
1407 +
1408 + match = of_match_device(ls_pcie_ep_of_match, &pdev->dev);
1409 + if (!match)
1410 + return -ENODEV;
1411 +
1412 + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1413 + if (!pcie)
1414 + return -ENOMEM;
1415 +
1416 + pcie->dev = &pdev->dev;
1417 + INIT_LIST_HEAD(&pcie->ep_list);
1418 +
1419 + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1420 + pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
1421 + if (IS_ERR(pcie->dbi)) {
1422 + dev_err(&pdev->dev, "missing *regs* space\n");
1423 + return PTR_ERR(pcie->dbi);
1424 + }
1425 +
1426 + pcie->drvdata = match->data;
1427 + pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
1428 +
1429 + if (ls_pcie_is_bridge(pcie))
1430 + return -ENODEV;
1431 +
1432 + dev_info(pcie->dev, "in EP mode\n");
1433 +
1434 + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
1435 + if (cfg_res)
1436 + pcie->out_base = cfg_res->start;
1437 + else {
1438 + dev_err(&pdev->dev, "missing *config* space\n");
1439 + return -ENODEV;
1440 + }
1441 +
1442 + ret = ls_pcie_ep_init(pcie);
1443 + if (ret)
1444 + return ret;
1445 +
1446 + ls_pcie_ep_dbgfs_init(pcie);
1447 +
1448 + platform_set_drvdata(pdev, pcie);
1449 +
1450 + return 0;
1451 +}
1452 +
1453 +static int ls_pcie_ep_dev_remove(struct ls_ep_dev *ep)
1454 +{
1455 + list_del(&ep->node);
1456 +
1457 + return 0;
1458 +}
1459 +
1460 +static int ls_pcie_ep_remove(struct platform_device *pdev)
1461 +{
1462 + struct ls_pcie *pcie = platform_get_drvdata(pdev);
1463 + struct ls_ep_dev *ep, *tmp;
1464 +
1465 + if (!pcie)
1466 + return 0;
1467 +
1468 + ls_pcie_ep_dbgfs_remove(pcie);
1469 +
1470 + list_for_each_entry_safe(ep, tmp, &pcie->ep_list, node)
1471 + ls_pcie_ep_dev_remove(ep);
1472 +
1473 + return 0;
1474 +}
1475 +
1476 +static struct platform_driver ls_pcie_ep_driver = {
1477 + .driver = {
1478 + .name = "ls-pcie-ep",
1479 + .owner = THIS_MODULE,
1480 + .of_match_table = ls_pcie_ep_of_match,
1481 + },
1482 + .probe = ls_pcie_ep_probe,
1483 + .remove = ls_pcie_ep_remove,
1484 +};
1485 +
1486 +module_platform_driver(ls_pcie_ep_driver);
1487 +
1488 +MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
1489 +MODULE_DESCRIPTION("Freescale Layerscape PCIe EP driver");
1490 +MODULE_LICENSE("GPL v2");
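[Editor's note, not part of the patch: the dev_id computed in ls_pcie_ep_dev_init() packs physical functions first, then virtual functions in groups of four; the constant 4 leaves room for four PFs even though PCIE_PF_NUM is 2. A stand-alone sketch of the mapping:]

```c
/* Sketch of the PF/VF -> dev_id mapping from ls_pcie_ep_dev_init(). */
#include <stdio.h>

static int dev_id(int pf_idx, int vf_idx)
{
	return vf_idx ? pf_idx + 4 + 4 * (vf_idx - 1) : pf_idx;
}

int main(void)
{
	printf("pf0     -> %d\n", dev_id(0, 0));   /* 0 */
	printf("pf1     -> %d\n", dev_id(1, 0));   /* 1 */
	printf("pf0-vf1 -> %d\n", dev_id(0, 1));   /* 4 */
	printf("pf1-vf1 -> %d\n", dev_id(1, 1));   /* 5 */
	printf("pf0-vf2 -> %d\n", dev_id(0, 2));   /* 8 */
	return 0;
}
```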
1491 --- /dev/null
1492 +++ b/drivers/pci/host/pci-layerscape-ep.h
1493 @@ -0,0 +1,115 @@
1494 +/*
1495 + * PCIe Endpoint driver for Freescale Layerscape SoCs
1496 + *
1497 + * Copyright (C) 2015 Freescale Semiconductor.
1498 + *
1499 + * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
1500 + *
1501 + * This program is free software; you can redistribute it and/or modify
1502 + * it under the terms of the GNU General Public License version 2 as
1503 + * published by the Free Software Foundation.
1504 + */
1505 +
1506 +
1507 +#ifndef _PCIE_LAYERSCAPE_EP_H
1508 +#define _PCIE_LAYERSCAPE_EP_H
1509 +
1510 +#include <linux/device.h>
1511 +
1512 +/* Synopsis specific PCIE configuration registers */
1513 +#define PCIE_ATU_VIEWPORT 0x900
1514 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
1515 +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
1516 +#define PCIE_ATU_REGION_INDEX3 (0x3 << 0)
1517 +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
1518 +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
1519 +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
1520 +#define PCIE_ATU_CR1 0x904
1521 +#define PCIE_ATU_TYPE_MEM (0x0 << 0)
1522 +#define PCIE_ATU_TYPE_IO (0x2 << 0)
1523 +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
1524 +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
1525 +#define PCIE_ATU_CR2 0x908
1526 +#define PCIE_ATU_ENABLE (0x1 << 31)
1527 +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
1528 +#define PCIE_ATU_LOWER_BASE 0x90C
1529 +#define PCIE_ATU_UPPER_BASE 0x910
1530 +#define PCIE_ATU_LIMIT 0x914
1531 +#define PCIE_ATU_LOWER_TARGET 0x918
1532 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
1533 +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
1534 +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
1535 +#define PCIE_ATU_UPPER_TARGET 0x91C
1536 +
1537 +/* PEX internal configuration registers */
1538 +#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
1539 +
1540 +/* PEX LUT registers */
1541 +#define PCIE_LUT_BASE 0x80000
1542 +#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug register */
1543 +
1544 +#define PCIE_LUT_LCTRL0 0x7F8
1545 +
1546 +#define PCIE_ATU_BAR_NUM(bar) ((bar) << 8)
1547 +#define PCIE_LCTRL0_CFG2_ENABLE (1 << 31)
1548 +#define PCIE_LCTRL0_VF(vf) ((vf) << 22)
1549 +#define PCIE_LCTRL0_PF(pf) ((pf) << 16)
1550 +#define PCIE_LCTRL0_VF_ACTIVE (1 << 21)
1551 +#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \
1552 + PCIE_LCTRL0_VF(vf) | \
1553 + ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \
1554 + PCIE_LCTRL0_CFG2_ENABLE)
1555 +
1556 +#define PCIE_NO_SRIOV_BAR_BASE 0x1000
1557 +
1558 +#define PCIE_SRIOV_POS 0x178
1559 +#define PCIE_PF_NUM 2
1560 +#define PCIE_VF_NUM 64
1561 +
1562 +struct ls_pcie_ep_drvdata {
1563 + u32 lut_offset;
1564 + u32 ltssm_shift;
1565 + u32 lut_dbg;
1566 +};
1567 +
1568 +struct ls_pcie {
1569 + struct list_head ep_list;
1570 + struct device *dev;
1571 + struct dentry *dir;
1572 + const struct ls_pcie_ep_drvdata *drvdata;
1573 + void __iomem *dbi;
1574 + void __iomem *lut;
1575 + phys_addr_t out_base;
1576 + int sriov;
1577 + int index;
1578 +};
1579 +
1580 +struct ls_ep_dev {
1581 + struct list_head node;
1582 + struct ls_pcie *pcie;
1583 + struct device dev;
1584 + struct dentry *dir;
1585 + int pf_idx;
1586 + int vf_idx;
1587 + int dev_id;
1588 + void *driver_data;
1589 +};
1590 +
1591 +struct ls_ep_dev *ls_pci_ep_find(struct ls_pcie *pcie, int dev_id);
1592 +
1593 +void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
1594 + u64 cpu_addr, u64 pci_addr, u32 size);
1595 +
1596 +/* Use bar match mode and MEM type as default */
1597 +void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
1598 + int bar, u64 phys);
1599 +
1600 +void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size);
1601 +
1602 +
1603 +void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep);
1604 +
1605 +int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie);
1606 +int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie);
1607 +
1608 +#endif /* _PCIE_LAYERSCAPE_EP_H */
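[Editor's note, not part of the patch: to see what ls_pcie_try_cfg2() actually writes to the LUT, the sketch below expands PCIE_LCTRL0_VAL() for two PF/VF pairs. The macros are copied from the header above; the program is illustrative only.]

```c
/* Stand-alone expansion of PCIE_LCTRL0_VAL() from pci-layerscape-ep.h. */
#include <stdio.h>

#define PCIE_LCTRL0_CFG2_ENABLE (1u << 31)
#define PCIE_LCTRL0_VF(vf)      ((unsigned)(vf) << 22)
#define PCIE_LCTRL0_PF(pf)      ((unsigned)(pf) << 16)
#define PCIE_LCTRL0_VF_ACTIVE   (1u << 21)
#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \
				 PCIE_LCTRL0_VF(vf) | \
				 ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \
				 PCIE_LCTRL0_CFG2_ENABLE)

int main(void)
{
	/* PF1/VF0: CFG2 enable + PF field only -> 0x80010000 */
	printf("pf1 vf0: 0x%08x\n", PCIE_LCTRL0_VAL(1, 0));
	/* PF0/VF2: VF field + VF_ACTIVE also set -> 0x80a00000 */
	printf("pf0 vf2: 0x%08x\n", PCIE_LCTRL0_VAL(0, 2));
	return 0;
}
```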
1609 --- a/drivers/pci/host/pci-layerscape.c
1610 +++ b/drivers/pci/host/pci-layerscape.c
1611 @@ -33,14 +33,18 @@
1612
1613 /* PEX Internal Configuration Registers */
1614 #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
1615 +#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
1616 +#define PCIE_ABSERR_SETTING 0x9401 /* Forward error response of non-posted requests */
1617 #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
1618
1619 -/* PEX LUT registers */
1620 -#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
1621 +#define PCIE_IATU_NUM 6
1622 +
1623 +static void ls_pcie_host_init(struct pcie_port *pp);
1624
1625 struct ls_pcie_drvdata {
1626 u32 lut_offset;
1627 u32 ltssm_shift;
1628 + u32 lut_dbg;
1629 struct pcie_host_ops *ops;
1630 };
1631
1632 @@ -86,6 +90,14 @@ static void ls_pcie_drop_msg_tlp(struct
1633 iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
1634 }
1635
1636 +static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
1637 +{
1638 + int i;
1639 +
1640 + for (i = 0; i < PCIE_IATU_NUM; i++)
1641 + dw_pcie_disable_outbound_atu(&pcie->pp, i);
1642 +}
1643 +
1644 static int ls1021_pcie_link_up(struct pcie_port *pp)
1645 {
1646 u32 state;
1647 @@ -134,7 +146,7 @@ static int ls_pcie_link_up(struct pcie_p
1648 struct ls_pcie *pcie = to_ls_pcie(pp);
1649 u32 state;
1650
1651 - state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
1652 + state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
1653 pcie->drvdata->ltssm_shift) &
1654 LTSSM_STATE_MASK;
1655
1656 @@ -144,6 +156,12 @@ static int ls_pcie_link_up(struct pcie_p
1657 return 1;
1658 }
1659
1660 +/* Forward error response of outbound non-posted requests */
1661 +static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
1662 +{
1663 + iowrite32(PCIE_ABSERR_SETTING, pcie->pp.dbi_base + PCIE_ABSERR);
1664 +}
1665 +
1666 static void ls_pcie_host_init(struct pcie_port *pp)
1667 {
1668 struct ls_pcie *pcie = to_ls_pcie(pp);
1669 @@ -153,6 +171,10 @@ static void ls_pcie_host_init(struct pci
1670 ls_pcie_clear_multifunction(pcie);
1671 ls_pcie_drop_msg_tlp(pcie);
1672 iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
1673 +
1674 + ls_pcie_disable_outbound_atus(pcie);
1675 + ls_pcie_fix_error_response(pcie);
1676 + dw_pcie_setup_rc(pp);
1677 }
1678
1679 static int ls_pcie_msi_host_init(struct pcie_port *pp,
1680 @@ -196,20 +218,40 @@ static struct ls_pcie_drvdata ls1021_drv
1681 static struct ls_pcie_drvdata ls1043_drvdata = {
1682 .lut_offset = 0x10000,
1683 .ltssm_shift = 24,
1684 + .lut_dbg = 0x7fc,
1685 + .ops = &ls_pcie_host_ops,
1686 +};
1687 +
1688 +static struct ls_pcie_drvdata ls1046_drvdata = {
1689 + .lut_offset = 0x80000,
1690 + .ltssm_shift = 24,
1691 + .lut_dbg = 0x407fc,
1692 .ops = &ls_pcie_host_ops,
1693 };
1694
1695 static struct ls_pcie_drvdata ls2080_drvdata = {
1696 .lut_offset = 0x80000,
1697 .ltssm_shift = 0,
1698 + .lut_dbg = 0x7fc,
1699 + .ops = &ls_pcie_host_ops,
1700 +};
1701 +
1702 +static struct ls_pcie_drvdata ls2088_drvdata = {
1703 + .lut_offset = 0x80000,
1704 + .ltssm_shift = 0,
1705 + .lut_dbg = 0x407fc,
1706 .ops = &ls_pcie_host_ops,
1707 };
1708
1709 static const struct of_device_id ls_pcie_of_match[] = {
1710 + { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
1711 { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
1712 { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
1713 + { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
1714 { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
1715 { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
1716 + { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
1717 + { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
1718 { },
1719 };
1720
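[Editor's note, not part of the patch: the link-up test now reads the LTSSM state through a per-SoC lut_dbg offset and ltssm_shift. A stand-alone sketch of the computation, using the LTSSM_STATE_MASK and LTSSM_PCIE_L0 values defined elsewhere in pci-layerscape.c and made-up register values:]

```c
/* Sketch of the per-SoC link-up check in ls_pcie_link_up(). */
#include <stdio.h>

#define LTSSM_STATE_MASK 0x3f
#define LTSSM_PCIE_L0    0x11    /* L0: link is up */

static int link_up(unsigned int lut_dbg_val, unsigned int ltssm_shift)
{
	unsigned int state = (lut_dbg_val >> ltssm_shift) & LTSSM_STATE_MASK;

	return state >= LTSSM_PCIE_L0;
}

int main(void)
{
	/* ls1043/ls1046 place the state at bits 29:24 (ltssm_shift = 24);
	 * ls2080 keeps it at bits 5:0 (ltssm_shift = 0). */
	printf("ls1046 0x11000000 -> %d\n", link_up(0x11000000, 24)); /* 1 */
	printf("ls2080 0x00000011 -> %d\n", link_up(0x00000011, 0));  /* 1 */
	printf("ls1046 0x0c000000 -> %d\n", link_up(0x0c000000, 24)); /* 0 */
	return 0;
}
```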
1721 --- a/drivers/pci/host/pcie-designware.c
1722 +++ b/drivers/pci/host/pcie-designware.c
1723 @@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_po
1724 return -ETIMEDOUT;
1725 }
1726
1727 +void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index)
1728 +{
1729 + dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
1730 + dw_pcie_writel_rc(pp, PCIE_ATU_CR2, 0);
1731 +}
1732 +
1733 int dw_pcie_link_up(struct pcie_port *pp)
1734 {
1735 u32 val;
1736 --- a/drivers/pci/host/pcie-designware.h
1737 +++ b/drivers/pci/host/pcie-designware.h
1738 @@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_po
1739 int dw_pcie_link_up(struct pcie_port *pp);
1740 void dw_pcie_setup_rc(struct pcie_port *pp);
1741 int dw_pcie_host_init(struct pcie_port *pp);
1742 +void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index);
1743
1744 #endif /* _PCIE_DESIGNWARE_H */
1745 --- a/drivers/pci/pci.c
1746 +++ b/drivers/pci/pci.c
1747 @@ -454,7 +454,7 @@ struct resource *pci_find_parent_resourc
1748 pci_bus_for_each_resource(bus, r, i) {
1749 if (!r)
1750 continue;
1751 - if (res->start && resource_contains(r, res)) {
1752 + if (resource_contains(r, res)) {
1753
1754 /*
1755 * If the window is prefetchable but the BAR is
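[Editor's note, not part of the patch: the pci.c hunk drops the extra "res->start &&" guard so that a resource legitimately assigned at bus address 0 can still match its parent window. A simplified sketch; the kernel's resource_contains() also checks resource types and the IORESOURCE_UNSET flag:]

```c
/* Simplified model of the resource_contains() check in the hunk above. */
#include <stdio.h>

struct resource { unsigned long start, end; };

static int resource_contains(const struct resource *r,
			     const struct resource *res)
{
	return r->start <= res->start && res->end <= r->end;
}

int main(void)
{
	struct resource window = { 0x0, 0xffff };
	struct resource bar    = { 0x0, 0x0fff };   /* starts at 0 */

	/* Old check: "bar.start && resource_contains(...)" misses it. */
	printf("old: %d\n",
	       (int)(bar.start && resource_contains(&window, &bar)));
	/* New check matches the window even for a zero start address. */
	printf("new: %d\n", resource_contains(&window, &bar));
	return 0;
}
```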
1756 --- a/drivers/pci/pcie/portdrv_core.c
1757 +++ b/drivers/pci/pcie/portdrv_core.c
1758 @@ -44,52 +44,30 @@ static void release_pcie_device(struct d
1759 }
1760
1761 /**
1762 - * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
1763 - * @entries: Array of MSI-X entries
1764 - * @new_entry: Index of the entry to add to the array
1765 - * @nr_entries: Number of entries already in the array
1766 + * pcibios_check_service_irqs - check irqs in the device tree
1767 + * @dev: PCI Express port to handle
1768 + * @irqs: Array of irqs to populate
1769 + * @mask: Bitmask of port capabilities returned by get_port_device_capability()
1770 + *
1771 + * Return value: 0 means no service irqs in the device tree
1772 *
1773 - * Return value: Position of the added entry in the array
1774 */
1775 -static int pcie_port_msix_add_entry(
1776 - struct msix_entry *entries, int new_entry, int nr_entries)
1777 +int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
1778 {
1779 - int j;
1780 -
1781 - for (j = 0; j < nr_entries; j++)
1782 - if (entries[j].entry == new_entry)
1783 - return j;
1784 -
1785 - entries[j].entry = new_entry;
1786 - return j;
1787 + return 0;
1788 }
1789
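[Editor's note, not part of the patch: a platform would override the new __weak hook roughly as sketched below, pulling per-service IRQs from the device tree. The property names "aer" and "pme" are assumptions for illustration; this is not the implementation shipped in this patch.]

```c
/* Hypothetical strong override of pcibios_check_service_irqs(). */
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>

int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
	struct device_node *np = pci_device_to_OF_node(dev);
	int irq, ret = 0;

	if (!np)
		return 0;

	if (mask & PCIE_PORT_SERVICE_AER) {
		irq = of_irq_get_byname(np, "aer");   /* assumed name */
		if (irq > 0) {
			irqs[PCIE_PORT_SERVICE_AER_SHIFT] = irq;
			ret = 1;
		}
	}

	if (mask & PCIE_PORT_SERVICE_PME) {
		irq = of_irq_get_byname(np, "pme");   /* assumed name */
		if (irq > 0) {
			irqs[PCIE_PORT_SERVICE_PME_SHIFT] = irq;
			ret = 1;
		}
	}

	/* Non-zero tells pcie_init_service_irqs() the DT supplied IRQs. */
	return ret;
}
```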
1790 /**
1791 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
1792 * @dev: PCI Express port to handle
1793 - * @vectors: Array of interrupt vectors to populate
1794 + * @irqs: Array of interrupt vectors to populate
1795 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
1796 *
1797 * Return value: 0 on success, error code on failure
1798 */
1799 -static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
1800 +static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
1801 {
1802 - struct msix_entry *msix_entries;
1803 - int idx[PCIE_PORT_DEVICE_MAXSERVICES];
1804 - int nr_entries, status, pos, i, nvec;
1805 - u16 reg16;
1806 - u32 reg32;
1807 -
1808 - nr_entries = pci_msix_vec_count(dev);
1809 - if (nr_entries < 0)
1810 - return nr_entries;
1811 - BUG_ON(!nr_entries);
1812 - if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
1813 - nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
1814 -
1815 - msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
1816 - if (!msix_entries)
1817 - return -ENOMEM;
1818 + int nr_entries, entry, nvec = 0;
1819
1820 /*
1821 * Allocate as many entries as the port wants, so that we can check
1822 @@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct
1823 * equal to the number of entries this port actually uses, we'll happily
1824 * go through without any tricks.
1825 */
1826 - for (i = 0; i < nr_entries; i++)
1827 - msix_entries[i].entry = i;
1828 -
1829 - status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
1830 - if (status)
1831 - goto Exit;
1832 -
1833 - for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
1834 - idx[i] = -1;
1835 - status = -EIO;
1836 - nvec = 0;
1837 + nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
1838 + PCI_IRQ_MSIX);
1839 + if (nr_entries < 0)
1840 + return nr_entries;
1841
1842 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
1843 - int entry;
1844 + u16 reg16;
1845
1846 /*
1847 * The code below follows the PCI Express Base Specification 2.0
1848 @@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct
1849 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
1850 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
1851 if (entry >= nr_entries)
1852 - goto Error;
1853 + goto out_free_irqs;
1854
1855 - i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
1856 - if (i == nvec)
1857 - nvec++;
1858 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
1859 + irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
1860
1861 - idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
1862 - idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
1863 + nvec = max(nvec, entry + 1);
1864 }
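
The shift by 9 above is not magic: PCI_EXP_FLAGS_IRQ is 0x3e00, the Interrupt Message Number field in bits 13:9 of the PCI Express Capabilities register, which names the MSI-X table entry used for PME/hotplug. A worked decode with a made-up register value:

    /* e.g. a root port whose capabilities register reads 0x0a42: */
    u16 reg16 = 0x0a42;                           /* sample value */
    int entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; /* 0x0a00 >> 9 == 5 */
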
1865
1866 if (mask & PCIE_PORT_SERVICE_AER) {
1867 - int entry;
1868 + u32 reg32, pos;
1869
1870 /*
1871 * The code below follows Section 7.10.10 of the PCI Express
1872 @@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct
1873 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
1874 entry = reg32 >> 27;
1875 if (entry >= nr_entries)
1876 - goto Error;
1877 + goto out_free_irqs;
1878
1879 - i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
1880 - if (i == nvec)
1881 - nvec++;
1882 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
1883
1884 - idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
1885 + nvec = max(nvec, entry + 1);
1886 }
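
Similarly for AER: bits 31:27 of the Root Error Status register (PCI_ERR_ROOT_STATUS) hold the Advanced Error Interrupt Message Number, so a plain right shift by 27 needs no mask. A sample decode:

    u32 reg32 = 0x10000000;  /* sample Root Error Status value */
    int entry = reg32 >> 27; /* == 2, i.e. MSI-X table entry 2 */
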
1887
1888 /*
1889 @@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct
1890 * what we have. Otherwise, the port has some extra entries not for the
1891 * services we know and we need to work around that.
1892 */
1893 - if (nvec == nr_entries) {
1894 - status = 0;
1895 - } else {
1896 + if (nvec != nr_entries) {
1897 /* Drop the temporary MSI-X setup */
1898 - pci_disable_msix(dev);
1899 + pci_free_irq_vectors(dev);
1900
1901 /* Now allocate the MSI-X vectors for real */
1902 - status = pci_enable_msix_exact(dev, msix_entries, nvec);
1903 - if (status)
1904 - goto Exit;
1905 + nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
1906 + PCI_IRQ_MSIX);
1907 + if (nr_entries < 0)
1908 + return nr_entries;
1909 }
1910
1911 - for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
1912 - vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
1913 -
1914 - Exit:
1915 - kfree(msix_entries);
1916 - return status;
1917 + return 0;
1918
1919 - Error:
1920 - pci_disable_msix(dev);
1921 - goto Exit;
1922 +out_free_irqs:
1923 + pci_free_irq_vectors(dev);
1924 + return -EIO;
1925 }
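
The conversion above replaces hand-rolled struct msix_entry bookkeeping with the pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() API that entered mainline in 4.8. The general idiom, as a standalone sketch (the demo_* names are placeholders, not from this patch):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int demo_setup_irqs(struct pci_dev *pdev)
    {
            int nvec, err;

            /* Ask for 1..8 vectors, preferring MSI-X over MSI. */
            nvec = pci_alloc_irq_vectors(pdev, 1, 8,
                                         PCI_IRQ_MSIX | PCI_IRQ_MSI);
            if (nvec < 0)
                    return nvec;

            /* pci_irq_vector() maps a vector index to a Linux irq number. */
            err = request_irq(pci_irq_vector(pdev, 0), demo_irq_handler, 0,
                              "demo", pdev);
            if (err)
                    pci_free_irq_vectors(pdev); /* undoes MSI-X or MSI alike */
            return err;
    }
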
1926
1927 /**
1928 - * init_service_irqs - initialize irqs for PCI Express port services
1929 + * pcie_init_service_irqs - initialize irqs for PCI Express port services
1930 * @dev: PCI Express port to handle
1931 * @irqs: Array of irqs to populate
1932 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
1933 *
1934 * Return value: Interrupt mode associated with the port
1935 */
1936 -static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
1937 +static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
1938 {
1939 - int i, irq = -1;
1940 + unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
1941 + int ret, i;
1942 + int irq = -1;
1943 +
1944 + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
1945 + irqs[i] = -1;
1946 +
 1949› 1947 + /* Some platforms provide independent irq pins for AER/PME etc.
 1950› 1948 + * and describe them in the device tree; use those service irqs
 1951› 1949 + * when the platform reports any.
 1952› 1950 + */
1951 + ret = pcibios_check_service_irqs(dev, irqs, mask);
1952 + if (ret) {
1953 + if (dev->irq)
1954 + irq = dev->irq;
1955 + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
1956 + if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT)
1957 + irqs[i] = irq;
1958 + return 0;
1959 + }
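
To make the device-tree path concrete, here is a hypothetical outcome, assuming the platform hook filled only the AER slot and the port's dev->irq is 118 (both details invented for illustration):

    /*
     * irqs[PCIE_PORT_SERVICE_PME_SHIFT] == 118  (fell back to dev->irq)
     * irqs[PCIE_PORT_SERVICE_AER_SHIFT] == the irq named in the DT
     * irqs[PCIE_PORT_SERVICE_HP_SHIFT]  == 118  (fell back to dev->irq)
     * irqs[PCIE_PORT_SERVICE_VC_SHIFT]  == -1   (VC never gets an irq)
     */
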
1960
1961 /*
1962 * If MSI cannot be used for PCIe PME or hotplug, we have to use
1963 @@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_
1964 */
1965 if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
1966 ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
1967 - if (dev->irq)
1968 - irq = dev->irq;
1969 - goto no_msi;
1970 + flags &= ~PCI_IRQ_MSI;
1971 + } else {
1972 + /* Try to use MSI-X if supported */
1973 + if (!pcie_port_enable_msix(dev, irqs, mask))
1974 + return 0;
1975 }
1976
1977 - /* Try to use MSI-X if supported */
1978 - if (!pcie_port_enable_msix(dev, irqs, mask))
1979 - return 0;
1980 -
1981 - /*
1982 - * We're not going to use MSI-X, so try MSI and fall back to INTx.
1983 - * If neither MSI/MSI-X nor INTx available, try other interrupt. On
1984 - * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode.
1985 - */
1986 - if (!pci_enable_msi(dev) || dev->irq)
1987 - irq = dev->irq;
1988 + ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
1989 + if (ret < 0)
1990 + return -ENODEV;
1991
1992 - no_msi:
1993 - for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
1994 - irqs[i] = irq;
1995 - irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
1996 + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
1997 + if (i != PCIE_PORT_SERVICE_VC_SHIFT)
1998 + irqs[i] = pci_irq_vector(dev, 0);
1999 + }
2000
2001 - if (irq < 0)
2002 - return -ENODEV;
2003 return 0;
2004 }
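
The single-vector tail above works unchanged for MSI and INTx because pci_irq_vector() is mode-agnostic: once PCI_IRQ_MSI is masked out (or the allocation falls through to legacy), vector 0 maps back to the pin in dev->irq. A condensed sketch, with platform_forbids_msi standing in for the pcie_pme_no_msi()/pciehp_no_msi() tests:

    static int demo_single_vector(struct pci_dev *pdev,
                                  bool platform_forbids_msi)
    {
            unsigned int flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
            int ret;

            if (platform_forbids_msi)
                    flags &= ~PCI_IRQ_MSI;

            ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
            if (ret < 0)
                    return -ENODEV;

            /* For legacy INTx, vector 0 is simply pdev->irq. */
            return pci_irq_vector(pdev, 0);
    }
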
2005
2006 -static void cleanup_service_irqs(struct pci_dev *dev)
2007 -{
2008 - if (dev->msix_enabled)
2009 - pci_disable_msix(dev);
2010 - else if (dev->msi_enabled)
2011 - pci_disable_msi(dev);
2012 -}
2013 -
2014 /**
2015 * get_port_device_capability - discover capabilities of a PCI Express port
2016 * @dev: PCI Express port to examine
2017 @@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci
2018 * that can be used in the absence of irqs. Allow them to determine
2019 * if that is to be used.
2020 */
2021 - status = init_service_irqs(dev, irqs, capabilities);
2022 + status = pcie_init_service_irqs(dev, irqs, capabilities);
2023 if (status) {
2024 capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
2025 if (!capabilities)
2026 @@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci
2027 return 0;
2028
2029 error_cleanup_irqs:
2030 - cleanup_service_irqs(dev);
2031 + pci_free_irq_vectors(dev);
2032 error_disable:
2033 pci_disable_device(dev);
2034 return status;
2035 @@ -469,7 +433,7 @@ static int remove_iter(struct device *de
2036 void pcie_port_device_remove(struct pci_dev *dev)
2037 {
2038 device_for_each_child(&dev->dev, NULL, remove_iter);
2039 - cleanup_service_irqs(dev);
2040 + pci_free_irq_vectors(dev);
2041 pci_disable_device(dev);
2042 }
2043
2044 @@ -499,7 +463,6 @@ static int pcie_port_probe_service(struc
2045 if (status)
2046 return status;
2047
2048 - dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
2049 get_device(dev);
2050 return 0;
2051 }
2052 @@ -524,8 +487,6 @@ static int pcie_port_remove_service(stru
2053 pciedev = to_pcie_device(dev);
2054 driver = to_service_driver(dev->driver);
2055 if (driver && driver->remove) {
2056 - dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
2057 - driver->name);
2058 driver->remove(pciedev);
2059 put_device(dev);
2060 }
2061 --- a/drivers/pci/quirks.c
2062 +++ b/drivers/pci/quirks.c
2063 @@ -3329,6 +3329,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_A
2064 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
2065 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
2066
2067 +/*
2068 + * NXP (Freescale Vendor ID) LS1088 chips do not behave correctly after
 2071› 2069 + * a bus reset: the device's link never comes back up, so its config
 2072› 2070 + * space is never accessible again.
2071 + */
2072 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x80c0, quirk_no_bus_reset);
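
For reference, quirk_no_bus_reset() is defined earlier in quirks.c and is not touched by this patch; it only flags the device, and the reset paths in drivers/pci/pci.c then decline a secondary bus reset for it:

    static void quirk_no_bus_reset(struct pci_dev *dev)
    {
            dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
    }
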
2073 +
2074 static void quirk_no_pm_reset(struct pci_dev *dev)
2075 {
2076 /*
2077 @@ -4673,3 +4680,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
2078 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
2079 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
2080 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
2081 +
2082 +/* Freescale PCIe doesn't support MSI in RC mode */
2083 +static void quirk_fsl_no_msi(struct pci_dev *pdev)
2084 +{
2085 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
2086 + pdev->no_msi = 1;
2087 +}
2088 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
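
Setting no_msi on the root port is enough because every MSI/MSI-X request is vetted by pci_msi_supported() in drivers/pci/msi.c, roughly as below (condensed from the 4.9 sources, shown only to explain the fallback):

    /* pci_msi_supported(), condensed: rejecting the request up front
     * means pcie_init_service_irqs() above lands in its INTx path on
     * these root complexes.
     */
    if (!dev || dev->no_msi || dev->current_state != PCI_D0)
            return 0;
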
2089 --- a/include/linux/pci.h
2090 +++ b/include/linux/pci.h
2091 @@ -1825,6 +1825,7 @@ void pcibios_release_device(struct pci_d
2092 void pcibios_penalize_isa_irq(int irq, int active);
2093 int pcibios_alloc_irq(struct pci_dev *dev);
2094 void pcibios_free_irq(struct pci_dev *dev);
2095 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
2096
2097 #ifdef CONFIG_HIBERNATE_CALLBACKS
2098 extern struct dev_pm_ops pcibios_pm_ops;