kernel: update 3.14 to 3.14.18
[openwrt/openwrt.git] / target / linux / ipq806x / patches / 0138-PCI-qcom-Add-support-for-pcie-controllers-on-IPQ8064.patch
1 From 98567c99b4dcd80fc9e5dd97229ebb9a7f6dab03 Mon Sep 17 00:00:00 2001
2 From: Kumar Gala <galak@codeaurora.org>
3 Date: Fri, 16 May 2014 11:53:23 -0500
4 Subject: [PATCH 138/182] PCI: qcom: Add support for pcie controllers on
5 IPQ8064
6
7 ---
8 arch/arm/mach-qcom/Kconfig | 2 +
9 drivers/pci/host/Makefile | 1 +
10 drivers/pci/host/pci-qcom.c | 682 +++++++++++++++++++++++++++++++++++++++++++
11 3 files changed, 685 insertions(+)
12 create mode 100644 drivers/pci/host/pci-qcom.c
13
14 --- a/arch/arm/mach-qcom/Kconfig
15 +++ b/arch/arm/mach-qcom/Kconfig
16 @@ -7,6 +7,8 @@ config ARCH_QCOM
17 select GENERIC_CLOCKEVENTS
18 select HAVE_SMP
19 select PINCTRL
20 + select MIGHT_HAVE_PCI
21 + select PCI_DOMAINS if PCI
22 select QCOM_SCM if SMP
23 help
24 Support for Qualcomm's devicetree based systems.
25 --- a/drivers/pci/host/Makefile
26 +++ b/drivers/pci/host/Makefile
27 @@ -4,3 +4,4 @@ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
28 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
29 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
30 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
31 +obj-$(CONFIG_ARCH_QCOM) += pci-qcom.o
32 --- /dev/null
33 +++ b/drivers/pci/host/pci-qcom.c
34 @@ -0,0 +1,682 @@
35 +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
36 + *
37 + * This program is free software; you can redistribute it and/or modify
38 + * it under the terms of the GNU General Public License version 2 and
39 + * only version 2 as published by the Free Software Foundation.
40 + *
41 + * This program is distributed in the hope that it will be useful,
42 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44 + * GNU General Public License for more details.
45 + */
46 +
47 +/*
48 + * QCOM MSM PCIe controller driver.
49 + */
50 +
51 +#include <linux/kernel.h>
52 +#include <linux/pci.h>
53 +#include <linux/gpio.h>
54 +#include <linux/of_gpio.h>
55 +#include <linux/platform_device.h>
56 +#include <linux/of_address.h>
57 +#include <linux/clk.h>
58 +#include <linux/reset.h>
59 +#include <linux/delay.h>
60 +
61 +/* Root Complex Port vendor/device IDs */
62 +#define PCIE_VENDOR_ID_RCP 0x17cb
63 +#define PCIE_DEVICE_ID_RCP 0x0101
64 +
65 +#define __set(v, a, b) (((v) << (b)) & GENMASK(a, b))
66 +
67 +#define PCIE20_PARF_PCS_DEEMPH 0x34
68 +#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(x) __set(x, 21, 16)
69 +#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) __set(x, 13, 8)
70 +#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) __set(x, 5, 0)
71 +
72 +#define PCIE20_PARF_PCS_SWING 0x38
73 +#define PCIE20_PARF_PCS_SWING_TX_SWING_FULL(x) __set(x, 14, 8)
74 +#define PCIE20_PARF_PCS_SWING_TX_SWING_LOW(x) __set(x, 6, 0)
75 +
76 +#define PCIE20_PARF_PHY_CTRL 0x40
77 +#define PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(x) __set(x, 20, 16)
78 +#define PCIE20_PARF_PHY_CTRL_PHY_LOS_LEVEL(x) __set(x, 12, 8)
79 +#define PCIE20_PARF_PHY_CTRL_PHY_RTUNE_REQ (1 << 4)
80 +#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BURNIN (1 << 2)
81 +#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BYPASS (1 << 1)
82 +#define PCIE20_PARF_PHY_CTRL_PHY_TEST_PWR_DOWN (1 << 0)
83 +
84 +#define PCIE20_PARF_PHY_REFCLK 0x4C
85 +#define PCIE20_PARF_CONFIG_BITS 0x50
86 +
87 +#define PCIE20_ELBI_SYS_CTRL 0x04
88 +#define PCIE20_ELBI_SYS_CTRL_LTSSM_EN 0x01
89 +
90 +#define PCIE20_CAP 0x70
91 +#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
92 +
93 +#define PCIE20_COMMAND_STATUS 0x04
94 +#define PCIE20_BUSNUMBERS 0x18
95 +#define PCIE20_MEMORY_BASE_LIMIT 0x20
96 +
97 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
98 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
99 +#define PCIE20_PLR_IATU_VIEWPORT 0x900
100 +#define PCIE20_PLR_IATU_CTRL1 0x904
101 +#define PCIE20_PLR_IATU_CTRL2 0x908
102 +#define PCIE20_PLR_IATU_LBAR 0x90C
103 +#define PCIE20_PLR_IATU_UBAR 0x910
104 +#define PCIE20_PLR_IATU_LAR 0x914
105 +#define PCIE20_PLR_IATU_LTAR 0x918
106 +#define PCIE20_PLR_IATU_UTAR 0x91c
107 +
108 +#define MSM_PCIE_DEV_CFG_ADDR 0x01000000
109 +
110 +#define RD 0
111 +#define WR 1
112 +
113 +#define MAX_RC_NUM 3
114 +#define PCIE_BUS_PRIV_DATA(pdev) \
115 + (((struct pci_sys_data *)pdev->bus->sysdata)->private_data)
116 +
117 +/* PCIe TLP types that we are interested in */
118 +#define PCI_CFG0_RDWR 0x4
119 +#define PCI_CFG1_RDWR 0x5
120 +
121 +#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
122 +({ \
123 + unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \
124 + might_sleep_if(timeout_us); \
125 + for (;;) { \
126 + (val) = readl(addr); \
127 + if (cond) \
128 + break; \
129 + if (timeout_us && time_after(jiffies, timeout)) { \
130 + (val) = readl(addr); \
131 + break; \
132 + } \
133 + if (sleep_us) \
134 + usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
135 + } \
136 + (cond) ? 0 : -ETIMEDOUT; \
137 +})
138 +
139 +struct qcom_pcie {
140 + void __iomem *elbi_base;
141 + void __iomem *parf_base;
142 + void __iomem *dwc_base;
143 + void __iomem *cfg_base;
144 + int reset_gpio;
145 + struct clk *iface_clk;
146 + struct clk *bus_clk;
147 + struct clk *phy_clk;
148 + int irq_int[4];
149 + struct reset_control *axi_reset;
150 + struct reset_control *ahb_reset;
151 + struct reset_control *por_reset;
152 + struct reset_control *pci_reset;
153 + struct reset_control *phy_reset;
154 +
155 + struct resource conf;
156 + struct resource io;
157 + struct resource mem;
158 +};
159 +
160 +static int qcom_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
161 +static int qcom_pcie_setup(int nr, struct pci_sys_data *sys);
162 +static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
163 + int size, u32 *val);
164 +static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
165 + int where, int size, u32 val);
166 +
167 +static struct pci_ops qcom_pcie_ops = {
168 + .read = msm_pcie_rd_conf,
169 + .write = msm_pcie_wr_conf,
170 +};
171 +
172 +static struct hw_pci qcom_hw_pci[MAX_RC_NUM] = {
173 + {
174 +#ifdef CONFIG_PCI_DOMAINS
175 + .domain = 0,
176 +#endif
177 + .ops = &qcom_pcie_ops,
178 + .nr_controllers = 1,
179 + .swizzle = pci_common_swizzle,
180 + .setup = qcom_pcie_setup,
181 + .map_irq = qcom_pcie_map_irq,
182 + },
183 + {
184 +#ifdef CONFIG_PCI_DOMAINS
185 + .domain = 1,
186 +#endif
187 + .ops = &qcom_pcie_ops,
188 + .nr_controllers = 1,
189 + .swizzle = pci_common_swizzle,
190 + .setup = qcom_pcie_setup,
191 + .map_irq = qcom_pcie_map_irq,
192 + },
193 + {
194 +#ifdef CONFIG_PCI_DOMAINS
195 + .domain = 2,
196 +#endif
197 + .ops = &qcom_pcie_ops,
198 + .nr_controllers = 1,
199 + .swizzle = pci_common_swizzle,
200 + .setup = qcom_pcie_setup,
201 + .map_irq = qcom_pcie_map_irq,
202 + },
203 +};
204 +
205 +static int nr_controllers;
206 +static DEFINE_SPINLOCK(qcom_hw_pci_lock);
207 +
208 +static inline struct qcom_pcie *sys_to_pcie(struct pci_sys_data *sys)
209 +{
210 + return sys->private_data;
211 +}
212 +
213 +inline int is_msm_pcie_rc(struct pci_bus *bus)
214 +{
215 + return (bus->number == 0);
216 +}
217 +
218 +static int qcom_pcie_is_link_up(struct qcom_pcie *dev)
219 +{
220 + return readl_relaxed(dev->dwc_base + PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
221 +}
222 +
223 +inline int msm_pcie_get_cfgtype(struct pci_bus *bus)
224 +{
225 +	/*
226 +	 * http://www.tldp.org/LDP/tlk/dd/pci.html
227 +	 * Pass it onto the secondary bus interface unchanged if the
228 +	 * bus number specified is greater than the secondary bus
229 +	 * number and less than or equal to the subordinate bus
230 +	 * number.
231 +	 *
232 +	 * Read/Write to the RC (bus 0) and Device/Switch connected
233 +	 * to the RC (bus 1) are CFG0 type transactions. Rest have to
234 +	 * be forwarded down stream as CFG1 transactions.
235 +	 *
236 +	 */
237 +	if (bus->number <= 1)
238 +		return PCI_CFG0_RDWR;
239 +
240 +	return PCI_CFG1_RDWR;
241 +}
242 +
243 +void msm_pcie_config_cfgtype(struct pci_bus *bus, u32 devfn)
244 +{
245 + uint32_t bdf, cfgtype;
246 + struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
247 +
248 + cfgtype = msm_pcie_get_cfgtype(bus);
249 +
250 + if (cfgtype == PCI_CFG0_RDWR) {
251 + bdf = MSM_PCIE_DEV_CFG_ADDR;
252 + } else {
253 + /*
254 + * iATU Lower Target Address Register
255 + * Bits Description
256 + * *-1:0 Forms bits [*:0] of the
257 + * start address of the new
258 + * address of the translated
259 + * region. The start address
260 + * must be aligned to a
261 + * CX_ATU_MIN_REGION_SIZE kB
262 + * boundary, so these bits are
263 + * always 0. A write to this
264 + * location is ignored by the
265 + * PCIe core.
266 + * 31:*1 Forms bits [31:*] of the of
267 + * the new address of the
268 + * translated region.
269 + *
270 + * * is log2(CX_ATU_MIN_REGION_SIZE)
271 + */
272 + bdf = (((bus->number & 0xff) << 24) & 0xff000000) |
273 + (((devfn & 0xff) << 16) & 0x00ff0000);
274 + }
275 +
276 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
277 + wmb();
278 +
279 + /* Program Bdf Address */
280 + writel_relaxed(bdf, dev->dwc_base + PCIE20_PLR_IATU_LTAR);
281 + wmb();
282 +
283 + /* Write Config Request Type */
284 + writel_relaxed(cfgtype, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
285 + wmb();
286 +}
287 +
288 +static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
289 + int where, int size, u32 *val)
290 +{
291 + uint32_t word_offset, byte_offset, mask;
292 + uint32_t rd_val, wr_val;
293 + struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
294 + void __iomem *config_base;
295 + int rc;
296 +
297 + rc = is_msm_pcie_rc(bus);
298 +
299 + /*
300 + * For downstream bus, make sure link is up
301 + */
302 + if (rc && (devfn != 0)) {
303 + *val = ~0;
304 + return PCIBIOS_DEVICE_NOT_FOUND;
305 + } else if ((!rc) && (!qcom_pcie_is_link_up(dev))) {
306 + *val = ~0;
307 + return PCIBIOS_DEVICE_NOT_FOUND;
308 + }
309 +
310 + msm_pcie_config_cfgtype(bus, devfn);
311 +
312 + word_offset = where & ~0x3;
313 + byte_offset = where & 0x3;
314 + mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
315 +
316 + config_base = (rc) ? dev->dwc_base : dev->cfg_base;
317 + rd_val = readl_relaxed(config_base + word_offset);
318 +
319 + if (oper == RD) {
320 + *val = ((rd_val & mask) >> (8 * byte_offset));
321 + } else {
322 + wr_val = (rd_val & ~mask) |
323 + ((*val << (8 * byte_offset)) & mask);
324 + writel_relaxed(wr_val, config_base + word_offset);
325 + wmb(); /* ensure config data is written to hardware register */
326 + }
327 +
328 + return 0;
329 +}
330 +
331 +static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
332 + int size, u32 *val)
333 +{
334 + return msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
335 +}
336 +
337 +static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
338 + int where, int size, u32 val)
339 +{
340 + /*
341 +	 * Attempting to reset the secondary bus causes the PCIe core to reset.
342 +	 * Disable secondary bus reset functionality.
343 + */
344 + if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
345 + (val & PCI_BRIDGE_CTL_BUS_RESET)) {
346 + pr_info("PCIE secondary bus reset not supported\n");
347 + val &= ~PCI_BRIDGE_CTL_BUS_RESET;
348 + }
349 +
350 + return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
351 +}
352 +
353 +static int qcom_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
354 +{
355 + struct qcom_pcie *pcie_dev = PCIE_BUS_PRIV_DATA(dev);
356 +
357 + return pcie_dev->irq_int[pin-1];
358 +}
359 +
360 +static int qcom_pcie_setup(int nr, struct pci_sys_data *sys)
361 +{
362 + struct qcom_pcie *qcom_pcie = sys->private_data;
363 +
364 + /*
365 +	 * Tell the Linux PCI framework to allocate device memory (BARs)
366 +	 * from this controller's mem resource.
367 + */
368 + sys->mem_offset = 0;
369 + sys->io_offset = 0;
370 +
371 + pci_add_resource(&sys->resources, &qcom_pcie->mem);
372 + pci_add_resource(&sys->resources, &qcom_pcie->io);
373 +
374 + return 1;
375 +}
376 +
377 +static inline void qcom_elbi_writel_relaxed(struct qcom_pcie *pcie, u32 val, u32 reg)
378 +{
379 + writel_relaxed(val, pcie->elbi_base + reg);
380 +}
381 +
382 +static inline u32 qcom_elbi_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
383 +{
384 + return readl_relaxed(pcie->elbi_base + reg);
385 +}
386 +
387 +static inline void qcom_parf_writel_relaxed(struct qcom_pcie *pcie, u32 val, u32 reg)
388 +{
389 + writel_relaxed(val, pcie->parf_base + reg);
390 +}
391 +
392 +static inline u32 qcom_parf_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
393 +{
394 + return readl_relaxed(pcie->parf_base + reg);
395 +}
396 +
397 +static void msm_pcie_write_mask(void __iomem *addr,
398 + uint32_t clear_mask, uint32_t set_mask)
399 +{
400 + uint32_t val;
401 +
402 + val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
403 + writel_relaxed(val, addr);
404 + wmb(); /* ensure data is written to hardware register */
405 +}
406 +
407 +static void qcom_pcie_config_controller(struct qcom_pcie *dev)
408 +{
409 + /*
410 + * program and enable address translation region 0 (device config
411 + * address space); region type config;
412 + * axi config address range to device config address range
413 + */
414 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
415 + /* ensure that hardware locks the region before programming it */
416 + wmb();
417 +
418 + writel_relaxed(4, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
419 + writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
420 + writel_relaxed(dev->conf.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
421 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
422 + writel_relaxed(dev->conf.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
423 + writel_relaxed(MSM_PCIE_DEV_CFG_ADDR,
424 + dev->dwc_base + PCIE20_PLR_IATU_LTAR);
425 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
426 + /* ensure that hardware registers the configuration */
427 + wmb();
428 +
429 + /*
430 + * program and enable address translation region 2 (device resource
431 + * address space); region type memory;
432 + * axi device bar address range to device bar address range
433 + */
434 + writel_relaxed(2, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
435 + /* ensure that hardware locks the region before programming it */
436 + wmb();
437 +
438 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
439 + writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
440 + writel_relaxed(dev->mem.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
441 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
442 + writel_relaxed(dev->mem.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
443 + writel_relaxed(dev->mem.start,
444 + dev->dwc_base + PCIE20_PLR_IATU_LTAR);
445 + writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
446 + /* ensure that hardware registers the configuration */
447 + wmb();
448 +
449 + /* 1K PCIE buffer setting */
450 + writel_relaxed(0x3, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
451 + writel_relaxed(0x1, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
452 + /* ensure that hardware registers the configuration */
453 + wmb();
454 +}
455 +
456 +static int qcom_pcie_probe(struct platform_device *pdev)
457 +{
458 + unsigned long flags;
459 + struct qcom_pcie *qcom_pcie;
460 + struct device_node *np = pdev->dev.of_node;
461 + struct resource *elbi_base, *parf_base, *dwc_base;
462 + struct hw_pci *hw;
463 + struct of_pci_range range;
464 + struct of_pci_range_parser parser;
465 + int ret, i;
466 + u32 val;
467 +
468 + qcom_pcie = devm_kzalloc(&pdev->dev, sizeof(*qcom_pcie), GFP_KERNEL);
469 + if (!qcom_pcie) {
470 + dev_err(&pdev->dev, "no memory for qcom_pcie\n");
471 + return -ENOMEM;
472 + }
473 +
474 + elbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
475 + qcom_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
476 + if (IS_ERR(qcom_pcie->elbi_base)) {
477 + dev_err(&pdev->dev, "Failed to ioremap elbi space\n");
478 + return PTR_ERR(qcom_pcie->elbi_base);
479 + }
480 +
481 + parf_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
482 + qcom_pcie->parf_base = devm_ioremap_resource(&pdev->dev, parf_base);
483 + if (IS_ERR(qcom_pcie->parf_base)) {
484 + dev_err(&pdev->dev, "Failed to ioremap parf space\n");
485 + return PTR_ERR(qcom_pcie->parf_base);
486 + }
487 +
488 + dwc_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
489 + qcom_pcie->dwc_base = devm_ioremap_resource(&pdev->dev, dwc_base);
490 + if (IS_ERR(qcom_pcie->dwc_base)) {
491 + dev_err(&pdev->dev, "Failed to ioremap dwc_base space\n");
492 + return PTR_ERR(qcom_pcie->dwc_base);
493 + }
494 +
495 + if (of_pci_range_parser_init(&parser, np)) {
496 + dev_err(&pdev->dev, "missing ranges property\n");
497 + return -EINVAL;
498 + }
499 +
500 + /* Get the I/O and memory ranges from DT */
501 + for_each_of_pci_range(&parser, &range) {
502 + switch (range.pci_space & 0x3) {
503 + case 0: /* cfg */
504 + of_pci_range_to_resource(&range, np, &qcom_pcie->conf);
505 + qcom_pcie->conf.flags = IORESOURCE_MEM;
506 + break;
507 + case 1: /* io */
508 + of_pci_range_to_resource(&range, np, &qcom_pcie->io);
509 + break;
510 + default: /* mem */
511 + of_pci_range_to_resource(&range, np, &qcom_pcie->mem);
512 + break;
513 + }
514 + }
515 +
516 + qcom_pcie->cfg_base = devm_ioremap_resource(&pdev->dev, &qcom_pcie->conf);
517 + if (IS_ERR(qcom_pcie->cfg_base)) {
518 + dev_err(&pdev->dev, "Failed to ioremap PCIe cfg space\n");
519 + return PTR_ERR(qcom_pcie->cfg_base);
520 + }
521 +
522 + qcom_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
523 + if (!gpio_is_valid(qcom_pcie->reset_gpio)) {
524 + dev_err(&pdev->dev, "pcie reset gpio is not valid\n");
525 + return -EINVAL;
526 + }
527 +
528 + ret = devm_gpio_request_one(&pdev->dev, qcom_pcie->reset_gpio,
529 + GPIOF_DIR_OUT, "pcie_reset");
530 + if (ret) {
531 + dev_err(&pdev->dev, "Failed to request pcie reset gpio\n");
532 + return ret;
533 + }
534 +
535 + qcom_pcie->iface_clk = devm_clk_get(&pdev->dev, "iface");
536 + if (IS_ERR(qcom_pcie->iface_clk)) {
537 + dev_err(&pdev->dev, "Failed to get pcie iface clock\n");
538 + return PTR_ERR(qcom_pcie->iface_clk);
539 + }
540 +
541 + qcom_pcie->phy_clk = devm_clk_get(&pdev->dev, "phy");
542 + if (IS_ERR(qcom_pcie->phy_clk)) {
543 + dev_err(&pdev->dev, "Failed to get pcie phy clock\n");
544 + return PTR_ERR(qcom_pcie->phy_clk);
545 + }
546 +
547 + qcom_pcie->bus_clk = devm_clk_get(&pdev->dev, "core");
548 + if (IS_ERR(qcom_pcie->bus_clk)) {
549 + dev_err(&pdev->dev, "Failed to get pcie core clock\n");
550 + return PTR_ERR(qcom_pcie->bus_clk);
551 + }
552 +
553 + qcom_pcie->axi_reset = devm_reset_control_get(&pdev->dev, "axi");
554 + if (IS_ERR(qcom_pcie->axi_reset)) {
555 + dev_err(&pdev->dev, "Failed to get axi reset\n");
556 + return PTR_ERR(qcom_pcie->axi_reset);
557 + }
558 +
559 + qcom_pcie->ahb_reset = devm_reset_control_get(&pdev->dev, "ahb");
560 + if (IS_ERR(qcom_pcie->ahb_reset)) {
561 + dev_err(&pdev->dev, "Failed to get ahb reset\n");
562 + return PTR_ERR(qcom_pcie->ahb_reset);
563 + }
564 +
565 + qcom_pcie->por_reset = devm_reset_control_get(&pdev->dev, "por");
566 + if (IS_ERR(qcom_pcie->por_reset)) {
567 + dev_err(&pdev->dev, "Failed to get por reset\n");
568 + return PTR_ERR(qcom_pcie->por_reset);
569 + }
570 +
571 + qcom_pcie->pci_reset = devm_reset_control_get(&pdev->dev, "pci");
572 + if (IS_ERR(qcom_pcie->pci_reset)) {
573 + dev_err(&pdev->dev, "Failed to get pci reset\n");
574 + return PTR_ERR(qcom_pcie->pci_reset);
575 + }
576 +
577 + qcom_pcie->phy_reset = devm_reset_control_get(&pdev->dev, "phy");
578 + if (IS_ERR(qcom_pcie->phy_reset)) {
579 + dev_err(&pdev->dev, "Failed to get phy reset\n");
580 + return PTR_ERR(qcom_pcie->phy_reset);
581 + }
582 +
583 + for (i = 0; i < 4; i++) {
584 + qcom_pcie->irq_int[i] = platform_get_irq(pdev, i+1);
585 + if (qcom_pcie->irq_int[i] < 0) {
586 + dev_err(&pdev->dev, "failed to get irq resource\n");
587 + return qcom_pcie->irq_int[i];
588 + }
589 + }
590 +
591 + gpio_set_value(qcom_pcie->reset_gpio, 0);
592 + usleep_range(10000, 15000);
593 +
594 + /* assert PCIe PARF reset while powering the core */
595 + reset_control_assert(qcom_pcie->ahb_reset);
596 +
597 + /* enable clocks */
598 + ret = clk_prepare_enable(qcom_pcie->iface_clk);
599 + if (ret)
600 + return ret;
601 + ret = clk_prepare_enable(qcom_pcie->phy_clk);
602 + if (ret)
603 + return ret;
604 + ret = clk_prepare_enable(qcom_pcie->bus_clk);
605 + if (ret)
606 + return ret;
607 +
608 + /*
609 + * de-assert PCIe PARF reset;
610 + * wait 1us before accessing PARF registers
611 + */
612 + reset_control_deassert(qcom_pcie->ahb_reset);
613 + udelay(1);
614 +
615 + /* enable PCIe clocks and resets */
616 + msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
617 +
618 + /* Set Tx Termination Offset */
619 + val = qcom_parf_readl_relaxed(qcom_pcie, PCIE20_PARF_PHY_CTRL);
620 + val |= PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(7);
621 + qcom_parf_writel_relaxed(qcom_pcie, val, PCIE20_PARF_PHY_CTRL);
622 +
623 + /* PARF programming */
624 + qcom_parf_writel_relaxed(qcom_pcie, PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) |
625 + PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) |
626 + PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22),
627 + PCIE20_PARF_PCS_DEEMPH);
628 + qcom_parf_writel_relaxed(qcom_pcie, PCIE20_PARF_PCS_SWING_TX_SWING_FULL(0x78) |
629 + PCIE20_PARF_PCS_SWING_TX_SWING_LOW(0x78),
630 + PCIE20_PARF_PCS_SWING);
631 + qcom_parf_writel_relaxed(qcom_pcie, (4<<24), PCIE20_PARF_CONFIG_BITS);
632 + /* ensure that hardware registers the PARF configuration */
633 + wmb();
634 +
635 + /* enable reference clock */
636 + msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_REFCLK, BIT(12), BIT(16));
637 +
638 + /* ensure that access is enabled before proceeding */
639 + wmb();
640 +
641 +	/* de-assert PCIe PHY, Core, POR and AXI clk domain resets */
642 + reset_control_deassert(qcom_pcie->phy_reset);
643 + reset_control_deassert(qcom_pcie->pci_reset);
644 + reset_control_deassert(qcom_pcie->por_reset);
645 + reset_control_deassert(qcom_pcie->axi_reset);
646 +
647 +	/* wait 10-15 ms for clock acquisition */
648 + usleep_range(10000, 15000);
649 +
650 + /* de-assert PCIe reset link to bring EP out of reset */
651 + gpio_set_value(qcom_pcie->reset_gpio, 1 - 0);
652 + usleep_range(10000, 15000);
653 +
654 + /* enable link training */
655 + val = qcom_elbi_readl_relaxed(qcom_pcie, PCIE20_ELBI_SYS_CTRL);
656 + val |= PCIE20_ELBI_SYS_CTRL_LTSSM_EN;
657 + qcom_elbi_writel_relaxed(qcom_pcie, val, PCIE20_ELBI_SYS_CTRL);
658 + wmb();
659 +
660 +	/* poll for link to come up for up to 100 ms */
661 + ret = readl_poll_timeout(
662 + (qcom_pcie->dwc_base + PCIE20_CAP_LINKCTRLSTATUS),
663 + val, (val & BIT(29)), 10000, 100000);
664 +
665 + printk("link initialized %d\n", ret);
666 +
667 + qcom_pcie_config_controller(qcom_pcie);
668 +
669 + platform_set_drvdata(pdev, qcom_pcie);
670 +
671 + spin_lock_irqsave(&qcom_hw_pci_lock, flags);
672 + qcom_hw_pci[nr_controllers].private_data = (void **)&qcom_pcie;
673 + hw = &qcom_hw_pci[nr_controllers];
674 + nr_controllers++;
675 + spin_unlock_irqrestore(&qcom_hw_pci_lock, flags);
676 +
677 + pci_common_init(hw);
678 +
679 + return 0;
680 +}
681 +
682 +static int __exit qcom_pcie_remove(struct platform_device *pdev)
683 +{
684 + struct qcom_pcie *qcom_pcie = platform_get_drvdata(pdev);
685 +
686 + return 0;
687 +}
688 +
689 +static struct of_device_id qcom_pcie_match[] = {
690 + { .compatible = "qcom,pcie-ipq8064", },
691 + {}
692 +};
693 +
694 +static struct platform_driver qcom_pcie_driver = {
695 + .probe = qcom_pcie_probe,
696 + .remove = qcom_pcie_remove,
697 + .driver = {
698 + .name = "qcom_pcie",
699 + .owner = THIS_MODULE,
700 + .of_match_table = qcom_pcie_match,
701 + },
702 +};
703 +
704 +static int qcom_pcie_init(void)
705 +{
706 + return platform_driver_register(&qcom_pcie_driver);
707 +}
708 +subsys_initcall(qcom_pcie_init);
709 +
710 +/* The RC does not report the right class; set it to PCI_CLASS_BRIDGE_PCI */
711 +static void msm_pcie_fixup_early(struct pci_dev *dev)
712 +{
713 + if (dev->hdr_type == 1)
714 + dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
715 +}
716 +DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, msm_pcie_fixup_early);