kernel: bump 4.19 to 4.19.85
[openwrt/staging/wigyori.git] target/linux/mediatek/patches-4.19/0101-pci-mediatek-backport-fix-pcie.patch
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -34,6 +34,17 @@ struct clk *devm_clk_get(struct device *
 }
 EXPORT_SYMBOL(devm_clk_get);
 
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+	struct clk *clk = devm_clk_get(dev, id);
+
+	if (clk == ERR_PTR(-ENOENT))
+		return NULL;
+
+	return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
+
 struct clk_bulk_devres {
 	struct clk_bulk_data *clks;
 	int num_clks;
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/msi.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
@@ -162,6 +163,7 @@ struct mtk_pcie_soc {
  * @phy: pointer to PHY control block
  * @lane: lane count
  * @slot: port slot
+ * @irq: GIC irq
  * @irq_domain: legacy INTx IRQ domain
  * @inner_domain: inner IRQ domain
  * @msi_domain: MSI IRQ domain
@@ -182,6 +184,7 @@ struct mtk_pcie_port {
 	struct phy *phy;
 	u32 lane;
 	u32 slot;
+	int irq;
 	struct irq_domain *irq_domain;
 	struct irq_domain *inner_domain;
 	struct irq_domain *msi_domain;
@@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(st
 
 	clk_disable_unprepare(pcie->free_ck);
 
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 }
 
 static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -394,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 =
 	.write = mtk_pcie_config_write,
 };
 
-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
-{
-	struct mtk_pcie *pcie = port->pcie;
-	struct resource *mem = &pcie->mem;
-	const struct mtk_pcie_soc *soc = port->pcie->soc;
-	u32 val;
-	size_t size;
-	int err;
-
-	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
-	if (pcie->base) {
-		val = readl(pcie->base + PCIE_SYS_CFG_V2);
-		val |= PCIE_CSR_LTSSM_EN(port->slot) |
-		       PCIE_CSR_ASPM_L1_EN(port->slot);
-		writel(val, pcie->base + PCIE_SYS_CFG_V2);
-	}
-
-	/* Assert all reset signals */
-	writel(0, port->base + PCIE_RST_CTRL);
-
-	/*
-	 * Enable PCIe link down reset, if link status changed from link up to
-	 * link down, this will reset MAC control registers and configuration
-	 * space.
-	 */
-	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
-
-	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
-	val = readl(port->base + PCIE_RST_CTRL);
-	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
-	       PCIE_MAC_SRSTB | PCIE_CRSTB;
-	writel(val, port->base + PCIE_RST_CTRL);
-
-	/* Set up vendor ID and class code */
-	if (soc->need_fix_class_id) {
-		val = PCI_VENDOR_ID_MEDIATEK;
-		writew(val, port->base + PCIE_CONF_VEND_ID);
-
-		val = PCI_CLASS_BRIDGE_HOST;
-		writew(val, port->base + PCIE_CONF_CLASS_ID);
-	}
-
-	/* 100ms timeout value should be enough for Gen1/2 training */
-	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
-				 !!(val & PCIE_PORT_LINKUP_V2), 20,
-				 100 * USEC_PER_MSEC);
-	if (err)
-		return -ETIMEDOUT;
-
-	/* Set INTx mask */
-	val = readl(port->base + PCIE_INT_MASK);
-	val &= ~INTX_MASK;
-	writel(val, port->base + PCIE_INT_MASK);
-
-	/* Set AHB to PCIe translation windows */
-	size = mem->end - mem->start;
-	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
-	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
-
-	val = upper_32_bits(mem->start);
-	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
-
-	/* Set PCIe to AXI translation memory space.*/
-	val = fls(0xffffffff) | WIN_ENABLE;
-	writel(val, port->base + PCIE_AXI_WINDOW0);
-
-	return 0;
-}
-
 static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
 	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -601,6 +533,27 @@ static void mtk_pcie_enable_msi(struct m
 	writel(val, port->base + PCIE_INT_MASK);
 }
 
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+		if (port->irq_domain)
+			irq_domain_remove(port->irq_domain);
+
+		if (IS_ENABLED(CONFIG_PCI_MSI)) {
+			if (port->msi_domain)
+				irq_domain_remove(port->msi_domain);
+			if (port->inner_domain)
+				irq_domain_remove(port->inner_domain);
+		}
+
+		irq_dispose_mapping(port->irq);
+	}
+}
+
 static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 			     irq_hw_number_t hwirq)
 {
@@ -630,6 +583,7 @@ static int mtk_pcie_init_irq_domain(stru
 
 	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 						 &intx_domain_ops, port);
+	of_node_put(pcie_intc_node);
 	if (!port->irq_domain) {
 		dev_err(dev, "failed to get INTx IRQ domain\n");
 		return -ENODEV;
@@ -639,8 +593,6 @@ static int mtk_pcie_init_irq_domain(stru
 		ret = mtk_pcie_allocate_msi_domains(port);
 		if (ret)
 			return ret;
-
-		mtk_pcie_enable_msi(port);
 	}
 
 	return 0;
@@ -693,7 +645,7 @@ static int mtk_pcie_setup_irq(struct mtk
 	struct mtk_pcie *pcie = port->pcie;
 	struct device *dev = pcie->dev;
 	struct platform_device *pdev = to_platform_device(dev);
-	int err, irq;
+	int err;
 
 	err = mtk_pcie_init_irq_domain(port, node);
 	if (err) {
@@ -701,8 +653,81 @@ static int mtk_pcie_setup_irq(struct mtk
 		return err;
 	}
 
-	irq = platform_get_irq(pdev, port->slot);
-	irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+	port->irq = platform_get_irq(pdev, port->slot);
+	irq_set_chained_handler_and_data(port->irq,
+					 mtk_pcie_intr_handler, port);
+
+	return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct resource *mem = &pcie->mem;
+	const struct mtk_pcie_soc *soc = port->pcie->soc;
+	u32 val;
+	size_t size;
+	int err;
+
+	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+	if (pcie->base) {
+		val = readl(pcie->base + PCIE_SYS_CFG_V2);
+		val |= PCIE_CSR_LTSSM_EN(port->slot) |
+		       PCIE_CSR_ASPM_L1_EN(port->slot);
+		writel(val, pcie->base + PCIE_SYS_CFG_V2);
+	}
+
+	/* Assert all reset signals */
+	writel(0, port->base + PCIE_RST_CTRL);
+
+	/*
+	 * Enable PCIe link down reset, if link status changed from link up to
+	 * link down, this will reset MAC control registers and configuration
+	 * space.
+	 */
+	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
+	val = readl(port->base + PCIE_RST_CTRL);
+	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+	       PCIE_MAC_SRSTB | PCIE_CRSTB;
+	writel(val, port->base + PCIE_RST_CTRL);
+
+	/* Set up vendor ID and class code */
+	if (soc->need_fix_class_id) {
+		val = PCI_VENDOR_ID_MEDIATEK;
+		writew(val, port->base + PCIE_CONF_VEND_ID);
+
+		val = PCI_CLASS_BRIDGE_PCI;
+		writew(val, port->base + PCIE_CONF_CLASS_ID);
+	}
+
+	/* 100ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+				 !!(val & PCIE_PORT_LINKUP_V2), 20,
+				 100 * USEC_PER_MSEC);
+	if (err)
+		return -ETIMEDOUT;
+
+	/* Set INTx mask */
+	val = readl(port->base + PCIE_INT_MASK);
+	val &= ~INTX_MASK;
+	writel(val, port->base + PCIE_INT_MASK);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		mtk_pcie_enable_msi(port);
+
+	/* Set AHB to PCIe translation windows */
+	size = mem->end - mem->start;
+	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+	val = upper_32_bits(mem->start);
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space.*/
+	val = fls(0xffffffff) | WIN_ENABLE;
+	writel(val, port->base + PCIE_AXI_WINDOW0);
 
 	return 0;
 }
@@ -903,49 +928,29 @@ static int mtk_pcie_parse_port(struct mt
 
 	/* sys_ck might be divided into the following parts in some chips */
 	snprintf(name, sizeof(name), "ahb_ck%d", slot);
-	port->ahb_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->ahb_ck)) {
-		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->ahb_ck = NULL;
-	}
+	port->ahb_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->ahb_ck))
+		return PTR_ERR(port->ahb_ck);
 
 	snprintf(name, sizeof(name), "axi_ck%d", slot);
-	port->axi_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->axi_ck)) {
-		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->axi_ck = NULL;
-	}
+	port->axi_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->axi_ck))
+		return PTR_ERR(port->axi_ck);
 
 	snprintf(name, sizeof(name), "aux_ck%d", slot);
-	port->aux_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->aux_ck)) {
-		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->aux_ck = NULL;
-	}
+	port->aux_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->aux_ck))
+		return PTR_ERR(port->aux_ck);
 
 	snprintf(name, sizeof(name), "obff_ck%d", slot);
-	port->obff_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->obff_ck)) {
-		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->obff_ck = NULL;
-	}
+	port->obff_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->obff_ck))
+		return PTR_ERR(port->obff_ck);
 
 	snprintf(name, sizeof(name), "pipe_ck%d", slot);
-	port->pipe_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->pipe_ck)) {
-		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->pipe_ck = NULL;
-	}
+	port->pipe_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->pipe_ck))
+		return PTR_ERR(port->pipe_ck);
 
 	snprintf(name, sizeof(name), "pcie-rst%d", slot);
 	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
@@ -998,10 +1003,8 @@ static int mtk_pcie_subsys_powerup(struc
 		pcie->free_ck = NULL;
 	}
 
-	if (dev->pm_domain) {
-		pm_runtime_enable(dev);
-		pm_runtime_get_sync(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
 
 	/* enable top level clock */
 	err = clk_prepare_enable(pcie->free_ck);
@@ -1013,10 +1016,8 @@ static int mtk_pcie_subsys_powerup(struc
 	return 0;
 
 err_free_ck:
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 
 	return err;
 }
@@ -1127,34 +1128,6 @@ static int mtk_pcie_request_resources(st
 	return 0;
 }
 
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
-	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
-	struct pci_bus *child;
-	int err;
-
-	host->busnr = pcie->busn.start;
-	host->dev.parent = pcie->dev;
-	host->ops = pcie->soc->ops;
-	host->map_irq = of_irq_parse_and_map_pci;
-	host->swizzle_irq = pci_common_swizzle;
-	host->sysdata = pcie;
-
-	err = pci_scan_root_bus_bridge(host);
-	if (err < 0)
-		return err;
-
-	pci_bus_size_bridges(host->bus);
-	pci_bus_assign_resources(host->bus);
-
-	list_for_each_entry(child, &host->bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(host->bus);
-
-	return 0;
-}
-
 static int mtk_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1181,7 +1154,14 @@ static int mtk_pcie_probe(struct platfor
 	if (err)
 		goto put_resources;
 
-	err = mtk_pcie_register_host(host);
+	host->busnr = pcie->busn.start;
+	host->dev.parent = pcie->dev;
+	host->ops = pcie->soc->ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->sysdata = pcie;
+
+	err = pci_host_probe(host);
 	if (err)
 		goto put_resources;
 
@@ -1194,6 +1174,80 @@ put_resources:
 	return err;
 }
 
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	mtk_pcie_free_resources(pcie);
+
+	mtk_pcie_irq_teardown(pcie);
+
+	mtk_pcie_put_resources(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		clk_disable_unprepare(port->pipe_ck);
+		clk_disable_unprepare(port->obff_ck);
+		clk_disable_unprepare(port->axi_ck);
+		clk_disable_unprepare(port->aux_ck);
+		clk_disable_unprepare(port->ahb_ck);
+		clk_disable_unprepare(port->sys_ck);
+		phy_power_off(port->phy);
+		phy_exit(port->phy);
+	}
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port, *tmp;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	clk_prepare_enable(pcie->free_ck);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_port(port);
+
+	/* In case of EP was removed while system suspend. */
+	if (list_empty(&pcie->ports))
+		clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+				      mtk_pcie_resume_noirq)
+};
+
 static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
 	.ops = &mtk_pcie_ops,
 	.startup = mtk_pcie_startup_port,
@@ -1222,10 +1276,13 @@ static const struct of_device_id mtk_pci
 
 static struct platform_driver mtk_pcie_driver = {
 	.probe = mtk_pcie_probe,
+	.remove = mtk_pcie_remove,
 	.driver = {
 		.name = "mtk-pcie",
 		.of_match_table = mtk_pcie_ids,
 		.suppress_bind_attrs = true,
+		.pm = &mtk_pcie_pm_ops,
 	},
 };
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -349,6 +349,17 @@ int __must_check devm_clk_bulk_get(struc
 struct clk *devm_clk_get(struct device *dev, const char *id);
 
 /**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ *			   clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as devm_clk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns NULL.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
  * devm_get_clk_from_child - lookup and obtain a managed reference to a
  *			     clock producer from child node.
  * @dev: device for clock "consumer"