/* target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.c */
1 #include <linux/types.h>
2 #include <linux/module.h>
3 #include <linux/pci.h>
4 #include <linux/kernel.h>
5 #include <linux/init.h>
6 #include <linux/delay.h>
7 #include <linux/mm.h>
8 #include <asm/paccess.h>
10 #include <linux/pci_regs.h>
11 #include <linux/platform_device.h>
12
13 #define CONFIG_IFX_PCIE_1ST_CORE
14
15 #include "pcie-lantiq.h"
16
17 #define IFX_PCIE_IR (INT_NUM_IM4_IRL0 + 25)
18 #define IFX_PCIE_INTA (INT_NUM_IM4_IRL0 + 8)
19 #define IFX_PCIE_INTB (INT_NUM_IM4_IRL0 + 9)
20 #define IFX_PCIE_INTC (INT_NUM_IM4_IRL0 + 10)
21 #define IFX_PCIE_INTD (INT_NUM_IM4_IRL0 + 11)
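/*
 * MS()/SM() extract or build a register field: _f is the field mask and
 * _f##_S the matching shift constant defined alongside it (in pcie-lantiq.h).
 */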
22 #define MS(_v, _f) (((_v) & (_f)) >> _f##_S)
23 #define SM(_v, _f) (((_v) << _f##_S) & (_f))
24 #define IFX_REG_SET_BIT(_f, _r) \
25 IFX_REG_W32((IFX_REG_R32((_r)) &~ (_f)) | (_f), (_r))
26 #define IFX_PCIE_LTSSM_ENABLE_TIMEOUT 10
27 #define IFX_PCIE_PHY_LINK_UP_TIMEOUT 1000
28 #define IFX_PCIE_PHY_LOOP_CNT 5
29
30 static DEFINE_SPINLOCK(ifx_pcie_lock);
31
32 int pcibios_1st_host_bus_nr(void);
33
34 unsigned int g_pcie_debug_flag = PCIE_MSG_ANY & (~PCIE_MSG_CFG);
35
36 static ifx_pcie_irq_t pcie_irqs[IFX_PCIE_CORE_NR] = {
37 {
38 .ir_irq = {
39 .irq = IFX_PCIE_IR,
40 .name = "ifx_pcie_rc0",
41 },
42
43 .legacy_irq = {
44 {
45 .irq_bit = PCIE_IRN_INTA,
46 .irq = IFX_PCIE_INTA,
47 },
48 {
49 .irq_bit = PCIE_IRN_INTB,
50 .irq = IFX_PCIE_INTB,
51 },
52 {
53 .irq_bit = PCIE_IRN_INTC,
54 .irq = IFX_PCIE_INTC,
55 },
56 {
57 .irq_bit = PCIE_IRN_INTD,
58 .irq = IFX_PCIE_INTD,
59 },
60 },
61 },
62 };
63
64 static inline int pcie_ltssm_enable(int pcie_port)
65 {
66 int i;
67
68 IFX_REG_W32(PCIE_RC_CCR_LTSSM_ENABLE, PCIE_RC_CCR(pcie_port)); /* Enable LTSSM */
69
70 /* Wait for the link to come up */
71 for (i = 0; i < IFX_PCIE_LTSSM_ENABLE_TIMEOUT; i++) {
72 if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_RETRAIN_PENDING)) {
73 break;
74 }
75 udelay(10);
76 }
77 if (i >= IFX_PCIE_LTSSM_ENABLE_TIMEOUT) {
78 IFX_PCIE_PRINT(PCIE_MSG_INIT, "%s link timeout!!!!!\n", __func__);
79 return -1;
80 }
81 return 0;
82 }
83
84 static inline void pcie_status_register_clear(int pcie_port)
85 {
86 IFX_REG_W32(0, PCIE_RC_DR(pcie_port));
87 IFX_REG_W32(0, PCIE_PCICMDSTS(pcie_port));
88 IFX_REG_W32(0, PCIE_DCTLSTS(pcie_port));
89 IFX_REG_W32(0, PCIE_LCTLSTS(pcie_port));
90 IFX_REG_W32(0, PCIE_SLCTLSTS(pcie_port));
91 IFX_REG_W32(0, PCIE_RSTS(pcie_port));
92 IFX_REG_W32(0, PCIE_UES_R(pcie_port));
93 IFX_REG_W32(0, PCIE_UEMR(pcie_port));
94 IFX_REG_W32(0, PCIE_UESR(pcie_port));
95 IFX_REG_W32(0, PCIE_CESR(pcie_port));
96 IFX_REG_W32(0, PCIE_CEMR(pcie_port));
97 IFX_REG_W32(0, PCIE_RESR(pcie_port));
98 IFX_REG_W32(0, PCIE_PVCCRSR(pcie_port));
99 IFX_REG_W32(0, PCIE_VC0_RSR0(pcie_port));
100 IFX_REG_W32(0, PCIE_TPFCS(pcie_port));
101 IFX_REG_W32(0, PCIE_TNPFCS(pcie_port));
102 IFX_REG_W32(0, PCIE_TCFCS(pcie_port));
103 IFX_REG_W32(0, PCIE_QSR(pcie_port));
104 IFX_REG_W32(0, PCIE_IOBLSECS(pcie_port));
105 }
106
107 static inline int ifx_pcie_link_up(int pcie_port)
108 {
109 return (IFX_REG_R32(PCIE_PHY_SR(pcie_port)) & PCIE_PHY_SR_PHY_LINK_UP) ? 1 : 0;
110 }
111
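/*
 * Program the RC type 1 header memory and I/O forwarding windows: memory
 * base/limit in 1 MB units (>> 20), I/O base/limit in 4 KB units (>> 12),
 * with the upper 16 I/O address bits written in a separate register.
 */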
112 static inline void pcie_mem_io_setup(int pcie_port)
113 {
114 unsigned int reg;
115 /*
116 * BAR[0:1] are read-only registers.
117 * The RC contains only minimal BARs for packets mapped to this device.
118 * The Mem/IO filters define the range of memory occupied by memory-mapped IO devices
119 * that reside on the downstream side of the bridge.
120 */
121 reg = SM((PCIE_MEM_PHY_PORT_TO_END(pcie_port) >> 20), PCIE_MBML_MEM_LIMIT_ADDR)
122 | SM((PCIE_MEM_PHY_PORT_TO_BASE(pcie_port) >> 20), PCIE_MBML_MEM_BASE_ADDR);
123 IFX_REG_W32(reg, PCIE_MBML(pcie_port));
124
125 /* PCIe_PBML, same as MBML */
126 IFX_REG_W32(IFX_REG_R32(PCIE_MBML(pcie_port)), PCIE_PMBL(pcie_port));
127
128 /* IO Address Range */
129 reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 12), PCIE_IOBLSECS_IO_LIMIT_ADDR)
130 | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 12), PCIE_IOBLSECS_IO_BASE_ADDR);
131 reg |= PCIE_IOBLSECS_32BIT_IO_ADDR;
132 IFX_REG_W32(reg, PCIE_IOBLSECS(pcie_port));
133
134 reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT)
135 | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_BASE);
136 IFX_REG_W32(reg, PCIE_IO_BANDL(pcie_port));
137 }
138
139 static inline void pcie_msi_setup(int pcie_port)
140 {
141 unsigned int reg;
142
143 /* XXX, MSI stuff should only apply to EP */
144 /* MSI Capability: Only enable 32-bit addresses */
145 reg = IFX_REG_R32(PCIE_MCAPR(pcie_port));
146 reg &= ~PCIE_MCAPR_ADDR64_CAP;
147 reg |= PCIE_MCAPR_MSI_ENABLE;
148
149 /* Disable multiple message */
150 reg &= ~(PCIE_MCAPR_MULTI_MSG_CAP | PCIE_MCAPR_MULTI_MSG_ENABLE);
151 IFX_REG_W32(reg, PCIE_MCAPR(pcie_port));
152 }
153
154 static inline void pcie_pm_setup(int pcie_port)
155 {
156 unsigned int reg;
157
158 /* Enable PME, Soft reset enabled */
159 reg = IFX_REG_R32(PCIE_PM_CSR(pcie_port));
160 reg |= PCIE_PM_CSR_PME_ENABLE | PCIE_PM_CSR_SW_RST;
161 IFX_REG_W32(reg, PCIE_PM_CSR(pcie_port));
162 }
163
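/*
 * Bridge bus numbers: primary bus 0 (the RC itself), secondary bus 1
 * (directly behind the RC), subordinate bus 0xFF so that any downstream
 * bus number is forwarded.
 */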
164 static inline void pcie_bus_setup(int pcie_port)
165 {
166 unsigned int reg;
167
168 reg = SM(0, PCIE_BNR_PRIMARY_BUS_NUM) | SM(1, PCIE_PNR_SECONDARY_BUS_NUM) | SM(0xFF, PCIE_PNR_SUB_BUS_NUM);
169 IFX_REG_W32(reg, PCIE_BNR(pcie_port));
170 }
171
172 static inline void pcie_device_setup(int pcie_port)
173 {
174 unsigned int reg;
175
176 /* Device capability register, set up Maximum payload size */
177 reg = IFX_REG_R32(PCIE_DCAP(pcie_port));
178 reg |= PCIE_DCAP_ROLE_BASE_ERR_REPORT;
179 reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCAP_MAX_PAYLOAD_SIZE);
180
181 /* Only available for EP */
182 reg &= ~(PCIE_DCAP_EP_L0S_LATENCY | PCIE_DCAP_EP_L1_LATENCY);
183 IFX_REG_W32(reg, PCIE_DCAP(pcie_port));
184
185 /* Device control and status register */
186 /* Set Maximum Read Request size for the device as a Requestor */
187 reg = IFX_REG_R32(PCIE_DCTLSTS(pcie_port));
188
189 /*
190 * The request size can be larger than the MPS in use, but the completions
191 * returned for the read will be bounded by the MPS size.
192 * In our system the maximum request size depends on the AHB burst size,
193 * which is 64 bytes, but we program 128 bytes, the minimum PCIe value.
194 */
195 reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_READ_SIZE)
196 | SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_PAYLOAD_SIZE);
197
198 /* Enable relaxed ordering, no snoop, and all kinds of errors */
199 reg |= PCIE_DCTLSTS_RELAXED_ORDERING_EN | PCIE_DCTLSTS_ERR_EN | PCIE_DCTLSTS_NO_SNOOP_EN;
200
201 IFX_REG_W32(reg, PCIE_DCTLSTS(pcie_port));
202 }
203
204 static inline void pcie_link_setup(int pcie_port)
205 {
206 unsigned int reg;
207
208 /*
209 * XXX, Link capability register, bit 18 for EP CLKREQ# dynamic clock management for L1, L2/3 CPM
210 * The N_FTS value for L0s exit is communicated during link training via TS1 ordered sets.
211 */
212 reg = IFX_REG_R32(PCIE_LCAP(pcie_port));
213 reg &= ~PCIE_LCAP_L0S_EIXT_LATENCY;
214 reg |= SM(3, PCIE_LCAP_L0S_EIXT_LATENCY);
215 IFX_REG_W32(reg, PCIE_LCAP(pcie_port));
216
217 /* Link control and status register */
218 reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
219
220 /* Link Enable, ASPM enabled */
221 reg &= ~PCIE_LCTLSTS_LINK_DISABLE;
222
223 #ifdef CONFIG_PCIEASPM
224 /*
225 * We use the same physical reference clock that the platform provides on the connector,
226 * which allows ASPM to calculate the exit latency correctly.
227 */
228 reg |= PCIE_LCTLSTS_SLOT_CLK_CFG;
229 reg |= PCIE_LCTLSTS_COM_CLK_CFG;
230 /*
231 * ASPM should be disabled by default unless dedicated power management support is present.
232 * Enabling ASPM can cause system hangs, instability and performance degradation.
233 */
234 reg |= PCIE_LCTLSTS_ASPM_ENABLE;
235 #else
236 reg &= ~PCIE_LCTLSTS_ASPM_ENABLE;
237 #endif /* CONFIG_PCIEASPM */
238
239 /*
240 * The maximum size of any completion-with-data packet is bounded by the MPS setting
241 * in the device control register.
242 */
243 /* The RCB may cause multiple split transactions; of the two options available we use the 64 byte RCB */
244 reg &= ~PCIE_LCTLSTS_RCB128;
245 IFX_REG_W32(reg, PCIE_LCTLSTS(pcie_port));
246 }
247
248 static inline void pcie_error_setup(int pcie_port)
249 {
250 unsigned int reg;
251
252 /*
253 * Forward ERR_COR, ERR_NONFATAL, ERR_FATAL to the backbone
254 * Poisoned write TLPs and completions indicating poisoned TLPs will set the PCIe_PCICMDSTS.MDPE
255 */
256 reg = IFX_REG_R32(PCIE_INTRBCTRL(pcie_port));
257 reg |= PCIE_INTRBCTRL_SERR_ENABLE | PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE;
258
259 IFX_REG_W32(reg, PCIE_INTRBCTRL(pcie_port));
260
261 /* Uncorrectable Error Mask Register, Unmask <enable> all bits in PCIE_UESR */
262 reg = IFX_REG_R32(PCIE_UEMR(pcie_port));
263 reg &= ~PCIE_ALL_UNCORRECTABLE_ERR;
264 IFX_REG_W32(reg, PCIE_UEMR(pcie_port));
265
266 /* Uncorrectable Error Severity Register, ALL errors are FATAL */
267 IFX_REG_W32(PCIE_ALL_UNCORRECTABLE_ERR, PCIE_UESR(pcie_port));
268
269 /* Correctable Error Mask Register, unmask <enable> all bits */
270 reg = IFX_REG_R32(PCIE_CEMR(pcie_port));
271 reg &= ~PCIE_CORRECTABLE_ERR;
272 IFX_REG_W32(reg, PCIE_CEMR(pcie_port));
273
274 /* Advanced Error Capabilities and Control Register */
275 reg = IFX_REG_R32(PCIE_AECCR(pcie_port));
276 reg |= PCIE_AECCR_ECRC_CHECK_EN | PCIE_AECCR_ECRC_GEN_EN;
277 IFX_REG_W32(reg, PCIE_AECCR(pcie_port));
278
279 /* Root Error Command Register, Report all types of errors */
280 reg = IFX_REG_R32(PCIE_RECR(pcie_port));
281 reg |= PCIE_RECR_ERR_REPORT_EN;
282 IFX_REG_W32(reg, PCIE_RECR(pcie_port));
283
284 /* Clear the Root status register */
285 reg = IFX_REG_R32(PCIE_RESR(pcie_port));
286 IFX_REG_W32(reg, PCIE_RESR(pcie_port));
287 }
288
289 static inline void pcie_root_setup(int pcie_port)
290 {
291 unsigned int reg;
292
293 /* Root control and capabilities register */
294 reg = IFX_REG_R32(PCIE_RCTLCAP(pcie_port));
295 reg |= PCIE_RCTLCAP_SERR_ENABLE | PCIE_RCTLCAP_PME_INT_EN;
296 IFX_REG_W32(reg, PCIE_RCTLCAP(pcie_port));
297 }
298
299 static inline void pcie_vc_setup(int pcie_port)
300 {
301 unsigned int reg;
302
303 /* Port VC Capability Register 2 */
304 reg = IFX_REG_R32(PCIE_PVC2(pcie_port));
305 reg &= ~PCIE_PVC2_VC_ARB_WRR;
306 reg |= PCIE_PVC2_VC_ARB_16P_FIXED_WRR;
307 IFX_REG_W32(reg, PCIE_PVC2(pcie_port));
308
309 /* VC0 Resource Capability Register */
310 reg = IFX_REG_R32(PCIE_VC0_RC(pcie_port));
311 reg &= ~PCIE_VC0_RC_REJECT_SNOOP;
312 IFX_REG_W32(reg, PCIE_VC0_RC(pcie_port));
313 }
314
315 static inline void pcie_port_logic_setup(int pcie_port)
316 {
317 unsigned int reg;
318
319 /* FTS number, default 12, increase to 63; this may increase the L0s to L0 exit time */
320 reg = IFX_REG_R32(PCIE_AFR(pcie_port));
321 reg &= ~(PCIE_AFR_FTS_NUM | PCIE_AFR_COM_FTS_NUM);
322 reg |= SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_FTS_NUM)
323 | SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_COM_FTS_NUM);
324 /* L0s and L1 entry latency */
325 reg &= ~(PCIE_AFR_L0S_ENTRY_LATENCY | PCIE_AFR_L1_ENTRY_LATENCY);
326 reg |= SM(PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L0S_ENTRY_LATENCY)
327 | SM(PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L1_ENTRY_LATENCY);
328 IFX_REG_W32(reg, PCIE_AFR(pcie_port));
329
330 /* Port Link Control Register */
331 reg = IFX_REG_R32(PCIE_PLCR(pcie_port));
332 reg |= PCIE_PLCR_DLL_LINK_EN; /* Enable the DLL link */
333 IFX_REG_W32(reg, PCIE_PLCR(pcie_port));
334
335 /* Lane Skew Register */
336 reg = IFX_REG_R32(PCIE_LSR(pcie_port));
337 /* Enable ACK/NACK and FC */
338 reg &= ~(PCIE_LSR_ACKNAK_DISABLE | PCIE_LSR_FC_DISABLE);
339 IFX_REG_W32(reg, PCIE_LSR(pcie_port));
340
341 /* Symbol Timer Register and Filter Mask Register 1 */
342 reg = IFX_REG_R32(PCIE_STRFMR(pcie_port));
343
344 /* Default SKP interval is very accurate already, 5us */
345 /* Enable IO/CFG transaction */
346 reg |= PCIE_STRFMR_RX_CFG_TRANS_ENABLE | PCIE_STRFMR_RX_IO_TRANS_ENABLE;
347 /* Disable FC WDT */
348 reg &= ~PCIE_STRFMR_FC_WDT_DISABLE;
349 IFX_REG_W32(reg, PCIE_STRFMR(pcie_port));
350
351 /* Filter Mask Register 2 */
352 reg = IFX_REG_R32(PCIE_FMR2(pcie_port));
353 reg |= PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1 | PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1;
354 IFX_REG_W32(reg, PCIE_FMR2(pcie_port));
355
356 /* VC0 Completion Receive Queue Control Register */
357 reg = IFX_REG_R32(PCIE_VC0_CRQCR(pcie_port));
358 reg &= ~PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE;
359 reg |= SM(PCIE_VC0_TLP_QUEUE_MODE_BYPASS, PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE);
360 IFX_REG_W32(reg, PCIE_VC0_CRQCR(pcie_port));
361 }
362
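/*
 * One-stop RC configuration space setup. The LTSSM is kept disabled while
 * the registers below are programmed; it is re-enabled later from
 * pcie_app_logic_setup() once the EP has been released from reset.
 */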
363 static inline void pcie_rc_cfg_reg_setup(int pcie_port)
364 {
365 /* Disable LTSSM */
366 IFX_REG_W32(0, PCIE_RC_CCR(pcie_port));
367
368 pcie_mem_io_setup(pcie_port);
369 pcie_msi_setup(pcie_port);
370 pcie_pm_setup(pcie_port);
371 pcie_bus_setup(pcie_port);
372 pcie_device_setup(pcie_port);
373 pcie_link_setup(pcie_port);
374 pcie_error_setup(pcie_port);
375 pcie_root_setup(pcie_port);
376 pcie_vc_setup(pcie_port);
377 pcie_port_logic_setup(pcie_port);
378 }
379
380 static int ifx_pcie_wait_phy_link_up(int pcie_port)
381 {
382 int i;
383
384 /* Wait for the PHY link to come up */
385 for (i = 0; i < IFX_PCIE_PHY_LINK_UP_TIMEOUT; i++) {
386 if (ifx_pcie_link_up(pcie_port)) {
387 break;
388 }
389 udelay(100);
390 }
391 if (i >= IFX_PCIE_PHY_LINK_UP_TIMEOUT) {
392 printk(KERN_ERR "%s timeout\n", __func__);
393 return -1;
394 }
395
396 /* Check data link up or not */
397 if (!(IFX_REG_R32(PCIE_RC_DR(pcie_port)) & PCIE_RC_DR_DLL_UP)) {
398 printk(KERN_ERR "%s DLL link is still down\n", __func__);
399 return -1;
400 }
401
402 /* Check Data link active or not */
403 if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_DLL_ACTIVE)) {
404 printk(KERN_ERR "%s DLL is not active\n", __func__);
405 return -1;
406 }
407 return 0;
408 }
409
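/*
 * Application logic bring-up: suppress AHB bus errors, release the EP from
 * reset, start LTSSM link training and then verify that the PHY and the
 * data link layer have come up.
 */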
410 static inline int pcie_app_logic_setup(int pcie_port)
411 {
412 IFX_REG_W32(PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS, PCIE_AHB_CTRL(pcie_port));
413
414 /* Pull PCIe EP out of reset */
415 pcie_device_rst_deassert(pcie_port);
416
417 /* Start LTSSM training between RC and EP */
418 pcie_ltssm_enable(pcie_port);
419
420 /* Check PHY status after enabling LTSSM */
421 if (ifx_pcie_wait_phy_link_up(pcie_port) != 0) {
422 return -1;
423 }
424 return 0;
425 }
426
427 /*
428 * Must be done after LTSSM training because it is based on the negotiated
429 * link width and payload size.
430 * Update the Replay Time Limit. Empirically, some PCIe
431 * devices take a little longer to respond than expected under
432 * load. As a workaround for this we configure the Replay Time
433 * Limit to the value expected for a 512 byte MPS instead of
434 * our actual 128 byte MPS. The numbers below are directly
435 * from the PCIe spec table 3-4/5.
436 */
437 static inline void pcie_replay_time_update(int pcie_port)
438 {
439 unsigned int reg;
440 int nlw;
441 int rtl;
442
443 reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
444
445 nlw = MS(reg, PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH);
446 switch (nlw) {
447 case PCIE_MAX_LENGTH_WIDTH_X1:
448 rtl = 1677;
449 break;
450 case PCIE_MAX_LENGTH_WIDTH_X2:
451 rtl = 867;
452 break;
453 case PCIE_MAX_LENGTH_WIDTH_X4:
454 rtl = 462;
455 break;
456 case PCIE_MAX_LENGTH_WIDTH_X8:
457 rtl = 258;
458 break;
459 default:
460 rtl = 1677;
461 break;
462 }
463 reg = IFX_REG_R32(PCIE_ALTRT(pcie_port));
464 reg &= ~PCIE_ALTRT_REPLAY_TIME_LIMIT;
465 reg |= SM(rtl, PCIE_ALTRT_REPLAY_TIME_LIMIT);
466 IFX_REG_W32(reg, PCIE_ALTRT(pcie_port));
467
468 IFX_PCIE_PRINT(PCIE_MSG_REG, "%s PCIE_ALTRT 0x%08x\n",
469 __func__, IFX_REG_R32(PCIE_ALTRT(pcie_port)));
470 }
471
472 /*
473 * Table 359 Enhanced Configuration Address Mapping1)
474 * 1) This table is defined in Table 7-1, page 341, PCI Express Base Specification v1.1
475 * Memory Address PCI Express Configuration Space
476 * A[(20+n-1):20] Bus Number 1 < n < 8
477 * A[19:15] Device Number
478 * A[14:12] Function Number
479 * A[11:8] Extended Register Number
480 * A[7:2] Register Number
481 * A[1:0] Along with size of the access, used to generate Byte Enables
482 * For VR9, only the address bits [22:0] are mapped to the configuration space:
483 * . Address bits [22:20] select the target bus (1-of-8)1)
484 * . Address bits [19:15] select the target device (1-of-32) on the bus
485 * . Address bits [14:12] select the target function (1-of-8) within the device.
486 * . Address bits [11:2] select the target dword (1-of-1024) within the selected function's configuration space
487 * . Address bits [1:0] define the start byte location within the selected dword.
488 */
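/*
 * Example (hypothetical values): bus 1, device 0, function 0, where 0x10
 * yields addr = (1 << 20) | (0 << 15) | (0 << 12) | 0x10 = 0x100010, i.e. a
 * type 1 access to dword 4 of that function's configuration space.
 */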
489 static inline unsigned int pcie_bus_addr(u8 bus_num, u16 devfn, int where)
490 {
491 unsigned int addr;
492 u8 bus;
493
494 if (!bus_num) {
495 /* type 0 */
496 addr = ((PCI_SLOT(devfn) & 0x1F) << 15) | ((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF)& ~3);
497 } else {
498 bus = bus_num;
499 /* type 1, only support 8 buses */
500 addr = ((bus & 0x7) << 20) | ((PCI_SLOT(devfn) & 0x1F) << 15) |
501 ((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF) & ~3);
502 }
503 IFX_PCIE_PRINT(PCIE_MSG_CFG, "%s: bus addr : %02x:%02x.%01x/%02x, addr=%08x\n",
504 __func__, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), where, addr);
505 return addr;
506 }
507
508 static int pcie_valid_config(int pcie_port, int bus, int dev)
509 {
510 /* RC itself */
511 if ((bus == 0) && (dev == 0))
512 return 1;
513
514 /* No physical link */
515 if (!ifx_pcie_link_up(pcie_port))
516 return 0;
517
518 /* Bus zero only has RC itself
519 * XXX, check if EP will be integrated
520 */
521 if ((bus == 0) && (dev != 0))
522 return 0;
523
524 /* Maximum 8 buses supported for VRX */
525 if (bus > 9)
526 return 0;
527
528 /*
529 * PCIe is a point-to-point link, so each bus supports only one device,
530 * except for bus zero and the virtual buses created by a PCIe switch.
531 * The following two conditions really depend on the system design
532 * and the attached device.
533 * XXX, how to handle additional switches?
534 */
535 if ((bus == 1) && (dev != 0))
536 return 0;
537
538 if ((bus >= 3) && (dev != 0))
539 return 0;
540 return 1;
541 }
542
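/*
 * Two register windows are used below: PCIE_CFG_PORT_TO_BASE() addresses the
 * memory-mapped configuration space of downstream devices, while
 * PCIE_RC_PORT_TO_BASE() addresses the RC's own (local) configuration registers.
 */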
543 static inline unsigned int ifx_pcie_cfg_rd(int pcie_port, unsigned int reg)
544 {
545 return IFX_REG_R32((volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
546 }
547
548 static inline void ifx_pcie_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
549 {
550 IFX_REG_W32( val, (volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
551 }
552
553 static inline unsigned int ifx_pcie_rc_cfg_rd(int pcie_port, unsigned int reg)
554 {
555 return IFX_REG_R32((volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
556 }
557
558 static inline void ifx_pcie_rc_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
559 {
560 IFX_REG_W32(val, (volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
561 }
562
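/*
 * Bus enumeration hacks: when this is not the first host controller, Linux
 * numbers its root bus after all buses of the previous controller, so the
 * bridge bus-number registers are translated by pcibios_1st_host_bus_nr()
 * on read and write to keep the hardware numbering zero-based.
 */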
563 unsigned int ifx_pcie_bus_enum_read_hack(int where, unsigned int value)
564 {
565 unsigned int tvalue = value;
566
567 if (where == PCI_PRIMARY_BUS) {
568 u8 primary, secondary, subordinate;
569
570 primary = tvalue & 0xFF;
571 secondary = (tvalue >> 8) & 0xFF;
572 subordinate = (tvalue >> 16) & 0xFF;
573 primary += pcibios_1st_host_bus_nr();
574 secondary += pcibios_1st_host_bus_nr();
575 subordinate += pcibios_1st_host_bus_nr();
576 tvalue = (tvalue & 0xFF000000) | (unsigned int)primary | (unsigned int)(secondary << 8) | (unsigned int)(subordinate << 16);
577 }
578 return tvalue;
579 }
580
581 unsigned int ifx_pcie_bus_enum_write_hack(int where, unsigned int value)
582 {
583 unsigned int tvalue = value;
584
585 if (where == PCI_PRIMARY_BUS) {
586 u8 primary, secondary, subordinate;
587
588 primary = tvalue & 0xFF;
589 secondary = (tvalue >> 8) & 0xFF;
590 subordinate = (tvalue >> 16) & 0xFF;
591 if (primary > 0 && primary != 0xFF)
592 primary -= pcibios_1st_host_bus_nr();
593 if (secondary > 0 && secondary != 0xFF)
594 secondary -= pcibios_1st_host_bus_nr();
595 if (subordinate > 0 && subordinate != 0xFF)
596 subordinate -= pcibios_1st_host_bus_nr();
597 tvalue = (tvalue & 0xFF000000) | (unsigned int)primary | (unsigned int)(secondary << 8) | (unsigned int)(subordinate << 16);
598 } else if (where == PCI_SUBORDINATE_BUS) {
599 u8 subordinate = tvalue & 0xFF;
600 subordinate = subordinate > 0 ? subordinate - pcibios_1st_host_bus_nr() : 0;
601 tvalue = subordinate;
602 }
603 return tvalue;
604 }
605
606 /**
607 * \fn static int ifx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
608 * int where, int size, unsigned int *value)
609 * \brief Read a value from configuration space
610 *
611 * \param[in] bus Pointer to pci bus
612 * \param[in] devfn PCI device function number
613 * \param[in] where PCI register number
614 * \param[in] size Register read size
615 * \param[out] value Pointer to return value
616 * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
617 * \return PCIBIOS_FUNC_NOT_SUPPORTED PCI function not supported
618 * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
619 * \return PCIBIOS_SUCCESSFUL OK
620 * \ingroup IFX_PCIE_OS
621 */
622 static int ifx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, unsigned int *value)
623 {
624 unsigned int data = 0;
625 int bus_number = bus->number;
626 static const unsigned int mask[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
627 int ret = PCIBIOS_SUCCESSFUL;
628 struct ifx_pci_controller *ctrl = bus->sysdata;
629 int pcie_port = ctrl->port;
630
631 if (unlikely(size != 1 && size != 2 && size != 4)){
632 ret = PCIBIOS_BAD_REGISTER_NUMBER;
633 goto out;
634 }
635
636 /* Make sure the address is aligned to natural boundary */
637 if (unlikely(((size - 1) & where))) {
638 ret = PCIBIOS_BAD_REGISTER_NUMBER;
639 goto out;
640 }
641
642 /*
643 * If we are the second controller, we have to trick the OS into assuming
644 * that its bus numbering starts from 0 on this host controller.
645 */
646 bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
647
648 /*
649 * We need to force the bus number to be zero on the root
650 * bus. Linux numbers the 2nd root bus to start after all
651 * busses on root 0.
652 */
653 if (bus->parent == NULL)
654 bus_number = 0;
655
656 /*
657 * PCIe only has a single device connected to it. It is
658 * always device ID 0. Don't bother doing reads for other
659 * device IDs on the first segment.
660 */
661 if ((bus_number == 0) && (PCI_SLOT(devfn) != 0)) {
662 ret = PCIBIOS_FUNC_NOT_SUPPORTED;
663 goto out;
664 }
665
666 if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
667 *value = 0xffffffff;
668 ret = PCIBIOS_DEVICE_NOT_FOUND;
669 goto out;
670 }
671
672 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: %02x:%02x.%01x/%02x:%01d\n", __func__, bus_number,
673 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
674
675 PCIE_IRQ_LOCK(ifx_pcie_lock);
676 if (bus_number == 0) { /* RC itself */
677 unsigned int t;
678
679 t = (where & ~3);
680 data = ifx_pcie_rc_cfg_rd(pcie_port, t);
681 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: rd local cfg, offset:%08x, data:%08x\n",
682 __func__, t, data);
683 } else {
684 unsigned int addr = pcie_bus_addr(bus_number, devfn, where);
685
686 data = ifx_pcie_cfg_rd(pcie_port, addr);
687 if (pcie_port == IFX_PCIE_PORT0) {
688 #ifdef CONFIG_IFX_PCIE_HW_SWAP
689 data = le32_to_cpu(data);
690 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
691 } else {
692 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
693 data = le32_to_cpu(data);
694 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
695 }
696 }
697 /* To present a correct PCI topology, restore the bus number expected by the OS */
698 data = ifx_pcie_bus_enum_hack(bus, devfn, where, data, pcie_port, 1);
699
700 PCIE_IRQ_UNLOCK(ifx_pcie_lock);
701 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: read config: data=%08x raw=%08x\n",
702 __func__, (data >> (8 * (where & 3))) & mask[size & 7], data);
703
704 *value = (data >> (8 * (where & 3))) & mask[size & 7];
705 out:
706 return ret;
707 }
708
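/*
 * Merge a 1/2/4-byte write into the 32-bit dword read back from config space
 * (read-modify-write). Example with hypothetical values: where = 0x41,
 * size = 1, data = 0xAABBCCDD, value = 0x12 gives shift = 8 and a merged
 * dword of 0xAABB12DD.
 */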
709 static unsigned int ifx_pcie_size_to_value(int where, int size, unsigned int data, unsigned int value)
710 {
711 unsigned int shift;
712 unsigned int tdata = data;
713
714 switch (size) {
715 case 1:
716 shift = (where & 0x3) << 3;
717 tdata &= ~(0xffU << shift);
718 tdata |= ((value & 0xffU) << shift);
719 break;
720 case 2:
721 shift = (where & 3) << 3;
722 tdata &= ~(0xffffU << shift);
723 tdata |= ((value & 0xffffU) << shift);
724 break;
725 case 4:
726 tdata = value;
727 break;
728 }
729 return tdata;
730 }
731
732 /**
733 * \fn static int ifx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
734 * int where, int size, unsigned int value)
735 * \brief Write a value to PCI configuration space
736 *
737 * \param[in] bus Pointer to pci bus
738 * \param[in] devfn PCI device function number
739 * \param[in] where PCI register number
740 * \param[in] size The register size to be written
741 * \param[in] value The value to be written
742 * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
743 * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
744 * \return PCIBIOS_SUCCESSFUL OK
745 * \ingroup IFX_PCIE_OS
746 */
747 static int ifx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, unsigned int value)
748 {
749 int bus_number = bus->number;
750 int ret = PCIBIOS_SUCCESSFUL;
751 struct ifx_pci_controller *ctrl = bus->sysdata;
752 int pcie_port = ctrl->port;
753 unsigned int tvalue = value;
754 unsigned int data;
755
756 /* Make sure the address is aligned to natural boundary */
757 if (unlikely(((size - 1) & where))) {
758 ret = PCIBIOS_BAD_REGISTER_NUMBER;
759 goto out;
760 }
761 /*
762 * If we are the second controller, we have to trick the OS into assuming
763 * that its bus numbering starts from 0 on this host controller.
764 */
765 bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
766
767 /*
768 * We need to force the bus number to be zero on the root
769 * bus. Linux numbers the 2nd root bus to start after all
770 * busses on root 0.
771 */
772 if (bus->parent == NULL)
773 bus_number = 0;
774
775 if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
776 ret = PCIBIOS_DEVICE_NOT_FOUND;
777 goto out;
778 }
779
780 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: %02x:%02x.%01x/%02x:%01d value=%08x\n", __func__,
781 bus_number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, value);
782
783 /* XXX, some PCIe device may need some delay */
784 PCIE_IRQ_LOCK(ifx_pcie_lock);
785
786 /*
787 * To configure the correct bus topology the native way, we have to trick the OS
788 * so that it configures the PCIe hardware correctly.
789 */
790 tvalue = ifx_pcie_bus_enum_hack(bus, devfn, where, value, pcie_port, 0);
791
792 if (bus_number == 0) { /* RC itself */
793 unsigned int t;
794
795 t = (where & ~3);
796 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr local cfg, offset:%08x, fill:%08x\n", __func__, t, value);
797 data = ifx_pcie_rc_cfg_rd(pcie_port, t);
798 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd local cfg, offset:%08x, data:%08x\n", __func__, t, data);
799
800 data = ifx_pcie_size_to_value(where, size, data, tvalue);
801
802 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr local cfg, offset:%08x, value:%08x\n", __func__, t, data);
803 ifx_pcie_rc_cfg_wr(pcie_port, t, data);
804 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd local cfg, offset:%08x, value:%08x\n",
805 __func__, t, ifx_pcie_rc_cfg_rd(pcie_port, t));
806 } else {
807 unsigned int addr = pcie_bus_addr(bus_number, devfn, where);
808
809 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr cfg, offset:%08x, fill:%08x\n", __func__, addr, value);
810 data = ifx_pcie_cfg_rd(pcie_port, addr);
811 if (pcie_port == IFX_PCIE_PORT0) {
812 #ifdef CONFIG_IFX_PCIE_HW_SWAP
813 data = le32_to_cpu(data);
814 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
815 } else {
816 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
817 data = le32_to_cpu(data);
818 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
819 }
820 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd cfg, offset:%08x, data:%08x\n", __func__, addr, data);
821
822 data = ifx_pcie_size_to_value(where, size, data, tvalue);
823 if (pcie_port == IFX_PCIE_PORT0) {
824 #ifdef CONFIG_IFX_PCIE_HW_SWAP
825 data = cpu_to_le32(data);
826 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
827 } else {
828 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
829 data = cpu_to_le32(data);
830 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
831 }
832 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: wr cfg, offset:%08x, value:%08x\n", __func__, addr, data);
833 ifx_pcie_cfg_wr(pcie_port, addr, data);
834 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: rd cfg, offset:%08x, value:%08x\n",
835 __func__, addr, ifx_pcie_cfg_rd(pcie_port, addr));
836 }
837 PCIE_IRQ_UNLOCK(ifx_pcie_lock);
838 out:
839 return ret;
840 }
841
842 static struct resource ifx_pcie_io_resource = {
843 .name = "PCIe0 I/O space",
844 .start = PCIE_IO_PHY_BASE,
845 .end = PCIE_IO_PHY_END,
846 .flags = IORESOURCE_IO,
847 };
848
849 static struct resource ifx_pcie_mem_resource = {
850 .name = "PCIe0 Memory space",
851 .start = PCIE_MEM_PHY_BASE,
852 .end = PCIE_MEM_PHY_END,
853 .flags = IORESOURCE_MEM,
854 };
855
856 static struct pci_ops ifx_pcie_ops = {
857 .read = ifx_pcie_read_config,
858 .write = ifx_pcie_write_config,
859 };
860
861 static struct ifx_pci_controller ifx_pcie_controller[IFX_PCIE_CORE_NR] = {
862 {
863 .pcic = {
864 .pci_ops = &ifx_pcie_ops,
865 .mem_resource = &ifx_pcie_mem_resource,
866 .io_resource = &ifx_pcie_io_resource,
867 },
868 .port = IFX_PCIE_PORT0,
869 },
870 };
871
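/*
 * The RC core reports its error sources through one combined interrupt;
 * the handler below simply acknowledges it in the IRNCR register.
 */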
872 static inline void pcie_core_int_clear_all(int pcie_port)
873 {
874 unsigned int reg;
875 reg = IFX_REG_R32(PCIE_IRNCR(pcie_port));
876 reg &= PCIE_RC_CORE_COMBINED_INT;
877 IFX_REG_W32(reg, PCIE_IRNCR(pcie_port));
878 }
879
880 static irqreturn_t pcie_rc_core_isr(int irq, void *dev_id)
881 {
882 struct ifx_pci_controller *ctrl = (struct ifx_pci_controller *)dev_id;
883 int pcie_port = ctrl->port;
884
885 IFX_PCIE_PRINT(PCIE_MSG_ISR, "PCIe RC error intr %d\n", irq);
886 pcie_core_int_clear_all(pcie_port);
887 return IRQ_HANDLED;
888 }
889
890 static int pcie_rc_core_int_init(int pcie_port)
891 {
892 int ret;
893
894 /* Enable core interrupt */
895 IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNEN(pcie_port));
896
897 /* Clear it first */
898 IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNCR(pcie_port));
899 ret = request_irq(pcie_irqs[pcie_port].ir_irq.irq, pcie_rc_core_isr, IRQF_DISABLED,
900 pcie_irqs[pcie_port].ir_irq.name, &ifx_pcie_controller[pcie_port]);
901 if (ret)
902 printk(KERN_ERR "%s request irq %d failed\n", __func__, IFX_PCIE_IR);
903
904 return ret;
905 }
906
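/*
 * Map a legacy INTx pin (1 = INTA .. 4 = INTD) of a device behind this RC to
 * the corresponding SoC interrupt line and enable it in the IRN registers.
 */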
907 int ifx_pcie_bios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
908 {
909 unsigned int irq_bit = 0;
910 int irq = 0;
911 struct ifx_pci_controller *ctrl = dev->bus->sysdata;
912 int pcie_port = ctrl->port;
913
914 IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s port %d dev %s slot %d pin %d \n", __func__, pcie_port, pci_name(dev), slot, pin);
915
916 if ((pin == PCIE_LEGACY_DISABLE) || (pin > PCIE_LEGACY_INT_MAX)) {
917 printk(KERN_WARNING "WARNING: dev %s: invalid interrupt pin %d\n", pci_name(dev), pin);
918 return -1;
919 }
920 /* Pin numbers are 1-based, so subtract one for the array index */
921 irq_bit = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq_bit;
922 irq = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq;
923 IFX_REG_SET_BIT(irq_bit, PCIE_IRNEN(pcie_port));
924 IFX_REG_SET_BIT(irq_bit, PCIE_IRNCR(pcie_port));
925 IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s irq %d assigned\n", __func__, pci_name(dev), irq);
926 return irq;
927 }
928
929 /**
930 * \fn int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
931 * \brief Called to perform platform specific PCI setup
932 *
933 * \param[in] dev The Linux PCI device structure for the device to map
934 * \return OK
935 * \ingroup IFX_PCIE_OS
936 */
937 int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
938 {
939 u16 config;
940 unsigned int dconfig;
941 int pos;
942 /* Enable reporting System errors and parity errors on all devices */
943 /* Enable parity checking and error reporting */
944 pci_read_config_word(dev, PCI_COMMAND, &config);
945 config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR /*| PCI_COMMAND_INVALIDATE |
946 PCI_COMMAND_FAST_BACK*/;
947 pci_write_config_word(dev, PCI_COMMAND, config);
948
949 if (dev->subordinate) {
950 /* Set latency timers on sub bridges */
951 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 0x40); /* XXX, */
952 /* More bridge error detection */
953 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
954 config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
955 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
956 }
957 /* Enable the PCIe normal error reporting */
958 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
959 if (pos) {
960 /* Disable system error generation in response to error messages */
961 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &config);
962 config &= ~(PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE | PCI_EXP_RTCTL_SEFEE);
963 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, config);
964
965 /* Clear PCIE Capability's Device Status */
966 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &config);
967 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, config);
968
969 /* Update Device Control */
970 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
971 /* Correctable Error Reporting */
972 config |= PCI_EXP_DEVCTL_CERE;
973 /* Non-Fatal Error Reporting */
974 config |= PCI_EXP_DEVCTL_NFERE;
975 /* Fatal Error Reporting */
976 config |= PCI_EXP_DEVCTL_FERE;
977 /* Unsupported Request */
978 config |= PCI_EXP_DEVCTL_URRE;
979 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
980 }
981
982 /* Find the Advanced Error Reporting capability */
983 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
984 if (pos) {
985 /* Clear Uncorrectable Error Status */
986 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &dconfig);
987 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, dconfig);
988 /* Enable reporting of all uncorrectable errors */
989 /* Uncorrectable Error Mask - turned on bits disable errors */
990 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
991 /*
992 * Leave severity at HW default. This only controls if
993 * errors are reported as uncorrectable or
994 * correctable, not if the error is reported.
995 */
996 /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
997 /* Clear Correctable Error Status */
998 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
999 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
1000 /* Enable reporting of all correctable errors */
1001 /* Correctable Error Mask - turned on bits disable errors */
1002 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
1003 /* Advanced Error Capabilities */
1004 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
1005 /* ECRC Generation Enable */
1006 if (dconfig & PCI_ERR_CAP_ECRC_GENC)
1007 dconfig |= PCI_ERR_CAP_ECRC_GENE;
1008 /* ECRC Check Enable */
1009 if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
1010 dconfig |= PCI_ERR_CAP_ECRC_CHKE;
1011 pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
1012
1013 /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
1014 /* Enable Root Port's interrupt in response to error messages */
1015 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
1016 PCI_ERR_ROOT_CMD_COR_EN |
1017 PCI_ERR_ROOT_CMD_NONFATAL_EN |
1018 PCI_ERR_ROOT_CMD_FATAL_EN);
1019 /* Clear the Root status register */
1020 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
1021 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
1022 }
1023 /* WAR, only 128 MRRS is supported, force all EPs to support this value */
1024 pcie_set_readrq(dev, 128);
1025 return 0;
1026 }
1027
1028 static void pcie_phy_rst(int pcie_port)
1029 {
1030 pcie_phy_rst_assert(pcie_port);
1031 pcie_phy_rst_deassert(pcie_port);
1032 /* Make sure PHY PLL is stable */
1033 udelay(20);
1034 }
1035
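/*
 * Full RC bring-up, retried up to IFX_PCIE_PHY_LOOP_CNT times: power the PHY
 * down, reset the PHY, core and EP, set up the PHY PLL, enable the core PMU,
 * clear the status registers, program the RC configuration space and finally
 * train the link. On success the replay time limit is updated for the
 * negotiated link width.
 */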
1036 static int pcie_rc_initialize(int pcie_port)
1037 {
1038 int i;
1039
1040 pcie_rcu_endian_setup(pcie_port);
1041
1042 pcie_ep_gpio_rst_init(pcie_port);
1043
1044 /*
1045 * XXX, a PCIe elastic buffer bug can cause the link not to be detected.
1046 * Resetting the PCIe PHY once more works around this issue.
1047 */
1048 for (i = 0; i < IFX_PCIE_PHY_LOOP_CNT; i++) {
1049 /* Disable PCIe PHY Analog part for sanity check */
1050 pcie_phy_pmu_disable(pcie_port);
1051 pcie_phy_rst(pcie_port);
1052 /* PCIe Core reset enabled, low active, sw programmed */
1053 pcie_core_rst_assert(pcie_port);
1054 /* Put PCIe EP in reset status */
1055 pcie_device_rst_assert(pcie_port);
1056 /* PCI PHY & Core reset disabled, high active, sw programmed */
1057 pcie_core_rst_deassert(pcie_port);
1058 /* Already in a quiet state, program PLL, enable PHY, check ready bit */
1059 pcie_phy_clock_mode_setup(pcie_port);
1060 /* Enable PCIe PHY and Clock */
1061 pcie_core_pmu_setup(pcie_port);
1062 /* Clear status registers */
1063 pcie_status_register_clear(pcie_port);
1064 #ifdef CONFIG_PCI_MSI
1065 pcie_msi_init(pcie_port);
1066 #endif /* CONFIG_PCI_MSI */
1067 pcie_rc_cfg_reg_setup(pcie_port);
1068
1069 /* Once link is up, break out */
1070 if (pcie_app_logic_setup(pcie_port) == 0)
1071 break;
1072 }
1073 if (i >= IFX_PCIE_PHY_LOOP_CNT) {
1074 printk(KERN_ERR "%s link up failed!!!!!\n", __func__);
1075 return -EIO;
1076 }
1077 /* NB, don't increase ACK/NACK timer timeout value, which will cause a lot of COR errors */
1078 pcie_replay_time_update(pcie_port);
1079 return 0;
1080 }
1081
1082 static inline int ifx_pcie_startup_port_nr(void)
1083 {
1084 return IFX_PCIE_PORT0;
1085 }
1089
1090 /**
1091 * \fn static int __devinit ltq_pcie_probe(struct platform_device *pdev)
1092 * \brief Initialize the IFX PCIe controllers
1093 *
1094 * \return -EIO PCIe PHY link is not up
1095 * \return -ENOMEM Configuration/IO space failed to map
1096 * \return 0 OK
1097 * \ingroup IFX_PCIE_OS
1098 */
1099 extern int (*ltqpci_plat_arch_init)(struct pci_dev *dev);
1100 extern int (*ltqpci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
1101 static int __devinit ltq_pcie_probe(struct platform_device *pdev)
1102 {
1103 char ver_str[128] = {0};
1104 void __iomem *io_map_base;
1105 int pcie_port;
1106 int startup_port;
1107 ltqpci_map_irq = ifx_pcie_bios_map_irq;
1108 ltqpci_plat_arch_init = ifx_pcie_bios_plat_dev_init;
1109 /* Enable AHB Master/ Slave */
1110 pcie_ahb_pmu_setup();
1111
1112 startup_port = ifx_pcie_startup_port_nr();
1113
1114 ltq_gpio_request(&pdev->dev, IFX_PCIE_GPIO_RESET, 0, 1, "pcie-reset");
1115
1116 for (pcie_port = startup_port; pcie_port < IFX_PCIE_CORE_NR; pcie_port++){
1117 if (pcie_rc_initialize(pcie_port) == 0) {
1118 /* Map the I/O space; otherwise a warning will pop up */
1119 io_map_base = ioremap(PCIE_IO_PHY_PORT_TO_BASE(pcie_port), PCIE_IO_SIZE);
1120 if (io_map_base == NULL)
1121 return -ENOMEM;
1122 ifx_pcie_controller[pcie_port].pcic.io_map_base = (unsigned long)io_map_base;
1123 register_pci_controller(&ifx_pcie_controller[pcie_port].pcic);
1124 /* XXX, clear error status */
1125 pcie_rc_core_int_init(pcie_port);
1126 }
1127 }
1128
1129 printk(KERN_INFO "%s", ver_str);
1130 return 0;
1131 }
1132
1133 static struct platform_driver ltq_pcie_driver = {
1134 .probe = ltq_pcie_probe,
1135 .driver = {
1136 .name = "pcie-xway",
1137 .owner = THIS_MODULE,
1138 },
1139 };
1140
1141 int __init pciebios_init(void)
1142 {
1143 return platform_driver_register(&ltq_pcie_driver);
1144 }
1145
1146 arch_initcall(pciebios_init);