1 #include <linux/types.h>
2 #include <linux/module.h>
4 #include <linux/kernel.h>
5 #include <linux/init.h>
6 #include <linux/delay.h>
8 #include <asm/paccess.h>
10 #include <linux/pci_regs.h>
11 #include <linux/platform_device.h>
13 #define CONFIG_IFX_PCIE_1ST_CORE
15 #include "pcie-lantiq.h"
17 #define IFX_PCIE_IR (INT_NUM_IM4_IRL0 + 25)
18 #define IFX_PCIE_INTA (INT_NUM_IM4_IRL0 + 8)
19 #define IFX_PCIE_INTB (INT_NUM_IM4_IRL0 + 9)
20 #define IFX_PCIE_INTC (INT_NUM_IM4_IRL0 + 10)
21 #define IFX_PCIE_INTD (INT_NUM_IM4_IRL0 + 11)
22 #define MS(_v, _f) (((_v) & (_f)) >> _f##_S)
23 #define SM(_v, _f) (((_v) << _f##_S) & (_f))
24 #define IFX_REG_SET_BIT(_f, _r) \
25 IFX_REG_W32((IFX_REG_R32((_r)) &~ (_f)) | (_f), (_r))
26 #define IFX_PCIE_LTSSM_ENABLE_TIMEOUT 10
27 #define IFX_PCIE_PHY_LINK_UP_TIMEOUT 1000
28 #define IFX_PCIE_PHY_LOOP_CNT 5
30 static DEFINE_SPINLOCK(ifx_pcie_lock
);
32 int pcibios_1st_host_bus_nr(void);
34 unsigned int g_pcie_debug_flag
= PCIE_MSG_ANY
& (~PCIE_MSG_CFG
);
36 static ifx_pcie_irq_t pcie_irqs
[IFX_PCIE_CORE_NR
] = {
40 .name
= "ifx_pcie_rc0",
45 .irq_bit
= PCIE_IRN_INTA
,
49 .irq_bit
= PCIE_IRN_INTB
,
53 .irq_bit
= PCIE_IRN_INTC
,
57 .irq_bit
= PCIE_IRN_INTD
,
64 static inline int pcie_ltssm_enable(int pcie_port
)
68 IFX_REG_W32(PCIE_RC_CCR_LTSSM_ENABLE
, PCIE_RC_CCR(pcie_port
)); /* Enable LTSSM */
70 /* Wait for the link to come up */
71 for (i
= 0; i
< IFX_PCIE_LTSSM_ENABLE_TIMEOUT
; i
++) {
72 if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port
)) & PCIE_LCTLSTS_RETRAIN_PENDING
)) {
77 if (i
>= IFX_PCIE_LTSSM_ENABLE_TIMEOUT
) {
78 IFX_PCIE_PRINT(PCIE_MSG_INIT
, "%s link timeout!!!!!\n", __func__
);
/*
 * Zero all RC status/error registers so stale bits from a previous link
 * attempt do not survive a re-initialization pass.
 */
static inline void pcie_status_register_clear(int pcie_port)
{
	IFX_REG_W32(0, PCIE_RC_DR(pcie_port));
	IFX_REG_W32(0, PCIE_PCICMDSTS(pcie_port));
	IFX_REG_W32(0, PCIE_DCTLSTS(pcie_port));
	IFX_REG_W32(0, PCIE_LCTLSTS(pcie_port));
	IFX_REG_W32(0, PCIE_SLCTLSTS(pcie_port));
	IFX_REG_W32(0, PCIE_RSTS(pcie_port));
	IFX_REG_W32(0, PCIE_UES_R(pcie_port));
	IFX_REG_W32(0, PCIE_UEMR(pcie_port));
	IFX_REG_W32(0, PCIE_UESR(pcie_port));
	IFX_REG_W32(0, PCIE_CESR(pcie_port));
	IFX_REG_W32(0, PCIE_CEMR(pcie_port));
	IFX_REG_W32(0, PCIE_RESR(pcie_port));
	IFX_REG_W32(0, PCIE_PVCCRSR(pcie_port));
	IFX_REG_W32(0, PCIE_VC0_RSR0(pcie_port));
	IFX_REG_W32(0, PCIE_TPFCS(pcie_port));
	IFX_REG_W32(0, PCIE_TNPFCS(pcie_port));
	IFX_REG_W32(0, PCIE_TCFCS(pcie_port));
	IFX_REG_W32(0, PCIE_QSR(pcie_port));
	IFX_REG_W32(0, PCIE_IOBLSECS(pcie_port));
}
107 static inline int ifx_pcie_link_up(int pcie_port
)
109 return (IFX_REG_R32(PCIE_PHY_SR(pcie_port
)) & PCIE_PHY_SR_PHY_LINK_UP
) ? 1 : 0;
112 static inline void pcie_mem_io_setup(int pcie_port
)
116 * BAR[0:1] readonly register
117 * RC contains only minimal BARs for packets mapped to this device
118 * Mem/IO filters defines a range of memory occupied by memory mapped IO devices that
119 * reside on the downstream side fo the bridge.
121 reg
= SM((PCIE_MEM_PHY_PORT_TO_END(pcie_port
) >> 20), PCIE_MBML_MEM_LIMIT_ADDR
)
122 | SM((PCIE_MEM_PHY_PORT_TO_BASE(pcie_port
) >> 20), PCIE_MBML_MEM_BASE_ADDR
);
123 IFX_REG_W32(reg
, PCIE_MBML(pcie_port
));
125 /* PCIe_PBML, same as MBML */
126 IFX_REG_W32(IFX_REG_R32(PCIE_MBML(pcie_port
)), PCIE_PMBL(pcie_port
));
128 /* IO Address Range */
129 reg
= SM((PCIE_IO_PHY_PORT_TO_END(pcie_port
) >> 12), PCIE_IOBLSECS_IO_LIMIT_ADDR
)
130 | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port
) >> 12), PCIE_IOBLSECS_IO_BASE_ADDR
);
131 reg
|= PCIE_IOBLSECS_32BIT_IO_ADDR
;
132 IFX_REG_W32(reg
, PCIE_IOBLSECS(pcie_port
));
134 reg
= SM((PCIE_IO_PHY_PORT_TO_END(pcie_port
) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT
)
135 | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port
) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_BASE
);
136 IFX_REG_W32(reg
, PCIE_IO_BANDL(pcie_port
));
139 static inline void pcie_msi_setup(int pcie_port
)
143 /* XXX, MSI stuff should only apply to EP */
144 /* MSI Capability: Only enable 32-bit addresses */
145 reg
= IFX_REG_R32(PCIE_MCAPR(pcie_port
));
146 reg
&= ~PCIE_MCAPR_ADDR64_CAP
;
147 reg
|= PCIE_MCAPR_MSI_ENABLE
;
149 /* Disable multiple message */
150 reg
&= ~(PCIE_MCAPR_MULTI_MSG_CAP
| PCIE_MCAPR_MULTI_MSG_ENABLE
);
151 IFX_REG_W32(reg
, PCIE_MCAPR(pcie_port
));
154 static inline void pcie_pm_setup(int pcie_port
)
158 /* Enable PME, Soft reset enabled */
159 reg
= IFX_REG_R32(PCIE_PM_CSR(pcie_port
));
160 reg
|= PCIE_PM_CSR_PME_ENABLE
| PCIE_PM_CSR_SW_RST
;
161 IFX_REG_W32(reg
, PCIE_PM_CSR(pcie_port
));
164 static inline void pcie_bus_setup(int pcie_port
)
168 reg
= SM(0, PCIE_BNR_PRIMARY_BUS_NUM
) | SM(1, PCIE_PNR_SECONDARY_BUS_NUM
) | SM(0xFF, PCIE_PNR_SUB_BUS_NUM
);
169 IFX_REG_W32(reg
, PCIE_BNR(pcie_port
));
172 static inline void pcie_device_setup(int pcie_port
)
176 /* Device capability register, set up Maximum payload size */
177 reg
= IFX_REG_R32(PCIE_DCAP(pcie_port
));
178 reg
|= PCIE_DCAP_ROLE_BASE_ERR_REPORT
;
179 reg
|= SM(PCIE_MAX_PAYLOAD_128
, PCIE_DCAP_MAX_PAYLOAD_SIZE
);
181 /* Only available for EP */
182 reg
&= ~(PCIE_DCAP_EP_L0S_LATENCY
| PCIE_DCAP_EP_L1_LATENCY
);
183 IFX_REG_W32(reg
, PCIE_DCAP(pcie_port
));
185 /* Device control and status register */
186 /* Set Maximum Read Request size for the device as a Requestor */
187 reg
= IFX_REG_R32(PCIE_DCTLSTS(pcie_port
));
190 * Request size can be larger than the MPS used, but the completions returned
191 * for the read will be bounded by the MPS size.
192 * In our system, Max request size depends on AHB burst size. It is 64 bytes.
193 * but we set it as 128 as minimum one.
195 reg
|= SM(PCIE_MAX_PAYLOAD_128
, PCIE_DCTLSTS_MAX_READ_SIZE
)
196 | SM(PCIE_MAX_PAYLOAD_128
, PCIE_DCTLSTS_MAX_PAYLOAD_SIZE
);
198 /* Enable relaxed ordering, no snoop, and all kinds of errors */
199 reg
|= PCIE_DCTLSTS_RELAXED_ORDERING_EN
| PCIE_DCTLSTS_ERR_EN
| PCIE_DCTLSTS_NO_SNOOP_EN
;
201 IFX_REG_W32(reg
, PCIE_DCTLSTS(pcie_port
));
204 static inline void pcie_link_setup(int pcie_port
)
209 * XXX, Link capability register, bit 18 for EP CLKREQ# dynamic clock management for L1, L2/3 CPM
210 * L0s is reported during link training via TS1 order set by N_FTS
212 reg
= IFX_REG_R32(PCIE_LCAP(pcie_port
));
213 reg
&= ~PCIE_LCAP_L0S_EIXT_LATENCY
;
214 reg
|= SM(3, PCIE_LCAP_L0S_EIXT_LATENCY
);
215 IFX_REG_W32(reg
, PCIE_LCAP(pcie_port
));
217 /* Link control and status register */
218 reg
= IFX_REG_R32(PCIE_LCTLSTS(pcie_port
));
220 /* Link Enable, ASPM enabled */
221 reg
&= ~PCIE_LCTLSTS_LINK_DISABLE
;
223 #ifdef CONFIG_PCIEASPM
225 * We use the same physical reference clock that the platform provides on the connector
226 * It paved the way for ASPM to calculate the new exit Latency
228 reg
|= PCIE_LCTLSTS_SLOT_CLK_CFG
;
229 reg
|= PCIE_LCTLSTS_COM_CLK_CFG
;
231 * We should disable ASPM by default except that we have dedicated power management support
232 * Enable ASPM will cause the system hangup/instability, performance degration
234 reg
|= PCIE_LCTLSTS_ASPM_ENABLE
;
236 reg
&= ~PCIE_LCTLSTS_ASPM_ENABLE
;
237 #endif /* CONFIG_PCIEASPM */
240 * The maximum size of any completion with data packet is bounded by the MPS setting
241 * in device control register
243 /* RCB may cause multiple split transactions, two options available, we use 64 byte RCB */
244 reg
&= ~ PCIE_LCTLSTS_RCB128
;
245 IFX_REG_W32(reg
, PCIE_LCTLSTS(pcie_port
));
248 static inline void pcie_error_setup(int pcie_port
)
253 * Forward ERR_COR, ERR_NONFATAL, ERR_FATAL to the backbone
254 * Poisoned write TLPs and completions indicating poisoned TLPs will set the PCIe_PCICMDSTS.MDPE
256 reg
= IFX_REG_R32(PCIE_INTRBCTRL(pcie_port
));
257 reg
|= PCIE_INTRBCTRL_SERR_ENABLE
| PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE
;
259 IFX_REG_W32(reg
, PCIE_INTRBCTRL(pcie_port
));
261 /* Uncorrectable Error Mask Register, Unmask <enable> all bits in PCIE_UESR */
262 reg
= IFX_REG_R32(PCIE_UEMR(pcie_port
));
263 reg
&= ~PCIE_ALL_UNCORRECTABLE_ERR
;
264 IFX_REG_W32(reg
, PCIE_UEMR(pcie_port
));
266 /* Uncorrectable Error Severity Register, ALL errors are FATAL */
267 IFX_REG_W32(PCIE_ALL_UNCORRECTABLE_ERR
, PCIE_UESR(pcie_port
));
269 /* Correctable Error Mask Register, unmask <enable> all bits */
270 reg
= IFX_REG_R32(PCIE_CEMR(pcie_port
));
271 reg
&= ~PCIE_CORRECTABLE_ERR
;
272 IFX_REG_W32(reg
, PCIE_CEMR(pcie_port
));
274 /* Advanced Error Capabilities and Control Registr */
275 reg
= IFX_REG_R32(PCIE_AECCR(pcie_port
));
276 reg
|= PCIE_AECCR_ECRC_CHECK_EN
| PCIE_AECCR_ECRC_GEN_EN
;
277 IFX_REG_W32(reg
, PCIE_AECCR(pcie_port
));
279 /* Root Error Command Register, Report all types of errors */
280 reg
= IFX_REG_R32(PCIE_RECR(pcie_port
));
281 reg
|= PCIE_RECR_ERR_REPORT_EN
;
282 IFX_REG_W32(reg
, PCIE_RECR(pcie_port
));
284 /* Clear the Root status register */
285 reg
= IFX_REG_R32(PCIE_RESR(pcie_port
));
286 IFX_REG_W32(reg
, PCIE_RESR(pcie_port
));
289 static inline void pcie_root_setup(int pcie_port
)
293 /* Root control and capabilities register */
294 reg
= IFX_REG_R32(PCIE_RCTLCAP(pcie_port
));
295 reg
|= PCIE_RCTLCAP_SERR_ENABLE
| PCIE_RCTLCAP_PME_INT_EN
;
296 IFX_REG_W32(reg
, PCIE_RCTLCAP(pcie_port
));
299 static inline void pcie_vc_setup(int pcie_port
)
303 /* Port VC Capability Register 2 */
304 reg
= IFX_REG_R32(PCIE_PVC2(pcie_port
));
305 reg
&= ~PCIE_PVC2_VC_ARB_WRR
;
306 reg
|= PCIE_PVC2_VC_ARB_16P_FIXED_WRR
;
307 IFX_REG_W32(reg
, PCIE_PVC2(pcie_port
));
309 /* VC0 Resource Capability Register */
310 reg
= IFX_REG_R32(PCIE_VC0_RC(pcie_port
));
311 reg
&= ~PCIE_VC0_RC_REJECT_SNOOP
;
312 IFX_REG_W32(reg
, PCIE_VC0_RC(pcie_port
));
315 static inline void pcie_port_logic_setup(int pcie_port
)
319 /* FTS number, default 12, increase to 63, may increase time from/to L0s to L0 */
320 reg
= IFX_REG_R32(PCIE_AFR(pcie_port
));
321 reg
&= ~(PCIE_AFR_FTS_NUM
| PCIE_AFR_COM_FTS_NUM
);
322 reg
|= SM(PCIE_AFR_FTS_NUM_DEFAULT
, PCIE_AFR_FTS_NUM
)
323 | SM(PCIE_AFR_FTS_NUM_DEFAULT
, PCIE_AFR_COM_FTS_NUM
);
324 /* L0s and L1 entry latency */
325 reg
&= ~(PCIE_AFR_L0S_ENTRY_LATENCY
| PCIE_AFR_L1_ENTRY_LATENCY
);
326 reg
|= SM(PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT
, PCIE_AFR_L0S_ENTRY_LATENCY
)
327 | SM(PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT
, PCIE_AFR_L1_ENTRY_LATENCY
);
328 IFX_REG_W32(reg
, PCIE_AFR(pcie_port
));
330 /* Port Link Control Register */
331 reg
= IFX_REG_R32(PCIE_PLCR(pcie_port
));
332 reg
|= PCIE_PLCR_DLL_LINK_EN
; /* Enable the DLL link */
333 IFX_REG_W32(reg
, PCIE_PLCR(pcie_port
));
335 /* Lane Skew Register */
336 reg
= IFX_REG_R32(PCIE_LSR(pcie_port
));
337 /* Enable ACK/NACK and FC */
338 reg
&= ~(PCIE_LSR_ACKNAK_DISABLE
| PCIE_LSR_FC_DISABLE
);
339 IFX_REG_W32(reg
, PCIE_LSR(pcie_port
));
341 /* Symbol Timer Register and Filter Mask Register 1 */
342 reg
= IFX_REG_R32(PCIE_STRFMR(pcie_port
));
344 /* Default SKP interval is very accurate already, 5us */
345 /* Enable IO/CFG transaction */
346 reg
|= PCIE_STRFMR_RX_CFG_TRANS_ENABLE
| PCIE_STRFMR_RX_IO_TRANS_ENABLE
;
348 reg
&= ~PCIE_STRFMR_FC_WDT_DISABLE
;
349 IFX_REG_W32(reg
, PCIE_STRFMR(pcie_port
));
351 /* Filter Masker Register 2 */
352 reg
= IFX_REG_R32(PCIE_FMR2(pcie_port
));
353 reg
|= PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1
| PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1
;
354 IFX_REG_W32(reg
, PCIE_FMR2(pcie_port
));
356 /* VC0 Completion Receive Queue Control Register */
357 reg
= IFX_REG_R32(PCIE_VC0_CRQCR(pcie_port
));
358 reg
&= ~PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE
;
359 reg
|= SM(PCIE_VC0_TLP_QUEUE_MODE_BYPASS
, PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE
);
360 IFX_REG_W32(reg
, PCIE_VC0_CRQCR(pcie_port
));
/* Program all RC configuration-space registers, with LTSSM held off
 * while they are being written. */
static inline void pcie_rc_cfg_reg_setup(int pcie_port)
{
	/* Disable LTSSM while the registers are being programmed */
	IFX_REG_W32(0, PCIE_RC_CCR(pcie_port));

	pcie_mem_io_setup(pcie_port);
	pcie_msi_setup(pcie_port);
	pcie_pm_setup(pcie_port);
	pcie_bus_setup(pcie_port);
	pcie_device_setup(pcie_port);
	pcie_link_setup(pcie_port);
	pcie_error_setup(pcie_port);
	pcie_root_setup(pcie_port);
	pcie_vc_setup(pcie_port);
	pcie_port_logic_setup(pcie_port);
}
380 static int ifx_pcie_wait_phy_link_up(int pcie_port
)
384 /* Wait for PHY link is up */
385 for (i
= 0; i
< IFX_PCIE_PHY_LINK_UP_TIMEOUT
; i
++) {
386 if (ifx_pcie_link_up(pcie_port
)) {
391 if (i
>= IFX_PCIE_PHY_LINK_UP_TIMEOUT
) {
392 printk(KERN_ERR
"%s timeout\n", __func__
);
396 /* Check data link up or not */
397 if (!(IFX_REG_R32(PCIE_RC_DR(pcie_port
)) & PCIE_RC_DR_DLL_UP
)) {
398 printk(KERN_ERR
"%s DLL link is still down\n", __func__
);
402 /* Check Data link active or not */
403 if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port
)) & PCIE_LCTLSTS_DLL_ACTIVE
)) {
404 printk(KERN_ERR
"%s DLL is not active\n", __func__
);
410 static inline int pcie_app_loigc_setup(int pcie_port
)
412 IFX_REG_W32(PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS
, PCIE_AHB_CTRL(pcie_port
));
414 /* Pull PCIe EP out of reset */
415 pcie_device_rst_deassert(pcie_port
);
417 /* Start LTSSM training between RC and EP */
418 pcie_ltssm_enable(pcie_port
);
420 /* Check PHY status after enabling LTSSM */
421 if (ifx_pcie_wait_phy_link_up(pcie_port
) != 0) {
428 * Must be done after ltssm due to based on negotiated link
429 * width and payload size
430 * Update the Replay Time Limit. Empirically, some PCIe
431 * devices take a little longer to respond than expected under
432 * load. As a workaround for this we configure the Replay Time
433 * Limit to the value expected for a 512 byte MPS instead of
434 * our actual 128 byte MPS. The numbers below are directly
435 * from the PCIe spec table 3-4/5.
437 static inline void pcie_replay_time_update(int pcie_port
)
443 reg
= IFX_REG_R32(PCIE_LCTLSTS(pcie_port
));
445 nlw
= MS(reg
, PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH
);
447 case PCIE_MAX_LENGTH_WIDTH_X1
:
450 case PCIE_MAX_LENGTH_WIDTH_X2
:
453 case PCIE_MAX_LENGTH_WIDTH_X4
:
456 case PCIE_MAX_LENGTH_WIDTH_X8
:
463 reg
= IFX_REG_R32(PCIE_ALTRT(pcie_port
));
464 reg
&= ~PCIE_ALTRT_REPLAY_TIME_LIMIT
;
465 reg
|= SM(rtl
, PCIE_ALTRT_REPLAY_TIME_LIMIT
);
466 IFX_REG_W32(reg
, PCIE_ALTRT(pcie_port
));
468 IFX_PCIE_PRINT(PCIE_MSG_REG
, "%s PCIE_ALTRT 0x%08x\n",
469 __func__
, IFX_REG_R32(PCIE_ALTRT(pcie_port
)));
473 * Table 359 Enhanced Configuration Address Mapping1)
474 * 1) This table is defined in Table 7-1, page 341, PCI Express Base Specification v1.1
475 * Memory Address PCI Express Configuration Space
476 * A[(20+n-1):20] Bus Number 1 < n < 8
477 * A[19:15] Device Number
478 * A[14:12] Function Number
479 * A[11:8] Extended Register Number
480 * A[7:2] Register Number
481 * A[1:0] Along with size of the access, used to generate Byte Enables
482 * For VR9, only the address bits [22:0] are mapped to the configuration space:
483 * . Address bits [22:20] select the target bus (1-of-8)1)
484 * . Address bits [19:15] select the target device (1-of-32) on the bus
485 * . Address bits [14:12] select the target function (1-of-8) within the device.
486 * . Address bits [11:2] selects the target dword (1-of-1024) within the selected function.s configuration space
487 * . Address bits [1:0] define the start byte location within the selected dword.
489 static inline unsigned int pcie_bus_addr(u8 bus_num
, u16 devfn
, int where
)
496 addr
= ((PCI_SLOT(devfn
) & 0x1F) << 15) | ((PCI_FUNC(devfn
) & 0x7) << 12) | ((where
& 0xFFF)& ~3);
499 /* type 1, only support 8 buses */
500 addr
= ((bus
& 0x7) << 20) | ((PCI_SLOT(devfn
) & 0x1F) << 15) |
501 ((PCI_FUNC(devfn
) & 0x7) << 12) | ((where
& 0xFFF) & ~3);
503 IFX_PCIE_PRINT(PCIE_MSG_CFG
, "%s: bus addr : %02x:%02x.%01x/%02x, addr=%08x\n",
504 __func__
, bus_num
, PCI_SLOT(devfn
), PCI_FUNC(devfn
), where
, addr
);
/*
 * Decide whether a (bus, dev) pair is worth a configuration access.
 * Returns 1 when the access should be issued, 0 when it should be
 * short-circuited as "no device".
 * NOTE(review): the return values and the bus-range check were missing
 * from the corrupted source; reconstructed — verify against the upstream
 * driver.
 */
static int pcie_valid_config(int pcie_port, int bus, int dev)
{
	/* RC itself is always accessible */
	if ((bus == 0) && (dev == 0))
		return 1;

	/* No physical link */
	if (!ifx_pcie_link_up(pcie_port))
		return 0;

	/* Bus zero only has RC itself
	 * XXX, check if EP will be integrated
	 */
	if ((bus == 0) && (dev != 0))
		return 0;

	/* Maximum 8 buses supported for VRX */
	if (bus > 9)
		return 0;

	/*
	 * PCIe is PtP link, one bus only supports only one device
	 * except bus zero and PCIe switch which is virtual bus device
	 * The following two conditions really depends on the system design
	 * and attached the device.
	 * XXX, how about more new switch
	 */
	if ((bus == 1) && (dev != 0))
		return 0;

	if ((bus >= 3) && (dev != 0))
		return 0;

	return 1;
}
/* Read a 32-bit dword at @reg from the external config space of @pcie_port. */
static inline unsigned int ifx_pcie_cfg_rd(int pcie_port, unsigned int reg)
{
	return IFX_REG_R32((volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
}
/* Write 32-bit @val at @reg in the external config space of @pcie_port. */
static inline void ifx_pcie_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
{
	IFX_REG_W32(val, (volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
}
/* Read a 32-bit dword at @reg from the RC's own config space. */
static inline unsigned int ifx_pcie_rc_cfg_rd(int pcie_port, unsigned int reg)
{
	return IFX_REG_R32((volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
}
/* Write 32-bit @val at @reg in the RC's own config space. */
static inline void ifx_pcie_rc_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
{
	IFX_REG_W32(val, (volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
}
563 unsigned int ifx_pcie_bus_enum_read_hack(int where
, unsigned int value
)
565 unsigned int tvalue
= value
;
567 if (where
== PCI_PRIMARY_BUS
) {
568 u8 primary
, secondary
, subordinate
;
570 primary
= tvalue
& 0xFF;
571 secondary
= (tvalue
>> 8) & 0xFF;
572 subordinate
= (tvalue
>> 16) & 0xFF;
573 primary
+= pcibios_1st_host_bus_nr();
574 secondary
+= pcibios_1st_host_bus_nr();
575 subordinate
+= pcibios_1st_host_bus_nr();
576 tvalue
= (tvalue
& 0xFF000000) | (unsigned int)primary
| (unsigned int)(secondary
<< 8) | (unsigned int)(subordinate
<< 16);
581 unsigned int ifx_pcie_bus_enum_write_hack(int where
, unsigned int value
)
583 unsigned int tvalue
= value
;
585 if (where
== PCI_PRIMARY_BUS
) {
586 u8 primary
, secondary
, subordinate
;
588 primary
= tvalue
& 0xFF;
589 secondary
= (tvalue
>> 8) & 0xFF;
590 subordinate
= (tvalue
>> 16) & 0xFF;
591 if (primary
> 0 && primary
!= 0xFF)
592 primary
-= pcibios_1st_host_bus_nr();
593 if (secondary
> 0 && secondary
!= 0xFF)
594 secondary
-= pcibios_1st_host_bus_nr();
595 if (subordinate
> 0 && subordinate
!= 0xFF)
596 subordinate
-= pcibios_1st_host_bus_nr();
597 tvalue
= (tvalue
& 0xFF000000) | (unsigned int)primary
| (unsigned int)(secondary
<< 8) | (unsigned int)(subordinate
<< 16);
598 } else if (where
== PCI_SUBORDINATE_BUS
) {
599 u8 subordinate
= tvalue
& 0xFF;
600 subordinate
= subordinate
> 0 ? subordinate
- pcibios_1st_host_bus_nr() : 0;
601 tvalue
= subordinate
;
607 * \fn static int ifx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
608 * int where, int size, unsigned int *value)
609 * \brief Read a value from configuration space
611 * \param[in] bus Pointer to pci bus
612 * \param[in] devfn PCI device function number
613 * \param[in] where PCI register number
614 * \param[in] size Register read size
615 * \param[out] value Pointer to return value
616 * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
617 * \return PCIBIOS_FUNC_NOT_SUPPORTED PCI function not supported
618 * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
619 * \return PCIBIOS_SUCCESSFUL OK
620 * \ingroup IFX_PCIE_OS
622 static int ifx_pcie_read_config(struct pci_bus
*bus
, unsigned int devfn
, int where
, int size
, unsigned int *value
)
624 unsigned int data
= 0;
625 int bus_number
= bus
->number
;
626 static const unsigned int mask
[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
627 int ret
= PCIBIOS_SUCCESSFUL
;
628 struct ifx_pci_controller
*ctrl
= bus
->sysdata
;
629 int pcie_port
= ctrl
->port
;
631 if (unlikely(size
!= 1 && size
!= 2 && size
!= 4)){
632 ret
= PCIBIOS_BAD_REGISTER_NUMBER
;
636 /* Make sure the address is aligned to natural boundary */
637 if (unlikely(((size
- 1) & where
))) {
638 ret
= PCIBIOS_BAD_REGISTER_NUMBER
;
643 * If we are second controller, we have to cheat OS so that it assume
644 * its bus number starts from 0 in host controller
646 bus_number
= ifx_pcie_bus_nr_deduct(bus_number
, pcie_port
);
649 * We need to force the bus number to be zero on the root
650 * bus. Linux numbers the 2nd root bus to start after all
653 if (bus
->parent
== NULL
)
657 * PCIe only has a single device connected to it. It is
658 * always device ID 0. Don't bother doing reads for other
659 * device IDs on the first segment.
661 if ((bus_number
== 0) && (PCI_SLOT(devfn
) != 0)) {
662 ret
= PCIBIOS_FUNC_NOT_SUPPORTED
;
666 if (pcie_valid_config(pcie_port
, bus_number
, PCI_SLOT(devfn
)) == 0) {
668 ret
= PCIBIOS_DEVICE_NOT_FOUND
;
672 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG
, "%s: %02x:%02x.%01x/%02x:%01d\n", __func__
, bus_number
,
673 PCI_SLOT(devfn
), PCI_FUNC(devfn
), where
, size
);
675 PCIE_IRQ_LOCK(ifx_pcie_lock
);
676 if (bus_number
== 0) { /* RC itself */
680 data
= ifx_pcie_rc_cfg_rd(pcie_port
, t
);
681 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG
, "%s: rd local cfg, offset:%08x, data:%08x\n",
684 unsigned int addr
= pcie_bus_addr(bus_number
, devfn
, where
);
686 data
= ifx_pcie_cfg_rd(pcie_port
, addr
);
687 if (pcie_port
== IFX_PCIE_PORT0
) {
688 #ifdef CONFIG_IFX_PCIE_HW_SWAP
689 data
= le32_to_cpu(data
);
690 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
692 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
693 data
= le32_to_cpu(data
);
694 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
697 /* To get a correct PCI topology, we have to restore the bus number to OS */
698 data
= ifx_pcie_bus_enum_hack(bus
, devfn
, where
, data
, pcie_port
, 1);
700 PCIE_IRQ_UNLOCK(ifx_pcie_lock
);
701 IFX_PCIE_PRINT(PCIE_MSG_READ_CFG
, "%s: read config: data=%08x raw=%08x\n",
702 __func__
, (data
>> (8 * (where
& 3))) & mask
[size
& 7], data
);
704 *value
= (data
>> (8 * (where
& 3))) & mask
[size
& 7];
/*
 * Merge a 1-, 2- or 4-byte write @value into the 32-bit dword @data that
 * was read back from config space; @where selects the byte lane.
 * Returns the merged dword.
 * NOTE(review): the switch skeleton and the 4-byte case were missing from
 * the corrupted source; reconstructed from the visible 1- and 2-byte
 * lane arithmetic.
 */
static unsigned int ifx_pcie_size_to_value(int where, int size, unsigned int data, unsigned int value)
{
	unsigned int shift;
	unsigned int tdata = data;

	switch (size) {
	case 1:
		shift = (where & 0x3) << 3;
		tdata &= ~(0xffU << shift);
		tdata |= ((value & 0xffU) << shift);
		break;
	case 2:
		shift = (where & 3) << 3;
		tdata &= ~(0xffffU << shift);
		tdata |= ((value & 0xffffU) << shift);
		break;
	case 4:
	default:
		tdata = value;
		break;
	}
	return tdata;
}
733 * \fn static static int ifx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
734 * int where, int size, unsigned int value)
735 * \brief Write a value to PCI configuration space
737 * \param[in] bus Pointer to pci bus
738 * \param[in] devfn PCI device function number
739 * \param[in] where PCI register number
740 * \param[in] size The register size to be written
741 * \param[in] value The valule to be written
742 * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
743 * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
744 * \return PCIBIOS_SUCCESSFUL OK
745 * \ingroup IFX_PCIE_OS
747 static int ifx_pcie_write_config(struct pci_bus
*bus
, unsigned int devfn
, int where
, int size
, unsigned int value
)
749 int bus_number
= bus
->number
;
750 int ret
= PCIBIOS_SUCCESSFUL
;
751 struct ifx_pci_controller
*ctrl
= bus
->sysdata
;
752 int pcie_port
= ctrl
->port
;
753 unsigned int tvalue
= value
;
756 /* Make sure the address is aligned to natural boundary */
757 if (unlikely(((size
- 1) & where
))) {
758 ret
= PCIBIOS_BAD_REGISTER_NUMBER
;
762 * If we are second controller, we have to cheat OS so that it assume
763 * its bus number starts from 0 in host controller
765 bus_number
= ifx_pcie_bus_nr_deduct(bus_number
, pcie_port
);
768 * We need to force the bus number to be zero on the root
769 * bus. Linux numbers the 2nd root bus to start after all
772 if (bus
->parent
== NULL
)
775 if (pcie_valid_config(pcie_port
, bus_number
, PCI_SLOT(devfn
)) == 0) {
776 ret
= PCIBIOS_DEVICE_NOT_FOUND
;
780 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
, "%s: %02x:%02x.%01x/%02x:%01d value=%08x\n", __func__
,
781 bus_number
, PCI_SLOT(devfn
), PCI_FUNC(devfn
), where
, size
, value
);
783 /* XXX, some PCIe device may need some delay */
784 PCIE_IRQ_LOCK(ifx_pcie_lock
);
787 * To configure the correct bus topology using native way, we have to cheat Os so that
788 * it can configure the PCIe hardware correctly.
790 tvalue
= ifx_pcie_bus_enum_hack(bus
, devfn
, where
, value
, pcie_port
, 0);
792 if (bus_number
== 0) { /* RC itself */
796 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: wr local cfg, offset:%08x, fill:%08x\n", __func__
, t
, value
);
797 data
= ifx_pcie_rc_cfg_rd(pcie_port
, t
);
798 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: rd local cfg, offset:%08x, data:%08x\n", __func__
, t
, data
);
800 data
= ifx_pcie_size_to_value(where
, size
, data
, tvalue
);
802 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: wr local cfg, offset:%08x, value:%08x\n", __func__
, t
, data
);
803 ifx_pcie_rc_cfg_wr(pcie_port
, t
, data
);
804 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: rd local cfg, offset:%08x, value:%08x\n",
805 __func__
, t
, ifx_pcie_rc_cfg_rd(pcie_port
, t
));
807 unsigned int addr
= pcie_bus_addr(bus_number
, devfn
, where
);
809 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: wr cfg, offset:%08x, fill:%08x\n", __func__
, addr
, value
);
810 data
= ifx_pcie_cfg_rd(pcie_port
, addr
);
811 if (pcie_port
== IFX_PCIE_PORT0
) {
812 #ifdef CONFIG_IFX_PCIE_HW_SWAP
813 data
= le32_to_cpu(data
);
814 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
816 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
817 data
= le32_to_cpu(data
);
818 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
820 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
,"%s: rd cfg, offset:%08x, data:%08x\n", __func__
, addr
, data
);
822 data
= ifx_pcie_size_to_value(where
, size
, data
, tvalue
);
823 if (pcie_port
== IFX_PCIE_PORT0
) {
824 #ifdef CONFIG_IFX_PCIE_HW_SWAP
825 data
= cpu_to_le32(data
);
826 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
828 #ifdef CONFIG_IFX_PCIE1_HW_SWAP
829 data
= cpu_to_le32(data
);
830 #endif /* CONFIG_IFX_PCIE_HW_SWAP */
832 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
, "%s: wr cfg, offset:%08x, value:%08x\n", __func__
, addr
, data
);
833 ifx_pcie_cfg_wr(pcie_port
, addr
, data
);
834 IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG
, "%s: rd cfg, offset:%08x, value:%08x\n",
835 __func__
, addr
, ifx_pcie_cfg_rd(pcie_port
, addr
));
837 PCIE_IRQ_UNLOCK(ifx_pcie_lock
);
842 static struct resource ifx_pcie_io_resource
= {
843 .name
= "PCIe0 I/O space",
844 .start
= PCIE_IO_PHY_BASE
,
845 .end
= PCIE_IO_PHY_END
,
846 .flags
= IORESOURCE_IO
,
849 static struct resource ifx_pcie_mem_resource
= {
850 .name
= "PCIe0 Memory space",
851 .start
= PCIE_MEM_PHY_BASE
,
852 .end
= PCIE_MEM_PHY_END
,
853 .flags
= IORESOURCE_MEM
,
856 static struct pci_ops ifx_pcie_ops
= {
857 .read
= ifx_pcie_read_config
,
858 .write
= ifx_pcie_write_config
,
861 static struct ifx_pci_controller ifx_pcie_controller
[IFX_PCIE_CORE_NR
] = {
864 .pci_ops
= &ifx_pcie_ops
,
865 .mem_resource
= &ifx_pcie_mem_resource
,
866 .io_resource
= &ifx_pcie_io_resource
,
868 .port
= IFX_PCIE_PORT0
,
872 static inline void pcie_core_int_clear_all(int pcie_port
)
875 reg
= IFX_REG_R32(PCIE_IRNCR(pcie_port
));
876 reg
&= PCIE_RC_CORE_COMBINED_INT
;
877 IFX_REG_W32(reg
, PCIE_IRNCR(pcie_port
));
880 static irqreturn_t
pcie_rc_core_isr(int irq
, void *dev_id
)
882 struct ifx_pci_controller
*ctrl
= (struct ifx_pci_controller
*)dev_id
;
883 int pcie_port
= ctrl
->port
;
885 IFX_PCIE_PRINT(PCIE_MSG_ISR
, "PCIe RC error intr %d\n", irq
);
886 pcie_core_int_clear_all(pcie_port
);
890 static int pcie_rc_core_int_init(int pcie_port
)
894 /* Enable core interrupt */
895 IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT
, PCIE_IRNEN(pcie_port
));
898 IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT
, PCIE_IRNCR(pcie_port
));
899 ret
= request_irq(pcie_irqs
[pcie_port
].ir_irq
.irq
, pcie_rc_core_isr
, IRQF_DISABLED
,
900 pcie_irqs
[pcie_port
].ir_irq
.name
, &ifx_pcie_controller
[pcie_port
]);
902 printk(KERN_ERR
"%s request irq %d failed\n", __func__
, IFX_PCIE_IR
);
907 int ifx_pcie_bios_map_irq(const struct pci_dev
*dev
, u8 slot
, u8 pin
)
909 unsigned int irq_bit
= 0;
911 struct ifx_pci_controller
*ctrl
= dev
->bus
->sysdata
;
912 int pcie_port
= ctrl
->port
;
914 IFX_PCIE_PRINT(PCIE_MSG_FIXUP
, "%s port %d dev %s slot %d pin %d \n", __func__
, pcie_port
, pci_name(dev
), slot
, pin
);
916 if ((pin
== PCIE_LEGACY_DISABLE
) || (pin
> PCIE_LEGACY_INT_MAX
)) {
917 printk(KERN_WARNING
"WARNING: dev %s: invalid interrupt pin %d\n", pci_name(dev
), pin
);
920 /* Pin index so minus one */
921 irq_bit
= pcie_irqs
[pcie_port
].legacy_irq
[pin
- 1].irq_bit
;
922 irq
= pcie_irqs
[pcie_port
].legacy_irq
[pin
- 1].irq
;
923 IFX_REG_SET_BIT(irq_bit
, PCIE_IRNEN(pcie_port
));
924 IFX_REG_SET_BIT(irq_bit
, PCIE_IRNCR(pcie_port
));
925 IFX_PCIE_PRINT(PCIE_MSG_FIXUP
, "%s dev %s irq %d assigned\n", __func__
, pci_name(dev
), irq
);
930 * \fn int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
931 * \brief Called to perform platform specific PCI setup
933 * \param[in] dev The Linux PCI device structure for the device to map
935 * \ingroup IFX_PCIE_OS
937 int ifx_pcie_bios_plat_dev_init(struct pci_dev
*dev
)
940 unsigned int dconfig
;
942 /* Enable reporting System errors and parity errors on all devices */
943 /* Enable parity checking and error reporting */
944 pci_read_config_word(dev
, PCI_COMMAND
, &config
);
945 config
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
/*| PCI_COMMAND_INVALIDATE |
946 PCI_COMMAND_FAST_BACK*/;
947 pci_write_config_word(dev
, PCI_COMMAND
, config
);
949 if (dev
->subordinate
) {
950 /* Set latency timers on sub bridges */
951 pci_write_config_byte(dev
, PCI_SEC_LATENCY_TIMER
, 0x40); /* XXX, */
952 /* More bridge error detection */
953 pci_read_config_word(dev
, PCI_BRIDGE_CONTROL
, &config
);
954 config
|= PCI_BRIDGE_CTL_PARITY
| PCI_BRIDGE_CTL_SERR
;
955 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
, config
);
957 /* Enable the PCIe normal error reporting */
958 pos
= pci_find_capability(dev
, PCI_CAP_ID_EXP
);
960 /* Disable system error generation in response to error messages */
961 pci_read_config_word(dev
, pos
+ PCI_EXP_RTCTL
, &config
);
962 config
&= ~(PCI_EXP_RTCTL_SECEE
| PCI_EXP_RTCTL_SENFEE
| PCI_EXP_RTCTL_SEFEE
);
963 pci_write_config_word(dev
, pos
+ PCI_EXP_RTCTL
, config
);
965 /* Clear PCIE Capability's Device Status */
966 pci_read_config_word(dev
, pos
+ PCI_EXP_DEVSTA
, &config
);
967 pci_write_config_word(dev
, pos
+ PCI_EXP_DEVSTA
, config
);
969 /* Update Device Control */
970 pci_read_config_word(dev
, pos
+ PCI_EXP_DEVCTL
, &config
);
971 /* Correctable Error Reporting */
972 config
|= PCI_EXP_DEVCTL_CERE
;
973 /* Non-Fatal Error Reporting */
974 config
|= PCI_EXP_DEVCTL_NFERE
;
975 /* Fatal Error Reporting */
976 config
|= PCI_EXP_DEVCTL_FERE
;
977 /* Unsupported Request */
978 config
|= PCI_EXP_DEVCTL_URRE
;
979 pci_write_config_word(dev
, pos
+ PCI_EXP_DEVCTL
, config
);
982 /* Find the Advanced Error Reporting capability */
983 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_ERR
);
985 /* Clear Uncorrectable Error Status */
986 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, &dconfig
);
987 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, dconfig
);
988 /* Enable reporting of all uncorrectable errors */
989 /* Uncorrectable Error Mask - turned on bits disable errors */
990 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_MASK
, 0);
992 * Leave severity at HW default. This only controls if
993 * errors are reported as uncorrectable or
994 * correctable, not if the error is reported.
996 /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
997 /* Clear Correctable Error Status */
998 pci_read_config_dword(dev
, pos
+ PCI_ERR_COR_STATUS
, &dconfig
);
999 pci_write_config_dword(dev
, pos
+ PCI_ERR_COR_STATUS
, dconfig
);
1000 /* Enable reporting of all correctable errors */
1001 /* Correctable Error Mask - turned on bits disable errors */
1002 pci_write_config_dword(dev
, pos
+ PCI_ERR_COR_MASK
, 0);
1003 /* Advanced Error Capabilities */
1004 pci_read_config_dword(dev
, pos
+ PCI_ERR_CAP
, &dconfig
);
1005 /* ECRC Generation Enable */
1006 if (dconfig
& PCI_ERR_CAP_ECRC_GENC
)
1007 dconfig
|= PCI_ERR_CAP_ECRC_GENE
;
1008 /* ECRC Check Enable */
1009 if (dconfig
& PCI_ERR_CAP_ECRC_CHKC
)
1010 dconfig
|= PCI_ERR_CAP_ECRC_CHKE
;
1011 pci_write_config_dword(dev
, pos
+ PCI_ERR_CAP
, dconfig
);
1013 /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
1014 /* Enable Root Port's interrupt in response to error messages */
1015 pci_write_config_dword(dev
, pos
+ PCI_ERR_ROOT_COMMAND
,
1016 PCI_ERR_ROOT_CMD_COR_EN
|
1017 PCI_ERR_ROOT_CMD_NONFATAL_EN
|
1018 PCI_ERR_ROOT_CMD_FATAL_EN
);
1019 /* Clear the Root status register */
1020 pci_read_config_dword(dev
, pos
+ PCI_ERR_ROOT_STATUS
, &dconfig
);
1021 pci_write_config_dword(dev
, pos
+ PCI_ERR_ROOT_STATUS
, dconfig
);
1023 /* WAR, only 128 MRRS is supported, force all EPs to support this value */
1024 pcie_set_readrq(dev
, 128);
/* Pulse the PHY reset and give the PLL time to settle.
 * NOTE(review): the settle delay was missing from the corrupted source;
 * reconstructed — verify the delay value against the upstream driver. */
static void pcie_phy_rst(int pcie_port)
{
	pcie_phy_rst_assert(pcie_port);
	pcie_phy_rst_deassert(pcie_port);

	/* Make sure PHY PLL is stable */
	udelay(20);
}
1036 static int pcie_rc_initialize(int pcie_port
)
1040 pcie_rcu_endian_setup(pcie_port
);
1042 pcie_ep_gpio_rst_init(pcie_port
);
1045 * XXX, PCIe elastic buffer bug will cause not to be detected. One more
1046 * reset PCIe PHY will solve this issue
1048 for (i
= 0; i
< IFX_PCIE_PHY_LOOP_CNT
; i
++) {
1049 /* Disable PCIe PHY Analog part for sanity check */
1050 pcie_phy_pmu_disable(pcie_port
);
1051 pcie_phy_rst(pcie_port
);
1052 /* PCIe Core reset enabled, low active, sw programmed */
1053 pcie_core_rst_assert(pcie_port
);
1054 /* Put PCIe EP in reset status */
1055 pcie_device_rst_assert(pcie_port
);
1056 /* PCI PHY & Core reset disabled, high active, sw programmed */
1057 pcie_core_rst_deassert(pcie_port
);
1058 /* Already in a quiet state, program PLL, enable PHY, check ready bit */
1059 pcie_phy_clock_mode_setup(pcie_port
);
1060 /* Enable PCIe PHY and Clock */
1061 pcie_core_pmu_setup(pcie_port
);
1062 /* Clear status registers */
1063 pcie_status_register_clear(pcie_port
);
1064 #ifdef CONFIG_PCI_MSI
1065 pcie_msi_init(pcie_port
);
1066 #endif /* CONFIG_PCI_MSI */
1067 pcie_rc_cfg_reg_setup(pcie_port
);
1069 /* Once link is up, break out */
1070 if (pcie_app_loigc_setup(pcie_port
) == 0)
1073 if (i
>= IFX_PCIE_PHY_LOOP_CNT
) {
1074 printk(KERN_ERR
"%s link up failed!!!!!\n", __func__
);
1077 /* NB, don't increase ACK/NACK timer timeout value, which will cause a lot of COR errors */
1078 pcie_replay_time_update(pcie_port
);
1082 static int inline ifx_pcie_startup_port_nr(void)
1084 int pcie_port
= IFX_PCIE_PORT0
;
1086 pcie_port
= IFX_PCIE_PORT0
;
1091 * \fn static int __init ifx_pcie_bios_init(void)
1092 * \brief Initialize the IFX PCIe controllers
1094 * \return -EIO PCIe PHY link is not up
1095 * \return -ENOMEM Configuration/IO space failed to map
1097 * \ingroup IFX_PCIE_OS
1099 extern int (*ltqpci_plat_arch_init
)(struct pci_dev
*dev
);
1100 extern int (*ltqpci_map_irq
)(const struct pci_dev
*dev
, u8 slot
, u8 pin
);
1101 static int __devinit
ltq_pcie_probe(struct platform_device
*pdev
)
1103 char ver_str
[128] = {0};
1104 void __iomem
*io_map_base
;
1107 ltqpci_map_irq
= ifx_pcie_bios_map_irq
;
1108 ltqpci_plat_arch_init
= ifx_pcie_bios_plat_dev_init
;
1109 /* Enable AHB Master/ Slave */
1110 pcie_ahb_pmu_setup();
1112 startup_port
= ifx_pcie_startup_port_nr();
1114 ltq_gpio_request(&pdev
->dev
, IFX_PCIE_GPIO_RESET
, 0, 1, "pcie-reset");
1116 for (pcie_port
= startup_port
; pcie_port
< IFX_PCIE_CORE_NR
; pcie_port
++){
1117 if (pcie_rc_initialize(pcie_port
) == 0) {
1118 /* Otherwise, warning will pop up */
1119 io_map_base
= ioremap(PCIE_IO_PHY_PORT_TO_BASE(pcie_port
), PCIE_IO_SIZE
);
1120 if (io_map_base
== NULL
)
1122 ifx_pcie_controller
[pcie_port
].pcic
.io_map_base
= (unsigned long)io_map_base
;
1123 register_pci_controller(&ifx_pcie_controller
[pcie_port
].pcic
);
1124 /* XXX, clear error status */
1125 pcie_rc_core_int_init(pcie_port
);
1129 printk(KERN_INFO
"%s", ver_str
);
1133 static struct platform_driver ltq_pcie_driver
= {
1134 .probe
= ltq_pcie_probe
,
1136 .name
= "pcie-xway",
1137 .owner
= THIS_MODULE
,
1141 int __init
pciebios_init(void)
1143 return platform_driver_register(<q_pcie_driver
);
1146 arch_initcall(pciebios_init
);