[openwrt/openwrt.git] target/linux/ubicom32/files/arch/ubicom32/mach-common/pci.c
1 /*
2 * arch/ubicom32/mach-common/pci.c
3 * PCI interface management.
4 *
5 * (C) Copyright 2009, Ubicom, Inc.
6 *
7 * This file is part of the Ubicom32 Linux Kernel Port.
8 *
9 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
10 * it and/or modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation, either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
15 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
16 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with the Ubicom32 Linux Kernel Port. If not,
21 * see <http://www.gnu.org/licenses/>.
22 *
23 * Ubicom32 implementation derived from (with many thanks):
24 * arch/m68knommu
25 * arch/blackfin
26 * arch/parisc
27 */
28
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/pci.h>
32 #include <linux/slab.h>
33 #include <linux/init.h>
34 #include <linux/io.h>
35 #include <linux/seq_file.h>
36 #include <linux/proc_fs.h>
37
38 #include <asm/devtree.h>
39 #include <asm/ip5000.h>
40 #include <asm/ubicom32-common.h>
41
42 static int debug_pci = 1;
43
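/*
 * Serialization of PCI master requests: with PCI_USE_INTERNAL_LOCK the
 * hardware lock bit is used, on SMP a spinlock protects the request slot,
 * and on UP it is enough to disable interrupts on the local CPU.
 */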
44 /* #define PCI_USE_INTERNAL_LOCK 1 */
45
46 #ifdef PCI_USE_INTERNAL_LOCK
47 #define PCI_LOCK(lock, irqflag) pci_lock_acquire(irqflag)
48 #define PCI_UNLOCK(lock, irqflag) pci_lock_release(irqflag)
49 #elif defined(CONFIG_SMP)
50 static DEFINE_SPINLOCK(pci_master_lock);
51 #define PCI_LOCK(lock, irqflag) spin_lock_irqsave(lock, irqflag)
52 #define PCI_UNLOCK(lock, irqflag) spin_unlock_irqrestore(lock, irqflag)
53 #else
54 #define PCI_LOCK(lock, irqflag) local_irq_save(irqflag)
55 #define PCI_UNLOCK(lock, irqflag) local_irq_restore(irqflag)
56 #endif
57
58 #define PCI_DEV0_IDSEL CONFIG_PCI_DEV0_IDSEL
59 #define PCI_DEV1_IDSEL CONFIG_PCI_DEV1_IDSEL
60
61 /*
62 * PCI commands
63 */
64 #define PCI_CMD_INT_ACK 0x00 /* not supported */
65 #define PCI_CMD_SPECIAL 0x01 /* not supported */
66 #define PCI_CMD_IO_READ 0x02
67 #define PCI_CMD_IO_WRITE 0x03
68 #define PCI_CMD_MEM_READ 0x06
69 #define PCI_CMD_MEM_WRITE 0x07
70 #define PCI_CMD_CFG_READ 0x0a
71 #define PCI_CMD_CFG_WRITE 0x0b
72 #define PCI_CMD_MEM_READ_MULT 0x0c /* not supported */
73 #define PCI_CMD_DUAL_ADDR 0x0d /* not supported */
74 #define PCI_CMD_MEM_READ_LINE 0x0e /* not supported */
75 #define PCI_CMD_MEM_WRITE_INVAL 0x0f /* not supported */
76 /*
77 * Status codes, returned by pci_read_u32() and pci_write_u32()
78 */
79 #define PCI_RESP_IN_PROGRESS 0xff /* request still in queue */
80 #define PCI_RESP_OK 0
81 /*
82 * The following codes indicate that the request has completed
83 */
84 #define PCI_RESP_NO_DEVSEL 1 /* timeout before target asserted
85 * DEVSEL! */
86 #define PCI_RESP_LOST_DEVSEL 2 /* had DEVSEL, but went away before
87 * transfer completed! */
88 #define PCI_RESP_BAD_TRDY 3 /* target asserted TRDY without
89 * DEVSEL! */
90 #define PCI_RESP_NO_TRDY 4 /* timeout before target asserted
91 * TRDY! */
92 #define PCI_RESP_BAD_STOP 5 /* target asserted STOP and TRDY
93 * without DEVSEL! */
94 #define PCI_RESP_TARGET_ABORT 6
95 #define PCI_RESP_TARGET_RETRY 7
96 #define PCI_RESP_TARGET_DISCONNECT 8
97 #define PCI_RESP_MISMATCH 9 /* data read back doesn't match data
98 * written - debug only, the core PCI
99 * routines never return this */
100 #define PCI_RESP_DET_SERR 10
101 #define PCI_RESP_DET_PERR 11
102 #define PCI_RESP_MALFORMED_REQ 12 /* Could be due to misaligned
103 * requests or invalid address */
104 #define PCI_RESP_NO_RESOURCE 13 /* Could be memory or other resource
105 * like queue space */
106 #define PCI_RESP_ERROR 14 /* All-encompassing error */
107
108 /* registers in PCI config space */
109 #define PCI_DEVICE_VENDOR_ID_REG 0x00
110 #define PCI_STATUS_COMMAND_REG 0x04
111 #define PCI_CLASS_REVISION_REG 0x08
112 #define PCI_BHLC_REG 0x0c /* BIST, Header type, Latency
113 * timer, Cache line size */
114 #define PCI_BASE_ADDR_REG 0x10
115 #define PCI_BASE_REG_COUNT 6
116 #define CARDBUS_CIS_PTR_REG 0x28
117 #define PCI_SUB_SYSTEM_ID_REG 0x2c
118 #define PCI_EXP_ROM_ADDR_REG 0x30
119 #define PCI_CAP_PTR_REG 0x34
120 #define PCI_LGPL_REG 0x3C /* max Latency, min Gnt, interrupt
121 * Pin, interrupt Line */
122
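/*
 * A single outstanding PCI master request.  A request is issued by storing
 * a pointer to it in pci_node->req and raising the devtree node's send
 * interrupt (pci_set_hrt_interrupt()); the other side (presumably the PCI
 * hardware thread) clears req and fills in data and status when the
 * transaction completes, hence the volatile fields.
 */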
123 struct pci_master_request {
124 volatile u32_t pci_address; /* must be 4-byte aligned */
125 volatile u32_t data; /* must be 4-byte aligned */
126 volatile u8_t cmd;
127 volatile u8_t byte_valid;
128 volatile u8_t status;
129 };
130
131 struct pci_devnode {
132 struct devtree_node dn;
133 u32_t pci_idsel_0;
134 u32_t pci_idsel_1;
135 u32_t pci_cpu_address;
136 struct pci_master_request volatile *volatile req;
137 };
138
139 static struct pci_master_request req; /* globally used for faster master write
140 * (discarding result when possible) */
141 static struct pci_devnode *pci_node;
142
143 #if !defined(CONFIG_DEBUG_PCIMEASURE)
144 #define PCI_DECLARE_MEASUREMENT
145 #define PCI_MEASUREMENT_START()
146 #define PCI_MEASUREMENT_END(idx)
147 #else
148 #define PCI_DECLARE_MEASUREMENT \
149 int __diff; \
150 unsigned int __tstart;
151
152 #define PCI_MEASUREMENT_START() \
153 __tstart = UBICOM32_IO_TIMER->sysval;
154
155 #define PCI_MEASUREMENT_END(idx) \
156 __diff = (int)UBICOM32_IO_TIMER->sysval - (int)__tstart; \
157 pci_measurement_update((idx), __diff);
158
159 #define PCI_WEIGHT 32
160
161 struct pci_measurement {
162 volatile unsigned int min;
163 volatile unsigned int avg;
164 volatile unsigned int max;
165 };
166
167 enum pci_measurement_list {
168 PCI_MEASUREMENT_READ32,
169 PCI_MEASUREMENT_WRITE32,
170 PCI_MEASUREMENT_READ16,
171 PCI_MEASUREMENT_WRITE16,
172 PCI_MEASUREMENT_READ8,
173 PCI_MEASUREMENT_WRITE8,
174 PCI_MEASUREMENT_LAST,
175 };
176
177 static const char *pci_measurement_name_list[PCI_MEASUREMENT_LAST] = {
178 "READ32",
179 "WRITE32",
180 "READ16",
181 "WRITE16",
182 "READ8",
183 "WRITE8"
184 };
185 static struct pci_measurement pci_measurements[PCI_MEASUREMENT_LAST];
186
187 /*
188 * pci_measurement_update()
189 * Update an entry in the measurement array for this idx.
190 */
191 static void pci_measurement_update(int idx, int sample)
192 {
193 struct pci_measurement *pm = &pci_measurements[idx];
194 if ((pm->min == 0) || (pm->min > sample)) {
195 pm->min = sample;
196 }
197 if (pm->max < sample) {
198 pm->max = sample;
199 }
200 pm->avg = ((pm->avg * (PCI_WEIGHT - 1)) + sample) / PCI_WEIGHT;
201 }
202 #endif
203
204 #if defined(PCI_USE_INTERNAL_LOCK)
205 /*
206 * pci_lock_release()
207 * Release the PCI lock.
208 */
209 static void pci_lock_release(unsigned long irqflag)
210 {
211 UBICOM32_UNLOCK(PCI_LOCK_BIT);
212 }
213
214 /*
215 * pci_lock_acquire()
216 * Acquire the PCI lock, spin if not available.
217 */
218 static void pci_lock_acquire(unsigned long irqflag)
219 {
220 UBICOM32_LOCK(PCI_LOCK_BIT);
221 }
222 #endif
223
224 /*
225 * pci_set_hrt_interrupt()
226 */
227 static inline void pci_set_hrt_interrupt(struct pci_devnode *pci_node)
228 {
229 ubicom32_set_interrupt(pci_node->dn.sendirq);
230 }
231
232 /*
233 * pci_read_u32()
234 * Synchronously read 32 bits from PCI space.
235 */
236 u8 pci_read_u32(u8 pci_cmd, u32 address, u32 *data)
237 {
238 u8 status;
239 unsigned long irqflag;
240
241
242 /*
243 * Fill in the request.
244 */
245 volatile struct pci_master_request lreq;
246 PCI_DECLARE_MEASUREMENT;
247
248 lreq.pci_address = address;
249 lreq.cmd = pci_cmd;
250 lreq.byte_valid = 0xf; /* enable all bytes */
251
252 /*
253 * Wait for any previous request to complete and then make this request.
254 */
255 PCI_MEASUREMENT_START();
256 PCI_LOCK(&pci_master_lock, irqflag);
257 while (unlikely(pci_node->req == &req))
258 ;
259 pci_node->req = &lreq;
260 pci_set_hrt_interrupt(pci_node);
261 PCI_UNLOCK(&pci_master_lock, irqflag);
262
263 /*
264 * Wait for the result to show up.
265 */
266 while (unlikely(pci_node->req == &lreq))
267 ;
268 status = lreq.status;
269 if (likely(status == PCI_RESP_OK))
270 *data = le32_to_cpu(lreq.data);
271 else
272 *data = 0;
273 PCI_MEASUREMENT_END(PCI_MEASUREMENT_READ32);
274 return status;
275 }
276
277 /*
278 * pci_write_u32()
279 * Asynchronously or synchronously write 32 bits to PCI master space.
280 */
281 u8 pci_write_u32(u8 pci_cmd, u32 address, u32 data)
282 {
283 unsigned long irqflag;
284 PCI_DECLARE_MEASUREMENT;
285
286 /*
287 * Wait for any previous write or pending read to complete.
288 *
289 * We use a global data block because once we write the request
290 * we do not wait for it to complete before exiting.
291 */
292 PCI_MEASUREMENT_START();
293 PCI_LOCK(&pci_master_lock, irqflag);
294 while (unlikely(pci_node->req == &req))
295 ;
296 req.pci_address = address;
297 req.data = cpu_to_le32(data);
298 req.cmd = pci_cmd;
299 req.byte_valid = 0xf; /* enable all bytes */
300 pci_node->req = &req;
301 pci_set_hrt_interrupt(pci_node);
302 PCI_UNLOCK(&pci_master_lock, irqflag);
303 PCI_MEASUREMENT_END(PCI_MEASUREMENT_WRITE32);
304 return PCI_RESP_OK;
305 }
306
307 /*
308 * pci_read_u16()
309 * Synchronously read 16 bits from PCI space.
310 */
311 u8 pci_read_u16(u8 pci_cmd, u32 address, u16 *data)
312 {
313 u8 status;
314 unsigned long irqflag;
315
316 /*
317 * Fill in the request.
318 */
319 volatile struct pci_master_request lreq;
320 PCI_DECLARE_MEASUREMENT;
321
322 lreq.pci_address = address & ~2;
323 lreq.cmd = pci_cmd;
324 lreq.byte_valid = (address & 2) ? 0xc : 0x3;
325
326 /*
327 * Wait for any previous request to complete and then make this request.
328 */
329 PCI_MEASUREMENT_START();
330 PCI_LOCK(&pci_master_lock, irqflag);
331 while (unlikely(pci_node->req == &req))
332 ;
333 pci_node->req = &lreq;
334 pci_set_hrt_interrupt(pci_node);
335 PCI_UNLOCK(&pci_master_lock, irqflag);
336
337 /*
338 * Wait for the result to show up.
339 */
340 while (unlikely(pci_node->req == &lreq))
341 ;
342 status = lreq.status;
343 if (likely(status == PCI_RESP_OK)) {
344 lreq.data = le32_to_cpu(lreq.data);
345 *data = (u16)((address & 2) ? (lreq.data >> 16) : lreq.data);
346 } else
347 *data = 0;
348 PCI_MEASUREMENT_END(PCI_MEASUREMENT_READ16);
349 return status;
350 }
351
352 /*
353 * pci_write_u16()
354 * Asynchronously or synchronously write 16 bits to PCI master space.
355 */
356 u8 pci_write_u16(u8 pci_cmd, u32 address, u16 data)
357 {
358 unsigned long irqflag;
359 PCI_DECLARE_MEASUREMENT;
360
361 /*
362 * Wait for any previous write or pending read to complete.
363 *
364 * We use a global data block because once we write the request
365 * we do not wait for it to complete before exiting.
366 */
367 PCI_MEASUREMENT_START();
368 PCI_LOCK(&pci_master_lock, irqflag);
369 while (unlikely(pci_node->req == &req))
370 ;
371 req.pci_address = address & ~2;
372 req.data = (u32)data;
373 req.data = cpu_to_le32((address & 2) ? (req.data << 16) : req.data);
374 req.cmd = pci_cmd;
375 req.byte_valid = (address & 2) ? 0xc : 0x3;
376 pci_node->req = &req;
377 pci_set_hrt_interrupt(pci_node);
378 PCI_UNLOCK(&pci_master_lock, irqflag);
379 PCI_MEASUREMENT_END(PCI_MEASUREMENT_WRITE16);
380 return PCI_RESP_OK;
381 }
382
383 /*
384 * pci_read_u8()
385 * Synchronously read 8 bits from PCI space.
386 */
387 u8 pci_read_u8(u8 pci_cmd, u32 address, u8 *data)
388 {
389 u8 status;
390 unsigned long irqflag;
391
392 /*
393 * Fill in the request.
394 */
395 volatile struct pci_master_request lreq;
396 PCI_DECLARE_MEASUREMENT;
397
398 lreq.pci_address = address & ~3;
399 lreq.cmd = pci_cmd;
400 lreq.byte_valid = 1 << (address & 0x3);
401
402 /*
403 * Wait for any previous request to complete and then make this request.
404 */
405 PCI_MEASUREMENT_START();
406 PCI_LOCK(&pci_master_lock, irqflag);
407 while (unlikely(pci_node->req == &req))
408 ;
409 pci_node->req = &lreq;
410 pci_set_hrt_interrupt(pci_node);
411 PCI_UNLOCK(&pci_master_lock, irqflag);
412
413 /*
414 * Wait for the result to show up.
415 */
416 while (unlikely(pci_node->req == &lreq))
417 ;
418 status = lreq.status;
419 if (likely(status == PCI_RESP_OK)) {
420 *data = (u8)(lreq.data >> (24 - ((address & 0x3) << 3)));
421 } else
422 *data = 0;
423 PCI_MEASUREMENT_END(PCI_MEASUREMENT_READ8);
424 return status;
425 }
426
427 /*
428 * pci_write_u8()
429 * Asynchronously or synchronously write 8 bits to PCI master space.
430 */
431 u8 pci_write_u8(u8 pci_cmd, u32 address, u8 data)
432 {
433 unsigned long irqflag;
434 PCI_DECLARE_MEASUREMENT;
435
436 /*
437 * Wait for any previous write or pending read to complete.
438 *
439 * We use a global data block because once we write the request
440 * we do not wait for it to complete before exiting.
441 */
442 PCI_MEASUREMENT_START();
443 PCI_LOCK(&pci_master_lock, irqflag);
444 while (unlikely(pci_node->req == &req))
445 ;
446 req.pci_address = address & ~3;
447 req.data = ((u32)data << (24 - ((address & 0x3) << 3)));
448 req.cmd = pci_cmd;
449 req.byte_valid = 1 << (address & 0x3);
450 pci_node->req = &req;
451 pci_set_hrt_interrupt(pci_node);
452 PCI_UNLOCK(&pci_master_lock, irqflag);
453 PCI_MEASUREMENT_END(PCI_MEASUREMENT_WRITE8);
454 return PCI_RESP_OK;
455 }
456
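/*
 * readl()/readw()/readb() style accessors for PCI memory space, built on
 * the synchronous pci_read_*()/pci_write_*() master requests above.
 *
 * Illustrative (hypothetical) use from a driver:
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	u32 id = ubi32_pci_read_u32(regs);
 */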
457 unsigned int ubi32_pci_read_u32(const volatile void __iomem *addr)
458 {
459 unsigned int data;
460 pci_read_u32(PCI_CMD_MEM_READ, (u32)addr, &data);
461 return data;
462 }
463 EXPORT_SYMBOL(ubi32_pci_read_u32);
464
465 unsigned short ubi32_pci_read_u16(const volatile void __iomem *addr)
466 {
467 unsigned short data;
468 pci_read_u16(PCI_CMD_MEM_READ, (u32)addr, &data);
469 return data;
470 }
471 EXPORT_SYMBOL(ubi32_pci_read_u16);
472
473 unsigned char ubi32_pci_read_u8(const volatile void __iomem *addr)
474 {
475 unsigned char data;
476 pci_read_u8(PCI_CMD_MEM_READ, (u32)addr, &data);
477 return data;
478 }
479 EXPORT_SYMBOL(ubi32_pci_read_u8);
480
481 void ubi32_pci_write_u32(unsigned int val, const volatile void __iomem *addr)
482 {
483 pci_write_u32(PCI_CMD_MEM_WRITE, (u32)addr, val);
484 }
485 EXPORT_SYMBOL(ubi32_pci_write_u32);
486
487 void ubi32_pci_write_u16(unsigned short val, const volatile void __iomem *addr)
488 {
489 pci_write_u16(PCI_CMD_MEM_WRITE, (u32)addr, val);
490 }
491 EXPORT_SYMBOL(ubi32_pci_write_u16);
492
493 void ubi32_pci_write_u8(unsigned char val, const void volatile __iomem *addr)
494 {
495 pci_write_u8(PCI_CMD_MEM_WRITE, (u32)addr, val);
496 }
497 EXPORT_SYMBOL(ubi32_pci_write_u8);
498
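/*
 * When CONFIG_DEBUG_PCIMEASURE is enabled, the min/avg/max latency of each
 * PCI transaction type is exported through /proc/pci_measurements.
 */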
499 #if defined(CONFIG_DEBUG_PCIMEASURE)
500 static unsigned int pci_cycles_to_nano(unsigned int cycles, unsigned int frequency)
501 {
502 unsigned int nano = ((cycles * 1000) / (frequency / 1000000));
503 return nano;
504 }
505
506 /*
507 * pci_measurement_show()
508 * Print out the min, avg, max values for each PCI transaction type.
509 *
510 * By request, the max value is reset after each dump.
511 */
512 static int pci_measurement_show(struct seq_file *p, void *v)
513 {
514 unsigned int min, avg, max;
515 unsigned int freq = processor_frequency();
516 int trans = *((loff_t *) v);
517
518 if (trans == 0) {
519 seq_puts(p, "min\tavg\tmax\t(nano-seconds)\n");
520 }
521
522 if (trans >= PCI_MEASUREMENT_LAST) {
523 return 0;
524 }
525
526 min = pci_cycles_to_nano(pci_measurements[trans].min, freq);
527 avg = pci_cycles_to_nano(pci_measurements[trans].avg, freq);
528 max = pci_cycles_to_nano(pci_measurements[trans].max, freq);
529 pci_measurements[trans].max = 0;
530 seq_printf(p, "%u\t%u\t%u\t%s\n", min, avg, max, pci_measurement_name_list[trans]);
531 return 0;
532 }
533
534 static void *pci_measurement_start(struct seq_file *f, loff_t *pos)
535 {
536 return (*pos < PCI_MEASUREMENT_LAST) ? pos : NULL;
537 }
538
539 static void *pci_measurement_next(struct seq_file *f, void *v, loff_t *pos)
540 {
541 (*pos)++;
542 if (*pos >= PCI_MEASUREMENT_LAST)
543 return NULL;
544 return pos;
545 }
546
547 static void pci_measurement_stop(struct seq_file *f, void *v)
548 {
549 /* Nothing to do */
550 }
551
552 static const struct seq_operations pci_measurement_seq_ops = {
553 .start = pci_measurement_start,
554 .next = pci_measurement_next,
555 .stop = pci_measurement_stop,
556 .show = pci_measurement_show,
557 };
558
559 static int pci_measurement_open(struct inode *inode, struct file *filp)
560 {
561 return seq_open(filp, &pci_measurement_seq_ops);
562 }
563
564 static const struct file_operations pci_measurement_fops = {
565 .open = pci_measurement_open,
566 .read = seq_read,
567 .llseek = seq_lseek,
568 .release = seq_release,
569 };
570
571 static int __init pci_measurement_init(void)
572 {
573 proc_create("pci_measurements", 0, NULL, &pci_measurement_fops);
574 return 0;
575 }
576 module_init(pci_measurement_init);
577 #endif
578
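/*
 * ubi32_pci_read_config()
 *	Read PCI configuration space.
 *
 *	Only two IDSEL lines (slots 0 and 1) are supported.  The config
 *	address is the per-slot IDSEL base plus the register offset, with
 *	the function number in bits 8-10; e.g. slot 1, function 2,
 *	register 0x10 gives PCI_DEV1_IDSEL + (2 << 8) + 0x10.
 */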
579 static int ubi32_pci_read_config(struct pci_bus *bus, unsigned int devfn,
580 int where, int size, u32 *value)
581 {
582 u8 cmd;
583 u32 addr;
584 u8 data8;
585 u16 data16;
586
587 u8 slot = PCI_SLOT(devfn);
588 u8 fn = PCI_FUNC(devfn);
589
590 if (slot > 1) {
591 return PCIBIOS_DEVICE_NOT_FOUND;
592 } else if (slot == 0) {
593 addr = PCI_DEV0_IDSEL + where;
594 } else {
595 addr = PCI_DEV1_IDSEL + where;
596 }
597
598 addr += (fn << 8);
599
600 cmd = PCI_CMD_CFG_READ;
601 if (size == 1) {
602 pci_read_u8(cmd, addr, &data8);
603 *value = (u32)data8;
604 } else if (size == 2) {
605 pci_read_u16(cmd, addr, &data16);
606 *value = (u32)data16;
607 } else {
608 pci_read_u32(cmd, addr, value);
609 }
610
611 return PCIBIOS_SUCCESSFUL;
612 }
613
614 static int ubi32_pci_write_config(struct pci_bus *bus, unsigned int devfn,
615 int where, int size, u32 value)
616 {
617 u8 cmd;
618 u32 addr;
619 u8 slot = PCI_SLOT(devfn);
620 u8 fn = PCI_FUNC(devfn);
621
622 if (slot > 1) {
623 return PCIBIOS_DEVICE_NOT_FOUND;
624 } else if (slot == 0) {
625 addr = PCI_DEV0_IDSEL + where;
626 } else {
627 addr = PCI_DEV1_IDSEL + where;
628 }
629
630 addr += (fn << 8);
631
632 cmd = PCI_CMD_CFG_WRITE;
633 if (size == 1) {
634 pci_write_u8(cmd, addr, (u8)value);
635 } else if (size == 2) {
636 pci_write_u16(cmd, addr, (u16)value);
637 } else {
638 pci_write_u32(cmd, addr, value);
639 }
640
641 return PCIBIOS_SUCCESSFUL;
642 }
643
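/*
 * DMA segment tuning is not supported on this platform; the two helpers
 * below simply report failure.
 */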
644 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
645 {
646 return -EIO;
647 }
648 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
649
650 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
651 {
652 return -EIO;
653 }
654 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
655
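/*
 * pci_iomap()
 *	Map a BAR: I/O resources go through ioport_map(), memory resources
 *	through ioremap()/ioremap_nocache() depending on cacheability.
 */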
656 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
657 {
658 resource_size_t start = pci_resource_start(dev, bar);
659 resource_size_t len = pci_resource_len(dev, bar);
660 unsigned long flags = pci_resource_flags(dev, bar);
661
662 if (!len || !start) {
663 return NULL;
664 }
665
666 if (maxlen && len > maxlen) {
667 len = maxlen;
668 }
669
670 if (flags & IORESOURCE_IO) {
671 return ioport_map(start, len);
672 }
673
674 if (flags & IORESOURCE_MEM) {
675 if (flags & IORESOURCE_CACHEABLE) {
676 return ioremap(start, len);
677 }
678 return ioremap_nocache(start, len);
679 }
680 return NULL;
681 }
682 EXPORT_SYMBOL(pci_iomap);
683
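/*
 * pci_iounmap()
 *	Only addresses that were actually ioremap()ed (i.e. fall inside the
 *	vmalloc area) need to be unmapped; ioport mappings are left alone.
 */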
684 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
685 {
686 if ((unsigned long)addr >= VMALLOC_START &&
687 (unsigned long)addr < VMALLOC_END) {
688 iounmap(addr);
689 }
690 }
691 EXPORT_SYMBOL(pci_iounmap);
692
693 /*
694 * From arch/arm/kernel/bios32.c
695 *
696 * PCI bios-type initialisation for PCI machines
697 *
698 * Bits taken from various places.
699 */
700 static void __init pcibios_init_hw(struct hw_pci *hw)
701 {
702 struct pci_sys_data *sys = NULL;
703 int ret;
704 int nr, busnr;
705
706 for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
707 sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
708 if (!sys)
709 panic("PCI: unable to allocate sys data!");
710
711 sys->hw = hw;
712 sys->busnr = busnr;
713 sys->map_irq = hw->map_irq;
714 sys->resource[0] = &ioport_resource;
715 sys->resource[1] = &iomem_resource;
716
717 ret = hw->setup(nr, sys);
718
719 if (ret > 0) {
720 sys->bus = hw->scan(nr, sys);
721
722 if (!sys->bus)
723 panic("PCI: unable to scan bus!");
724
725 busnr = sys->bus->subordinate + 1;
726
727 list_add(&sys->node, &hw->buses);
728 } else {
729 kfree(sys);
730 if (ret < 0)
731 break;
732 }
733 }
734 }
735
736 /*
737 * Swizzle the device pin each time we cross a bridge.
738 * This may update *pin; it returns the slot number.
739 */
740 static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
741 {
742 struct pci_sys_data *sys = dev->sysdata;
743 int slot = 0, oldpin = *pin;
744
745 if (sys->swizzle)
746 slot = sys->swizzle(dev, pin);
747
748 if (debug_pci)
749 printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
750 pci_name(dev), oldpin, *pin, slot);
751 return slot;
752 }
753
754 /*
755 * Map a slot/pin to an IRQ.
756 */
757 static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
758 {
759 struct pci_sys_data *sys = dev->sysdata;
760 int irq = -1;
761
762 if (sys->map_irq)
763 irq = sys->map_irq(dev, slot, pin);
764
765 if (debug_pci)
766 printk("PCI: %s mapping slot %d pin %d => irq %d\n",
767 pci_name(dev), slot, pin, irq);
768
769 return irq;
770 }
771
772 void __init pci_common_init(struct hw_pci *hw)
773 {
774 struct pci_sys_data *sys;
775
776 INIT_LIST_HEAD(&hw->buses);
777
778 if (hw->preinit)
779 hw->preinit();
780 pcibios_init_hw(hw);
781 if (hw->postinit)
782 hw->postinit();
783
784 pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
785 list_for_each_entry(sys, &hw->buses, node) {
786 struct pci_bus *bus = sys->bus;
787 /*
788 * Size the bridge windows.
789 */
790 pci_bus_size_bridges(bus);
791 /*
792 * Assign resources.
793 */
794 pci_bus_assign_resources(bus);
795
796 /*
797 * Tell drivers about devices found.
798 */
799 pci_bus_add_devices(bus);
800 }
801 }
802
803 char * __init pcibios_setup(char *str)
804 {
805 if (!strcmp(str, "debug")) {
806 debug_pci = 1;
807 return NULL;
808 }
809 return str;
810 }
811
812 /*
813 * From arch/i386/kernel/pci-i386.c:
814 *
815 * We need to avoid collisions with `mirrored' VGA ports
816 * and other strange ISA hardware, so we always want the
817 * addresses to be allocated in the 0x000-0x0ff region
818 * modulo 0x400.
819 *
820 * Why? Because some silly external IO cards only decode
821 * the low 10 bits of the IO address. The 0x00-0xff region
822 * is reserved for motherboard devices that decode all 16
823 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
824 * but we want to try to avoid allocating at 0x2900-0x2bff
825 * which might be mirrored at 0x0100-0x03ff..
826 */
827 void pcibios_align_resource(void *data, struct resource *res,
828 resource_size_t size, resource_size_t align)
829 {
830 resource_size_t start = res->start;
831
832 if (res->flags & IORESOURCE_IO && start & 0x300)
833 start = (start + 0x3ff) & ~0x3ff;
834
835 res->start = (start + align - 1) & ~(align - 1);
836 }
837
838
839 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
840 {
841 if (debug_pci)
842 printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev));
843 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
844 }
845
846 /*
847 * If the bus contains any of these devices, then we must not turn on
848 * parity checking of any kind. Currently this is the CyberPro 20x0 and ITE 8152.
849 */
850 static inline int pdev_bad_for_parity(struct pci_dev *dev)
851 {
852 return (dev->vendor == PCI_VENDOR_ID_INTERG &&
853 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
854 dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
855 (dev->vendor == PCI_VENDOR_ID_ITE &&
856 dev->device == PCI_DEVICE_ID_ITE_8152);
857
858 }
859
860 /*
861 * Adjust the device resources from bus-centric to Linux-centric.
862 */
863 static void __devinit
864 pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
865 {
866 resource_size_t offset;
867 int i;
868
869 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
870 if (dev->resource[i].start == 0)
871 continue;
872 if (dev->resource[i].flags & IORESOURCE_MEM)
873 offset = root->mem_offset;
874 else
875 offset = root->io_offset;
876
877 dev->resource[i].start += offset;
878 dev->resource[i].end += offset;
879 }
880 }
881
882 static void __devinit
883 pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
884 {
885 struct pci_dev *dev = bus->self;
886 int i;
887
888 if (!dev) {
889 /*
890 * Assign root bus resources.
891 */
892 for (i = 0; i < 3; i++)
893 bus->resource[i] = root->resource[i];
894 }
895 }
896
897 /*
898 * pcibios_fixup_bus - Called after each bus is probed,
899 * but before its children are examined.
900 */
901 void pcibios_fixup_bus(struct pci_bus *bus)
902 {
903 struct pci_sys_data *root = bus->sysdata;
904 struct pci_dev *dev;
905 u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
906 PCI_COMMAND_FAST_BACK;
907
908 pbus_assign_bus_resources(bus, root);
909
910 /*
911 * Walk the devices on this bus, working out what we can
912 * and can't support.
913 */
914 list_for_each_entry(dev, &bus->devices, bus_list) {
915 u16 status;
916
917 pdev_fixup_device_resources(root, dev);
918
919 pci_read_config_word(dev, PCI_STATUS, &status);
920
921 /*
922 * If any device on this bus does not support fast back
923 * to back transfers, then the bus as a whole is not able
924 * to support them. Having fast back to back transfers
925 * on saves us one PCI cycle per transaction.
926 */
927 if (!(status & PCI_STATUS_FAST_BACK))
928 features &= ~PCI_COMMAND_FAST_BACK;
929
930 if (pdev_bad_for_parity(dev))
931 features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
932
933 switch (dev->class >> 8) {
934 case PCI_CLASS_BRIDGE_PCI:
935 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
936 status |= PCI_BRIDGE_CTL_PARITY |
937 PCI_BRIDGE_CTL_MASTER_ABORT;
938 status &= ~(PCI_BRIDGE_CTL_BUS_RESET |
939 PCI_BRIDGE_CTL_FAST_BACK);
940 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
941 break;
942
943 case PCI_CLASS_BRIDGE_CARDBUS:
944 pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL,
945 &status);
946 status |= PCI_CB_BRIDGE_CTL_PARITY |
947 PCI_CB_BRIDGE_CTL_MASTER_ABORT;
948 pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL,
949 status);
950 break;
951 }
952 }
953
954 /*
955 * Now walk the devices again, this time setting them up.
956 */
957 list_for_each_entry(dev, &bus->devices, bus_list) {
958 u16 cmd;
959
960 pci_read_config_word(dev, PCI_COMMAND, &cmd);
961 cmd |= features;
962 pci_write_config_word(dev, PCI_COMMAND, cmd);
963
964 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
965 L1_CACHE_BYTES >> 2);
966 }
967
968 /*
969 * Propagate the flags to the PCI bridge.
970 */
971 if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
972 if (features & PCI_COMMAND_FAST_BACK)
973 bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
974 if (features & PCI_COMMAND_PARITY)
975 bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
976 }
977
978 /*
979 * Report what we did for this bus
980 */
981 printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
982 bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
983 }
984 /*
985 * Convert from Linux-centric to bus-centric addresses for bridge devices.
986 */
987 void
988 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
989 struct resource *res)
990 {
991 struct pci_sys_data *root = dev->sysdata;
992 unsigned long offset = 0;
993
994 if (res->flags & IORESOURCE_IO)
995 offset = root->io_offset;
996 if (res->flags & IORESOURCE_MEM)
997 offset = root->mem_offset;
998
999 region->start = res->start - offset;
1000 region->end = res->end - offset;
1001 }
1002
1003 void __devinit
1004 pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
1005 struct pci_bus_region *region)
1006 {
1007 struct pci_sys_data *root = dev->sysdata;
1008 unsigned long offset = 0;
1009
1010 if (res->flags & IORESOURCE_IO)
1011 offset = root->io_offset;
1012 if (res->flags & IORESOURCE_MEM)
1013 offset = root->mem_offset;
1014
1015 res->start = region->start + offset;
1016 res->end = region->end + offset;
1017 }
1018
1019 #ifdef CONFIG_HOTPLUG
1020 EXPORT_SYMBOL(pcibios_fixup_bus);
1021 EXPORT_SYMBOL(pcibios_resource_to_bus);
1022 EXPORT_SYMBOL(pcibios_bus_to_resource);
1023 #endif
1024
1025 /**
1026 * pcibios_enable_device - Enable I/O and memory.
1027 * @dev: PCI device to be enabled
1028 */
1029 int pcibios_enable_device(struct pci_dev *dev, int mask)
1030 {
1031 u16 cmd, old_cmd;
1032 int idx;
1033 struct resource *r;
1034
1035 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1036 old_cmd = cmd;
1037 for (idx = 0; idx < 6; idx++) {
1038 /* Only set up the requested stuff */
1039 if (!(mask & (1 << idx)))
1040 continue;
1041
1042 r = dev->resource + idx;
1043 if (!r->start && r->end) {
1044 printk(KERN_ERR "PCI: Device %s not available because"
1045 " of resource collisions\n", pci_name(dev));
1046 return -EINVAL;
1047 }
1048 if (r->flags & IORESOURCE_IO)
1049 cmd |= PCI_COMMAND_IO;
1050 if (r->flags & IORESOURCE_MEM)
1051 cmd |= PCI_COMMAND_MEMORY;
1052 }
1053
1054 /*
1055 * Bridges (eg, cardbus bridges) need to be fully enabled
1056 */
1057 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
1058 cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
1059
1060 if (cmd != old_cmd) {
1061 printk("PCI: enabling device %s (%04x -> %04x)\n",
1062 pci_name(dev), old_cmd, cmd);
1063 pci_write_config_word(dev, PCI_COMMAND, cmd);
1064 }
1065 return 0;
1066 }
1067
1068
1069 struct pci_ops ubi32_pci_ops = {
1070 .read = ubi32_pci_read_config,
1071 .write = ubi32_pci_write_config,
1072 };
1073
1074 static struct pci_bus *ubi32_pci_scan_bus(int nr, struct pci_sys_data *sys)
1075 {
1076 return pci_scan_bus(sys->busnr, &ubi32_pci_ops, sys);
1077 }
1078
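/*
 * PCI memory space is a 2GB window starting at PCI_DEV_REG_BASE; PCI I/O
 * space is essentially unused here (the I/O resource below is empty).
 */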
1079 #define UBI32_PCI_MEM_BASE PCI_DEV_REG_BASE
1080 #define UBI32_PCI_MEM_LEN 0x80000000
1081
1082 #define UBI32_PCI_IO_BASE 0x0
1083 #define UBI32_PCI_IO_END 0x0
1084
1085 static struct resource ubi32_pci_mem = {
1086 .name = "PCI memory space",
1087 .start = UBI32_PCI_MEM_BASE,
1088 .end = UBI32_PCI_MEM_BASE + UBI32_PCI_MEM_LEN - 1,
1089 .flags = IORESOURCE_MEM,
1090 };
1091
1092 static struct resource ubi32_pci_io = {
1093 .name = "PCI IO space",
1094 .start = UBI32_PCI_IO_BASE,
1095 .end = UBI32_PCI_IO_END,
1096 .flags = IORESOURCE_IO,
1097 };
1098
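/*
 * ubi32_pci_setup()
 *	Claim the PCI memory and I/O windows and attach them to the single
 *	controller (nr 0).
 */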
1099 static int __init ubi32_pci_setup(int nr, struct pci_sys_data *sys)
1100 {
1101 if (nr > 0)
1102 return 0;
1103
1104 request_resource(&iomem_resource, &ubi32_pci_mem);
1105 request_resource(&ioport_resource, &ubi32_pci_io);
1106
1107 sys->resource[0] = &ubi32_pci_io;
1108 sys->resource[1] = &ubi32_pci_mem;
1109 sys->resource[2] = NULL;
1110
1111 return 1;
1112 }
1113
1114 static void __init ubi32_pci_preinit(void)
1115 {
1116 }
1117
1118 static int __init ubi32_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
1119 {
1120 return pci_node->dn.recvirq;
1121 }
1122
1123 struct hw_pci ubi32_pci __initdata = {
1124 .nr_controllers = 1,
1125 .preinit = ubi32_pci_preinit,
1126 .setup = ubi32_pci_setup,
1127 .scan = ubi32_pci_scan_bus,
1128 .map_irq = ubi32_pci_map_irq,
1129 };
1130
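/*
 * ubi32_pci_init()
 *	Look up the "pci" node in the device tree; if it is absent, this
 *	configuration has no PCI support and initialisation is skipped.
 */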
1131 static int __init ubi32_pci_init(void)
1132 {
1133 pci_node = (struct pci_devnode *)devtree_find_node("pci");
1134 if (pci_node == NULL) {
1135 printk(KERN_WARNING "PCI init failed\n");
1136 return -ENOSYS;
1137 }
1138 pci_common_init(&ubi32_pci);
1139 return 0;
1140 }
1141
1142 subsys_initcall(ubi32_pci_init);
1143
1144 /*
1145 * workaround for dual PCI card interrupt
1146 */
1147 #define PCI_COMMON_INT_BIT (1 << 19)
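/*
 * ubi32_pci_int_wr()
 *	If the shared interrupt line (GPIO port RB, bit 19) is still
 *	asserted (the test below assumes an active-low line), re-raise the
 *	PCI receive interrupt so the second device sharing it is serviced.
 */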
1148 void ubi32_pci_int_wr(void)
1149 {
1150 volatile unsigned int pci_int_line;
1151 pci_int_line = UBICOM32_IO_PORT(RB)->gpio_in;
1152 if (!(pci_int_line & PCI_COMMON_INT_BIT))
1153 {
1154 ubicom32_set_interrupt(pci_node->dn.recvirq);
1155 }
1156 }
1157 EXPORT_SYMBOL(ubi32_pci_int_wr);