convert brcm-2.4 to the new target structure
[openwrt/svn-archive/archive.git] / target / linux / brcm-2.4 / files / arch / mips / bcm947xx / sbutils.c
1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright 2006, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
12 * $Id: sbutils.c,v 1.10 2006/04/08 07:12:42 honor Exp $
13 */
14
15 #include <typedefs.h>
16 #include <bcmdefs.h>
17 #include <osl.h>
18 #include <bcmutils.h>
19 #include <sbutils.h>
20 #include <bcmdevs.h>
21 #include <sbconfig.h>
22 #include <sbchipc.h>
23 #include <sbpci.h>
24 #include <sbpcie.h>
25 #include <pcicfg.h>
26 #include <sbpcmcia.h>
27 #include <sbextif.h>
28 #include <sbsocram.h>
29 #include <bcmsrom.h>
30 #ifdef __mips__
31 #include <mipsinc.h>
32 #endif /* __mips__ */
33
34 /* debug/trace */
35 #define SB_ERROR(args)
36
/* Callback types a driver may register to gate chip interrupts around
 * core switches (see INTR_OFF / INTR_RESTORE below). */
typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*sb_intrsenabled_t)(void *intr_arg);

/* misc sb info needed by some of the routines */
typedef struct sb_info {

	struct sb_pub sb;	/* back plane public state (must be first field) */

	void *osh;		/* osl os handle */
	void *sdh;		/* bcmsdh handle */

	void *curmap;		/* current regs va */
	void *regs[SB_MAXCORES];	/* other regs va */

	uint curidx;		/* current core index */
	uint dev_coreid;	/* the core provides driver functions */

	bool memseg;		/* flag to toggle MEM_SEG register (PCMCIA CF cards) */

	uint gpioidx;		/* gpio control core index */
	uint gpioid;		/* gpio control coretype */

	uint numcores;		/* # discovered cores */
	uint coreid[SB_MAXCORES];	/* id of each core */

	void *intr_arg;		/* interrupt callback function arg */
	sb_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
	sb_intrsrestore_t intrsrestore_fn;	/* restore chip interrupts */
	sb_intrsenabled_t intrsenabled_fn;	/* check if interrupts are enabled */

} sb_info_t;
69
70 /* local prototypes */
71 static sb_info_t * sb_doattach(sb_info_t *si, uint devid, osl_t *osh, void *regs,
72 uint bustype, void *sdh, char **vars, uint *varsz);
73 static void sb_scan(sb_info_t *si);
74 static uint sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val);
75 static uint _sb_coreidx(sb_info_t *si);
76 static uint sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit);
77 static uint sb_pcidev2chip(uint pcidev);
78 static uint sb_chip2numcores(uint chip);
79 static bool sb_ispcie(sb_info_t *si);
80 static bool sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen);
81 static int sb_pci_fixcfg(sb_info_t *si);
82
83 /* routines to access mdio slave device registers */
84 static int sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint readdr, uint val);
85 static void sb_war30841(sb_info_t *si);
86
87 /* delay needed between the mdio control/ mdiodata register data access */
88 #define PR28829_DELAY() OSL_DELAY(10)
89
90 /* size that can take bitfielddump */
91 #define BITFIELD_DUMP_SIZE 32
92
93 /* global variable to indicate reservation/release of gpio's */
94 static uint32 sb_gpioreservation = 0;
95
96 #define SB_INFO(sbh) (sb_info_t*)sbh
97 #define SET_SBREG(si, r, mask, val) \
98 W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
99 #define GOODCOREADDR(x) (((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && \
100 ISALIGNED((x), SB_CORE_SIZE))
101 #define GOODREGS(regs) ((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
102 #define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
103 #define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
104 #define BADIDX (SB_MAXCORES+1)
105 #define NOREV -1 /* Invalid rev */
106
107 #define PCI(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
108 #define PCIE(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
109
110 /* sonicsrev */
111 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
112 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
113
114 #define R_SBREG(si, sbr) sb_read_sbreg((si), (sbr))
115 #define W_SBREG(si, sbr, v) sb_write_sbreg((si), (sbr), (v))
116 #define AND_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
117 #define OR_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))
118
119 /*
120 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
121 * after core switching to avoid invalid register accesss inside ISR.
122 */
123 #define INTR_OFF(si, intr_val) \
124 if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
125 intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
126 #define INTR_RESTORE(si, intr_val) \
127 if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
128 (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
129
130 /* dynamic clock control defines */
131 #define LPOMINFREQ 25000 /* low power oscillator min */
132 #define LPOMAXFREQ 43000 /* low power oscillator max */
133 #define XTALMINFREQ 19800000 /* 20 MHz - 1% */
134 #define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
135 #define PCIMINFREQ 25000000 /* 25 MHz */
136 #define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
137
138 #define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
139 #define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
140
141 /* different register spaces to access thr'u pcie indirect access */
142 #define PCIE_CONFIGREGS 1 /* Access to config space */
143 #define PCIE_PCIEREGS 2 /* Access to pcie registers */
144
145 /* force HT war check */
146 #define FORCEHT_WAR32414(si) \
147 ((PCIE(si)) && (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 1)) || \
148 ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3))))
149
/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME	10	/* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME	90	/* Default: 90% off */

#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
/*
 * Read a 32-bit Sonics backplane config register, transparently handling the
 * PCMCIA compact-flash addressing quirk via si->memseg.
 */
static uint32
sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * Compact flash only has 11 address bits while sb config space needs 12.
	 * MEM_SEG is OR'd with the other 11 address bits in hardware, so program
	 * MEM_SEG with the 12th bit around the access and mask bit 11 out of the
	 * register address.  For normal PCMCIA bus (CFTable_regwinsz > 2k) this
	 * path is skipped (si->memseg is FALSE).
	 */
	if (si->memseg) {
		/* keep the ISR from touching the core while MEM_SEG is set */
		INTR_OFF(si, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(si->osh, sbr);

	if (si->memseg) {
		/* restore MEM_SEG before re-enabling interrupts */
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(si, intr_val);
	}

	return (val);
}
186
/*
 * Write a 32-bit Sonics backplane config register, handling the PCMCIA
 * quirks: MEM_SEG addressing (see sb_read_sbreg) and 16-bit-wide bus access.
 */
static void
sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * Compact flash only has 11 address bits while sb config space needs 12.
	 * MEM_SEG is OR'd with the other 11 address bits in hardware, so program
	 * MEM_SEG with the 12th bit around the access and mask bit 11 out of the
	 * register address.  For normal PCMCIA bus (CFTable_regwinsz > 2k) this
	 * path is skipped (si->memseg is FALSE).
	 */
	if (si->memseg) {
		INTR_OFF(si, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		/* PCMCIA: write the two 16-bit halves separately (low half first in
		 * register order for the chip's endianness).  Each half is preceded
		 * by a discarded 32-bit read — presumably required by the bridge to
		 * order the accesses; confirm against the PCMCIA interface docs. */
#ifdef IL_BIGENDIAN
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
#else
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif /* IL_BIGENDIAN */
	} else
		W_REG(si->osh, sbr, v);

	if (si->memseg) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(si, intr_val);
	}
}
229
230 /*
231 * Allocate a sb handle.
232 * devid - pci device id (used to determine chip#)
233 * osh - opaque OS handle
234 * regs - virtual address of initial core registers
235 * bustype - pci/pcmcia/sb/sdio/etc
236 * vars - pointer to a pointer area for "environment" variables
237 * varsz - pointer to int to return the size of the vars
238 */
239 sb_t *
240 BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
241 uint bustype, void *sdh, char **vars, uint *varsz)
242 {
243 sb_info_t *si;
244
245 /* alloc sb_info_t */
246 if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
247 SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
248 return (NULL);
249 }
250
251 if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, (uint*)varsz) == NULL) {
252 MFREE(osh, si, sizeof(sb_info_t));
253 return (NULL);
254 }
255
256 return (sb_t *)si;
257 }
258
/* Using sb_kattach depends on SB_BUS support, either implicit */
/* (no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)

/* global kernel resource - a single, statically allocated sb instance */
static sb_info_t ksi;
static bool ksi_attached = FALSE;

/* generic kernel variant of sb_attach(); attaches once and returns the
 * same static handle on every subsequent call */
sb_t *
BCMINITFN(sb_kattach)(void)
{
	/* NOTE(review): osh stays NULL here — the osl R_REG/W_REG macros are
	 * presumably safe with a NULL handle on SB_BUS; confirm against osl.h */
	osl_t *osh = NULL;
	uint32 *regs;

	if (!ksi_attached) {
		uint32 cid;

		regs = (uint32 *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
		/* first enumeration-space word is the chip id register */
		cid = R_REG(osh, (uint32 *)regs);
		/* 4712 small-package parts rev <= 3: force the slow clock source
		 * to the crystal before attaching */
		if (((cid & CID_ID_MASK) == BCM4712_CHIP_ID) &&
		    ((cid & CID_PKG_MASK) != BCM4712LARGE_PKG_ID) &&
		    ((cid & CID_REV_MASK) <= (3 << CID_REV_SHIFT))) {
			uint32 *scc, val;

			scc = (uint32 *)((uchar*)regs + OFFSETOF(chipcregs_t, slow_clk_ctl));
			val = R_REG(osh, scc);
			SB_ERROR((" initial scc = 0x%x\n", val));
			val |= SCC_SS_XTAL;
			W_REG(osh, scc, val);
		}

		if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, (void*)regs,
		                SB_BUS, NULL, NULL, NULL) == NULL) {
			return NULL;
		}
		else
			ksi_attached = TRUE;
	}

	return (sb_t *)&ksi;
}
#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
302
303 void
304 BCMINITFN(sb_war32414_forceHT)(sb_t *sbh, bool forceHT)
305 {
306 sb_info_t *si;
307
308 si = SB_INFO(sbh);
309
310
311 if (FORCEHT_WAR32414(si)) {
312 uint32 val = 0;
313 if (forceHT)
314 val = SYCC_HR;
315 sb_corereg((void*)si, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
316 SYCC_HR, val);
317 }
318 }
319
/*
 * Core attach worker: initializes *si in place from the given bus/registers.
 * Ordering below is significant (memseg before any sb register access, clock
 * enable before config reads, sb_scan() before srom_var_init()).
 * Returns si on success, NULL on failure (caller frees si).
 */
static sb_info_t *
BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
                       uint bustype, void *sdh, char **vars, uint *varsz)
{
	uint origidx;
	chipcregs_t *cc;
	sbconfig_t *sb;
	uint32 w;

	ASSERT(GOODREGS(regs));

	bzero((uchar*)si, sizeof(sb_info_t));

	si->sb.buscoreidx = si->gpioidx = BADIDX;

	si->curmap = regs;
	si->sdh = sdh;
	si->osh = osh;

	/* check to see if we are a sb core mimic'ing a pci core */
	if (bustype == PCI_BUS) {
		/* an all-ones SPROM control read means no real PCI config space */
		if (OSL_PCI_READ_CONFIG(si->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff) {
			SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
			          "devid:0x%x\n", __FUNCTION__, devid));
			bustype = SB_BUS;
		}
	}

	si->sb.bustype = bustype;
	if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
		SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
		          si->sb.bustype, BUSTYPE(si->sb.bustype)));
		return NULL;
	}

	/* need to set memseg flag for CF card first before any sb registers access */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
		si->memseg = TRUE;

	/* kludge to enable the clock on the 4306 which lacks a slowclock */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS)
		sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);

	/* make sure the PCI BAR0 window points somewhere inside sb space */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
		if (!GOODCOREADDR(w))
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32), SB_ENUM_BASE);
	}

	/* initialize current core index value */
	si->curidx = _sb_coreidx(si);

	if (si->curidx == BADIDX) {
		SB_ERROR(("sb_doattach: bad core index\n"));
		return NULL;
	}

	/* get sonics backplane revision */
	sb = REGS2SB(si->curmap);
	si->sb.sonicsrev = (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* keep and reuse the initial register mapping */
	origidx = si->curidx;
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		si->regs[origidx] = regs;

	/* is core-0 a chipcommon core? */
	si->numcores = 1;
	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, 0);
	if (sb_coreid(&si->sb) != SB_CC)
		cc = NULL;

	/* determine chip id and rev */
	if (cc) {
		/* chip common core found! */
		si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
		si->sb.chiprev = (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >> CID_REV_SHIFT;
		si->sb.chippkg = (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >> CID_PKG_SHIFT;
	} else {
		/* no chip common core -- must convert device id to chip id */
		if ((si->sb.chip = sb_pcidev2chip(devid)) == 0) {
			SB_ERROR(("sb_doattach: unrecognized device id 0x%04x\n", devid));
			sb_setcoreidx(&si->sb, origidx);
			return NULL;
		}
	}

	/* get chipcommon rev */
	si->sb.ccrev = cc ? (int)sb_corerev(&si->sb) : NOREV;

	/* determine numcores: chipcommon rev 4 / >= 6 reports it in chipid,
	 * otherwise fall back to the per-chip table */
	if (cc && ((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
		si->numcores = (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
	else
		si->numcores = sb_chip2numcores(si->sb.chip);

	/* return to original core */
	sb_setcoreidx(&si->sb, origidx);

	/* sanity checks */
	ASSERT(si->sb.chip);

	/* scan for cores */
	sb_scan(si);

	/* fixup necessary chip/core configurations */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		if (sb_pci_fixcfg(si)) {
			SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
			return NULL;
		}
	}

	/* srom_var_init() depends on sb_scan() info */
	if (srom_var_init(si, si->sb.bustype, si->curmap, si->osh, vars, varsz)) {
		SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
		return (NULL);
	}

	if (cc == NULL) {
		/*
		 * The chip revision number is hardwired into all
		 * of the pci function config rev fields and is
		 * independent from the individual core revision numbers.
		 * For example, the "A0" silicon of each chip is chip rev 0.
		 * For PCMCIA we get it from the CIS instead.
		 */
		if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
			ASSERT(vars);
			si->sb.chiprev = getintvar(*vars, "chiprev");
		} else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
			w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_REV, sizeof(uint32));
			si->sb.chiprev = w & 0xff;
		} else
			si->sb.chiprev = 0;
	}

	/* small (<= 2K) register windows need the MEM_SEG workaround */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		w = getintvar(*vars, "regwindowsz");
		si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
	}

	/* gpio control core is required */
	if (!GOODIDX(si->gpioidx)) {
		SB_ERROR(("sb_doattach: gpio control core not found\n"));
		return NULL;
	}

	/* get boardtype and boardrev */
	switch (BUSTYPE(si->sb.bustype)) {
	case PCI_BUS:
		/* do a pci config read to get subsystem id and subvendor id */
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
		si->sb.boardvendor = w & 0xffff;
		si->sb.boardtype = (w >> 16) & 0xffff;
		break;

	case PCMCIA_BUS:
	case SDIO_BUS:
		si->sb.boardvendor = getintvar(*vars, "manfid");
		si->sb.boardtype = getintvar(*vars, "prodid");
		break;

	case SB_BUS:
	case JTAG_BUS:
		si->sb.boardvendor = VENDOR_BROADCOM;
		if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
			si->sb.boardtype = 0xffff;
		break;
	}

	if (si->sb.boardtype == 0) {
		SB_ERROR(("sb_doattach: unknown board type\n"));
		ASSERT(si->sb.boardtype);
	}

	/* setup the GPIO based LED powersave register.  Core index 0 is used —
	 * ccrev >= 16 implies a chipcommon core was found at core 0 above. */
	if (si->sb.ccrev >= 16) {
		if ((vars == NULL) || ((w = getintvar(*vars, "leddc")) == 0))
			w = DEFAULT_GPIOTIMERVAL;
		sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
	}
	if (FORCEHT_WAR32414(si)) {
		/* set proper clk setup delays before forcing HT */
		sb_clkctl_init((void *)si);
		sb_war32414_forceHT((void *)si, 1);
	}

	return (si);
}
511
512
513 uint
514 sb_coreid(sb_t *sbh)
515 {
516 sb_info_t *si;
517 sbconfig_t *sb;
518
519 si = SB_INFO(sbh);
520 sb = REGS2SB(si->curmap);
521
522 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
523 }
524
525 uint
526 sb_coreidx(sb_t *sbh)
527 {
528 sb_info_t *si;
529
530 si = SB_INFO(sbh);
531 return (si->curidx);
532 }
533
/* Return current index of core by reading back the active backplane
 * address from whatever mechanism the bus uses to window into sb space;
 * returns BADIDX if the address is outside the enumeration range. */
static uint
_sb_coreidx(sb_info_t *si)
{
	sbconfig_t *sb;
	uint32 sbaddr = 0;

	ASSERT(si);

	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS:
		/* direct bus: the core's admatch0 register holds its base address */
		sb = REGS2SB(si->curmap);
		sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
		break;

	case PCI_BUS:
		/* PCI: the BAR0 window register points at the current core */
		sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		/* PCMCIA: the address is split across three attribute registers
		 * (bits 12-15, 16-23, 24-31; see sb_setcoreidx for the writes) */
		uint8 tmp = 0;

		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint)tmp << 12;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint)tmp << 16;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint)tmp << 24;
		break;
	}

#ifdef BCMJTAG
	case JTAG_BUS:
		/* JTAG: curmap is the backplane address itself */
		sbaddr = (uint32)si->curmap;
		break;
#endif	/* BCMJTAG */

	default:
		ASSERT(0);
	}

	if (!GOODCOREADDR(sbaddr))
		return BADIDX;

	return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);
}
580
581 uint
582 sb_corevendor(sb_t *sbh)
583 {
584 sb_info_t *si;
585 sbconfig_t *sb;
586
587 si = SB_INFO(sbh);
588 sb = REGS2SB(si->curmap);
589
590 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
591 }
592
593 uint
594 sb_corerev(sb_t *sbh)
595 {
596 sb_info_t *si;
597 sbconfig_t *sb;
598 uint sbidh;
599
600 si = SB_INFO(sbh);
601 sb = REGS2SB(si->curmap);
602 sbidh = R_SBREG(si, &sb->sbidhigh);
603
604 return (SBCOREREV(sbidh));
605 }
606
607 void *
608 sb_osh(sb_t *sbh)
609 {
610 sb_info_t *si;
611
612 si = SB_INFO(sbh);
613 return si->osh;
614 }
615
616 void
617 sb_setosh(sb_t *sbh, osl_t *osh)
618 {
619 sb_info_t *si;
620
621 si = SB_INFO(sbh);
622 if (si->osh != NULL) {
623 SB_ERROR(("osh is already set....\n"));
624 ASSERT(!si->osh);
625 }
626 si->osh = osh;
627 }
628
629 /* set/clear sbtmstatelow core-specific flags */
630 uint32
631 sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
632 {
633 sb_info_t *si;
634 sbconfig_t *sb;
635 uint32 w;
636
637 si = SB_INFO(sbh);
638 sb = REGS2SB(si->curmap);
639
640 ASSERT((val & ~mask) == 0);
641
642 /* mask and set */
643 if (mask || val) {
644 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
645 W_SBREG(si, &sb->sbtmstatelow, w);
646 }
647
648 /* return the new value */
649 return (R_SBREG(si, &sb->sbtmstatelow));
650 }
651
652 /* set/clear sbtmstatehigh core-specific flags */
653 uint32
654 sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
655 {
656 sb_info_t *si;
657 sbconfig_t *sb;
658 uint32 w;
659
660 si = SB_INFO(sbh);
661 sb = REGS2SB(si->curmap);
662
663 ASSERT((val & ~mask) == 0);
664 ASSERT((mask & ~SBTMH_FL_MASK) == 0);
665
666 /* mask and set */
667 if (mask || val) {
668 w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
669 W_SBREG(si, &sb->sbtmstatehigh, w);
670 }
671
672 /* return the new value */
673 return (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_FL_MASK);
674 }
675
/* Run bist on current core. Caller needs to take care of core-specific bist hazards.
 * Returns 0 on pass, BCME_ERROR on bist failure. */
int
sb_corebist(sb_t *sbh)
{
	uint32 sblo;
	sb_info_t *si;
	sbconfig_t *sb;
	int result = 0;

	si = SB_INFO(sbh);
	sb = REGS2SB(si->curmap);

	/* save tmstatelow, then force gated clocks on (FGC) and enable bist (BE) */
	sblo = R_SBREG(si, &sb->sbtmstatelow);
	W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));

	/* wait for bist-done; 100000 is the SPINWAIT bound (presumably us) */
	SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0), 100000);

	/* bist-fail bit set means the core failed self-test */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)
		result = BCME_ERROR;

	/* restore the original tmstatelow value */
	W_SBREG(si, &sb->sbtmstatelow, sblo);

	return result;
}
700
701 bool
702 sb_iscoreup(sb_t *sbh)
703 {
704 sb_info_t *si;
705 sbconfig_t *sb;
706
707 si = SB_INFO(sbh);
708 sb = REGS2SB(si->curmap);
709
710 return ((R_SBREG(si, &sb->sbtmstatelow) &
711 (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
712 }
713
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 * Call with mask == val == 0 for a pure read.  regoff >= SBCONFIGOFF selects
 * the sb config space (accessed through the R_SBREG/W_SBREG wrappers).
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches
 * is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci
 * registers and (on newer pci cores) chipcommon registers — that fast path is
 * currently compiled out (#ifdef notyet), so 'fast' stays FALSE here.
 */
static uint
sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SB_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

#ifdef notyet
	if (si->sb.bustype == SB_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		r = (uint32 *)((uchar *)si->regs[coreidx] + regoff);
	} else if (si->sb.bustype == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((si->coreid[coreidx] == SB_CC) &&
		    ((si->sb.buscoretype == SB_PCIE) ||
		     (si->sb.buscorerev >= 13))) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)si->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (si->sb.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if ((si->sb.buscoretype == SB_PCIE) ||
			    (si->sb.buscorerev >= 13))
				r = (uint32 *)((char *)si->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)si->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}
#endif	/* notyet */

	if (!fast) {
		/* slow path: block interrupts while the core window is moved */
		INTR_OFF(si, intr_val);

		/* save current core index */
		origidx = sb_coreidx(&si->sb);

		/* switch core */
		r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
	}
	ASSERT(r);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(si, r) & ~mask) | val;
			W_SBREG(si, r, w);
		} else {
			w = (R_REG(si->osh, r) & ~mask) | val;
			W_REG(si->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(si, r);
	else
		w = R_REG(si->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&si->sb, origidx);

		INTR_RESTORE(si, intr_val);
	}

	return (w);
}
807
/* helpers to pull byte/word-sized fields out of dword-wide PCI config reads */
#define DWORD_ALIGN(x)  (x & ~(0x03))
#define BYTE_POS(x) (x & 0x3)
#define WORD_POS(x) (x & 0x1)

#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
#define WORD_SHIFT(x)  (16 * WORD_POS(x))

#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)

/* NOTE: both macros implicitly use a local sb_info_t pointer named 'si' */
#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
823
824
/* Return TRUE if the requested capability exists in the PCI config space.
 * If buf/buflen are non-NULL, copy up to *buflen bytes of the capability
 * body (excluding the cap-id/next-ptr header) into buf and update *buflen
 * with the number of bytes copied. */
static bool
sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
{
	uint8 cap_id;
	uint8 cap_ptr;
	uint32 bufsize;
	uint8 byte_val;

	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return FALSE;

	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
		return FALSE;

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT))
		return FALSE;

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 (empty list) */
	if (cap_ptr == 0x00)
		return FALSE;

	/* walk the capability list until req_cap_id is found or the list ends.
	 * NOTE(review): a malformed, cyclic list would loop forever here —
	 * assumed not to occur on these devices. */
	cap_id = read_pci_cfg_byte(cap_ptr);

	while (cap_id != req_cap_id) {
		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
		if (cap_ptr == 0x00) break;
		cap_id = read_pci_cfg_byte(cap_ptr);
	}
	if (cap_id != req_cap_id) {
		return FALSE;
	}
	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		bufsize = *buflen;
		if (!bufsize) goto end;
		*buflen = 0;
		/* copy the capability data excluding cap ID and next ptr,
		 * clamped so we never read past the config space (SZPCR) */
		cap_ptr += 2;
		if ((bufsize + cap_ptr) > SZPCR)
			bufsize = SZPCR - cap_ptr;
		*buflen = bufsize;
		while (bufsize--) {
			*buf = read_pci_cfg_byte(cap_ptr);
			cap_ptr++;
			buf++;
		}
	}
	end:
	return TRUE;
}
884
885 /* return TRUE if PCIE capability exists the pci config space */
886 static inline bool
887 sb_ispcie(sb_info_t *si)
888 {
889 return (sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL));
890 }
891
892 /* scan the sb enumerated space to identify all cores */
893 static void
894 BCMINITFN(sb_scan)(sb_info_t *si)
895 {
896 uint origidx;
897 uint i;
898 bool pci;
899 bool pcie;
900 uint pciidx;
901 uint pcieidx;
902 uint pcirev;
903 uint pcierev;
904
905
906 /* numcores should already be set */
907 ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));
908
909 /* save current core index */
910 origidx = sb_coreidx(&si->sb);
911
912 si->sb.buscorerev = NOREV;
913 si->sb.buscoreidx = BADIDX;
914
915 si->gpioidx = BADIDX;
916
917 pci = pcie = FALSE;
918 pcirev = pcierev = NOREV;
919 pciidx = pcieidx = BADIDX;
920
921 for (i = 0; i < si->numcores; i++) {
922 sb_setcoreidx(&si->sb, i);
923 si->coreid[i] = sb_coreid(&si->sb);
924
925 if (si->coreid[i] == SB_PCI) {
926 pciidx = i;
927 pcirev = sb_corerev(&si->sb);
928 pci = TRUE;
929 } else if (si->coreid[i] == SB_PCIE) {
930 pcieidx = i;
931 pcierev = sb_corerev(&si->sb);
932 pcie = TRUE;
933 } else if (si->coreid[i] == SB_PCMCIA) {
934 si->sb.buscorerev = sb_corerev(&si->sb);
935 si->sb.buscoretype = si->coreid[i];
936 si->sb.buscoreidx = i;
937 }
938 }
939 if (pci && pcie) {
940 if (sb_ispcie(si))
941 pci = FALSE;
942 else
943 pcie = FALSE;
944 }
945 if (pci) {
946 si->sb.buscoretype = SB_PCI;
947 si->sb.buscorerev = pcirev;
948 si->sb.buscoreidx = pciidx;
949 } else if (pcie) {
950 si->sb.buscoretype = SB_PCIE;
951 si->sb.buscorerev = pcierev;
952 si->sb.buscoreidx = pcieidx;
953 }
954
955 /*
956 * Find the gpio "controlling core" type and index.
957 * Precedence:
958 * - if there's a chip common core - use that
959 * - else if there's a pci core (rev >= 2) - use that
960 * - else there had better be an extif core (4710 only)
961 */
962 if (GOODIDX(sb_findcoreidx(si, SB_CC, 0))) {
963 si->gpioidx = sb_findcoreidx(si, SB_CC, 0);
964 si->gpioid = SB_CC;
965 } else if (PCI(si) && (si->sb.buscorerev >= 2)) {
966 si->gpioidx = si->sb.buscoreidx;
967 si->gpioid = SB_PCI;
968 } else if (sb_findcoreidx(si, SB_EXTIF, 0)) {
969 si->gpioidx = sb_findcoreidx(si, SB_EXTIF, 0);
970 si->gpioid = SB_EXTIF;
971 } else
972 ASSERT(si->gpioidx != BADIDX);
973
974 /* return to original core index */
975 sb_setcoreidx(&si->sb, origidx);
976 }
977
/* may be called with core in reset.
 * Unmaps any SB_BUS register windows and frees the sb_info_t (unless it is
 * the static kernel instance ksi). */
void
sb_detach(sb_t *sbh)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);

	if (si == NULL)
		return;

	/* release register mappings created lazily by sb_setcoreidx() */
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
			if (si->regs[idx]) {
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;
			}
	/* the static ksi instance was never malloc'ed, so skip MFREE for it;
	 * when ksi is compiled out, the guard disappears and MFREE always runs */
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
	if (si != &ksi)
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
		MFREE(si->osh, si, sizeof(sb_info_t));

}
1002
1003 /* use pci dev id to determine chip id for chips not having a chipcommon core */
1004 static uint
1005 BCMINITFN(sb_pcidev2chip)(uint pcidev)
1006 {
1007 if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
1008 return (BCM4710_CHIP_ID);
1009 if ((pcidev >= BCM4402_ENET_ID) && (pcidev <= BCM4402_V90_ID))
1010 return (BCM4402_CHIP_ID);
1011 if (pcidev == BCM4401_ENET_ID)
1012 return (BCM4402_CHIP_ID);
1013
1014 return (0);
1015 }
1016
1017 /* convert chip number to number of i/o cores */
1018 static uint
1019 BCMINITFN(sb_chip2numcores)(uint chip)
1020 {
1021 if (chip == BCM4710_CHIP_ID)
1022 return (9);
1023 if (chip == BCM4402_CHIP_ID)
1024 return (3);
1025 if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
1026 return (6);
1027 if (chip == BCM4704_CHIP_ID)
1028 return (9);
1029 if (chip == BCM5365_CHIP_ID)
1030 return (7);
1031
1032 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
1033 ASSERT(0);
1034 return (1);
1035 }
1036
1037 /* return index of coreid or BADIDX if not found */
1038 static uint
1039 sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit)
1040 {
1041 uint found;
1042 uint i;
1043
1044 found = 0;
1045
1046 for (i = 0; i < si->numcores; i++)
1047 if (si->coreid[i] == coreid) {
1048 if (found == coreunit)
1049 return (i);
1050 found++;
1051 }
1052
1053 return (BADIDX);
1054 }
1055
/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core.
 * Returns the new core's register va (si->curmap), or NULL for a bad index.
 */
void*
sb_setcoreidx(sb_t *sbh, uint coreidx)
{
	sb_info_t *si;
	uint32 sbaddr;
	uint8 tmp;

	si = SB_INFO(sbh);

	if (coreidx >= si->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));

	/* backplane address of the target core */
	sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);

	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS:
		/* map new one (mappings are cached in si->regs for reuse) */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		si->curmap = si->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
		break;

	case PCMCIA_BUS:
		/* program the three attribute-space address registers
		 * (bits 12-15, 16-23, 24-31 of the backplane address;
		 * read back by _sb_coreidx) */
		tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		break;
#ifdef BCMJTAG
	case JTAG_BUS:
		/* map new one (JTAG uses the raw backplane address as the "map") */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void *)sbaddr;
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		si->curmap = si->regs[coreidx];
		break;
#endif	/* BCMJTAG */
	}

	si->curidx = coreidx;

	return (si->curmap);
}
1120
1121 /*
1122 * this function changes logical "focus" to the indiciated core,
1123 * must be called with interrupt off.
1124 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1125 */
1126 void*
1127 sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
1128 {
1129 sb_info_t *si;
1130 uint idx;
1131
1132 si = SB_INFO(sbh);
1133 idx = sb_findcoreidx(si, coreid, coreunit);
1134 if (!GOODIDX(idx))
1135 return (NULL);
1136
1137 return (sb_setcoreidx(sbh, idx));
1138 }
1139
1140 /* return chip number */
1141 uint
1142 sb_chip(sb_t *sbh)
1143 {
1144 sb_info_t *si;
1145
1146 si = SB_INFO(sbh);
1147 return (si->sb.chip);
1148 }
1149
1150 /* return chip revision number */
1151 uint
1152 sb_chiprev(sb_t *sbh)
1153 {
1154 sb_info_t *si;
1155
1156 si = SB_INFO(sbh);
1157 return (si->sb.chiprev);
1158 }
1159
1160 /* return chip common revision number */
1161 uint
1162 sb_chipcrev(sb_t *sbh)
1163 {
1164 sb_info_t *si;
1165
1166 si = SB_INFO(sbh);
1167 return (si->sb.ccrev);
1168 }
1169
1170 /* return chip package option */
1171 uint
1172 sb_chippkg(sb_t *sbh)
1173 {
1174 sb_info_t *si;
1175
1176 si = SB_INFO(sbh);
1177 return (si->sb.chippkg);
1178 }
1179
1180 /* return PCI core rev. */
1181 uint
1182 sb_pcirev(sb_t *sbh)
1183 {
1184 sb_info_t *si;
1185
1186 si = SB_INFO(sbh);
1187 return (si->sb.buscorerev);
1188 }
1189
1190 bool
1191 BCMINITFN(sb_war16165)(sb_t *sbh)
1192 {
1193 sb_info_t *si;
1194
1195 si = SB_INFO(sbh);
1196
1197 return (PCI(si) && (si->sb.buscorerev <= 10));
1198 }
1199
/* WAR 30841: program PCIE SerDes RX timer/CDR registers over MDIO.
 * The values written are vendor-supplied workaround constants — do not
 * change them without a corresponding errata reference.
 */
static void
BCMINITFN(sb_war30841)(sb_info_t *si)
{
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
}
1207
1208 /* return PCMCIA core rev. */
1209 uint
1210 BCMINITFN(sb_pcmciarev)(sb_t *sbh)
1211 {
1212 sb_info_t *si;
1213
1214 si = SB_INFO(sbh);
1215 return (si->sb.buscorerev);
1216 }
1217
1218 /* return board vendor id */
1219 uint
1220 sb_boardvendor(sb_t *sbh)
1221 {
1222 sb_info_t *si;
1223
1224 si = SB_INFO(sbh);
1225 return (si->sb.boardvendor);
1226 }
1227
1228 /* return boardtype */
1229 uint
1230 sb_boardtype(sb_t *sbh)
1231 {
1232 sb_info_t *si;
1233 char *var;
1234
1235 si = SB_INFO(sbh);
1236
1237 if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
1238 /* boardtype format is a hex string */
1239 si->sb.boardtype = getintvar(NULL, "boardtype");
1240
1241 /* backward compatibility for older boardtype string format */
1242 if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
1243 if (!strcmp(var, "bcm94710dev"))
1244 si->sb.boardtype = BCM94710D_BOARD;
1245 else if (!strcmp(var, "bcm94710ap"))
1246 si->sb.boardtype = BCM94710AP_BOARD;
1247 else if (!strcmp(var, "bu4710"))
1248 si->sb.boardtype = BU4710_BOARD;
1249 else if (!strcmp(var, "bcm94702mn"))
1250 si->sb.boardtype = BCM94702MN_BOARD;
1251 else if (!strcmp(var, "bcm94710r1"))
1252 si->sb.boardtype = BCM94710R1_BOARD;
1253 else if (!strcmp(var, "bcm94710r4"))
1254 si->sb.boardtype = BCM94710R4_BOARD;
1255 else if (!strcmp(var, "bcm94702cpci"))
1256 si->sb.boardtype = BCM94702CPCI_BOARD;
1257 else if (!strcmp(var, "bcm95380_rr"))
1258 si->sb.boardtype = BCM95380RR_BOARD;
1259 }
1260 }
1261
1262 return (si->sb.boardtype);
1263 }
1264
1265 /* return bus type of sbh device */
1266 uint
1267 sb_bus(sb_t *sbh)
1268 {
1269 sb_info_t *si;
1270
1271 si = SB_INFO(sbh);
1272 return (si->sb.bustype);
1273 }
1274
1275 /* return bus core type */
1276 uint
1277 sb_buscoretype(sb_t *sbh)
1278 {
1279 sb_info_t *si;
1280
1281 si = SB_INFO(sbh);
1282
1283 return (si->sb.buscoretype);
1284 }
1285
1286 /* return bus core revision */
1287 uint
1288 sb_buscorerev(sb_t *sbh)
1289 {
1290 sb_info_t *si;
1291 si = SB_INFO(sbh);
1292
1293 return (si->sb.buscorerev);
1294 }
1295
1296 /* return list of found cores */
1297 uint
1298 sb_corelist(sb_t *sbh, uint coreid[])
1299 {
1300 sb_info_t *si;
1301
1302 si = SB_INFO(sbh);
1303
1304 bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
1305 return (si->numcores);
1306 }
1307
1308 /* return current register mapping */
1309 void *
1310 sb_coreregs(sb_t *sbh)
1311 {
1312 sb_info_t *si;
1313
1314 si = SB_INFO(sbh);
1315 ASSERT(GOODREGS(si->curmap));
1316
1317 return (si->curmap);
1318 }
1319
1320
/* do buffered registers update: broadcast SB_COMMIT through chipcommon
 * (or the pci core on chips without chipcommon) so that buffered writes
 * take effect.  Restores the original core focus before returning.
 */
void
sb_commit(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	/* remember the current core so it can be restored on exit */
	origidx = si->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(si, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (si->sb.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(si->osh, &ccregs->broadcastdata, 0x0);
	} else if (PCI(si)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(si->osh, &pciregs->bcastdata, 0x0);
	} else
		ASSERT(0);	/* no core capable of broadcasting the commit */

	/* restore core index */
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
}
1356
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 *
 * Operates on the currently-focused core; statement order is
 * significant (disable, assert reset with forced clocks, clear errors,
 * release reset, drop clock forcing).
 */
void
sb_core_reset(sb_t *sbh, uint32 bits, uint32 resetbits)
{
	sb_info_t *si;
	sbconfig_t *sb;
	volatile uint32 dummy;	/* readbacks flush posted register writes */

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* clear any stale serror left in the target state */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);
	}
	/* clear any inband-error / timeout bits in the initiator state */
	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled (drop the force-gated-clocks bit) */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
1404
/* Fix up initiator timeouts (sbimconfiglow RTO/STO fields) for chips
 * with an old pci bus core; no-op for pcie or pci core rev >= 5.
 */
void
sb_core_tofixup(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* only needed on pci-bus chips whose pci core is rev < 5 */
	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* NOTE(review): the guard above returns unless bustype == PCI_BUS,
	 * so this SB_BUS branch looks unreachable unless BUSTYPE() is
	 * hardwired at compile time — confirm before relying on it.
	 */
	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	sb_commit(sbh);
}
1436
1437 /*
1438 * Set the initiator timeout for the "master core".
1439 * The master core is defined to be the core in control
1440 * of the chip and so it issues accesses to non-memory
1441 * locations (Because of dma *any* core can access memeory).
1442 *
1443 * The routine uses the bus to decide who is the master:
1444 * SB_BUS => mips
1445 * JTAG_BUS => chipc
1446 * PCI_BUS => pci or pcie
1447 * PCMCIA_BUS => pcmcia
1448 * SDIO_BUS => pcmcia
1449 *
1450 * This routine exists so callers can disable initiator
1451 * timeouts so accesses to very slow devices like otp
1452 * won't cause an abort. The routine allows arbitrary
1453 * settings of the service and request timeouts, though.
1454 *
1455 * Returns the timeout state before changing it or -1
1456 * on error.
1457 */
1458
1459 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
1460
uint32
sb_set_initiator_to(sb_t *sbh, uint32 to)
{
	sb_info_t *si;
	uint origidx, idx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff is the error return */
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* reject values outside the RTO/STO timeout fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core (see the block comment above) */
	idx = BADIDX;
	switch (BUSTYPE(si->sb.bustype)) {
	case PCI_BUS:
		idx = si->sb.buscoreidx;
		break;
	case JTAG_BUS:
		idx = SB_CC_IDX;
		break;
	case PCMCIA_BUS:
	case SDIO_BUS:
		idx = sb_findcoreidx(si, SB_PCMCIA, 0);
		break;
	case SB_BUS:
		/* prefer the mips33 core, fall back to the older mips core */
		if ((idx = sb_findcoreidx(si, SB_MIPS33, 0)) == BADIDX)
			idx = sb_findcoreidx(si, SB_MIPS, 0);
		break;
	default:
		ASSERT(0);
	}
	if (idx == BADIDX)
		return ret;

	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	/* read-modify-write the timeout fields; return the previous value */
	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sbh);
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return ret;
}
1512
/* Put the currently-focused core into reset (leaves reset and reject
 * asserted).  'bits' are core-specific sbtmstatelow bits to keep set
 * through the sequence.  Statement order is significant.
 */
void
sb_core_disable(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	volatile uint32 dummy;	/* readbacks flush posted register writes */
	uint32 rej;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* reject value changed between sonics 2.2 and 2.3 */
	if (si->sb.sonicsrev == SONICS_2_2)
		rej = (1 << SBTML_REJ_SHIFT);
	else
		rej = (2 << SBTML_REJ_SHIFT);

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(si, &sb->sbtmstatelow, rej);
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SB_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* if the core is an initiator, set initiator reject and wait for idle */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(si, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
	OSL_DELAY(1);
}
1569
/* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
void
sb_watchdog(sb_t *sbh, uint ticks)
{
	sb_info_t *si = SB_INFO(sbh);

	/* make sure we come up in fast clock mode */
	sb_clkctl_clk(sbh, CLK_FAST);

	/* instant NMI */
	switch (si->gpioid) {
	case SB_CC:
#ifdef __mips__
		/* NOTE(review): bit 22 of C0_BROADCOM sel 4 presumably arms the
		 * immediate-reset path on BCM4785 — confirm against chip docs.
		 */
		if (sb_chip(sbh) == BCM4785_CHIP_ID && ticks <= 1)
			MTC0(C0_BROADCOM, 4, (1 << 22));
#endif /* __mips__ */
		/* chipcommon watchdog register (core index 0 assumed to be chipc) */
		sb_corereg(si, 0, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
#ifdef __mips__
		if (sb_chip(sbh) == BCM4785_CHIP_ID && ticks <= 1) {
			/* drain outstanding writes, then halt the CPU until reset */
			__asm__ __volatile__ (
				".set\tmips3\n\t"
				"sync\n\t"
				"wait\n\t"
				".set\tmips0"
			);
			while (1);
		}
#endif /* __mips__ */
		break;
	case SB_EXTIF:
		sb_corereg(si, si->gpioidx, OFFSETOF(extifregs_t, watchdog), ~0, ticks);
		break;
	}
}
1604
1605 /* initialize the pcmcia core */
1606 void
1607 sb_pcmcia_init(sb_t *sbh)
1608 {
1609 sb_info_t *si;
1610 uint8 cor = 0;
1611
1612 si = SB_INFO(sbh);
1613
1614 /* enable d11 mac interrupts */
1615 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1616 cor |= COR_IRQEN | COR_FUNEN;
1617 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1618
1619 }
1620
1621
1622 /*
1623 * Configure the pci core for pci client (NIC) action
1624 * coremask is the bitvec of cores by index to be enabled.
1625 */
1626 void
1627 BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
1628 {
1629 sb_info_t *si;
1630 sbconfig_t *sb;
1631 sbpciregs_t *pciregs;
1632 uint32 sbflag;
1633 uint32 w;
1634 uint idx;
1635 int reg_val;
1636
1637 si = SB_INFO(sbh);
1638
1639 /* if not pci bus, we're done */
1640 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
1641 return;
1642
1643 ASSERT(PCI(si) || PCIE(si));
1644 ASSERT(si->sb.buscoreidx != BADIDX);
1645
1646 /* get current core index */
1647 idx = si->curidx;
1648
1649 /* we interrupt on this backplane flag number */
1650 ASSERT(GOODREGS(si->curmap));
1651 sb = REGS2SB(si->curmap);
1652 sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
1653
1654 /* switch over to pci core */
1655 pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
1656 sb = REGS2SB(pciregs);
1657
1658 /*
1659 * Enable sb->pci interrupts. Assume
1660 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1661 */
1662 if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
1663 /* pci config write to set this core bit in PCIIntMask */
1664 w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
1665 w |= (coremask << PCI_SBIM_SHIFT);
1666 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
1667 } else {
1668 /* set sbintvec bit for our flag number */
1669 OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
1670 }
1671
1672 if (PCI(si)) {
1673 OR_REG(si->osh, &pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
1674 if (si->sb.buscorerev >= 11)
1675 OR_REG(si->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
1676 if (si->sb.buscorerev < 5) {
1677 SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1678 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
1679 sb_commit(sbh);
1680 }
1681 }
1682
1683 #ifdef PCIE_SUPPOER
1684 /* PCIE workarounds */
1685 if (PCIE(si)) {
1686 if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
1687 reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS,
1688 PCIE_TLP_WORKAROUNDSREG);
1689 reg_val |= 0x8;
1690 sb_pcie_writereg((void *)sbh, (void *)PCIE_PCIEREGS,
1691 PCIE_TLP_WORKAROUNDSREG, reg_val);
1692 }
1693
1694 if (si->sb.buscorerev == 1) {
1695 reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS,
1696 PCIE_DLLP_LCREG);
1697 reg_val |= (0x40);
1698 sb_pcie_writereg(sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG, reg_val);
1699 }
1700
1701 if (si->sb.buscorerev == 0)
1702 sb_war30841(si);
1703 }
1704 #endif
1705
1706 /* switch back to previous core */
1707 sb_setcoreidx(sbh, idx);
1708 }
1709
1710 uint32
1711 sb_base(uint32 admatch)
1712 {
1713 uint32 base;
1714 uint type;
1715
1716 type = admatch & SBAM_TYPE_MASK;
1717 ASSERT(type < 3);
1718
1719 base = 0;
1720
1721 if (type == 0) {
1722 base = admatch & SBAM_BASE0_MASK;
1723 } else if (type == 1) {
1724 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1725 base = admatch & SBAM_BASE1_MASK;
1726 } else if (type == 2) {
1727 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1728 base = admatch & SBAM_BASE2_MASK;
1729 }
1730
1731 return (base);
1732 }
1733
1734 uint32
1735 sb_size(uint32 admatch)
1736 {
1737 uint32 size;
1738 uint type;
1739
1740 type = admatch & SBAM_TYPE_MASK;
1741 ASSERT(type < 3);
1742
1743 size = 0;
1744
1745 if (type == 0) {
1746 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1747 } else if (type == 1) {
1748 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1749 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1750 } else if (type == 2) {
1751 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1752 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1753 }
1754
1755 return (size);
1756 }
1757
1758 /* return the core-type instantiation # of the current core */
1759 uint
1760 sb_coreunit(sb_t *sbh)
1761 {
1762 sb_info_t *si;
1763 uint idx;
1764 uint coreid;
1765 uint coreunit;
1766 uint i;
1767
1768 si = SB_INFO(sbh);
1769 coreunit = 0;
1770
1771 idx = si->curidx;
1772
1773 ASSERT(GOODREGS(si->curmap));
1774 coreid = sb_coreid(sbh);
1775
1776 /* count the cores of our type */
1777 for (i = 0; i < idx; i++)
1778 if (si->coreid[i] == coreid)
1779 coreunit++;
1780
1781 return (coreunit);
1782 }
1783
1784 static INLINE uint32
1785 factor6(uint32 x)
1786 {
1787 switch (x) {
1788 case CC_F6_2: return 2;
1789 case CC_F6_3: return 3;
1790 case CC_F6_4: return 4;
1791 case CC_F6_5: return 5;
1792 case CC_F6_6: return 6;
1793 case CC_F6_7: return 7;
1794 default: return 0;
1795 }
1796 }
1797
/* calculate the speed the SB would run at given a set of clockcontrol values */
uint32
sb_clock_rate(uint32 pll_type, uint32 n, uint32 m)
{
	uint32 n1, n2, clock, m1, m2, m3, mc;

	/* split the N register into its two divider fields */
	n1 = n & CN_N1_MASK;
	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;

	if (pll_type == PLL_TYPE6) {
		/* type 6 has only two fixed rates, selected by one bit of m */
		if (m & CC_T6_MMASK)
			return CC_T6_M1;
		else
			return CC_T6_M0;
	} else if ((pll_type == PLL_TYPE1) ||
	           (pll_type == PLL_TYPE3) ||
	           (pll_type == PLL_TYPE4) ||
	           (pll_type == PLL_TYPE7)) {
		n1 = factor6(n1);
		n2 += CC_F5_BIAS;
	} else if (pll_type == PLL_TYPE2) {
		n1 += CC_T2_BIAS;
		n2 += CC_T2_BIAS;
		ASSERT((n1 >= 2) && (n1 <= 7));
		ASSERT((n2 >= 5) && (n2 <= 23));
	} else if (pll_type == PLL_TYPE5) {
		/* type 5 runs at a fixed 100 MHz */
		return (100000000);
	} else
		ASSERT(0);
	/* PLL types 3 and 7 use BASE2 (25Mhz) */
	if ((pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE7)) {
		clock = CC_CLOCK_BASE2 * n1 * n2;
	} else
		clock = CC_CLOCK_BASE1 * n1 * n2;

	if (clock == 0)
		return 0;

	/* split the M register into its divider fields and mode control */
	m1 = m & CC_M1_MASK;
	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;

	if ((pll_type == PLL_TYPE1) ||
	    (pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE4) ||
	    (pll_type == PLL_TYPE7)) {
		m1 = factor6(m1);
		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
			m2 += CC_F5_BIAS;
		else
			m2 = factor6(m2);
		m3 = factor6(m3);

		/* mc selects which of the m dividers are applied */
		switch (mc) {
		case CC_MC_BYPASS:	return (clock);
		case CC_MC_M1:		return (clock / m1);
		case CC_MC_M1M2:	return (clock / (m1 * m2));
		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
		case CC_MC_M1M3:	return (clock / (m1 * m3));
		default:		return (0);
		}
	} else {
		ASSERT(pll_type == PLL_TYPE2);

		m1 += CC_T2_BIAS;
		m2 += CC_T2M2_BIAS;
		m3 += CC_T2_BIAS;
		ASSERT((m1 >= 2) && (m1 <= 7));
		ASSERT((m2 >= 3) && (m2 <= 10));
		ASSERT((m3 >= 2) && (m3 <= 7));

		/* for type 2 each m divider can be individually bypassed */
		if ((mc & CC_T2MC_M1BYP) == 0)
			clock /= m1;
		if ((mc & CC_T2MC_M2BYP) == 0)
			clock /= m2;
		if ((mc & CC_T2MC_M3BYP) == 0)
			clock /= m3;

		return (clock);
	}
}
1881
/* returns the current speed the SB is running at */
uint32
sb_clock(sb_t *sbh)
{
	sb_info_t *si;
	extifregs_t *eir;
	chipcregs_t *cc;
	uint32 n, m;
	uint idx;
	uint32 pll_type, rate;
	uint intr_val = 0;

	si = SB_INFO(sbh);
	idx = si->curidx;	/* remember the focused core to restore on exit */
	pll_type = PLL_TYPE1;

	INTR_OFF(si, intr_val);

	/* switch to extif or chipc core (whichever this chip has) */
	if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
		n = R_REG(si->osh, &eir->clockcontrol_n);
		m = R_REG(si->osh, &eir->clockcontrol_sb);
	} else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
		pll_type = R_REG(si->osh, &cc->capabilities) & CAP_PLL_MASK;
		if (pll_type == PLL_NONE) {
			/* chips without a pll report a fixed 80 MHz */
			INTR_RESTORE(si, intr_val);
			return 80000000;
		}
		n = R_REG(si->osh, &cc->clockcontrol_n);
		/* pick the m register appropriate for the pll type */
		if (pll_type == PLL_TYPE6)
			m = R_REG(si->osh, &cc->clockcontrol_m3);
		else if ((pll_type == PLL_TYPE3) && !(BCMINIT(sb_chip)(sbh) == 0x5365))
			m = R_REG(si->osh, &cc->clockcontrol_m2);
		else
			m = R_REG(si->osh, &cc->clockcontrol_sb);
	} else {
		/* neither extif nor chipc present: rate unknown */
		INTR_RESTORE(si, intr_val);
		return 0;
	}

	/* calculate rate (the bcm5365 runs at a fixed 100 MHz) */
	if (BCMINIT(sb_chip)(sbh) == 0x5365)
		rate = 100000000;
	else {
		rate = sb_clock_rate(pll_type, n, m);

		if (pll_type == PLL_TYPE3)
			rate = rate / 2;
	}

	/* switch back to previous core */
	sb_setcoreidx(sbh, idx);

	INTR_RESTORE(si, intr_val);

	return rate;
}
1939
1940 /* change logical "focus" to the gpio core for optimized access */
1941 void*
1942 sb_gpiosetcore(sb_t *sbh)
1943 {
1944 sb_info_t *si;
1945
1946 si = SB_INFO(sbh);
1947
1948 return (sb_setcoreidx(sbh, si->gpioidx));
1949 }
1950
1951 /* mask&set gpiocontrol bits */
1952 uint32
1953 sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
1954 {
1955 sb_info_t *si;
1956 uint regoff;
1957
1958 si = SB_INFO(sbh);
1959 regoff = 0;
1960
1961 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
1962
1963 /* gpios could be shared on router platforms */
1964 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
1965 mask = priority ? (sb_gpioreservation & mask) :
1966 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
1967 val &= mask;
1968 }
1969
1970 switch (si->gpioid) {
1971 case SB_CC:
1972 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
1973 break;
1974
1975 case SB_PCI:
1976 regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
1977 break;
1978
1979 case SB_EXTIF:
1980 return (0);
1981 }
1982
1983 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
1984 }
1985
/* mask&set gpio output enable bits */
uint32
sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
{
	sb_info_t *si;
	uint regoff;

	si = SB_INFO(sbh);
	regoff = 0;

	/* caller-supplied priority is deliberately overridden */
	priority = GPIO_DRV_PRIORITY;	/* compatibility hack */

	/* gpios could be shared on router platforms: limit the mask to
	 * reserved gpios (priority set) or to unreserved ones otherwise
	 */
	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
		mask = priority ? (sb_gpioreservation & mask) :
		       ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
		val &= mask;
	}

	/* pick the outen register of whichever core owns the gpios */
	switch (si->gpioid) {
	case SB_CC:
		regoff = OFFSETOF(chipcregs_t, gpioouten);
		break;

	case SB_PCI:
		regoff = OFFSETOF(sbpciregs_t, gpioouten);
		break;

	case SB_EXTIF:
		regoff = OFFSETOF(extifregs_t, gpio[0].outen);
		break;
	}

	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
}
2021
2022 /* mask&set gpio output bits */
2023 uint32
2024 sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2025 {
2026 sb_info_t *si;
2027 uint regoff;
2028
2029 si = SB_INFO(sbh);
2030 regoff = 0;
2031
2032 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2033
2034 /* gpios could be shared on router platforms */
2035 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2036 mask = priority ? (sb_gpioreservation & mask) :
2037 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2038 val &= mask;
2039 }
2040
2041 switch (si->gpioid) {
2042 case SB_CC:
2043 regoff = OFFSETOF(chipcregs_t, gpioout);
2044 break;
2045
2046 case SB_PCI:
2047 regoff = OFFSETOF(sbpciregs_t, gpioout);
2048 break;
2049
2050 case SB_EXTIF:
2051 regoff = OFFSETOF(extifregs_t, gpio[0].out);
2052 break;
2053 }
2054
2055 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
2056 }
2057
2058 /* reserve one gpio */
2059 uint32
2060 sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2061 {
2062 sb_info_t *si;
2063
2064 si = SB_INFO(sbh);
2065
2066 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2067
2068 /* only cores on SB_BUS share GPIO's and only applcation users need to
2069 * reserve/release GPIO
2070 */
2071 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2072 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2073 return -1;
2074 }
2075 /* make sure only one bit is set */
2076 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2077 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2078 return -1;
2079 }
2080
2081 /* already reserved */
2082 if (sb_gpioreservation & gpio_bitmask)
2083 return -1;
2084 /* set reservation */
2085 sb_gpioreservation |= gpio_bitmask;
2086
2087 return sb_gpioreservation;
2088 }
2089
/* release one gpio */
/*
 * releasing the gpio doesn't change the current value on the GPIO last write value
 * persists till some one overwrites it
 */

uint32
sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* caller-supplied priority is deliberately overridden */
	priority = GPIO_DRV_PRIORITY;	/* compatibility hack */

	/* only cores on SB_BUS share GPIO's and only applcation users need to
	 * reserve/release GPIO
	 */
	if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
		return -1;
	}
	/* make sure only one bit is set */
	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
		return -1;
	}

	/* already released */
	if (!(sb_gpioreservation & gpio_bitmask))
		return -1;

	/* clear reservation; returns the updated reservation bitmap */
	sb_gpioreservation &= ~gpio_bitmask;

	return sb_gpioreservation;
}
2127
2128 /* return the current gpioin register value */
2129 uint32
2130 sb_gpioin(sb_t *sbh)
2131 {
2132 sb_info_t *si;
2133 uint regoff;
2134
2135 si = SB_INFO(sbh);
2136 regoff = 0;
2137
2138 switch (si->gpioid) {
2139 case SB_CC:
2140 regoff = OFFSETOF(chipcregs_t, gpioin);
2141 break;
2142
2143 case SB_PCI:
2144 regoff = OFFSETOF(sbpciregs_t, gpioin);
2145 break;
2146
2147 case SB_EXTIF:
2148 regoff = OFFSETOF(extifregs_t, gpioin);
2149 break;
2150 }
2151
2152 return (sb_corereg(si, si->gpioidx, regoff, 0, 0));
2153 }
2154
/* mask&set gpio interrupt polarity bits */
uint32
sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
{
	sb_info_t *si;
	uint regoff;

	si = SB_INFO(sbh);
	regoff = 0;

	/* caller-supplied priority is deliberately overridden */
	priority = GPIO_DRV_PRIORITY;	/* compatibility hack */

	/* gpios could be shared on router platforms: limit the mask to
	 * reserved gpios (priority set) or to unreserved ones otherwise
	 */
	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
		mask = priority ? (sb_gpioreservation & mask) :
		       ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
		val &= mask;
	}

	switch (si->gpioid) {
	case SB_CC:
		regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
		break;

	case SB_PCI:
		/* pci gpio implementation does not support interrupt polarity */
		ASSERT(0);
		break;

	case SB_EXTIF:
		regoff = OFFSETOF(extifregs_t, gpiointpolarity);
		break;
	}

	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
}
2191
/* mask&set gpio interrupt mask bits */
uint32
sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
{
	sb_info_t *si;
	uint regoff;

	si = SB_INFO(sbh);
	regoff = 0;

	/* caller-supplied priority is deliberately overridden */
	priority = GPIO_DRV_PRIORITY;	/* compatibility hack */

	/* gpios could be shared on router platforms: limit the mask to
	 * reserved gpios (priority set) or to unreserved ones otherwise
	 */
	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
		mask = priority ? (sb_gpioreservation & mask) :
		       ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
		val &= mask;
	}

	switch (si->gpioid) {
	case SB_CC:
		regoff = OFFSETOF(chipcregs_t, gpiointmask);
		break;

	case SB_PCI:
		/* pci gpio implementation does not support interrupt mask */
		ASSERT(0);
		break;

	case SB_EXTIF:
		regoff = OFFSETOF(extifregs_t, gpiointmask);
		break;
	}

	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
}
2228
2229 /* assign the gpio to an led */
2230 uint32
2231 sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
2232 {
2233 sb_info_t *si;
2234
2235 si = SB_INFO(sbh);
2236 if (si->sb.ccrev < 16)
2237 return -1;
2238
2239 /* gpio led powersave reg */
2240 return (sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
2241 }
2242
2243 /* mask & set gpio timer val */
2244 uint32
2245 sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
2246 {
2247 sb_info_t *si;
2248 si = SB_INFO(sbh);
2249
2250 if (si->sb.ccrev < 16)
2251 return -1;
2252
2253 return (sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
2254 }
2255
2256
/* return the slow clock source - LPO, XTAL, or PCI */
static uint
sb_slowclk_src(sb_info_t *si)
{
	chipcregs_t *cc;

	/* caller must already have focus on the chipc core */
	ASSERT(sb_coreid(&si->sb) == SB_CC);

	if (si->sb.ccrev < 6) {
		/* rev < 6: source indicated by a pci config gpio bit */
		if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
		    (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32)) &
		     PCI_CFG_GPIO_SCS))
			return (SCC_SS_PCI);
		else
			return (SCC_SS_XTAL);
	} else if (si->sb.ccrev < 10) {
		/* rev 6..9: read the source field from slow_clk_ctl */
		cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
		return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
	} else	/* Insta-clock */
		return (SCC_SS_XTAL);
}
2279
/* return the ILP (slowclock) min or max frequency */
static uint
sb_slowclk_freq(sb_info_t *si, bool max)
{
	chipcregs_t *cc;
	uint32 slowclk;
	uint div;

	/* caller must already have focus on the chipc core */
	ASSERT(sb_coreid(&si->sb) == SB_CC);

	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);

	/* shouldn't be here unless we've established the chip has dynamic clk control */
	ASSERT(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL);

	slowclk = sb_slowclk_src(si);
	if (si->sb.ccrev < 6) {
		/* rev < 6: fixed dividers (64 for pci, 32 for xtal) */
		if (slowclk == SCC_SS_PCI)
			return (max? (PCIMAXFREQ/64) : (PCIMINFREQ/64));
		else
			return (max? (XTALMAXFREQ/32) : (XTALMINFREQ/32));
	} else if (si->sb.ccrev < 10) {
		/* rev 6..9: divider programmed in slow_clk_ctl */
		div = 4 * (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
		if (slowclk == SCC_SS_LPO)
			return (max? LPOMAXFREQ : LPOMINFREQ);
		else if (slowclk == SCC_SS_XTAL)
			return (max? (XTALMAXFREQ/div) : (XTALMINFREQ/div));
		else if (slowclk == SCC_SS_PCI)
			return (max? (PCIMAXFREQ/div) : (PCIMINFREQ/div));
		else
			ASSERT(0);	/* unknown source: falls through to return 0 */
	} else {
		/* Chipc rev 10 is InstaClock */
		div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
		div = 4 * (div + 1);
		return (max ? XTALMAXFREQ : (XTALMINFREQ/div));
	}
	return (0);
}
2320
/* Program pll_on_delay and fref_sel_delay from the current slow-clock
 * frequency; delays are converted from microseconds to slow-clock
 * ticks, rounding up.
 */
static void
BCMINITFN(sb_clkctl_setdelay)(sb_info_t *si, void *chipcregs)
{
	chipcregs_t * cc;
	uint slowmaxfreq, pll_delay, slowclk;
	uint pll_on_delay, fref_sel_delay;

	pll_delay = PLL_DELAY;

	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
	 * since the xtal will also be powered down by dynamic clk control logic.
	 */

	slowclk = sb_slowclk_src(si);
	if (slowclk != SCC_SS_XTAL)
		pll_delay += XTAL_ON_DELAY;

	/* Starting with 4318 it is ILP that is used for the delays */
	slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);

	/* (Hz * us + 999999) / 1000000 = ticks, rounded up */
	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;

	cc = (chipcregs_t *)chipcregs;
	W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
	W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
}
2348
/* Initialize power control delay registers.
 * Applies 4321 A0/A1 chipcontrol defaults, sets Instaclock (chipc rev
 * >= 10) chips' ILP divider to 1 MHz, and programs the PLL/fref delay
 * registers via sb_clkctl_setdelay.  Does nothing if no chipcommon
 * core is present or the chip lacks power control.  The caller's
 * current core selection is restored on exit (except when there is no
 * chipcommon core at all, in which case the core was never switched).
 */
void
BCMINITFN(sb_clkctl_init)(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	chipcregs_t *cc;

	si = SB_INFO(sbh);

	origidx = si->curidx;

	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
		return;

	/* 4321 rev 0/1 need non-reset-default chipcontrol values */
	if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
		W_REG(si->osh, &cc->chipcontrol,
		      (si->sb.chiprev == 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);

	/* bail if the chip has no dynamic power control */
	if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
		goto done;

	/* set all Instaclk chip ILP to 1 MHz */
	else if (si->sb.ccrev >= 10)
		SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));

	sb_clkctl_setdelay(si, (void *)cc);

done:
	sb_setcoreidx(sbh, origidx);
}
2381
2382 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
2383 uint16
2384 sb_clkctl_fast_pwrup_delay(sb_t *sbh)
2385 {
2386 sb_info_t *si;
2387 uint origidx;
2388 chipcregs_t *cc;
2389 uint slowminfreq;
2390 uint16 fpdelay;
2391 uint intr_val = 0;
2392
2393 si = SB_INFO(sbh);
2394 fpdelay = 0;
2395 origidx = si->curidx;
2396
2397 INTR_OFF(si, intr_val);
2398
2399 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
2400 goto done;
2401
2402 if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
2403 goto done;
2404
2405 slowminfreq = sb_slowclk_freq(si, FALSE);
2406 fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
2407 (slowminfreq - 1)) / slowminfreq;
2408
2409 done:
2410 sb_setcoreidx(sbh, origidx);
2411 INTR_RESTORE(si, intr_val);
2412 return (fpdelay);
2413 }
2414
2415 /* turn primary xtal and/or pll off/on */
2416 int
2417 sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
2418 {
2419 sb_info_t *si;
2420 uint32 in, out, outen;
2421
2422 si = SB_INFO(sbh);
2423
2424 switch (BUSTYPE(si->sb.bustype)) {
2425
2426
2427 case PCMCIA_BUS:
2428 return (0);
2429
2430
2431 case PCI_BUS:
2432
2433 /* pcie core doesn't have any mapping to control the xtal pu */
2434 if (PCIE(si))
2435 return -1;
2436
2437 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
2438 out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
2439 outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32));
2440
2441 /*
2442 * Avoid glitching the clock if GPRS is already using it.
2443 * We can't actually read the state of the PLLPD so we infer it
2444 * by the value of XTAL_PU which *is* readable via gpioin.
2445 */
2446 if (on && (in & PCI_CFG_GPIO_XTAL))
2447 return (0);
2448
2449 if (what & XTAL)
2450 outen |= PCI_CFG_GPIO_XTAL;
2451 if (what & PLL)
2452 outen |= PCI_CFG_GPIO_PLL;
2453
2454 if (on) {
2455 /* turn primary xtal on */
2456 if (what & XTAL) {
2457 out |= PCI_CFG_GPIO_XTAL;
2458 if (what & PLL)
2459 out |= PCI_CFG_GPIO_PLL;
2460 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
2461 sizeof(uint32), out);
2462 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
2463 sizeof(uint32), outen);
2464 OSL_DELAY(XTAL_ON_DELAY);
2465 }
2466
2467 /* turn pll on */
2468 if (what & PLL) {
2469 out &= ~PCI_CFG_GPIO_PLL;
2470 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
2471 sizeof(uint32), out);
2472 OSL_DELAY(2000);
2473 }
2474 } else {
2475 if (what & XTAL)
2476 out &= ~PCI_CFG_GPIO_XTAL;
2477 if (what & PLL)
2478 out |= PCI_CFG_GPIO_PLL;
2479 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32), out);
2480 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32),
2481 outen);
2482 }
2483
2484 default:
2485 return (-1);
2486 }
2487
2488 return (0);
2489 }
2490
/* set dynamic clk control mode (forceslow, forcefast, dynamic) */
/* Returns TRUE iff the requested mode was CLK_FAST (i.e. we are
 * forcing the fast clock); preserves the current core and interrupt
 * state across the change.
 */
bool
sb_clkctl_clk(sb_t *sbh, uint mode)
{
	sb_info_t *si;
	uint origidx;
	chipcregs_t *cc;
	uint32 scc;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	/* chipcommon cores prior to rev6 don't support dynamic clock control */
	if (si->sb.ccrev < 6)
		return (FALSE);


	/* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
	ASSERT(si->sb.ccrev != 10);

	INTR_OFF(si, intr_val);

	origidx = si->curidx;

	/* NOTE(review): mode changes are skipped entirely for mips33
	 * rev <= 7 parts on the SB bus with chipc rev >= 10; the
	 * rationale for this combination is not visible in this file.
	 */
	if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
	    (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
		goto done;

	/* PR32414WAR "Force HT clock on" all the time, no dynamic clk ctl */
	if ((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev <= 1))
		goto done;

	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
	ASSERT(cc != NULL);

	/* nothing to do on chips without dynamic power control */
	if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
		goto done;

	switch (mode) {
	case CLK_FAST:	/* force fast (pll) clock */
		if (si->sb.ccrev < 10) {
			/* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
			sb_clkctl_xtal(&si->sb, XTAL, ON);

			SET_REG(si->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
		} else
			OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
		/* wait for the PLL */
		OSL_DELAY(PLL_DELAY);
		break;

	case CLK_DYNAMIC:	/* enable dynamic clock control */

		if (si->sb.ccrev < 10) {
			scc = R_REG(si->osh, &cc->slow_clk_ctl);
			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
			/* NOTE(review): SCC_XC is set only when the xtal is
			 * not the slow-clock source -- confirm exact XC
			 * semantics against the chipcommon programming docs
			 */
			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
				scc |= SCC_XC;
			W_REG(si->osh, &cc->slow_clk_ctl, scc);

			/* for dynamic control, we have to release our xtal_pu "force on" */
			if (scc & SCC_XC)
				sb_clkctl_xtal(&si->sb, XTAL, OFF);
		} else {
			/* Instaclock */
			AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
		}
		break;

	default:
		ASSERT(0);
	}

done:
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return (mode == CLK_FAST);
}
2570
2571 /* register driver interrupt disabling and restoring callback functions */
2572 void
2573 sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn,
2574 void *intrsenabled_fn, void *intr_arg)
2575 {
2576 sb_info_t *si;
2577
2578 si = SB_INFO(sbh);
2579 si->intr_arg = intr_arg;
2580 si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
2581 si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
2582 si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
2583 /* save current core id. when this function called, the current core
2584 * must be the core which provides driver functions(il, et, wl, etc.)
2585 */
2586 si->dev_coreid = si->coreid[si->curidx];
2587 }
2588
2589
/* Translate the current core into emulated PCI configuration-header
 * fields for function 'func': vendor/device IDs, class, subclass,
 * programming interface, and header type.  All out-pointers must be
 * non-NULL.  Returns 0 on success, BCME_ERROR when 'func' does not
 * exist for this core or the core vendor is unknown.
 */
int
sb_corepciid(sb_t *sbh, uint func, uint16 *pcivendor, uint16 *pcidevice,
             uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
             uint8 *pciheader)
{
	uint16 vendor = 0xffff, device = 0xffff;
	uint core, unit;
	uint chip, chippkg;
	uint nfunc;
	char varname[SB_DEVPATH_BUFSZ + 8];
	uint8 class, subclass, progif;
	char devpath[SB_DEVPATH_BUFSZ];
	uint8 header;

	core = sb_coreid(sbh);
	unit = sb_coreunit(sbh);

	chip = sb_chip(sbh);
	chippkg = sb_chippkg(sbh);

	/* defaults: no programming interface, normal (non-bridge) header */
	progif = 0;
	header = PCI_HEADER_NORMAL;

	/* Verify whether the function exists for the core;
	 * USB20H is the only dual-function core handled here
	 */
	nfunc = (core == SB_USB20H) ? 2 : 1;
	if (func >= nfunc)
		return BCME_ERROR;

	/* Known vendor translations */
	switch (sb_corevendor(sbh)) {
	case SB_VEND_BCM:
		vendor = VENDOR_BROADCOM;
		break;
	default:
		return BCME_ERROR;
	}

	/* Determine class based on known core codes */
	switch (core) {
	case SB_ILINE20:
		class = PCI_CLASS_NET;
		subclass = PCI_NET_ETHER;
		device = BCM47XX_ILINE_ID;
		break;
	case SB_ENET:
		class = PCI_CLASS_NET;
		subclass = PCI_NET_ETHER;
		device = BCM47XX_ENET_ID;
		break;
	case SB_GIGETH:
		class = PCI_CLASS_NET;
		subclass = PCI_NET_ETHER;
		device = BCM47XX_GIGETH_ID;
		break;
	case SB_SDRAM:
	case SB_MEMC:
		class = PCI_CLASS_MEMORY;
		subclass = PCI_MEMORY_RAM;
		device = (uint16)core;
		break;
	case SB_PCI:
	case SB_PCIE:
		class = PCI_CLASS_BRIDGE;
		subclass = PCI_BRIDGE_PCI;
		device = (uint16)core;
		header = PCI_HEADER_BRIDGE;
		break;
	case SB_MIPS:
	case SB_MIPS33:
		class = PCI_CLASS_CPU;
		subclass = PCI_CPU_MIPS;
		device = (uint16)core;
		break;
	case SB_CODEC:
		class = PCI_CLASS_COMM;
		subclass = PCI_COMM_MODEM;
		device = BCM47XX_V90_ID;
		break;
	case SB_USB:
		class = PCI_CLASS_SERIAL;
		subclass = PCI_SERIAL_USB;
		progif = 0x10; /* OHCI */
		device = BCM47XX_USB_ID;
		break;
	case SB_USB11H:
		class = PCI_CLASS_SERIAL;
		subclass = PCI_SERIAL_USB;
		progif = 0x10; /* OHCI */
		device = BCM47XX_USBH_ID;
		break;
	case SB_USB20H:
		class = PCI_CLASS_SERIAL;
		subclass = PCI_SERIAL_USB;
		/* function 0 is the OHCI companion, function 1 is EHCI */
		progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
		device = BCM47XX_USB20H_ID;
		header = 0x80; /* multifunction */
		break;
	case SB_USB11D:
		class = PCI_CLASS_SERIAL;
		subclass = PCI_SERIAL_USB;
		device = BCM47XX_USBD_ID;
		break;
	case SB_USB20D:
		class = PCI_CLASS_SERIAL;
		subclass = PCI_SERIAL_USB;
		device = BCM47XX_USB20D_ID;
		break;
	case SB_IPSEC:
		class = PCI_CLASS_CRYPT;
		subclass = PCI_CRYPT_NETWORK;
		device = BCM47XX_IPSEC_ID;
		break;
	case SB_ROBO:
		class = PCI_CLASS_NET;
		subclass = PCI_NET_OTHER;
		device = BCM47XX_ROBO_ID;
		break;
	case SB_EXTIF:
	case SB_CC:
		class = PCI_CLASS_MEMORY;
		subclass = PCI_MEMORY_FLASH;
		device = (uint16)core;
		break;
	case SB_D11:
		class = PCI_CLASS_NET;
		subclass = PCI_NET_OTHER;
		/* Let nvram variable override core ID */
		sb_devpath(sbh, devpath, sizeof(devpath));
		/* varname is devpath + "devid"; the +8 slack in varname
		 * accommodates the suffix
		 */
		sprintf(varname, "%sdevid", devpath);
		if ((device = (uint16)getintvar(NULL, varname)))
			break;
		/*
		 * no longer support wl%did, but keep the code
		 * here for backward compatibility.
		 */
		sprintf(varname, "wl%did", unit);
		if ((device = (uint16)getintvar(NULL, varname)))
			break;
		/* Chip specific conversion */
		if (chip == BCM4712_CHIP_ID) {
			if (chippkg == BCM4712SMALL_PKG_ID)
				device = BCM4306_D11G_ID;
			else
				device = BCM4306_D11DUAL_ID;
			break;
		}
		/* ignore it */
		device = 0xffff;
		break;
	case SB_SATAXOR:
		class = PCI_CLASS_XOR;
		subclass = PCI_XOR_QDMA;
		device = BCM47XX_SATAXOR_ID;
		break;
	case SB_ATA100:
		class = PCI_CLASS_DASDI;
		subclass = PCI_DASDI_IDE;
		device = BCM47XX_ATA100_ID;
		break;

	default:
		/* unrecognized core: expose it with its raw core code */
		class = subclass = progif = 0xff;
		device = (uint16)core;
		break;
	}

	*pcivendor = vendor;
	*pcidevice = device;
	*pciclass = class;
	*pcisubclass = subclass;
	*pciprogif = progif;
	*pciheader = header;

	return 0;
}
2765
2766
2767
2768 /* use the mdio interface to write to mdio slaves */
2769 static int
2770 sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint regaddr, uint val)
2771 {
2772 uint mdiodata;
2773 uint i = 0;
2774 sbpcieregs_t *pcieregs;
2775
2776 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
2777 ASSERT(pcieregs);
2778
2779 /* enable mdio access to SERDES */
2780 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
2781
2782 mdiodata = MDIODATA_START | MDIODATA_WRITE |
2783 (physmedia << MDIODATA_DEVADDR_SHF) |
2784 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
2785
2786 W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
2787
2788 PR28829_DELAY();
2789
2790 /* retry till the transaction is complete */
2791 while (i < 10) {
2792 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
2793 /* Disable mdio access to SERDES */
2794 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
2795 return 0;
2796 }
2797 OSL_DELAY(1000);
2798 i++;
2799 }
2800
2801 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
2802 /* Disable mdio access to SERDES */
2803 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
2804 ASSERT(0);
2805 return 1;
2806
2807 }
2808
2809 /* indirect way to read pcie config regs */
2810 uint
2811 sb_pcie_readreg(void *sb, void* arg1, uint offset)
2812 {
2813 sb_info_t *si;
2814 sb_t *sbh;
2815 uint retval = 0xFFFFFFFF;
2816 sbpcieregs_t *pcieregs;
2817 uint addrtype;
2818
2819 sbh = (sb_t *)sb;
2820 si = SB_INFO(sbh);
2821 ASSERT(PCIE(si));
2822
2823 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
2824 ASSERT(pcieregs);
2825
2826 addrtype = (uint)((uintptr)arg1);
2827 switch (addrtype) {
2828 case PCIE_CONFIGREGS:
2829 W_REG(si->osh, (&pcieregs->configaddr), offset);
2830 retval = R_REG(si->osh, &(pcieregs->configdata));
2831 break;
2832 case PCIE_PCIEREGS:
2833 W_REG(si->osh, &(pcieregs->pcieaddr), offset);
2834 retval = R_REG(si->osh, &(pcieregs->pciedata));
2835 break;
2836 default:
2837 ASSERT(0);
2838 break;
2839 }
2840 return retval;
2841 }
2842
2843 /* indirect way to write pcie config/mdio/pciecore regs */
2844 uint
2845 sb_pcie_writereg(sb_t *sbh, void *arg1, uint offset, uint val)
2846 {
2847 sb_info_t *si;
2848 sbpcieregs_t *pcieregs;
2849 uint addrtype;
2850
2851 si = SB_INFO(sbh);
2852 ASSERT(PCIE(si));
2853
2854 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
2855 ASSERT(pcieregs);
2856
2857 addrtype = (uint)((uintptr)arg1);
2858
2859 switch (addrtype) {
2860 case PCIE_CONFIGREGS:
2861 W_REG(si->osh, (&pcieregs->configaddr), offset);
2862 W_REG(si->osh, (&pcieregs->configdata), val);
2863 break;
2864 case PCIE_PCIEREGS:
2865 W_REG(si->osh, (&pcieregs->pcieaddr), offset);
2866 W_REG(si->osh, (&pcieregs->pciedata), val);
2867 break;
2868 default:
2869 ASSERT(0);
2870 break;
2871 }
2872 return 0;
2873 }
2874
2875 /* Build device path. Support SB, PCI, and JTAG for now. */
2876 int
2877 sb_devpath(sb_t *sbh, char *path, int size)
2878 {
2879 ASSERT(path);
2880 ASSERT(size >= SB_DEVPATH_BUFSZ);
2881
2882 switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
2883 case SB_BUS:
2884 case JTAG_BUS:
2885 sprintf(path, "sb/%u/", sb_coreidx(sbh));
2886 break;
2887 case PCI_BUS:
2888 ASSERT((SB_INFO(sbh))->osh);
2889 sprintf(path, "pci/%u/%u/", OSL_PCI_BUS((SB_INFO(sbh))->osh),
2890 OSL_PCI_SLOT((SB_INFO(sbh))->osh));
2891 break;
2892 case PCMCIA_BUS:
2893 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
2894 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
2895 sprintf(path, "pc/%u/%u/", 1, 1);
2896 break;
2897 case SDIO_BUS:
2898 SB_ERROR(("sb_devpath: device 0 assumed\n"));
2899 sprintf(path, "sd/%u/", sb_coreidx(sbh));
2900 break;
2901 default:
2902 ASSERT(0);
2903 break;
2904 }
2905
2906 return 0;
2907 }
2908
/*
 * Fixup SROMless PCI device's configuration.
 * The current core may be changed upon return.
 *
 * Two repairs are performed:
 *  1. The PI (PCI core index) field in the PCI/PCIE core's SROM shadow
 *     is rewritten if it does not match the core's actual index.
 *  2. If an nvram "<devpath>b0w" variable exists, BAR0 window in PCI
 *     config space is set to it (this changes the current core, so it
 *     is done last and curidx is re-derived afterwards).
 * Returns 0 on success, -1 on failure.
 */
static int
sb_pci_fixcfg(sb_info_t *si)
{
	uint origidx, pciidx;
	sbpciregs_t *pciregs;
	sbpcieregs_t *pcieregs;
	uint16 val16, *reg16;
	char name[SB_DEVPATH_BUFSZ+16], *value;
	char devpath[SB_DEVPATH_BUFSZ];

	ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);

	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
	/* save the current index */
	origidx = sb_coreidx(&si->sb);

	/* check 'pi' is correct and fix it if not */
	if (si->sb.buscoretype == SB_PCIE) {
		pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
		ASSERT(pcieregs);
		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
	} else if (si->sb.buscoretype == SB_PCI) {
		pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
		ASSERT(pciregs);
		reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
	} else {
		ASSERT(0);
		return -1;
	}
	/* rewrite the shadow only when it disagrees with the real index */
	pciidx = sb_coreidx(&si->sb);
	val16 = R_REG(si->osh, reg16);
	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
		val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
		W_REG(si->osh, reg16, val16);
	}

	/* restore the original index */
	sb_setcoreidx(&si->sb, origidx);

	/*
	 * Fixup bar0window in PCI config space to make the core indicated
	 * by the nvram variable the current core.
	 * !Do it last, it may change the current core!
	 */
	if (sb_devpath(&si->sb, devpath, sizeof(devpath)))
		return -1;
	/* name = devpath (< SB_DEVPATH_BUFSZ) + "b0w"; fits in the +16 slack */
	sprintf(name, "%sb0w", devpath);
	if ((value = getvar(NULL, name))) {
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32),
		                     bcm_strtoul(value, NULL, 16));
		/* update curidx since the current core is changed */
		si->curidx = _sb_coreidx(si);
		if (si->curidx == BADIDX) {
			SB_ERROR(("sb_pci_fixcfg: bad core index\n"));
			return -1;
		}
	}

	return 0;
}
2973
2974 static uint
2975 sb_chipc_capability(sb_t *sbh)
2976 {
2977 sb_info_t *si;
2978
2979 si = SB_INFO(sbh);
2980
2981 /* Make sure that there is ChipCommon core present */
2982 if (si->coreid[SB_CC_IDX] == SB_CC)
2983 return (sb_corereg(si, SB_CC_IDX, OFFSETOF(chipcregs_t, capabilities),
2984 0, 0));
2985 return 0;
2986 }
2987
2988 /* Return ADDR64 capability of the backplane */
2989 bool
2990 sb_backplane64(sb_t *sbh)
2991 {
2992 return ((sb_chipc_capability(sbh) & CAP_BKPLN64) != 0);
2993 }
2994
2995 void
2996 sb_btcgpiowar(sb_t *sbh)
2997 {
2998 sb_info_t *si;
2999 uint origidx;
3000 uint intr_val = 0;
3001 chipcregs_t *cc;
3002 si = SB_INFO(sbh);
3003
3004 /* Make sure that there is ChipCommon core present &&
3005 * UART_TX is strapped to 1
3006 */
3007 if (!(sb_chipc_capability(sbh) & CAP_UARTGPIO))
3008 return;
3009
3010 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3011 INTR_OFF(si, intr_val);
3012
3013 origidx = sb_coreidx(sbh);
3014
3015 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
3016 if (cc == NULL)
3017 goto end;
3018
3019 W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
3020
3021 end:
3022 /* restore the original index */
3023 sb_setcoreidx(sbh, origidx);
3024
3025 INTR_RESTORE(si, intr_val);
3026 }
3027
3028 /* check if the device is removed */
3029 bool
3030 sb_deviceremoved(sb_t *sbh)
3031 {
3032 uint32 w;
3033 sb_info_t *si;
3034
3035 si = SB_INFO(sbh);
3036
3037 switch (BUSTYPE(si->sb.bustype)) {
3038 case PCI_BUS:
3039 ASSERT(si->osh);
3040 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
3041 if ((w & 0xFFFF) != VENDOR_BROADCOM)
3042 return TRUE;
3043 else
3044 return FALSE;
3045 default:
3046 return FALSE;
3047 }
3048 return FALSE;
3049 }
3050
/* Return the RAM size of the SOCRAM core, in bytes (0 when no SOCRAM
 * core is present).  Temporarily resets the core if it was down and
 * restores the previous core state and selection before returning.
 */
uint32
sb_socram_size(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;

	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;
	uint32 coreinfo;
	uint memsize = 0;

	si = SB_INFO(sbh);
	ASSERT(si);

	/* Block ints and save current core */
	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	/* Switch to SOCRAM core */
	if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
		goto done;

	/* Get info for determining size */
	/* the core must be up to read coreinfo; reset it if it wasn't */
	if (!(wasup = sb_iscoreup(sbh)))
		sb_core_reset(sbh, 0, 0);
	corerev = sb_corerev(sbh);
	coreinfo = R_REG(si->osh, &regs->coreinfo);

	/* Calculate size from coreinfo based on rev */
	switch (corerev) {
	case 0:
		/* rev 0: size encoded directly as a power of two (64KB base) */
		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
		break;
	default:	/* rev >= 1 */
		/* rev >= 1: bank size (power of two) times number of banks */
		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		break;
	}

	/* Return to previous state and core */
	if (!wasup)
		sb_core_disable(sbh, 0);
	sb_setcoreidx(sbh, origidx);

done:
	INTR_RESTORE(si, intr_val);
	return memsize;
}
3102
3103