* update kernel to .37 * add support for falcon (big thank you goes to lantiq !!)
[openwrt/staging/chunkeey.git] / target / linux / lantiq / patches / 809-mt-vpe.patch
1 --- a/arch/mips/Kconfig
2 +++ b/arch/mips/Kconfig
3 @@ -1837,6 +1837,28 @@ config MIPS_VPE_LOADER
4 Includes a loader for loading an elf relocatable object
5 onto another VPE and running it.
6
7 +config IFX_VPE_EXT
8 + bool "IFX APRP Extensions"
9 + depends on MIPS_VPE_LOADER
10 + default y
11 + help
12 + IFX included extensions in APRP
13 +
14 +config PERFCTRS
15 + bool "34K Performance counters"
16 + depends on MIPS_MT && PROC_FS
17 + default n
18 + help
19 + 34K Performance counter through /proc
20 +
21 +config MTSCHED
22 + bool "Support mtsched priority configuration for TCs"
23 + depends on MIPS_MT && PROC_FS
24 + default y
25 + help
26 + Support for mtsched priority configuration for TCs through
27 + /proc/mips/mtsched
28 +
29 config MIPS_MT_SMTC_IM_BACKSTOP
30 bool "Use per-TC register bits as backstop for inhibited IM bits"
31 depends on MIPS_MT_SMTC
32 --- a/arch/mips/include/asm/mipsmtregs.h
33 +++ b/arch/mips/include/asm/mipsmtregs.h
34 @@ -28,14 +28,34 @@
35 #define read_c0_vpeconf0() __read_32bit_c0_register($1, 2)
36 #define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val)
37
38 +#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
39 +#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
40 +
41 +#define read_c0_vpeschedule() __read_32bit_c0_register($1, 5)
42 +#define write_c0_vpeschedule(val) __write_32bit_c0_register($1, 5, val)
43 +
44 +#define read_c0_vpeschefback() __read_32bit_c0_register($1, 6)
45 +#define write_c0_vpeschefback(val) __write_32bit_c0_register($1, 6, val)
46 +
47 +#define read_c0_vpeopt() __read_32bit_c0_register($1, 7)
48 +#define write_c0_vpeopt(val) __write_32bit_c0_register($1, 7, val)
49 +
50 #define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
51 #define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
52
53 #define read_c0_tcbind() __read_32bit_c0_register($2, 2)
54 +#define write_c0_tcbind(val) __write_32bit_c0_register($2, 2, val)
55
56 #define read_c0_tccontext() __read_32bit_c0_register($2, 5)
57 #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
58
59 +#define read_c0_tcschedule() __read_32bit_c0_register($2, 6)
60 +#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val)
61 +
62 +#define read_c0_tcschefback() __read_32bit_c0_register($2, 7)
63 +#define write_c0_tcschefback(val) __write_32bit_c0_register($2, 7, val)
64 +
65 +
66 #else /* Assembly */
67 /*
68 * Macros for use in assembly language code
69 @@ -74,6 +94,8 @@
70 #define MVPCONTROL_STLB_SHIFT 2
71 #define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
72
73 +#define MVPCONTROL_CPA_SHIFT 3
74 +#define MVPCONTROL_CPA (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
75
76 /* MVPConf0 fields */
77 #define MVPCONF0_PTC_SHIFT 0
78 @@ -84,6 +106,8 @@
79 #define MVPCONF0_TCA ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
80 #define MVPCONF0_PTLBE_SHIFT 16
81 #define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
82 +#define MVPCONF0_PCP_SHIFT 27
83 +#define MVPCONF0_PCP (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
84 #define MVPCONF0_TLBS_SHIFT 29
85 #define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
86 #define MVPCONF0_M_SHIFT 31
87 @@ -121,9 +145,25 @@
88 #define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
89 #define VPECONF0_MVP_SHIFT 1
90 #define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
91 +#define VPECONF0_ICS_SHIFT 16
92 +#define VPECONF0_ICS (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
93 +#define VPECONF0_DCS_SHIFT 17
94 +#define VPECONF0_DCS (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
95 #define VPECONF0_XTC_SHIFT 21
96 #define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
97
98 +/* VPEOpt fields */
99 +#define VPEOPT_DWX_SHIFT 0
100 +#define VPEOPT_IWX_SHIFT 8
101 +#define VPEOPT_IWX0 ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
102 +#define VPEOPT_IWX1 ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
103 +#define VPEOPT_IWX2 ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
104 +#define VPEOPT_IWX3 ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
105 +#define VPEOPT_DWX0 ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
106 +#define VPEOPT_DWX1 ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
107 +#define VPEOPT_DWX2 ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
108 +#define VPEOPT_DWX3 ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
109 +
110 /* TCStatus fields (per TC) */
111 #define TCSTATUS_TASID (_ULCAST_(0xff))
112 #define TCSTATUS_IXMT_SHIFT 10
113 @@ -350,6 +390,14 @@ do { \
114 #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
115 #define read_vpe_c0_vpeconf0() mftc0(1, 2)
116 #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
117 +#define read_vpe_c0_vpeschedule() mftc0(1, 5)
118 +#define write_vpe_c0_vpeschedule(val) mttc0(1, 5, val)
119 +#define read_vpe_c0_vpeschefback() mftc0(1, 6)
120 +#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
121 +#define read_vpe_c0_vpeopt() mftc0(1, 7)
122 +#define write_vpe_c0_vpeopt(val) mttc0(1, 7, val)
123 +#define read_vpe_c0_wired() mftc0(6, 0)
124 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
125 #define read_vpe_c0_count() mftc0(9, 0)
126 #define write_vpe_c0_count(val) mttc0(9, 0, val)
127 #define read_vpe_c0_status() mftc0(12, 0)
128 @@ -381,6 +429,12 @@ do { \
129 #define write_tc_c0_tchalt(val) mttc0(2, 4, val)
130 #define read_tc_c0_tccontext() mftc0(2, 5)
131 #define write_tc_c0_tccontext(val) mttc0(2, 5, val)
132 +#define read_tc_c0_tcschedule() mftc0(2, 6)
133 +#define write_tc_c0_tcschedule(val) mttc0(2, 6, val)
134 +#define read_tc_c0_tcschefback() mftc0(2, 7)
135 +#define write_tc_c0_tcschefback(val) mttc0(2, 7, val)
136 +#define read_tc_c0_entryhi() mftc0(10, 0)
137 +#define write_tc_c0_entryhi(val) mttc0(10, 0, val)
138
139 /* GPR */
140 #define read_tc_gpr_sp() mftgpr(29)
141 --- a/arch/mips/kernel/Makefile
142 +++ b/arch/mips/kernel/Makefile
143 @@ -85,7 +85,8 @@ obj-$(CONFIG_MIPS32_O32) += binfmt_elfo3
144
145 obj-$(CONFIG_KGDB) += kgdb.o
146 obj-$(CONFIG_PROC_FS) += proc.o
147 -
148 +obj-$(CONFIG_MTSCHED) += mtsched_proc.o
149 +obj-$(CONFIG_PERFCTRS) += perf_proc.o
150 obj-$(CONFIG_64BIT) += cpu-bugs64.o
151
152 obj-$(CONFIG_I8253) += i8253.o
153 --- a/arch/mips/kernel/mips-mt.c
154 +++ b/arch/mips/kernel/mips-mt.c
155 @@ -21,26 +21,96 @@
156 #include <asm/cacheflush.h>
157
158 int vpelimit;
159 -
160 static int __init maxvpes(char *str)
161 {
162 get_option(&str, &vpelimit);
163 -
164 return 1;
165 }
166 -
167 __setup("maxvpes=", maxvpes);
168
169 int tclimit;
170 -
171 static int __init maxtcs(char *str)
172 {
173 get_option(&str, &tclimit);
174 + return 1;
175 +}
176 +__setup("maxtcs=", maxtcs);
177
178 +#ifdef CONFIG_IFX_VPE_EXT
179 +int stlb;
180 +static int __init istlbshared(char *str)
181 +{
182 + get_option(&str, &stlb);
183 return 1;
184 }
185 +__setup("vpe_tlb_shared=", istlbshared);
186
187 -__setup("maxtcs=", maxtcs);
188 +int vpe0_wired;
189 +static int __init vpe0wired(char *str)
190 +{
191 + get_option(&str, &vpe0_wired);
192 + return 1;
193 +}
194 +__setup("vpe0_wired_tlb_entries=", vpe0wired);
195 +
196 +int vpe1_wired;
197 +static int __init vpe1wired(char *str)
198 +{
199 + get_option(&str, &vpe1_wired);
200 + return 1;
201 +}
202 +__setup("vpe1_wired_tlb_entries=", vpe1wired);
203 +
204 +#ifdef CONFIG_MIPS_MT_SMTC
205 +extern int nostlb;
206 +#endif
207 +void configure_tlb(void)
208 +{
209 + int vpeflags, tcflags, tlbsiz;
210 + unsigned int config1val;
211 + vpeflags = dvpe();
212 + tcflags = dmt();
213 + write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
214 + write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
215 + mips_ihb();
216 + //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
217 + if (stlb) {
218 + if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
219 + emt(tcflags);
220 + evpe(vpeflags);
221 + return;
222 + }
223 +
224 + write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
225 + write_c0_wired(vpe0_wired + vpe1_wired);
226 + if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
227 + config1val = read_vpe_c0_config1();
228 + tlbsiz = (((config1val >> 25) & 0x3f) + 1);
229 + if (tlbsiz > 64)
230 + tlbsiz = 64;
231 + cpu_data[0].tlbsize = tlbsiz;
232 + current_cpu_data.tlbsize = tlbsiz;
233 + }
234 +
235 + }
236 + else {
237 + write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
238 + write_c0_wired(vpe0_wired);
239 + }
240 +
241 + ehb();
242 + write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
243 + ehb();
244 + local_flush_tlb_all();
245 +
246 + printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
247 +#ifdef CONFIG_MIPS_MT_SMTC
248 + nostlb = !stlb;
249 +#endif
250 + emt(tcflags);
251 + evpe(vpeflags);
252 +}
253 +#endif
254
255 /*
256 * Dump new MIPS MT state for the core. Does not leave TCs halted.
257 @@ -78,18 +148,18 @@ void mips_mt_regdump(unsigned long mvpct
258 if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
259 printk(" VPE %d\n", i);
260 printk(" VPEControl : %08lx\n",
261 - read_vpe_c0_vpecontrol());
262 + read_vpe_c0_vpecontrol());
263 printk(" VPEConf0 : %08lx\n",
264 - read_vpe_c0_vpeconf0());
265 + read_vpe_c0_vpeconf0());
266 printk(" VPE%d.Status : %08lx\n",
267 - i, read_vpe_c0_status());
268 + i, read_vpe_c0_status());
269 printk(" VPE%d.EPC : %08lx %pS\n",
270 - i, read_vpe_c0_epc(),
271 - (void *) read_vpe_c0_epc());
272 + i, read_vpe_c0_epc(),
273 + (void *) read_vpe_c0_epc());
274 printk(" VPE%d.Cause : %08lx\n",
275 - i, read_vpe_c0_cause());
276 + i, read_vpe_c0_cause());
277 printk(" VPE%d.Config7 : %08lx\n",
278 - i, read_vpe_c0_config7());
279 + i, read_vpe_c0_config7());
280 break; /* Next VPE */
281 }
282 }
283 @@ -287,6 +357,9 @@ void mips_mt_set_cpuoptions(void)
284 printk("Mapped %ld ITC cells starting at 0x%08x\n",
285 ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
286 }
287 +#ifdef CONFIG_IFX_VPE_EXT
288 + configure_tlb();
289 +#endif
290 }
291
292 /*
293 --- a/arch/mips/kernel/proc.c
294 +++ b/arch/mips/kernel/proc.c
295 @@ -7,6 +7,7 @@
296 #include <linux/kernel.h>
297 #include <linux/sched.h>
298 #include <linux/seq_file.h>
299 +#include <linux/proc_fs.h>
300 #include <asm/bootinfo.h>
301 #include <asm/cpu.h>
302 #include <asm/cpu-features.h>
303 @@ -108,3 +109,19 @@ const struct seq_operations cpuinfo_op =
304 .stop = c_stop,
305 .show = show_cpuinfo,
306 };
307 +
308 +/*
309 + * Support for MIPS/local /proc hooks in /proc/mips/
310 + */
311 +
312 +static struct proc_dir_entry *mips_proc = NULL;
313 +
314 +struct proc_dir_entry *get_mips_proc_dir(void)
315 +{
316 + /*
317 + * This ought not to be preemptable.
318 + */
319 + if(mips_proc == NULL)
320 + mips_proc = proc_mkdir("mips", NULL);
321 + return(mips_proc);
322 +}
323 --- a/arch/mips/kernel/smtc.c
324 +++ b/arch/mips/kernel/smtc.c
325 @@ -1335,6 +1335,13 @@ void smtc_get_new_mmu_context(struct mm_
326 asid = asid_cache(cpu);
327
328 do {
329 +#ifdef CONFIG_IFX_VPE_EXT
330 + /* If TLB is shared between AP and RP (AP is running SMTC),
331 + leave out max ASID i.e., ASID_MASK for RP
332 + */
333 + if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
334 + asid++;
335 +#endif
336 if (!((asid += ASID_INC) & ASID_MASK) ) {
337 if (cpu_has_vtag_icache)
338 flush_icache_all();
339 --- a/arch/mips/kernel/vpe.c
340 +++ b/arch/mips/kernel/vpe.c
341 @@ -76,6 +76,58 @@ static struct kspd_notifications kspd_ev
342 static int kspd_events_reqd;
343 #endif
344
345 +#ifdef CONFIG_IFX_VPE_EXT
346 +static int is_sdepgm;
347 +extern int stlb;
348 +extern int vpe0_wired;
349 +extern int vpe1_wired;
350 +unsigned int vpe1_load_addr;
351 +
352 +static int __init load_address(char *str)
353 +{
354 + get_option(&str, &vpe1_load_addr);
355 + return 1;
356 +}
357 +__setup("vpe1_load_addr=", load_address);
358 +
359 +#include <asm/mipsmtregs.h>
360 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
361 +
362 +#ifndef COMMAND_LINE_SIZE
363 +# define COMMAND_LINE_SIZE 512
364 +#endif
365 +
366 +char command_line[COMMAND_LINE_SIZE * 2];
367 +
368 +static unsigned int vpe1_mem;
369 +static int __init vpe1mem(char *str)
370 +{
371 + vpe1_mem = memparse(str, &str);
372 + return 1;
373 +}
374 +__setup("vpe1_mem=", vpe1mem);
375 +
376 +uint32_t vpe1_wdog_ctr;
377 +static int __init wdog_ctr(char *str)
378 +{
379 + get_option(&str, &vpe1_wdog_ctr);
380 + return 1;
381 +}
382 +
383 +__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
384 +EXPORT_SYMBOL(vpe1_wdog_ctr);
385 +
386 +uint32_t vpe1_wdog_timeout;
387 +static int __init wdog_timeout(char *str)
388 +{
389 + get_option(&str, &vpe1_wdog_timeout);
390 + return 1;
391 +}
392 +
393 +__setup("vpe1_wdog_timeout=", wdog_timeout);
394 +EXPORT_SYMBOL(vpe1_wdog_timeout);
395 +
396 +#endif
397 /* grab the likely amount of memory we will need. */
398 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
399 #define P_SIZE (2 * 1024 * 1024)
400 @@ -268,6 +320,13 @@ static void *alloc_progmem(unsigned long
401 void *addr;
402
403 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
404 +#ifdef CONFIG_IFX_VPE_EXT
405 + if (vpe1_load_addr) {
406 + memset((void *)vpe1_load_addr, 0, len);
407 + return (void *)vpe1_load_addr;
408 + }
409 +#endif
410 +
411 /*
412 * This means you must tell Linux to use less memory than you
413 * physically have, for example by passing a mem= boot argument.
414 @@ -746,6 +805,12 @@ static int vpe_run(struct vpe * v)
415 }
416
417 /* Write the address we want it to start running from in the TCPC register. */
418 +#if defined(CONFIG_IFX_VPE_EXT) && 0
419 + if (stlb)
420 + write_vpe_c0_wired(vpe0_wired + vpe1_wired);
421 + else
422 + write_vpe_c0_wired(vpe1_wired);
423 +#endif
424 write_tc_c0_tcrestart((unsigned long)v->__start);
425 write_tc_c0_tccontext((unsigned long)0);
426
427 @@ -759,6 +824,20 @@ static int vpe_run(struct vpe * v)
428
429 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
430
431 +#if defined(CONFIG_IFX_VPE_EXT) && 0
432 + /*
433 + * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
434 + * points to the start of the command line string and $a3 points to
435 + * the end of the string. This convention is identical to the Linux
436 + * kernel boot parameter passing mechanism. Please note that $a3 is
437 + * used to pass physical memory size or 0 in SDE tool kit. So, if you
439 + * are passing command line parameters through $a2 & $a3 SDE programs
439 + * don't work as desired.
440 + */
441 + mttgpr(6, command_line);
442 + mttgpr(7, (command_line + strlen(command_line)));
443 + if (is_sdepgm)
444 +#endif
445 /*
446 * The sde-kit passes 'memsize' to __start in $a3, so set something
447 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
448 @@ -833,6 +912,9 @@ static int find_vpe_symbols(struct vpe *
449 if ( (v->__start == 0) || (v->shared_ptr == NULL))
450 return -1;
451
452 +#ifdef CONFIG_IFX_VPE_EXT
453 + is_sdepgm = 1;
454 +#endif
455 return 0;
456 }
457
458 @@ -994,6 +1076,15 @@ static int vpe_elfload(struct vpe * v)
459 (unsigned long)v->load_addr + v->len);
460
461 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
462 +#ifdef CONFIG_IFX_VPE_EXT
463 + if (vpe1_load_addr) {
464 + /* Conversion to KSEG1 is required ??? */
465 + v->__start = KSEG1ADDR(vpe1_load_addr);
466 + is_sdepgm = 0;
467 + return 0;
468 + }
469 +#endif
470 +
471 if (v->__start == 0) {
472 printk(KERN_WARNING "VPE loader: program does not contain "
473 "a __start symbol\n");
474 @@ -1064,6 +1155,9 @@ static int vpe_open(struct inode *inode,
475 struct vpe_notifications *not;
476 struct vpe *v;
477 int ret;
478 +#ifdef CONFIG_IFX_VPE_EXT
479 + int progsize;
480 +#endif
481
482 if (minor != iminor(inode)) {
483 /* assume only 1 device at the moment. */
484 @@ -1090,6 +1184,12 @@ static int vpe_open(struct inode *inode,
485 cleanup_tc(get_tc(tclimit));
486 }
487
488 +#ifdef CONFIG_IFX_VPE_EXT
489 + progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE;
490 + //printk("progsize = %x\n", progsize);
491 + v->pbuffer = vmalloc(progsize);
492 + v->plen = progsize;
493 +#else
494 /* this of-course trashes what was there before... */
495 v->pbuffer = vmalloc(P_SIZE);
496 if (!v->pbuffer) {
497 @@ -1097,11 +1197,13 @@ static int vpe_open(struct inode *inode,
498 return -ENOMEM;
499 }
500 v->plen = P_SIZE;
501 +#endif
502 v->load_addr = NULL;
503 v->len = 0;
504 -
505 +#if 0
506 v->uid = filp->f_cred->fsuid;
507 v->gid = filp->f_cred->fsgid;
508 +#endif
509
510 #ifdef CONFIG_MIPS_APSP_KSPD
511 /* get kspd to tell us when a syscall_exit happens */
512 @@ -1349,6 +1451,133 @@ static void kspd_sp_exit( int sp_id)
513 cleanup_tc(get_tc(sp_id));
514 }
515 #endif
516 +#ifdef CONFIG_IFX_VPE_EXT
517 +int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
518 +{
519 + enum vpe_state state;
520 + struct vpe *v = get_vpe(tclimit);
521 + struct vpe_notifications *not;
522 +
523 + if (tcmask || flags) {
524 + printk(KERN_WARNING "Currently tcmask and flags should be 0.\
525 + other values not supported\n");
526 + return -1;
527 + }
528 +
529 + state = xchg(&v->state, VPE_STATE_INUSE);
530 + if (state != VPE_STATE_UNUSED) {
531 + vpe_stop(v);
532 +
533 + list_for_each_entry(not, &v->notify, list) {
534 + not->stop(tclimit);
535 + }
536 + }
537 +
538 + v->__start = (unsigned long)sw_start_addr;
539 + is_sdepgm = 0;
540 +
541 + if (!vpe_run(v)) {
542 + printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
543 + return 0;
544 + }
545 + return -1;
546 +}
547 +
548 +EXPORT_SYMBOL(vpe1_sw_start);
549 +
550 +int32_t vpe1_sw_stop(uint32_t flags)
551 +{
552 + struct vpe *v = get_vpe(tclimit);
553 +
554 + if (!vpe_free(v)) {
555 + printk(KERN_DEBUG "RP Stopped\n");
556 + return 0;
557 + }
558 + else
559 + return -1;
560 +}
561 +
562 +EXPORT_SYMBOL(vpe1_sw_stop);
563 +
564 +uint32_t vpe1_get_load_addr (uint32_t flags)
565 +{
566 + return vpe1_load_addr;
567 +}
568 +
569 +EXPORT_SYMBOL(vpe1_get_load_addr);
570 +
571 +uint32_t vpe1_get_max_mem (uint32_t flags)
572 +{
573 + if (!vpe1_mem)
574 + return P_SIZE;
575 + else
576 + return vpe1_mem;
577 +}
578 +
579 +EXPORT_SYMBOL(vpe1_get_max_mem);
580 +
581 +void* vpe1_get_cmdline_argument(void)
582 +{
583 + return saved_command_line;
584 +}
585 +
586 +EXPORT_SYMBOL(vpe1_get_cmdline_argument);
587 +
588 +int32_t vpe1_set_boot_param(char *field, char *value, char flags)
589 +{
590 + char *ptr, string[64];
591 + int start_off, end_off;
592 + if (!field)
593 + return -1;
594 + strcpy(string, field);
595 + if (value) {
596 + strcat(string, "=");
597 + strcat(string, value);
598 + strcat(command_line, " ");
599 + strcat(command_line, string);
600 + }
601 + else {
602 + ptr = strstr(command_line, string);
603 + if (ptr) {
604 + start_off = ptr - command_line;
605 + ptr += strlen(string);
606 + while ((*ptr != ' ') && (*ptr != '\0'))
607 + ptr++;
608 + end_off = ptr - command_line;
609 + command_line[start_off] = '\0';
610 + strcat (command_line, command_line+end_off);
611 + }
612 + }
613 + return 0;
614 +}
615 +
616 +EXPORT_SYMBOL(vpe1_set_boot_param);
617 +
618 +int32_t vpe1_get_boot_param(char *field, char **value, char flags)
619 +{
620 + char *ptr, string[64];
621 + int i = 0;
622 + if (!field)
623 + return -1;
624 + if ((ptr = strstr(command_line, field))) {
625 + ptr += strlen(field) + 1; /* including = */
626 + while ((*ptr != ' ') && (*ptr != '\0'))
627 + string[i++] = *ptr++;
628 + string[i] = '\0';
629 + *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
630 + if (*value != NULL)
631 + strcpy(*value, string);
632 + }
633 + else
634 + *value = NULL;
635 +
636 + return 0;
637 +}
638 +
639 +EXPORT_SYMBOL(vpe1_get_boot_param);
640 +
641 +extern void configure_tlb(void);
642 +#endif
643
644 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
645 const char *buf, size_t len)
646 @@ -1430,6 +1659,18 @@ static int __init vpe_module_init(void)
647 printk("VPE loader: not a MIPS MT capable processor\n");
648 return -ENODEV;
649 }
650 +#ifdef CONFIG_IFX_VPE_EXT
651 +#ifndef CONFIG_MIPS_MT_SMTC
652 + configure_tlb();
653 +#endif
654 +#endif
655 +
656 +#ifndef CONFIG_MIPS_MT_SMTC
657 + if (!vpelimit)
658 + vpelimit = 1;
659 + if (!tclimit)
660 + tclimit = 1;
661 +#endif
662
663 if (vpelimit == 0) {
664 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
665 @@ -1474,10 +1715,12 @@ static int __init vpe_module_init(void)
666 mtflags = dmt();
667 vpflags = dvpe();
668
669 + back_to_back_c0_hazard();
670 +
671 /* Put MVPE's into 'configuration state' */
672 set_c0_mvpcontrol(MVPCONTROL_VPC);
673
674 - /* dump_mtregs(); */
675 + dump_mtregs();
676
677 val = read_c0_mvpconf0();
678 hw_tcs = (val & MVPCONF0_PTC) + 1;
679 @@ -1489,6 +1732,7 @@ static int __init vpe_module_init(void)
680 * reschedule send IPIs or similar we might hang.
681 */
682 clear_c0_mvpcontrol(MVPCONTROL_VPC);
683 + back_to_back_c0_hazard();
684 evpe(vpflags);
685 emt(mtflags);
686 local_irq_restore(flags);
687 @@ -1514,6 +1758,7 @@ static int __init vpe_module_init(void)
688 }
689
690 v->ntcs = hw_tcs - tclimit;
691 + write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
692
693 /* add the tc to the list of this vpe's tc's. */
694 list_add(&t->tc, &v->tc);
695 @@ -1582,6 +1827,7 @@ static int __init vpe_module_init(void)
696 out_reenable:
697 /* release config state */
698 clear_c0_mvpcontrol(MVPCONTROL_VPC);
699 + back_to_back_c0_hazard();
700
701 evpe(vpflags);
702 emt(mtflags);
703 --- /dev/null
704 +++ b/arch/mips/kernel/mtsched_proc.c
705 @@ -0,0 +1,279 @@
706 +/*
707 + * /proc hooks for MIPS MT scheduling policy management for 34K cores
708 + *
709 + * This program is free software; you can distribute it and/or modify it
710 + * under the terms of the GNU General Public License (Version 2) as
711 + * published by the Free Software Foundation.
712 + *
713 + * This program is distributed in the hope it will be useful, but WITHOUT
714 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
715 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
716 + * for more details.
717 + *
718 + * You should have received a copy of the GNU General Public License along
719 + * with this program; if not, write to the Free Software Foundation, Inc.,
720 + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
721 + *
722 + * Copyright (C) 2006 Mips Technologies, Inc
723 + */
724 +
725 +#include <linux/kernel.h>
726 +
727 +#include <asm/cpu.h>
728 +#include <asm/processor.h>
729 +#include <asm/system.h>
730 +#include <asm/mipsregs.h>
731 +#include <asm/mipsmtregs.h>
732 +#include <asm/uaccess.h>
733 +#include <linux/proc_fs.h>
734 +
735 +static struct proc_dir_entry *mtsched_proc;
736 +
737 +#ifndef CONFIG_MIPS_MT_SMTC
738 +#define NTCS 2
739 +#else
740 +#define NTCS NR_CPUS
741 +#endif
742 +#define NVPES 2
743 +
744 +int lastvpe = 1;
745 +int lasttc = 8;
746 +
747 +static int proc_read_mtsched(char *page, char **start, off_t off,
748 + int count, int *eof, void *data)
749 +{
750 + int totalen = 0;
751 + int len;
752 +
753 + int i;
754 + int vpe;
755 + int mytc;
756 + unsigned long flags;
757 + unsigned int mtflags;
758 + unsigned int haltstate;
759 + unsigned int vpes_checked[NVPES];
760 + unsigned int vpeschedule[NVPES];
761 + unsigned int vpeschefback[NVPES];
762 + unsigned int tcschedule[NTCS];
763 + unsigned int tcschefback[NTCS];
764 +
765 + /* Dump the state of the MIPS MT scheduling policy manager */
766 + /* Initialize control state */
767 + for(i = 0; i < NVPES; i++) {
768 + vpes_checked[i] = 0;
769 + vpeschedule[i] = 0;
770 + vpeschefback[i] = 0;
771 + }
772 + for(i = 0; i < NTCS; i++) {
773 + tcschedule[i] = 0;
774 + tcschefback[i] = 0;
775 + }
776 +
777 + /* Disable interrupts and multithreaded issue */
778 + local_irq_save(flags);
779 + mtflags = dvpe();
780 +
781 + /* Then go through the TCs, halt 'em, and extract the values */
782 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
783 + for(i = 0; i < NTCS; i++) {
784 + if(i == mytc) {
785 + /* No need to halt ourselves! */
786 + tcschedule[i] = read_c0_tcschedule();
787 + tcschefback[i] = read_c0_tcschefback();
788 + /* If VPE bound to TC hasn't been checked, do it */
789 + vpe = read_c0_tcbind() & TCBIND_CURVPE;
790 + if(!vpes_checked[vpe]) {
791 + vpeschedule[vpe] = read_c0_vpeschedule();
792 + vpeschefback[vpe] = read_c0_vpeschefback();
793 + vpes_checked[vpe] = 1;
794 + }
795 + } else {
796 + settc(i);
797 + haltstate = read_tc_c0_tchalt();
798 + write_tc_c0_tchalt(TCHALT_H);
799 + mips_ihb();
800 + tcschedule[i] = read_tc_c0_tcschedule();
801 + tcschefback[i] = read_tc_c0_tcschefback();
802 + /* If VPE bound to TC hasn't been checked, do it */
803 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
804 + if(!vpes_checked[vpe]) {
805 + vpeschedule[vpe] = read_vpe_c0_vpeschedule();
806 + vpeschefback[vpe] = read_vpe_c0_vpeschefback();
807 + vpes_checked[vpe] = 1;
808 + }
809 + if(!haltstate) write_tc_c0_tchalt(0);
810 + }
811 + }
812 + /* Re-enable MT and interrupts */
813 + evpe(mtflags);
814 + local_irq_restore(flags);
815 +
816 + for(vpe=0; vpe < NVPES; vpe++) {
817 + len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n",
818 + vpe, vpeschedule[vpe]);
819 + totalen += len;
820 + page += len;
821 + len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
822 + vpe, vpeschefback[vpe]);
823 + totalen += len;
824 + page += len;
825 + }
826 + for(i=0; i < NTCS; i++) {
827 + len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n",
828 + i, tcschedule[i]);
829 + totalen += len;
830 + page += len;
831 + len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n",
832 + i, tcschefback[i]);
833 + totalen += len;
834 + page += len;
835 + }
836 + return totalen;
837 +}
838 +
839 +/*
840 + * Write to MT scheduling (TCSchedule/VPESchedule) registers based on text input
841 + */
842 +
843 +#define TXTBUFSZ 100
844 +
845 +static int proc_write_mtsched(struct file *file, const char *buffer,
846 + unsigned long count, void *data)
847 +{
848 + int len = 0;
849 + char mybuf[TXTBUFSZ];
850 + /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
851 + char entity[1]; //, entity1[1];
852 + int number[1];
853 + unsigned long value[1];
854 + int nparsed = 0 , index = 0;
855 + unsigned long flags;
856 + unsigned int mtflags;
857 + unsigned int haltstate;
858 + unsigned int tcbindval;
859 +
860 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
861 + else len = count;
862 + memset(mybuf,0,TXTBUFSZ);
863 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
864 +
865 + nparsed = sscanf(mybuf, "%c%d %lx",
866 + &entity[0] ,&number[0], &value[0]);
867 +
868 + /*
869 + * Having acquired the inputs, which might have
870 + * generated exceptions and preemptions,
871 + * program the registers.
872 + */
873 + /* Disable interrupts and multithreaded issue */
874 + local_irq_save(flags);
875 + mtflags = dvpe();
876 +
877 + if(entity[index] == 't' ) {
878 + /* Set TCSchedule or TCScheFBack of specified TC */
879 + if(number[index] > NTCS) goto skip;
880 + /* If it's our own TC, do it direct */
881 + if(number[index] ==
882 + ((read_c0_tcbind() & TCBIND_CURTC)
883 + >> TCBIND_CURTC_SHIFT)) {
884 + if(entity[index] == 't')
885 + write_c0_tcschedule(value[index]);
886 + else
887 + write_c0_tcschefback(value[index]);
888 + } else {
889 + /* Otherwise, we do it via MTTR */
890 + settc(number[index]);
891 + haltstate = read_tc_c0_tchalt();
892 + write_tc_c0_tchalt(TCHALT_H);
893 + mips_ihb();
894 + if(entity[index] == 't')
895 + write_tc_c0_tcschedule(value[index]);
896 + else
897 + write_tc_c0_tcschefback(value[index]);
898 + mips_ihb();
899 + if(!haltstate) write_tc_c0_tchalt(0);
900 + }
901 + } else if(entity[index] == 'v') {
902 + /* Set VPESchedule of specified VPE */
903 + if(number[index] > NVPES) goto skip;
904 + tcbindval = read_c0_tcbind();
905 + /* Are we doing this to our current VPE? */
906 + if((tcbindval & TCBIND_CURVPE) == number[index]) {
907 + /* Then life is simple */
908 + write_c0_vpeschedule(value[index]);
909 + } else {
910 + /*
911 + * Bind ourselves to the other VPE long enough
912 + * to program the bind value.
913 + */
914 + write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
915 + | number[index]);
916 + mips_ihb();
917 + write_c0_vpeschedule(value[index]);
918 + mips_ihb();
919 + /* Restore previous binding */
920 + write_c0_tcbind(tcbindval);
921 + mips_ihb();
922 + }
923 + }
924 +
925 + else if(entity[index] == 'r') {
926 + unsigned int vpes_checked[2], vpe ,i , mytc;
927 + vpes_checked[0] = vpes_checked[1] = 0;
928 +
929 + /* Then go through the TCs, halt 'em, and extract the values */
930 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
931 +
932 + for(i = 0; i < NTCS; i++) {
933 + if(i == mytc) {
934 + /* No need to halt ourselves! */
935 + write_c0_vpeschefback(0);
936 + write_c0_tcschefback(0);
937 + } else {
938 + settc(i);
939 + haltstate = read_tc_c0_tchalt();
940 + write_tc_c0_tchalt(TCHALT_H);
941 + mips_ihb();
942 + write_tc_c0_tcschefback(0);
943 + /* If VPE bound to TC hasn't been checked, do it */
944 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
945 + if(!vpes_checked[vpe]) {
946 + write_vpe_c0_vpeschefback(0);
947 + vpes_checked[vpe] = 1;
948 + }
949 + if(!haltstate) write_tc_c0_tchalt(0);
950 + }
951 + }
952 + }
953 + else {
954 + printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
955 + }
956 +
957 +skip:
958 + /* Re-enable MT and interrupts */
959 + evpe(mtflags);
960 + local_irq_restore(flags);
961 + return (len);
962 +}
963 +
964 +static int __init init_mtsched_proc(void)
965 +{
966 + extern struct proc_dir_entry *get_mips_proc_dir(void);
967 + struct proc_dir_entry *mips_proc_dir;
968 +
969 + if (!cpu_has_mipsmt) {
970 + printk("mtsched: not a MIPS MT capable processor\n");
971 + return -ENODEV;
972 + }
973 +
974 + mips_proc_dir = get_mips_proc_dir();
975 +
976 + mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
977 + mtsched_proc->read_proc = proc_read_mtsched;
978 + mtsched_proc->write_proc = proc_write_mtsched;
979 +
980 + return 0;
981 +}
982 +
983 +/* Automagically create the entry */
984 +module_init(init_mtsched_proc);
985 --- /dev/null
986 +++ b/arch/mips/kernel/perf_proc.c
987 @@ -0,0 +1,191 @@
988 +/*
989 + * /proc hooks for CPU performance counter support for SMTC kernel
990 + * (and ultimately others)
991 + * Copyright (C) 2006 Mips Technologies, Inc
992 + */
993 +
994 +#include <linux/kernel.h>
995 +
996 +#include <asm/cpu.h>
997 +#include <asm/processor.h>
998 +#include <asm/system.h>
999 +#include <asm/mipsregs.h>
1000 +#include <asm/uaccess.h>
1001 +#include <linux/proc_fs.h>
1002 +
1003 +/*
1004 + * /proc diagnostic and statistics hooks
1005 + */
1006 +
1007 +
1008 +/* Internal software-extended event counters */
1009 +
1010 +static unsigned long long extencount[4] = {0,0,0,0};
1011 +
1012 +static struct proc_dir_entry *perf_proc;
1013 +
1014 +static int proc_read_perf(char *page, char **start, off_t off,
1015 + int count, int *eof, void *data)
1016 +{
1017 + int totalen = 0;
1018 + int len;
1019 +
1020 + len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
1021 + totalen += len;
1022 + page += len;
1023 + len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
1024 + extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
1025 + totalen += len;
1026 + page += len;
1027 + len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
1028 + totalen += len;
1029 + page += len;
1030 + len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
1031 + extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
1032 + totalen += len;
1033 + page += len;
1034 + len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
1035 + totalen += len;
1036 + page += len;
1037 + len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
1038 + extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
1039 + totalen += len;
1040 + page += len;
1041 + len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
1042 + totalen += len;
1043 + page += len;
1044 + len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
1045 + extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
1046 + totalen += len;
1047 + page += len;
1048 +
1049 + return totalen;
1050 +}
1051 +
1052 +/*
1053 + * Write to perf counter registers based on text input
1054 + */
1055 +
1056 +#define TXTBUFSZ 100
1057 +
1058 +static int proc_write_perf(struct file *file, const char *buffer,
1059 + unsigned long count, void *data)
1060 +{
1061 + int len;
1062 + int nparsed;
1063 + int index;
1064 + char mybuf[TXTBUFSZ];
1065 +
1066 + int which[4];
1067 + unsigned long control[4];
1068 + long long ctrdata[4];
1069 +
1070 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
1071 + else len = count;
1072 + memset(mybuf,0,TXTBUFSZ);
1073 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
1074 +
1075 + nparsed = sscanf(mybuf,
1076 + "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
1077 + &which[0], &control[0], &ctrdata[0],
1078 + &which[1], &control[1], &ctrdata[1],
1079 + &which[2], &control[2], &ctrdata[2],
1080 + &which[3], &control[3], &ctrdata[3]);
1081 +
1082 + for(index = 0; nparsed >= 3; index++) {
1083 + switch (which[index]) {
1084 + case 0:
1085 + write_c0_perfctrl0(control[index]);
1086 + if(ctrdata[index] != -1) {
1087 + extencount[0] = (unsigned long long)ctrdata[index];
1088 + write_c0_perfcntr0((unsigned long)0);
1089 + }
1090 + break;
1091 + case 1:
1092 + write_c0_perfctrl1(control[index]);
1093 + if(ctrdata[index] != -1) {
1094 + extencount[1] = (unsigned long long)ctrdata[index];
1095 + write_c0_perfcntr1((unsigned long)0);
1096 + }
1097 + break;
1098 + case 2:
1099 + write_c0_perfctrl2(control[index]);
1100 + if(ctrdata[index] != -1) {
1101 + extencount[2] = (unsigned long long)ctrdata[index];
1102 + write_c0_perfcntr2((unsigned long)0);
1103 + }
1104 + break;
1105 + case 3:
1106 + write_c0_perfctrl3(control[index]);
1107 + if(ctrdata[index] != -1) {
1108 + extencount[3] = (unsigned long long)ctrdata[index];
1109 + write_c0_perfcntr3((unsigned long)0);
1110 + }
1111 + break;
1112 + }
1113 + nparsed -= 3;
1114 + }
1115 + return (len);
1116 +}
1117 +
1118 +extern int (*perf_irq)(void);
1119 +
1120 +/*
1121 + * Invoked when timer interrupt vector picks up a perf counter overflow
1122 + */
1123 +
1124 +static int perf_proc_irq(void)
1125 +{
1126 + unsigned long snapshot;
1127 +
1128 + /*
1129 + * It would be nice to do this as a loop, but we don't have
1130 + * indirect access to CP0 registers.
1131 + */
1132 + snapshot = read_c0_perfcntr0();
1133 + if ((long)snapshot < 0) {
1134 + extencount[0] +=
1135 + (unsigned long long)((unsigned)read_c0_perfcntr0());
1136 + write_c0_perfcntr0(0);
1137 + }
1138 + snapshot = read_c0_perfcntr1();
1139 + if ((long)snapshot < 0) {
1140 + extencount[1] +=
1141 + (unsigned long long)((unsigned)read_c0_perfcntr1());
1142 + write_c0_perfcntr1(0);
1143 + }
1144 + snapshot = read_c0_perfcntr2();
1145 + if ((long)snapshot < 0) {
1146 + extencount[2] +=
1147 + (unsigned long long)((unsigned)read_c0_perfcntr2());
1148 + write_c0_perfcntr2(0);
1149 + }
1150 + snapshot = read_c0_perfcntr3();
1151 + if ((long)snapshot < 0) {
1152 + extencount[3] +=
1153 + (unsigned long long)((unsigned)read_c0_perfcntr3());
1154 + write_c0_perfcntr3(0);
1155 + }
1156 + return 0;
1157 +}
1158 +
1159 +static int __init init_perf_proc(void)
1160 +{
1161 + extern struct proc_dir_entry *get_mips_proc_dir(void);
1162 +
1163 + struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
1164 +
1165 + write_c0_perfcntr0(0);
1166 + write_c0_perfcntr1(0);
1167 + write_c0_perfcntr2(0);
1168 + write_c0_perfcntr3(0);
1169 + perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
1170 + perf_proc->read_proc = proc_read_perf;
1171 + perf_proc->write_proc = proc_write_perf;
1172 + perf_irq = perf_proc_irq;
1173 +
1174 + return 0;
1175 +}
1176 +
1177 +/* Automagically create the entry */
1178 +module_init(init_perf_proc);