BB: lantiq: falcon/vr9 - add support for vpe1
[openwrt/svn-archive/archive.git] target/linux/lantiq/patches-3.10/0152-VPE-extensions.patch
1 diff -u -N -r linux-3.10.49.orig/arch/mips/include/asm/mipsmtregs.h linux-3.10.49/arch/mips/include/asm/mipsmtregs.h
2 --- linux-3.10.49.orig/arch/mips/include/asm/mipsmtregs.h 2014-10-15 21:41:48.000000000 +0200
3 +++ linux-3.10.49/arch/mips/include/asm/mipsmtregs.h 2014-10-15 21:44:25.000000000 +0200
4 @@ -31,14 +31,34 @@
5 #define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
6 #define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
7
8 +#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
9 +#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
10 +
11 +#define read_c0_vpeschedule() __read_32bit_c0_register($1, 5)
12 +#define write_c0_vpeschedule(val) __write_32bit_c0_register($1, 5, val)
13 +
14 +#define read_c0_vpeschefback() __read_32bit_c0_register($1, 6)
15 +#define write_c0_vpeschefback(val) __write_32bit_c0_register($1, 6, val)
16 +
17 +#define read_c0_vpeopt() __read_32bit_c0_register($1, 7)
18 +#define write_c0_vpeopt(val) __write_32bit_c0_register($1, 7, val)
19 +
20 #define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
21 #define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
22
23 #define read_c0_tcbind() __read_32bit_c0_register($2, 2)
24 +#define write_c0_tcbind(val) __write_32bit_c0_register($2, 2, val)
25
26 #define read_c0_tccontext() __read_32bit_c0_register($2, 5)
27 #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
28
29 +#define read_c0_tcschedule() __read_32bit_c0_register($2, 6)
30 +#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val)
31 +
32 +#define read_c0_tcschefback() __read_32bit_c0_register($2, 7)
33 +#define write_c0_tcschefback(val) __write_32bit_c0_register($2, 7, val)
34 +
35 +
36 #else /* Assembly */
37 /*
38 * Macros for use in assembly language code
39 @@ -77,6 +97,8 @@
40 #define MVPCONTROL_STLB_SHIFT 2
41 #define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
42
43 +#define MVPCONTROL_CPA_SHIFT 3
44 +#define MVPCONTROL_CPA (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
45
46 /* MVPConf0 fields */
47 #define MVPCONF0_PTC_SHIFT 0
48 @@ -87,6 +109,8 @@
49 #define MVPCONF0_TCA ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
50 #define MVPCONF0_PTLBE_SHIFT 16
51 #define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
52 +#define MVPCONF0_PCP_SHIFT 27
53 +#define MVPCONF0_PCP (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
54 #define MVPCONF0_TLBS_SHIFT 29
55 #define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
56 #define MVPCONF0_M_SHIFT 31
57 @@ -124,9 +148,25 @@
58 #define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
59 #define VPECONF0_MVP_SHIFT 1
60 #define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
61 +#define VPECONF0_ICS_SHIFT 16
62 +#define VPECONF0_ICS (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
63 +#define VPECONF0_DCS_SHIFT 17
64 +#define VPECONF0_DCS (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
65 #define VPECONF0_XTC_SHIFT 21
66 #define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
67
68 +/* VPEOpt fields */
69 +#define VPEOPT_DWX_SHIFT 0
70 +#define VPEOPT_IWX_SHIFT 8
71 +#define VPEOPT_IWX0 ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
72 +#define VPEOPT_IWX1 ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
73 +#define VPEOPT_IWX2 ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
74 +#define VPEOPT_IWX3 ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
75 +#define VPEOPT_DWX0 ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
76 +#define VPEOPT_DWX1 ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
77 +#define VPEOPT_DWX2 ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
78 +#define VPEOPT_DWX3 ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
79 +
80 /* VPEConf1 fields (per VPE) */
81 #define VPECONF1_NCP1_SHIFT 0
82 #define VPECONF1_NCP1 (_ULCAST_(0xff) << VPECONF1_NCP1_SHIFT)
83 @@ -363,6 +403,14 @@
84 #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
85 #define read_vpe_c0_vpeconf1() mftc0(1, 3)
86 #define write_vpe_c0_vpeconf1(val) mttc0(1, 3, val)
87 +#define read_vpe_c0_vpeschedule() mftc0(1, 5)
88 +#define write_vpe_c0_vpeschedule(val) mttc0(1, 5, val)
89 +#define read_vpe_c0_vpeschefback() mftc0(1, 6)
90 +#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
91 +#define read_vpe_c0_vpeopt() mftc0(1, 7)
92 +#define write_vpe_c0_vpeopt(val) mttc0(1, 7, val)
93 +#define read_vpe_c0_wired() mftc0(6, 0)
94 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
95 #define read_vpe_c0_count() mftc0(9, 0)
96 #define write_vpe_c0_count(val) mttc0(9, 0, val)
97 #define read_vpe_c0_status() mftc0(12, 0)
98 @@ -394,6 +442,12 @@
99 #define write_tc_c0_tchalt(val) mttc0(2, 4, val)
100 #define read_tc_c0_tccontext() mftc0(2, 5)
101 #define write_tc_c0_tccontext(val) mttc0(2, 5, val)
102 +#define read_tc_c0_tcschedule() mftc0(2, 6)
103 +#define write_tc_c0_tcschedule(val) mttc0(2, 6, val)
104 +#define read_tc_c0_tcschefback() mftc0(2, 7)
105 +#define write_tc_c0_tcschefback(val) mttc0(2, 7, val)
106 +#define read_tc_c0_entryhi() mftc0(10, 0)
107 +#define write_tc_c0_entryhi(val) mttc0(10, 0, val)
108
109 /* GPR */
110 #define read_tc_gpr_sp() mftgpr(29)
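
The hunk above is purely mechanical: it adds direct read_c0_*/write_c0_* accessors for the 34K scheduling registers (VPESchedule, VPEScheFBack, VPEOpt, TCSchedule, TCScheFBack) and the matching mftc0/mttc0 cross-VPE/TC variants. Below is a minimal sketch of how such accessors are typically driven, mirroring the settc()/TCHALT sequence that mtsched_proc.c uses later in this patch; the function name is made up and a MIPS MT kernel context with <asm/mipsmtregs.h> is assumed.

#include <linux/kernel.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>

/* Illustrative only: dump the scheduling state of one TC and of the VPE
 * it is bound to, using the accessors added above. */
static void dump_tc_sched(int tc)
{
        unsigned int vpflags = dvpe();          /* stop multi-VPE issue */
        unsigned int haltst;

        settc(tc);                              /* target TC for mftc0/mttc0 */
        haltst = read_tc_c0_tchalt();
        write_tc_c0_tchalt(TCHALT_H);           /* park it while we look */
        mips_ihb();

        pr_info("TC%d TCSchedule=%08lx VPESchedule=%08lx VPEOpt=%08lx\n",
                tc,
                (unsigned long)read_tc_c0_tcschedule(),
                (unsigned long)read_vpe_c0_vpeschedule(),
                (unsigned long)read_vpe_c0_vpeopt());

        if (!haltst)
                write_tc_c0_tchalt(0);          /* un-halt only if it was running */
        evpe(vpflags);
}
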
111 diff -u -N -r linux-3.10.49.orig/arch/mips/Kconfig linux-3.10.49/arch/mips/Kconfig
112 --- linux-3.10.49.orig/arch/mips/Kconfig 2014-10-15 21:41:48.000000000 +0200
113 +++ linux-3.10.49/arch/mips/Kconfig 2014-10-15 21:44:25.000000000 +0200
114 @@ -1992,6 +1992,28 @@
115 Includes a loader for loading an elf relocatable object
116 onto another VPE and running it.
117
118 +config IFX_VPE_EXT
119 + bool "IFX APRP Extensions"
120 + depends on MIPS_VPE_LOADER
121 + default y
122 + help
123 +	  IFX (Lantiq) extensions to the APRP VPE loader
124 +
125 +config PERFCTRS
126 + bool "34K Performance counters"
127 + depends on MIPS_MT && PROC_FS
128 + default n
129 + help
130 +	  Expose the 34K performance counters through /proc/mips/perf
131 +
132 +config MTSCHED
133 + bool "Support mtsched priority configuration for TCs"
134 + depends on MIPS_MT && PROC_FS
135 + default y
136 + help
137 + Support for mtsched priority configuration for TCs through
138 + /proc/mips/mtsched
139 +
140 config MIPS_MT_SMTC_IM_BACKSTOP
141 bool "Use per-TC register bits as backstop for inhibited IM bits"
142 depends on MIPS_MT_SMTC
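
The three new symbols hang off the existing MT options. A .config fragment that enables all of them might look like the following sketch; the option names come from the hunk above, and MIPS_VPE_LOADER/PROC_FS are their stated dependencies (MIPS_VPE_LOADER in turn selects MIPS_MT on this kernel):

CONFIG_MIPS_VPE_LOADER=y
CONFIG_PROC_FS=y
CONFIG_IFX_VPE_EXT=y
CONFIG_MTSCHED=y
CONFIG_PERFCTRS=y
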
143 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/Makefile linux-3.10.49/arch/mips/kernel/Makefile
144 --- linux-3.10.49.orig/arch/mips/kernel/Makefile 2014-10-15 21:41:48.000000000 +0200
145 +++ linux-3.10.49/arch/mips/kernel/Makefile 2014-10-15 21:44:25.000000000 +0200
146 @@ -74,7 +74,8 @@
147
148 obj-$(CONFIG_KGDB) += kgdb.o
149 obj-$(CONFIG_PROC_FS) += proc.o
150 -
151 +obj-$(CONFIG_MTSCHED) += mtsched_proc.o
152 +obj-$(CONFIG_PERFCTRS) += perf_proc.o
153 obj-$(CONFIG_64BIT) += cpu-bugs64.o
154
155 obj-$(CONFIG_I8253) += i8253.o
156 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/mips-mt.c linux-3.10.49/arch/mips/kernel/mips-mt.c
157 --- linux-3.10.49.orig/arch/mips/kernel/mips-mt.c 2014-10-15 21:41:48.000000000 +0200
158 +++ linux-3.10.49/arch/mips/kernel/mips-mt.c 2014-10-15 21:44:25.000000000 +0200
159 @@ -20,26 +20,96 @@
160 #include <asm/cacheflush.h>
161
162 int vpelimit;
163 -
164 static int __init maxvpes(char *str)
165 {
166 get_option(&str, &vpelimit);
167 -
168 return 1;
169 }
170 -
171 __setup("maxvpes=", maxvpes);
172
173 int tclimit;
174 -
175 static int __init maxtcs(char *str)
176 {
177 get_option(&str, &tclimit);
178 + return 1;
179 +}
180 +__setup("maxtcs=", maxtcs);
181
182 +#ifdef CONFIG_IFX_VPE_EXT
183 +int stlb;
184 +static int __init istlbshared(char *str)
185 +{
186 + get_option(&str, &stlb);
187 return 1;
188 }
189 +__setup("vpe_tlb_shared=", istlbshared);
190
191 -__setup("maxtcs=", maxtcs);
192 +int vpe0_wired;
193 +static int __init vpe0wired(char *str)
194 +{
195 + get_option(&str, &vpe0_wired);
196 + return 1;
197 +}
198 +__setup("vpe0_wired_tlb_entries=", vpe0wired);
199 +
200 +int vpe1_wired;
201 +static int __init vpe1wired(char *str)
202 +{
203 + get_option(&str, &vpe1_wired);
204 + return 1;
205 +}
206 +__setup("vpe1_wired_tlb_entries=", vpe1wired);
207 +
208 +#ifdef CONFIG_MIPS_MT_SMTC
209 +extern int nostlb;
210 +#endif
211 +void configure_tlb(void)
212 +{
213 + int vpeflags, tcflags, tlbsiz;
214 + unsigned int config1val;
215 + vpeflags = dvpe();
216 + tcflags = dmt();
217 + write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
218 + write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
219 + mips_ihb();
220 + //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
221 + if (stlb) {
222 + if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
223 + emt(tcflags);
224 + evpe(vpeflags);
225 + return;
226 + }
227 +
228 + write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
229 + write_c0_wired(vpe0_wired + vpe1_wired);
230 + if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
231 + config1val = read_vpe_c0_config1();
232 + tlbsiz = (((config1val >> 25) & 0x3f) + 1);
233 + if (tlbsiz > 64)
234 + tlbsiz = 64;
235 + cpu_data[0].tlbsize = tlbsiz;
236 + current_cpu_data.tlbsize = tlbsiz;
237 + }
238 +
239 + }
240 + else {
241 + write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
242 + write_c0_wired(vpe0_wired);
243 + }
244 +
245 + ehb();
246 + write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
247 + ehb();
248 + local_flush_tlb_all();
249 +
250 + printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
251 +#ifdef CONFIG_MIPS_MT_SMTC
252 + nostlb = !stlb;
253 +#endif
254 + emt(tcflags);
255 + evpe(vpeflags);
256 +}
257 +#endif
258
259 /*
260 * Dump new MIPS MT state for the core. Does not leave TCs halted.
261 @@ -77,18 +147,18 @@
262 if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
263 printk(" VPE %d\n", i);
264 printk(" VPEControl : %08lx\n",
265 - read_vpe_c0_vpecontrol());
266 + read_vpe_c0_vpecontrol());
267 printk(" VPEConf0 : %08lx\n",
268 - read_vpe_c0_vpeconf0());
269 + read_vpe_c0_vpeconf0());
270 printk(" VPE%d.Status : %08lx\n",
271 - i, read_vpe_c0_status());
272 + i, read_vpe_c0_status());
273 printk(" VPE%d.EPC : %08lx %pS\n",
274 - i, read_vpe_c0_epc(),
275 - (void *) read_vpe_c0_epc());
276 + i, read_vpe_c0_epc(),
277 + (void *) read_vpe_c0_epc());
278 printk(" VPE%d.Cause : %08lx\n",
279 - i, read_vpe_c0_cause());
280 + i, read_vpe_c0_cause());
281 printk(" VPE%d.Config7 : %08lx\n",
282 - i, read_vpe_c0_config7());
283 + i, read_vpe_c0_config7());
284 break; /* Next VPE */
285 }
286 }
287 @@ -286,6 +356,9 @@
288 printk("Mapped %ld ITC cells starting at 0x%08x\n",
289 ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
290 }
291 +#ifdef CONFIG_IFX_VPE_EXT
292 + configure_tlb();
293 +#endif
294 }
295
296 /*
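
The __setup() handlers added above (vpe_tlb_shared=, vpe0_wired_tlb_entries=, vpe1_wired_tlb_entries=), together with the existing maxvpes=/maxtcs= options and the vpe1_load_addr=/vpe1_mem= parameters added to vpe.c further down, make the VPE0/VPE1 split configurable from the kernel command line. An illustrative bootargs fragment; the numeric values are placeholders, not recommendations:

maxvpes=1 maxtcs=1 vpe_tlb_shared=0 vpe0_wired_tlb_entries=0 vpe1_wired_tlb_entries=0 vpe1_load_addr=0x83e00000 vpe1_mem=2M
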
297 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/mtsched_proc.c linux-3.10.49/arch/mips/kernel/mtsched_proc.c
298 --- linux-3.10.49.orig/arch/mips/kernel/mtsched_proc.c 1970-01-01 01:00:00.000000000 +0100
299 +++ linux-3.10.49/arch/mips/kernel/mtsched_proc.c 2014-10-15 21:44:25.000000000 +0200
300 @@ -0,0 +1,279 @@
301 +/*
302 + * /proc hooks for MIPS MT scheduling policy management for 34K cores
303 + *
304 + * This program is free software; you can distribute it and/or modify it
305 + * under the terms of the GNU General Public License (Version 2) as
306 + * published by the Free Software Foundation.
307 + *
308 + * This program is distributed in the hope it will be useful, but WITHOUT
309 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
310 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
311 + * for more details.
312 + *
313 + * You should have received a copy of the GNU General Public License along
314 + * with this program; if not, write to the Free Software Foundation, Inc.,
315 + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
316 + *
317 + * Copyright (C) 2006 Mips Technologies, Inc
318 + */
319 +
320 +#include <linux/kernel.h>
321 +
322 +#include <asm/cpu.h>
323 +#include <asm/processor.h>
324 +#include <asm/system.h>
325 +#include <asm/mipsregs.h>
326 +#include <asm/mipsmtregs.h>
327 +#include <asm/uaccess.h>
328 +#include <linux/proc_fs.h>
329 +
330 +static struct proc_dir_entry *mtsched_proc;
331 +
332 +#ifndef CONFIG_MIPS_MT_SMTC
333 +#define NTCS 2
334 +#else
335 +#define NTCS NR_CPUS
336 +#endif
337 +#define NVPES 2
338 +
339 +int lastvpe = 1;
340 +int lasttc = 8;
341 +
342 +static int proc_read_mtsched(char *page, char **start, off_t off,
343 + int count, int *eof, void *data)
344 +{
345 + int totalen = 0;
346 + int len;
347 +
348 + int i;
349 + int vpe;
350 + int mytc;
351 + unsigned long flags;
352 + unsigned int mtflags;
353 + unsigned int haltstate;
354 + unsigned int vpes_checked[NVPES];
355 + unsigned int vpeschedule[NVPES];
356 + unsigned int vpeschefback[NVPES];
357 + unsigned int tcschedule[NTCS];
358 + unsigned int tcschefback[NTCS];
359 +
360 + /* Dump the state of the MIPS MT scheduling policy manager */
361 +	/* Initialize control state */
362 + for(i = 0; i < NVPES; i++) {
363 + vpes_checked[i] = 0;
364 + vpeschedule[i] = 0;
365 + vpeschefback[i] = 0;
366 + }
367 + for(i = 0; i < NTCS; i++) {
368 + tcschedule[i] = 0;
369 + tcschefback[i] = 0;
370 + }
371 +
372 + /* Disable interrupts and multithreaded issue */
373 + local_irq_save(flags);
374 + mtflags = dvpe();
375 +
376 + /* Then go through the TCs, halt 'em, and extract the values */
377 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
378 + for(i = 0; i < NTCS; i++) {
379 + if(i == mytc) {
380 + /* No need to halt ourselves! */
381 + tcschedule[i] = read_c0_tcschedule();
382 + tcschefback[i] = read_c0_tcschefback();
383 + /* If VPE bound to TC hasn't been checked, do it */
384 + vpe = read_c0_tcbind() & TCBIND_CURVPE;
385 + if(!vpes_checked[vpe]) {
386 + vpeschedule[vpe] = read_c0_vpeschedule();
387 + vpeschefback[vpe] = read_c0_vpeschefback();
388 + vpes_checked[vpe] = 1;
389 + }
390 + } else {
391 + settc(i);
392 + haltstate = read_tc_c0_tchalt();
393 + write_tc_c0_tchalt(TCHALT_H);
394 + mips_ihb();
395 + tcschedule[i] = read_tc_c0_tcschedule();
396 + tcschefback[i] = read_tc_c0_tcschefback();
397 + /* If VPE bound to TC hasn't been checked, do it */
398 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
399 + if(!vpes_checked[vpe]) {
400 + vpeschedule[vpe] = read_vpe_c0_vpeschedule();
401 + vpeschefback[vpe] = read_vpe_c0_vpeschefback();
402 + vpes_checked[vpe] = 1;
403 + }
404 + if(!haltstate) write_tc_c0_tchalt(0);
405 + }
406 + }
407 + /* Re-enable MT and interrupts */
408 + evpe(mtflags);
409 + local_irq_restore(flags);
410 +
411 + for(vpe=0; vpe < NVPES; vpe++) {
412 + len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n",
413 + vpe, vpeschedule[vpe]);
414 + totalen += len;
415 + page += len;
416 + len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
417 + vpe, vpeschefback[vpe]);
418 + totalen += len;
419 + page += len;
420 + }
421 + for(i=0; i < NTCS; i++) {
422 + len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n",
423 + i, tcschedule[i]);
424 + totalen += len;
425 + page += len;
426 + len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n",
427 + i, tcschefback[i]);
428 + totalen += len;
429 + page += len;
430 + }
431 + return totalen;
432 +}
433 +
434 +/*
435 + * Write to perf counter registers based on text input
436 + */
437 +
438 +#define TXTBUFSZ 100
439 +
440 +static int proc_write_mtsched(struct file *file, const char *buffer,
441 + unsigned long count, void *data)
442 +{
443 + int len = 0;
444 + char mybuf[TXTBUFSZ];
445 + /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
446 + char entity[1]; //, entity1[1];
447 + int number[1];
448 + unsigned long value[1];
449 + int nparsed = 0 , index = 0;
450 + unsigned long flags;
451 + unsigned int mtflags;
452 + unsigned int haltstate;
453 + unsigned int tcbindval;
454 +
455 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
456 + else len = count;
457 + memset(mybuf,0,TXTBUFSZ);
458 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
459 +
460 + nparsed = sscanf(mybuf, "%c%d %lx",
461 + &entity[0] ,&number[0], &value[0]);
462 +
463 + /*
464 + * Having acquired the inputs, which might have
465 + * generated exceptions and preemptions,
466 + * program the registers.
467 + */
468 + /* Disable interrupts and multithreaded issue */
469 + local_irq_save(flags);
470 + mtflags = dvpe();
471 +
472 + if(entity[index] == 't' ) {
473 + /* Set TCSchedule or TCScheFBack of specified TC */
474 +		if(number[index] >= NTCS) goto skip;
475 + /* If it's our own TC, do it direct */
476 + if(number[index] ==
477 + ((read_c0_tcbind() & TCBIND_CURTC)
478 + >> TCBIND_CURTC_SHIFT)) {
479 + if(entity[index] == 't')
480 + write_c0_tcschedule(value[index]);
481 + else
482 + write_c0_tcschefback(value[index]);
483 + } else {
484 + /* Otherwise, we do it via MTTR */
485 + settc(number[index]);
486 + haltstate = read_tc_c0_tchalt();
487 + write_tc_c0_tchalt(TCHALT_H);
488 + mips_ihb();
489 + if(entity[index] == 't')
490 + write_tc_c0_tcschedule(value[index]);
491 + else
492 + write_tc_c0_tcschefback(value[index]);
493 + mips_ihb();
494 + if(!haltstate) write_tc_c0_tchalt(0);
495 + }
496 + } else if(entity[index] == 'v') {
497 + /* Set VPESchedule of specified VPE */
498 +		if(number[index] >= NVPES) goto skip;
499 + tcbindval = read_c0_tcbind();
500 + /* Are we doing this to our current VPE? */
501 + if((tcbindval & TCBIND_CURVPE) == number[index]) {
502 + /* Then life is simple */
503 + write_c0_vpeschedule(value[index]);
504 + } else {
505 + /*
506 + * Bind ourselves to the other VPE long enough
507 + * to program the bind value.
508 + */
509 + write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
510 + | number[index]);
511 + mips_ihb();
512 + write_c0_vpeschedule(value[index]);
513 + mips_ihb();
514 + /* Restore previous binding */
515 + write_c0_tcbind(tcbindval);
516 + mips_ihb();
517 + }
518 + }
519 +
520 + else if(entity[index] == 'r') {
521 + unsigned int vpes_checked[2], vpe ,i , mytc;
522 + vpes_checked[0] = vpes_checked[1] = 0;
523 +
524 + /* Then go through the TCs, halt 'em, and extract the values */
525 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
526 +
527 + for(i = 0; i < NTCS; i++) {
528 + if(i == mytc) {
529 + /* No need to halt ourselves! */
530 + write_c0_vpeschefback(0);
531 + write_c0_tcschefback(0);
532 + } else {
533 + settc(i);
534 + haltstate = read_tc_c0_tchalt();
535 + write_tc_c0_tchalt(TCHALT_H);
536 + mips_ihb();
537 + write_tc_c0_tcschefback(0);
538 + /* If VPE bound to TC hasn't been checked, do it */
539 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
540 + if(!vpes_checked[vpe]) {
541 + write_vpe_c0_vpeschefback(0);
542 + vpes_checked[vpe] = 1;
543 + }
544 + if(!haltstate) write_tc_c0_tchalt(0);
545 + }
546 + }
547 + }
548 + else {
549 + printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
550 + }
551 +
552 +skip:
553 + /* Re-enable MT and interrupts */
554 + evpe(mtflags);
555 + local_irq_restore(flags);
556 + return (len);
557 +}
558 +
559 +static int __init init_mtsched_proc(void)
560 +{
561 + extern struct proc_dir_entry *get_mips_proc_dir(void);
562 + struct proc_dir_entry *mips_proc_dir;
563 +
564 + if (!cpu_has_mipsmt) {
565 + printk("mtsched: not a MIPS MT capable processor\n");
566 + return -ENODEV;
567 + }
568 +
569 + mips_proc_dir = get_mips_proc_dir();
570 +
571 + mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
572 + mtsched_proc->read_proc = proc_read_mtsched;
573 + mtsched_proc->write_proc = proc_write_mtsched;
574 +
575 + return 0;
576 +}
577 +
578 +/* Automagically create the entry */
579 +module_init(init_mtsched_proc);
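
mtsched_proc.c exposes the scheduling registers as /proc/mips/mtsched (the directory comes from get_mips_proc_dir() in proc.c further down): a read dumps the VPE[n]/TC[n] values collected above, and a write takes the "<t|v><index> <hex value>" form shown in the usage printk. A small user-space sketch, purely illustrative; the 0x0f weight is a placeholder:

/* Illustrative only: dump and set 34K scheduling weights through the
 * /proc/mips/mtsched file created above. */
#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/proc/mips/mtsched", "r");

        if (!f) {
                perror("mtsched");
                return 1;
        }
        while (fgets(line, sizeof(line), f))    /* VPE[n]/TC[n] register dump */
                fputs(line, stdout);
        fclose(f);

        f = fopen("/proc/mips/mtsched", "w");
        if (!f) {
                perror("mtsched");
                return 1;
        }
        fputs("t1 0x0f\n", f);  /* "<t|v><index> <hex>": set TC1's TCSchedule */
        fclose(f);
        return 0;
}
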
580 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/perf_proc.c linux-3.10.49/arch/mips/kernel/perf_proc.c
581 --- linux-3.10.49.orig/arch/mips/kernel/perf_proc.c 1970-01-01 01:00:00.000000000 +0100
582 +++ linux-3.10.49/arch/mips/kernel/perf_proc.c 2014-10-15 21:44:25.000000000 +0200
583 @@ -0,0 +1,191 @@
584 +/*
585 + * /proc hooks for CPU performance counter support for SMTC kernel
586 + * (and ultimately others)
587 + * Copyright (C) 2006 Mips Technologies, Inc
588 + */
589 +
590 +#include <linux/kernel.h>
591 +
592 +#include <asm/cpu.h>
593 +#include <asm/processor.h>
594 +#include <asm/system.h>
595 +#include <asm/mipsregs.h>
596 +#include <asm/uaccess.h>
597 +#include <linux/proc_fs.h>
598 +
599 +/*
600 + * /proc diagnostic and statistics hooks
601 + */
602 +
603 +
604 +/* Internal software-extended event counters */
605 +
606 +static unsigned long long extencount[4] = {0,0,0,0};
607 +
608 +static struct proc_dir_entry *perf_proc;
609 +
610 +static int proc_read_perf(char *page, char **start, off_t off,
611 + int count, int *eof, void *data)
612 +{
613 + int totalen = 0;
614 + int len;
615 +
616 + len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
617 + totalen += len;
618 + page += len;
619 + len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
620 + extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
621 + totalen += len;
622 + page += len;
623 + len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
624 + totalen += len;
625 + page += len;
626 + len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
627 + extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
628 + totalen += len;
629 + page += len;
630 + len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
631 + totalen += len;
632 + page += len;
633 + len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
634 + extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
635 + totalen += len;
636 + page += len;
637 + len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
638 + totalen += len;
639 + page += len;
640 + len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
641 + extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
642 + totalen += len;
643 + page += len;
644 +
645 + return totalen;
646 +}
647 +
648 +/*
649 + * Write to perf counter registers based on text input
650 + */
651 +
652 +#define TXTBUFSZ 100
653 +
654 +static int proc_write_perf(struct file *file, const char *buffer,
655 + unsigned long count, void *data)
656 +{
657 + int len;
658 + int nparsed;
659 + int index;
660 + char mybuf[TXTBUFSZ];
661 +
662 + int which[4];
663 + unsigned long control[4];
664 + long long ctrdata[4];
665 +
666 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
667 + else len = count;
668 + memset(mybuf,0,TXTBUFSZ);
669 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
670 +
671 + nparsed = sscanf(mybuf,
672 + "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
673 + &which[0], &control[0], &ctrdata[0],
674 + &which[1], &control[1], &ctrdata[1],
675 + &which[2], &control[2], &ctrdata[2],
676 + &which[3], &control[3], &ctrdata[3]);
677 +
678 + for(index = 0; nparsed >= 3; index++) {
679 + switch (which[index]) {
680 + case 0:
681 + write_c0_perfctrl0(control[index]);
682 + if(ctrdata[index] != -1) {
683 + extencount[0] = (unsigned long long)ctrdata[index];
684 + write_c0_perfcntr0((unsigned long)0);
685 + }
686 + break;
687 + case 1:
688 + write_c0_perfctrl1(control[index]);
689 + if(ctrdata[index] != -1) {
690 + extencount[1] = (unsigned long long)ctrdata[index];
691 + write_c0_perfcntr1((unsigned long)0);
692 + }
693 + break;
694 + case 2:
695 + write_c0_perfctrl2(control[index]);
696 + if(ctrdata[index] != -1) {
697 + extencount[2] = (unsigned long long)ctrdata[index];
698 + write_c0_perfcntr2((unsigned long)0);
699 + }
700 + break;
701 + case 3:
702 + write_c0_perfctrl3(control[index]);
703 + if(ctrdata[index] != -1) {
704 + extencount[3] = (unsigned long long)ctrdata[index];
705 + write_c0_perfcntr3((unsigned long)0);
706 + }
707 + break;
708 + }
709 + nparsed -= 3;
710 + }
711 + return (len);
712 +}
713 +
714 +extern int (*perf_irq)(void);
715 +
716 +/*
717 + * Invoked when timer interrupt vector picks up a perf counter overflow
718 + */
719 +
720 +static int perf_proc_irq(void)
721 +{
722 + unsigned long snapshot;
723 +
724 + /*
725 + * It would be nice to do this as a loop, but we don't have
726 + * indirect access to CP0 registers.
727 + */
728 + snapshot = read_c0_perfcntr0();
729 + if ((long)snapshot < 0) {
730 + extencount[0] +=
731 + (unsigned long long)((unsigned)read_c0_perfcntr0());
732 + write_c0_perfcntr0(0);
733 + }
734 + snapshot = read_c0_perfcntr1();
735 + if ((long)snapshot < 0) {
736 + extencount[1] +=
737 + (unsigned long long)((unsigned)read_c0_perfcntr1());
738 + write_c0_perfcntr1(0);
739 + }
740 + snapshot = read_c0_perfcntr2();
741 + if ((long)snapshot < 0) {
742 + extencount[2] +=
743 + (unsigned long long)((unsigned)read_c0_perfcntr2());
744 + write_c0_perfcntr2(0);
745 + }
746 + snapshot = read_c0_perfcntr3();
747 + if ((long)snapshot < 0) {
748 + extencount[3] +=
749 + (unsigned long long)((unsigned)read_c0_perfcntr3());
750 + write_c0_perfcntr3(0);
751 + }
752 + return 0;
753 +}
754 +
755 +static int __init init_perf_proc(void)
756 +{
757 + extern struct proc_dir_entry *get_mips_proc_dir(void);
758 +
759 + struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
760 +
761 + write_c0_perfcntr0(0);
762 + write_c0_perfcntr1(0);
763 + write_c0_perfcntr2(0);
764 + write_c0_perfcntr3(0);
765 + perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
766 + perf_proc->read_proc = proc_read_perf;
767 + perf_proc->write_proc = proc_write_perf;
768 + perf_irq = perf_proc_irq;
769 +
770 + return 0;
771 +}
772 +
773 +/* Automagically create the entry */
774 +module_init(init_perf_proc);
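
perf_proc.c follows the same pattern for the CP0 performance counters at /proc/mips/perf: a read prints each PerfCnt[n].Ctl/Cnt pair (with the software-extended 64-bit count), and a write accepts up to four "<counter> <control-hex> <count>" triples, where a count of -1 leaves the running value untouched. A short illustrative writer; the 0x82/0x8a control words are placeholders, not meaningful event selections:

/* Illustrative only: program two counters through /proc/mips/perf. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/mips/perf", "w");

        if (!f) {
                perror("perf");
                return 1;
        }
        /* counter 0 and counter 1: "<which> <ctl-hex> <count>" triples */
        fprintf(f, "0 0x82 0 1 0x8a 0\n");
        fclose(f);
        return 0;
}
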
775 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/proc.c linux-3.10.49/arch/mips/kernel/proc.c
776 --- linux-3.10.49.orig/arch/mips/kernel/proc.c 2014-10-15 21:41:48.000000000 +0200
777 +++ linux-3.10.49/arch/mips/kernel/proc.c 2014-10-15 21:44:25.000000000 +0200
778 @@ -7,6 +7,7 @@
779 #include <linux/kernel.h>
780 #include <linux/sched.h>
781 #include <linux/seq_file.h>
782 +#include <linux/proc_fs.h>
783 #include <asm/bootinfo.h>
784 #include <asm/cpu.h>
785 #include <asm/cpu-features.h>
786 @@ -142,3 +143,19 @@
787 .stop = c_stop,
788 .show = show_cpuinfo,
789 };
790 +
791 +/*
792 + * Support for MIPS/local /proc hooks in /proc/mips/
793 + */
794 +
795 +static struct proc_dir_entry *mips_proc = NULL;
796 +
797 +struct proc_dir_entry *get_mips_proc_dir(void)
798 +{
799 + /*
800 + * This ought not to be preemptable.
801 + */
802 + if(mips_proc == NULL)
803 + mips_proc = proc_mkdir("mips", NULL);
804 + return(mips_proc);
805 +}
806 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/prom.c linux-3.10.49/arch/mips/kernel/prom.c
807 --- linux-3.10.49.orig/arch/mips/kernel/prom.c 2014-10-15 21:41:48.000000000 +0200
808 +++ linux-3.10.49/arch/mips/kernel/prom.c 2014-10-15 21:45:47.000000000 +0200
809 @@ -39,6 +39,8 @@
810 return mips_machine_name;
811 }
812
813 +unsigned long physical_memsize = 0L;
814 +
815 #ifdef CONFIG_OF
816 int __init early_init_dt_scan_memory_arch(unsigned long node,
817 const char *uname, int depth,
818 @@ -50,6 +52,7 @@
819 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
820 {
821 +	physical_memsize = size;
822  	return add_memory_region(base, size, BOOT_MEM_RAM);
823 }
824
825 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
826 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/smtc.c linux-3.10.49/arch/mips/kernel/smtc.c
827 --- linux-3.10.49.orig/arch/mips/kernel/smtc.c 2014-10-15 21:41:48.000000000 +0200
828 +++ linux-3.10.49/arch/mips/kernel/smtc.c 2014-10-15 21:44:25.000000000 +0200
829 @@ -1394,6 +1394,13 @@
830 asid = asid_cache(cpu);
831
832 do {
833 +#ifdef CONFIG_IFX_VPE_EXT
834 + /* If TLB is shared between AP and RP (AP is running SMTC),
835 + leave out max ASID i.e., ASID_MASK for RP
836 + */
837 + if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
838 + asid++;
839 +#endif
840 if (!((asid += ASID_INC) & ASID_MASK) ) {
841 if (cpu_has_vtag_icache)
842 flush_icache_all();
843 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/vpe.c linux-3.10.49/arch/mips/kernel/vpe.c
844 --- linux-3.10.49.orig/arch/mips/kernel/vpe.c 2014-10-15 21:41:48.000000000 +0200
845 +++ linux-3.10.49/arch/mips/kernel/vpe.c 2014-10-15 21:54:34.000000000 +0200
846 @@ -68,6 +68,58 @@
847 static int major;
848 static const int minor = 1; /* fixed for now */
849
850 +#ifdef CONFIG_IFX_VPE_EXT
851 +static int is_sdepgm;
852 +extern int stlb;
853 +extern int vpe0_wired;
854 +extern int vpe1_wired;
855 +unsigned int vpe1_load_addr;
856 +
857 +static int __init load_address(char *str)
858 +{
859 + get_option(&str, &vpe1_load_addr);
860 + return 1;
861 +}
862 +__setup("vpe1_load_addr=", load_address);
863 +
864 +#include <asm/mipsmtregs.h>
865 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
866 +
867 +#ifndef COMMAND_LINE_SIZE
868 +# define COMMAND_LINE_SIZE 512
869 +#endif
870 +
871 +char command_line[COMMAND_LINE_SIZE * 2];
872 +
873 +static unsigned int vpe1_mem;
874 +static int __init vpe1mem(char *str)
875 +{
876 + vpe1_mem = memparse(str, &str);
877 + return 1;
878 +}
879 +__setup("vpe1_mem=", vpe1mem);
880 +
881 +uint32_t vpe1_wdog_ctr;
882 +static int __init wdog_ctr(char *str)
883 +{
884 + get_option(&str, &vpe1_wdog_ctr);
885 + return 1;
886 +}
887 +
888 +__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
889 +EXPORT_SYMBOL(vpe1_wdog_ctr);
890 +
891 +uint32_t vpe1_wdog_timeout;
892 +static int __init wdog_timeout(char *str)
893 +{
894 + get_option(&str, &vpe1_wdog_timeout);
895 + return 1;
896 +}
897 +
898 +__setup("vpe1_wdog_timeout=", wdog_timeout);
899 +EXPORT_SYMBOL(vpe1_wdog_timeout);
900 +
901 +#endif
902 /* grab the likely amount of memory we will need. */
903 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
904 #define P_SIZE (2 * 1024 * 1024)
905 @@ -260,6 +312,13 @@
906 void *addr;
907
908 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
909 +#ifdef CONFIG_IFX_VPE_EXT
910 + if (vpe1_load_addr) {
911 + memset((void *)vpe1_load_addr, 0, len);
912 + return (void *)vpe1_load_addr;
913 + }
914 +#endif
915 +
916 /*
917 * This means you must tell Linux to use less memory than you
918 * physically have, for example by passing a mem= boot argument.
919 @@ -729,6 +788,12 @@
920 }
921
922 /* Write the address we want it to start running from in the TCPC register. */
923 +#if defined(CONFIG_IFX_VPE_EXT) && 0
924 + if (stlb)
925 + write_vpe_c0_wired(vpe0_wired + vpe1_wired);
926 + else
927 + write_vpe_c0_wired(vpe1_wired);
928 +#endif
929 write_tc_c0_tcrestart((unsigned long)v->__start);
930 write_tc_c0_tccontext((unsigned long)0);
931
932 @@ -742,6 +807,20 @@
933
934 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
935
936 +#if defined(CONFIG_IFX_VPE_EXT) && 0
937 + /*
938 + * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
939 + * points to the start of the command line string and $a3 points to
940 + * the end of the string. This convention is identical to the Linux
941 + * kernel boot parameter passing mechanism. Please note that $a3 is
942 + * used to pass physical memory size or 0 in SDE tool kit. So, if you
943 +	 * are passing command line parameters through $a2 & $a3, SDE programs
944 + * don't work as desired.
945 + */
946 + mttgpr(6, command_line);
947 + mttgpr(7, (command_line + strlen(command_line)));
948 + if (is_sdepgm)
949 +#endif
950 /*
951 * The sde-kit passes 'memsize' to __start in $a3, so set something
952 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
953 @@ -816,6 +895,9 @@
954 if ( (v->__start == 0) || (v->shared_ptr == NULL))
955 return -1;
956
957 +#ifdef CONFIG_IFX_VPE_EXT
958 + is_sdepgm = 1;
959 +#endif
960 return 0;
961 }
962
963 @@ -977,6 +1059,15 @@
964 (unsigned long)v->load_addr + v->len);
965
966 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
967 +#ifdef CONFIG_IFX_VPE_EXT
968 + if (vpe1_load_addr) {
969 + /* Conversion to KSEG1 is required ??? */
970 + v->__start = KSEG1ADDR(vpe1_load_addr);
971 + is_sdepgm = 0;
972 + return 0;
973 + }
974 +#endif
975 +
976 if (v->__start == 0) {
977 printk(KERN_WARNING "VPE loader: program does not contain "
978 "a __start symbol\n");
979 @@ -1047,6 +1138,9 @@
980 struct vpe_notifications *not;
981 struct vpe *v;
982 int ret;
983 +#ifdef CONFIG_IFX_VPE_EXT
984 + int progsize;
985 +#endif
986
987 if (minor != iminor(inode)) {
988 /* assume only 1 device at the moment. */
989 @@ -1072,7 +1166,12 @@
990 release_progmem(v->load_addr);
991 cleanup_tc(get_tc(tclimit));
992 }
993 -
994 +#ifdef CONFIG_IFX_VPE_EXT
995 + progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE;
996 + //printk("progsize = %x\n", progsize);
997 + v->pbuffer = vmalloc(progsize);
998 + v->plen = progsize;
999 +#else
1000 /* this of-course trashes what was there before... */
1001 v->pbuffer = vmalloc(P_SIZE);
1002 if (!v->pbuffer) {
1003 @@ -1080,11 +1179,14 @@
1004 return -ENOMEM;
1005 }
1006 v->plen = P_SIZE;
1007 +#endif
1008 v->load_addr = NULL;
1009 v->len = 0;
1010
1011 +#if 0
1012 v->uid = filp->f_cred->fsuid;
1013 v->gid = filp->f_cred->fsgid;
1014 +#endif
1015
1016 v->cwd[0] = 0;
1017 ret = getcwd(v->cwd, VPE_PATH_MAX);
1018 @@ -1318,6 +1420,134 @@
1019
1020 EXPORT_SYMBOL(vpe_getcwd);
1021
1022 +#ifdef CONFIG_IFX_VPE_EXT
1023 +int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
1024 +{
1025 + enum vpe_state state;
1026 + struct vpe *v = get_vpe(tclimit);
1027 + struct vpe_notifications *not;
1028 +
1029 + if (tcmask || flags) {
1030 +		printk(KERN_WARNING "Currently tcmask and flags should be 0; "
1031 +			"other values are not supported\n");
1032 + return -1;
1033 + }
1034 +
1035 + state = xchg(&v->state, VPE_STATE_INUSE);
1036 + if (state != VPE_STATE_UNUSED) {
1037 + vpe_stop(v);
1038 +
1039 + list_for_each_entry(not, &v->notify, list) {
1040 + not->stop(tclimit);
1041 + }
1042 + }
1043 +
1044 + v->__start = (unsigned long)sw_start_addr;
1045 + is_sdepgm = 0;
1046 +
1047 + if (!vpe_run(v)) {
1048 + printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
1049 + return 0;
1050 + }
1051 + return -1;
1052 +}
1053 +
1054 +EXPORT_SYMBOL(vpe1_sw_start);
1055 +
1056 +int32_t vpe1_sw_stop(uint32_t flags)
1057 +{
1058 + struct vpe *v = get_vpe(tclimit);
1059 +
1060 + if (!vpe_free(v)) {
1061 + printk(KERN_DEBUG "RP Stopped\n");
1062 + return 0;
1063 + }
1064 + else
1065 + return -1;
1066 +}
1067 +
1068 +EXPORT_SYMBOL(vpe1_sw_stop);
1069 +
1070 +uint32_t vpe1_get_load_addr (uint32_t flags)
1071 +{
1072 + return vpe1_load_addr;
1073 +}
1074 +
1075 +EXPORT_SYMBOL(vpe1_get_load_addr);
1076 +
1077 +uint32_t vpe1_get_max_mem (uint32_t flags)
1078 +{
1079 + if (!vpe1_mem)
1080 + return P_SIZE;
1081 + else
1082 + return vpe1_mem;
1083 +}
1084 +
1085 +EXPORT_SYMBOL(vpe1_get_max_mem);
1086 +
1087 +void* vpe1_get_cmdline_argument(void)
1088 +{
1089 + return saved_command_line;
1090 +}
1091 +
1092 +EXPORT_SYMBOL(vpe1_get_cmdline_argument);
1093 +
1094 +int32_t vpe1_set_boot_param(char *field, char *value, char flags)
1095 +{
1096 + char *ptr, string[64];
1097 + int start_off, end_off;
1098 + if (!field)
1099 + return -1;
1100 + strcpy(string, field);
1101 + if (value) {
1102 + strcat(string, "=");
1103 + strcat(string, value);
1104 + strcat(command_line, " ");
1105 + strcat(command_line, string);
1106 + }
1107 + else {
1108 + ptr = strstr(command_line, string);
1109 + if (ptr) {
1110 + start_off = ptr - command_line;
1111 + ptr += strlen(string);
1112 + while ((*ptr != ' ') && (*ptr != '\0'))
1113 + ptr++;
1114 + end_off = ptr - command_line;
1115 + command_line[start_off] = '\0';
1116 +			memmove(command_line + start_off, command_line + end_off, strlen(command_line + end_off) + 1);
1117 + }
1118 + }
1119 + return 0;
1120 +}
1121 +
1122 +EXPORT_SYMBOL(vpe1_set_boot_param);
1123 +
1124 +int32_t vpe1_get_boot_param(char *field, char **value, char flags)
1125 +{
1126 + char *ptr, string[64];
1127 + int i = 0;
1128 + if (!field)
1129 + return -1;
1130 + if ((ptr = strstr(command_line, field))) {
1131 + ptr += strlen(field) + 1; /* including = */
1132 + while ((*ptr != ' ') && (*ptr != '\0'))
1133 + string[i++] = *ptr++;
1134 + string[i] = '\0';
1135 + *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
1136 + if (*value != NULL)
1137 + strcpy(*value, string);
1138 + }
1139 + else
1140 + *value = NULL;
1141 +
1142 + return 0;
1143 +}
1144 +
1145 +EXPORT_SYMBOL(vpe1_get_boot_param);
1146 +
1147 +extern void configure_tlb(void);
1148 +#endif
1149 +
1150 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1151 const char *buf, size_t len)
1152 {
1153 @@ -1398,6 +1628,18 @@
1154 printk("VPE loader: not a MIPS MT capable processor\n");
1155 return -ENODEV;
1156 }
1157 +#ifdef CONFIG_IFX_VPE_EXT
1158 +#ifndef CONFIG_MIPS_MT_SMTC
1159 + configure_tlb();
1160 +#endif
1161 +#endif
1162 +
1163 +#ifndef CONFIG_MIPS_MT_SMTC
1164 + if (!vpelimit)
1165 + vpelimit = 1;
1166 + if (!tclimit)
1167 + tclimit = 1;
1168 +#endif
1169
1170 if (vpelimit == 0) {
1171 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
1172 @@ -1442,10 +1684,12 @@
1173 mtflags = dmt();
1174 vpflags = dvpe();
1175
1176 + back_to_back_c0_hazard();
1177 +
1178 /* Put MVPE's into 'configuration state' */
1179 set_c0_mvpcontrol(MVPCONTROL_VPC);
1180
1181 - /* dump_mtregs(); */
1182 + dump_mtregs();
1183
1184 val = read_c0_mvpconf0();
1185 hw_tcs = (val & MVPCONF0_PTC) + 1;
1186 @@ -1457,6 +1701,7 @@
1187 * reschedule send IPIs or similar we might hang.
1188 */
1189 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1190 + back_to_back_c0_hazard();
1191 evpe(vpflags);
1192 emt(mtflags);
1193 local_irq_restore(flags);
1194 @@ -1482,6 +1727,7 @@
1195 }
1196
1197 v->ntcs = hw_tcs - tclimit;
1198 + write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
1199
1200 /* add the tc to the list of this vpe's tc's. */
1201 list_add(&t->tc, &v->tc);
1202 @@ -1550,6 +1796,7 @@
1203 out_reenable:
1204 /* release config state */
1205 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1206 + back_to_back_c0_hazard();
1207
1208 evpe(vpflags);
1209 emt(mtflags);
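
Besides the /dev/vpe loader path, the vpe.c changes above export a small in-kernel API for the RP on VPE1: vpe1_sw_start()/vpe1_sw_stop(), vpe1_get_load_addr()/vpe1_get_max_mem(), and vpe1_set_boot_param()/vpe1_get_boot_param()/vpe1_get_cmdline_argument(). A hedged sketch of a consumer module follows; the firmware-copy step and the "vpe1_clock" parameter are invented for illustration, and the externs are declared locally because the patch exports the symbols without adding a public header:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Exported by the patched vpe.c above (declared here for the sketch). */
extern int32_t vpe1_sw_start(void *sw_start_addr, uint32_t tcmask, uint32_t flags);
extern int32_t vpe1_sw_stop(uint32_t flags);
extern uint32_t vpe1_get_load_addr(uint32_t flags);
extern int32_t vpe1_set_boot_param(char *field, char *value, char flags);

static int __init demo_rp_init(void)
{
        void *img = (void *)vpe1_get_load_addr(0);      /* set via vpe1_load_addr= */

        if (!img)
                return -ENODEV;
        /* ... copy the RP firmware image to img here (omitted) ... */

        vpe1_set_boot_param("vpe1_clock", "333M", 0);   /* made-up boot option */
        return vpe1_sw_start(img, 0, 0);                /* tcmask/flags must be 0 */
}

static void __exit demo_rp_exit(void)
{
        vpe1_sw_stop(0);
}

module_init(demo_rp_init);
module_exit(demo_rp_exit);
MODULE_LICENSE("GPL");
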
1211 diff -u -N -r linux-3.10.49.orig/arch/mips/kernel/vpe.h linux-3.10.49/arch/mips/kernel/vpe.h
1212 --- linux-3.10.49.orig/arch/mips/kernel/vpe.h 1970-01-01 01:00:00.000000000 +0100
1213 +++ linux-3.10.49/arch/mips/kernel/vpe.h 2014-10-15 21:44:25.000000000 +0200
1214 @@ -0,0 +1,130 @@
1215 +/*
1216 + * This file is subject to the terms and conditions of the GNU General Public
1217 + * License. See the file "COPYING" in the main directory of this archive
1218 + * for more details.
1219 + *
1220 + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
1221 + * Copyright (C) 2013 Imagination Technologies Ltd.
1222 + */
1223 +#ifndef _ASM_VPE_H
1224 +#define _ASM_VPE_H
1225 +
1226 +#include <linux/init.h>
1227 +#include <linux/list.h>
1228 +#include <linux/smp.h>
1229 +#include <linux/spinlock.h>
1230 +
1231 +#define VPE_MODULE_NAME "vpe"
1232 +#define VPE_MODULE_MINOR 1
1233 +
1234 +/* grab the likely amount of memory we will need. */
1235 +#ifdef CONFIG_MIPS_VPE_LOADER_TOM
1236 +#define P_SIZE (2 * 1024 * 1024)
1237 +#else
1238 +/* add an overhead to the max kmalloc size for non-striped symbols/etc */
1239 +#define P_SIZE (256 * 1024)
1240 +#endif
1241 +
1242 +#define MAX_VPES 16
1243 +#define VPE_PATH_MAX 256
1244 +
1245 +static inline int aprp_cpu_index(void)
1246 +{
1247 +#ifdef CONFIG_MIPS_CMP
1248 + return setup_max_cpus;
1249 +#else
1250 + extern int tclimit;
1251 + return tclimit;
1252 +#endif
1253 +}
1254 +
1255 +enum vpe_state {
1256 + VPE_STATE_UNUSED = 0,
1257 + VPE_STATE_INUSE,
1258 + VPE_STATE_RUNNING
1259 +};
1260 +
1261 +enum tc_state {
1262 + TC_STATE_UNUSED = 0,
1263 + TC_STATE_INUSE,
1264 + TC_STATE_RUNNING,
1265 + TC_STATE_DYNAMIC
1266 +};
1267 +
1268 +struct vpe {
1269 + enum vpe_state state;
1270 +
1271 + /* (device) minor associated with this vpe */
1272 + int minor;
1273 +
1274 + /* elfloader stuff */
1275 + void *load_addr;
1276 + unsigned long len;
1277 + char *pbuffer;
1278 + unsigned long plen;
1279 + char cwd[VPE_PATH_MAX];
1280 +
1281 + unsigned long __start;
1282 +
1283 + /* tc's associated with this vpe */
1284 + struct list_head tc;
1285 +
1286 + /* The list of vpe's */
1287 + struct list_head list;
1288 +
1289 + /* shared symbol address */
1290 + void *shared_ptr;
1291 +
1292 + /* the list of who wants to know when something major happens */
1293 + struct list_head notify;
1294 +
1295 + unsigned int ntcs;
1296 +};
1297 +
1298 +struct tc {
1299 + enum tc_state state;
1300 + int index;
1301 +
1302 + struct vpe *pvpe; /* parent VPE */
1303 + struct list_head tc; /* The list of TC's with this VPE */
1304 + struct list_head list; /* The global list of tc's */
1305 +};
1306 +
1307 +struct vpe_notifications {
1308 + void (*start)(int vpe);
1309 + void (*stop)(int vpe);
1310 +
1311 + struct list_head list;
1312 +};
1313 +
1314 +struct vpe_control {
1315 + spinlock_t vpe_list_lock;
1316 + struct list_head vpe_list; /* Virtual processing elements */
1317 + spinlock_t tc_list_lock;
1318 + struct list_head tc_list; /* Thread contexts */
1319 +};
1320 +
1321 +extern unsigned long physical_memsize;
1322 +extern struct vpe_control vpecontrol;
1323 +extern const struct file_operations vpe_fops;
1324 +
1325 +int vpe_notify(int index, struct vpe_notifications *notify);
1326 +
1327 +void *vpe_get_shared(int index);
1328 +char *vpe_getcwd(int index);
1329 +
1330 +struct vpe *get_vpe(int minor);
1331 +struct tc *get_tc(int index);
1332 +struct vpe *alloc_vpe(int minor);
1333 +struct tc *alloc_tc(int index);
1334 +void release_vpe(struct vpe *v);
1335 +
1336 +void *alloc_progmem(unsigned long len);
1337 +void release_progmem(void *ptr);
1338 +
1339 +int __weak vpe_run(struct vpe *v);
1340 +void cleanup_tc(struct tc *tc);
1341 +
1342 +int __init vpe_module_init(void);
1343 +void __exit vpe_module_exit(void);
1344 +#endif /* _ASM_VPE_H */
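
vpe.h above also declares the notification hook through which other drivers learn when the RP is started or stopped; vpe1_sw_start() earlier in this patch, for instance, calls each registered ->stop() handler before restarting the RP. A minimal registration sketch; the include path is an assumption, since the header lives in arch/mips/kernel/ rather than in a public include directory:

#include <linux/module.h>
#include <linux/kernel.h>
#include "vpe.h"        /* arch/mips/kernel/vpe.h from the hunk above */

static void demo_rp_started(int index)
{
        pr_info("RP started on TC index %d\n", index);
}

static void demo_rp_stopped(int index)
{
        pr_info("RP stopped on TC index %d\n", index);
}

static struct vpe_notifications demo_notify = {
        .start = demo_rp_started,
        .stop  = demo_rp_stopped,
};

static int __init demo_notify_init(void)
{
        return vpe_notify(aprp_cpu_index(), &demo_notify);
}
module_init(demo_notify_init);
MODULE_LICENSE("GPL");
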
1345 diff -u -N -r linux-3.10.49.orig/arch/mips/lantiq/prom.c linux-3.10.49/arch/mips/lantiq/prom.c
1346 --- linux-3.10.49.orig/arch/mips/lantiq/prom.c 2014-10-15 21:41:48.000000000 +0200
1347 +++ linux-3.10.49/arch/mips/lantiq/prom.c 2014-10-15 21:44:39.000000000 +0200
1348 @@ -30,6 +30,10 @@
1349 */
1350 static struct ltq_soc_info soc_info;
1351
1352 +/* for Multithreading (APRP), vpe.c will use it */
1353 +//unsigned long physical_memsize = 0;
1354 +unsigned long cp0_memsize = 0;
1355 +
1356 const char *get_system_type(void)
1357 {
1358 return soc_info.sys_type;