ramips: fix subtarget kernel version assignment (only mt7621 is ready for now)
[openwrt/svn-archive/archive.git] target/linux/ramips/patches-4.3/0056-cm-intoruce-core-other-locking-functions.patch
commit 23d5de8efb9aed48074a72bf3d43841e1556ca42
Author: Paul Burton <paul.burton@imgtec.com>
Date:   Tue Sep 22 11:12:16 2015 -0700

    MIPS: CM: Introduce core-other locking functions

    Introduce mips_cm_lock_other & mips_cm_unlock_other, mirroring the
    existing CPC equivalents, in order to lock access from the current core
    to another via the core-other GCR region. This hasn't been required in
    the past, but with CM3 the CPC starts using GCR_CL_OTHER rather than
    CPC_CL_OTHER, so this locking will be required for safety.

    [ralf@linux-mips.org: Fix merge conflict.]

    Signed-off-by: Paul Burton <paul.burton@imgtec.com>
    Cc: linux-mips@linux-mips.org
    Cc: linux-kernel@vger.kernel.org
    Cc: James Hogan <james.hogan@imgtec.com>
    Cc: Markos Chandras <markos.chandras@imgtec.com>
    Patchwork: https://patchwork.linux-mips.org/patch/11207/
    Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -334,6 +334,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
 /* GCR_Cx_OTHER register fields */
 #define CM_GCR_Cx_OTHER_CORENUM_SHF             16
 #define CM_GCR_Cx_OTHER_CORENUM_MSK             (_ULCAST_(0xffff) << 16)
+#define CM3_GCR_Cx_OTHER_CORE_SHF               8
+#define CM3_GCR_Cx_OTHER_CORE_MSK               (_ULCAST_(0x3f) << 8)
+#define CM3_GCR_Cx_OTHER_VP_SHF                 0
+#define CM3_GCR_Cx_OTHER_VP_MSK                 (_ULCAST_(0x7) << 0)
 
 /* GCR_Cx_RESET_BASE register fields */
 #define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF     12
@@ -444,4 +448,32 @@ static inline unsigned int mips_cm_vp_id
        return (core * mips_cm_max_vp_width()) + vp;
 }
 
+#ifdef CONFIG_MIPS_CM
+
+/**
+ * mips_cm_lock_other - lock access to another core
+ * @core: the other core to be accessed
+ * @vp: the VP within the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cm_unlock_other.
+ */
+extern void mips_cm_lock_other(unsigned int core, unsigned int vp);
+
+/**
+ * mips_cm_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cm_lock_other.
+ */
+extern void mips_cm_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CM */
+
+static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
+static inline void mips_cm_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CM */
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
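
For context, callers are expected to bracket every use of the core-other GCR
region with this pair. A minimal sketch of the intended pattern, assuming the
generated core-other accessor write_gcr_co_reset_base() from the same header;
the helper name and values are illustrative, not part of the patch:

    /* Illustrative only: point another core's reset vector at 'entry'. */
    /* Assumes <asm/mips-cm.h> is in scope. */
    static void example_set_core_reset_base(unsigned int core,
                                            unsigned long entry)
    {
            /* Pin the other-region to 'core' (VP 0) and take the lock. */
            mips_cm_lock_other(core, 0);

            /* GCR_CO_* accesses now target 'core'. */
            write_gcr_co_reset_base(entry);

            /* Drop the per-CPU lock and re-enable preemption. */
            mips_cm_unlock_other();
    }
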
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -9,6 +9,8 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 
 #include <asm/mips-cm.h>
 #include <asm/mipsregs.h>
@@ -136,6 +138,9 @@ static char *cm3_causes[32] = {
        "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
 };
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
 phys_addr_t __mips_cm_phys_base(void)
 {
        u32 config3 = read_c0_config3();
@@ -200,6 +205,7 @@ int mips_cm_probe(void)
 {
        phys_addr_t addr;
        u32 base_reg;
+       unsigned cpu;
 
        /*
         * No need to probe again if we have already been
@@ -247,9 +253,42 @@ int mips_cm_probe(void)
        /* determine register width for this CM */
        mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
 
+       for_each_possible_cpu(cpu)
+               spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
        return 0;
 }
 
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
+{
+       unsigned curr_core;
+       u32 val;
+
+       preempt_disable();
+       curr_core = current_cpu_data.core;
+       spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+                         per_cpu(cm_core_lock_flags, curr_core));
+
+       if (mips_cm_revision() >= CM_REV_CM3) {
+               val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
+               val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+       } else {
+               BUG_ON(vp != 0);
+               val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+       }
+
+       write_gcr_cl_other(val);
+}
+
+void mips_cm_unlock_other(void)
+{
+       unsigned curr_core = current_cpu_data.core;
+
+       spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+                              per_cpu(cm_core_lock_flags, curr_core));
+       preempt_enable();
+}
+
 void mips_cm_error_report(void)
 {
        unsigned long revision = mips_cm_revision();
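
To make the CM3 field encoding above concrete, a worked example using only the
masks and shifts this patch adds (the core/VP values are illustrative): a call
such as mips_cm_lock_other(2, 1) on a CM3 system computes

    /* core 2 -> GCR_Cx_OTHER bits 13:8, VP 1 -> bits 2:0 */
    u32 val = (2 << CM3_GCR_Cx_OTHER_CORE_SHF)   /* 0x200 */
            | (1 << CM3_GCR_Cx_OTHER_VP_SHF);    /* 0x001 */
    /* val == 0x201; on pre-CM3 the same call would trip BUG_ON(vp != 0). */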