xburst: add support for 3.8
[openwrt/svn-archive/archive.git] / target / linux / xburst / patches-3.8 / 0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch
1 From ca40c7542f0cd0e0dfa074bd4ccefc04b8561427 Mon Sep 17 00:00:00 2001
2 From: Maarten ter Huurne <maarten@treewalker.org>
3 Date: Tue, 2 Aug 2011 10:26:09 +0200
4 Subject: [PATCH 10/21] cpufreq_stats: Support runtime changes to frequency
5 table.
6
7 ---
8 drivers/cpufreq/cpufreq_stats.c | 161 ++++++++++++++++++++-------------------
 9  1 file changed, 83 insertions(+), 78 deletions(-)
10
11 --- a/drivers/cpufreq/cpufreq_stats.c
12 +++ b/drivers/cpufreq/cpufreq_stats.c
13 @@ -20,6 +20,7 @@
14 #include <linux/kobject.h>
15 #include <linux/spinlock.h>
16 #include <linux/notifier.h>
17 +#include <linux/string.h>
18 #include <asm/cputime.h>
19
20 static spinlock_t cpufreq_stats_lock;
21 @@ -36,7 +37,7 @@ struct cpufreq_stats {
22 unsigned long long last_time;
23 unsigned int max_state;
24 unsigned int state_num;
25 - unsigned int last_index;
26 + int last_index;
27 u64 *time_in_state;
28 unsigned int *freq_table;
29 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
30 @@ -59,7 +60,7 @@ static int cpufreq_stats_update(unsigned
31 cur_time = get_jiffies_64();
32 spin_lock(&cpufreq_stats_lock);
33 stat = per_cpu(cpufreq_stats_table, cpu);
34 - if (stat->time_in_state)
35 + if (stat->time_in_state && stat->last_index != -1)
36 stat->time_in_state[stat->last_index] +=
37 cur_time - stat->last_time;
38 stat->last_time = cur_time;
39 @@ -81,7 +82,7 @@ static ssize_t show_time_in_state(struct
40 ssize_t len = 0;
41 int i;
42 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
43 - if (!stat)
44 + if (!stat || !stat->time_in_state)
45 return 0;
46 cpufreq_stats_update(stat->cpu);
47 for (i = 0; i < stat->state_num; i++) {
48 @@ -99,7 +100,7 @@ static ssize_t show_trans_table(struct c
49 int i, j;
50
51 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
52 - if (!stat)
53 + if (!stat || !stat->trans_table)
54 return 0;
55 cpufreq_stats_update(stat->cpu);
56 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
57 @@ -158,63 +159,35 @@ static struct attribute_group stats_attr
58 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
59 {
60 int index;
61 - for (index = 0; index < stat->max_state; index++)
62 - if (stat->freq_table[index] == freq)
63 - return index;
64 + if (stat->freq_table)
65 + for (index = 0; index < stat->max_state; index++)
66 + if (stat->freq_table[index] == freq)
67 + return index;
68 return -1;
69 }
70
71 -/* should be called late in the CPU removal sequence so that the stats
72 - * memory is still available in case someone tries to use it.
73 - */
74 static void cpufreq_stats_free_table(unsigned int cpu)
75 {
76 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
77 + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
78 + if (policy && policy->cpu == cpu)
79 + sysfs_remove_group(&policy->kobj, &stats_attr_group);
80 if (stat) {
81 kfree(stat->time_in_state);
82 kfree(stat);
83 }
84 per_cpu(cpufreq_stats_table, cpu) = NULL;
85 -}
86 -
87 -/* must be called early in the CPU removal sequence (before
88 - * cpufreq_remove_dev) so that policy is still valid.
89 - */
90 -static void cpufreq_stats_free_sysfs(unsigned int cpu)
91 -{
92 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
93 - if (policy && policy->cpu == cpu)
94 - sysfs_remove_group(&policy->kobj, &stats_attr_group);
95 if (policy)
96 cpufreq_cpu_put(policy);
97 }
98
99 -static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
100 +static int cpufreq_stats_update_table(struct cpufreq_policy *policy,
101 struct cpufreq_frequency_table *table)
102 {
103 - unsigned int i, j, count = 0, ret = 0;
104 - struct cpufreq_stats *stat;
105 - struct cpufreq_policy *data;
106 + unsigned int i, j, count = 0;
107 unsigned int alloc_size;
108 unsigned int cpu = policy->cpu;
109 - if (per_cpu(cpufreq_stats_table, cpu))
110 - return -EBUSY;
111 - stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
112 - if ((stat) == NULL)
113 - return -ENOMEM;
114 -
115 - data = cpufreq_cpu_get(cpu);
116 - if (data == NULL) {
117 - ret = -EINVAL;
118 - goto error_get_fail;
119 - }
120 -
121 - ret = sysfs_create_group(&data->kobj, &stats_attr_group);
122 - if (ret)
123 - goto error_out;
124 -
125 - stat->cpu = cpu;
126 - per_cpu(cpufreq_stats_table, cpu) = stat;
127 + struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
128
129 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
130 unsigned int freq = table[i].frequency;
131 @@ -223,40 +196,76 @@ static int cpufreq_stats_create_table(st
132 count++;
133 }
134
135 + if (stat->max_state != count) {
136 + stat->max_state = count;
137 + kfree(stat->time_in_state);
138 + stat->time_in_state = NULL;
139 + }
140 +
141 alloc_size = count * sizeof(int) + count * sizeof(u64);
142
143 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
144 alloc_size += count * count * sizeof(int);
145 #endif
146 - stat->max_state = count;
147 - stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
148 - if (!stat->time_in_state) {
149 - ret = -ENOMEM;
150 - goto error_out;
151 - }
152 - stat->freq_table = (unsigned int *)(stat->time_in_state + count);
153 -
154 + if (stat->time_in_state) {
155 + memset(stat->time_in_state, 0, alloc_size);
156 + } else {
157 + stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
158 + if (!stat->time_in_state)
159 + return -ENOMEM;
160 + stat->freq_table = (unsigned int *)(
161 + stat->time_in_state + count);
162 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
163 - stat->trans_table = stat->freq_table + count;
164 + stat->trans_table = stat->freq_table + count;
165 #endif
166 + }
167 +
168 j = 0;
169 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
170 - unsigned int freq = table[i].frequency;
171 - if (freq == CPUFREQ_ENTRY_INVALID)
172 - continue;
173 - if (freq_table_get_index(stat, freq) == -1)
174 - stat->freq_table[j++] = freq;
175 + if (stat->freq_table) {
176 + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
177 + unsigned int freq = table[i].frequency;
178 + if (freq == CPUFREQ_ENTRY_INVALID)
179 + continue;
180 + if (freq_table_get_index(stat, freq) == -1)
181 + stat->freq_table[j++] = freq;
182 + }
183 }
184 +
185 stat->state_num = j;
186 spin_lock(&cpufreq_stats_lock);
187 stat->last_time = get_jiffies_64();
188 stat->last_index = freq_table_get_index(stat, policy->cur);
189 spin_unlock(&cpufreq_stats_lock);
190 + return 0;
191 +}
192 +
193 +static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
194 + struct cpufreq_frequency_table *table)
195 +{
196 + unsigned int ret = 0;
197 + struct cpufreq_stats *stat;
198 + struct cpufreq_policy *data;
199 + unsigned int cpu = policy->cpu;
200 +
201 + stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
202 + if ((stat) == NULL)
203 + return -ENOMEM;
204 +
205 + data = cpufreq_cpu_get(cpu);
206 + if (data == NULL) {
207 + ret = -EINVAL;
208 + goto error_out;
209 + }
210 + ret = sysfs_create_group(&data->kobj, &stats_attr_group);
211 cpufreq_cpu_put(data);
212 + if (ret)
213 + goto error_out;
214 +
215 + stat->cpu = cpu;
216 + per_cpu(cpufreq_stats_table, cpu) = stat;
217 +
218 return 0;
219 error_out:
220 - cpufreq_cpu_put(data);
221 -error_get_fail:
222 kfree(stat);
223 per_cpu(cpufreq_stats_table, cpu) = NULL;
224 return ret;
225 @@ -274,10 +283,12 @@ static int cpufreq_stat_notifier_policy(
226 table = cpufreq_frequency_get_table(cpu);
227 if (!table)
228 return 0;
229 - ret = cpufreq_stats_create_table(policy, table);
230 - if (ret)
231 - return ret;
232 - return 0;
233 + if (!per_cpu(cpufreq_stats_table, cpu)) {
234 + ret = cpufreq_stats_create_table(policy, table);
235 + if (ret)
236 + return ret;
237 + }
238 + return cpufreq_stats_update_table(policy, table);
239 }
240
241 static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
242 @@ -297,21 +308,23 @@ static int cpufreq_stat_notifier_trans(s
243 old_index = stat->last_index;
244 new_index = freq_table_get_index(stat, freq->new);
245
246 - /* We can't do stat->time_in_state[-1]= .. */
247 - if (old_index == -1 || new_index == -1)
248 - return 0;
249 -
250 cpufreq_stats_update(freq->cpu);
251 -
252 if (old_index == new_index)
253 return 0;
254
255 + if (new_index == -1)
256 + return 0;
257 +
258 spin_lock(&cpufreq_stats_lock);
259 stat->last_index = new_index;
260 + if (old_index != -1) {
261 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
262 - stat->trans_table[old_index * stat->max_state + new_index]++;
263 + if (stat->trans_table)
264 + stat->trans_table[old_index * stat->max_state +
265 + new_index]++;
266 #endif
267 - stat->total_trans++;
268 + stat->total_trans++;
269 + }
270 spin_unlock(&cpufreq_stats_lock);
271 return 0;
272 }
273 @@ -327,10 +340,6 @@ static int __cpuinit cpufreq_stat_cpu_ca
274 case CPU_ONLINE_FROZEN:
275 cpufreq_update_policy(cpu);
276 break;
277 - case CPU_DOWN_PREPARE:
278 - case CPU_DOWN_PREPARE_FROZEN:
279 - cpufreq_stats_free_sysfs(cpu);
280 - break;
281 case CPU_DEAD:
282 case CPU_DEAD_FROZEN:
283 cpufreq_stats_free_table(cpu);
284 @@ -339,10 +348,9 @@ static int __cpuinit cpufreq_stat_cpu_ca
285 return NOTIFY_OK;
286 }
287
288 -/* priority=1 so this will get called before cpufreq_remove_dev */
289 -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
290 +static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
291 +{
292 .notifier_call = cpufreq_stat_cpu_callback,
293 - .priority = 1,
294 };
295
296 static struct notifier_block notifier_policy_block = {
297 @@ -392,7 +400,6 @@ static void __exit cpufreq_stats_exit(vo
298 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
299 for_each_online_cpu(cpu) {
300 cpufreq_stats_free_table(cpu);
301 - cpufreq_stats_free_sysfs(cpu);
302 }
303 }
304