ipq806x: Add support for IPQ806x chip family
[openwrt/staging/wigyori.git] target/linux/ipq806x/patches/0165-clk-qcom-Add-support-for-muxes-dividers-and-mux-divi.patch
From 151d7e91baaa4016ba687b80e8f7ccead62d6c72 Mon Sep 17 00:00:00 2001
From: Stephen Boyd <sboyd@codeaurora.org>
Date: Tue, 25 Mar 2014 13:37:55 -0700
Subject: [PATCH 165/182] clk: qcom: Add support for muxes, dividers, and mux
 dividers

The Krait CPU clocks are made up of muxes and dividers with a
handful of sources. Add a set of clk_ops that allow us to
configure these clocks so we can support CPU frequency scaling on
Krait CPUs.

Based on code originally written by Saravana Kannan.

Cc: Saravana Kannan <skannan@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
---
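A minimal sketch of how a platform driver might back the new mux helper.
Everything named example_* below is hypothetical, as are the register
field values and the parent clock names; only the clk_mux_ops/mux_clk
interface and clk_ops_gen_mux come from this patch. The driver is
expected to fill in .base (e.g. from devm_ioremap_resource()) before
registering the clock with clk_register()/devm_clk_register().

#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clk/msm-clk-generic.h>

/* Read-modify-write the (hypothetical) select field at base + offset */
static int example_set_mux_sel(struct mux_clk *clk, int sel)
{
	u32 regval = readl_relaxed(clk->base + clk->offset);

	regval &= ~(clk->mask << clk->shift);
	regval |= (sel & clk->mask) << clk->shift;
	writel_relaxed(regval, clk->base + clk->offset);

	return 0;
}

static int example_get_mux_sel(struct mux_clk *clk)
{
	return (readl_relaxed(clk->base + clk->offset) >> clk->shift) &
			clk->mask;
}

static const struct clk_mux_ops example_mux_ops = {
	.set_mux_sel = example_set_mux_sel,
	.get_mux_sel = example_get_mux_sel,
};

/* hw select value per parent, most-preferred parent first */
static u8 example_parent_map[] = { 3, 0 };

static struct mux_clk example_mux = {
	.parent_map = example_parent_map,
	.mask = 0x3,
	.shift = 0,
	.ops = &example_mux_ops,
	.hw.init = &(struct clk_init_data){
		.name = "example_mux",
		.parent_names = (const char *[]){ "example_pll", "example_xo" },
		.num_parents = 2,
		.ops = &clk_ops_gen_mux,
	},
};

Note that parent_map translates the clock framework's parent index (the
position in parent_names) into the hardware select value, so preferred
parents can be listed first regardless of their mux encoding;
mux_get_parent() performs the reverse lookup.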
 drivers/clk/qcom/Makefile           |    1 +
 drivers/clk/qcom/clk-generic.c      |  405 +++++++++++++++++++++++++++++++++++
 include/linux/clk/msm-clk-generic.h |  208 ++++++++++++++++++
 3 files changed, 614 insertions(+)
 create mode 100644 drivers/clk/qcom/clk-generic.c
 create mode 100644 include/linux/clk/msm-clk-generic.h

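Two conventions in this code are worth spelling out. For half dividers
(is_half_divider), the div fields store twice the real divider value: a
divider of 1.5 is stored as div = 3, and __div_round_rate() recovers the
rate as mult_frac(prate, 2, 3), i.e. with numer = 2. For fixed dividers,
a clock with min_div == max_div can be declared with the new
DEFINE_FIXED_DIV_CLK() helper; a sketch with invented clock and parent
names:

#include <linux/clk/msm-clk-generic.h>

/*
 * Fixed /2 divider. Because min_div == max_div and the macro sets
 * CLK_SET_RATE_PARENT, rate requests are forwarded to the
 * (hypothetical) "example_pll" parent scaled up by 2.
 */
DEFINE_FIXED_DIV_CLK(example_div2_clk, 2, "example_pll");

clk_register(NULL, &example_div2_clk.hw) would then hand it to the
framework.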
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index df2a1b3..2cc6039 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -6,6 +6,7 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-generic.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
diff --git a/drivers/clk/qcom/clk-generic.c b/drivers/clk/qcom/clk-generic.c
new file mode 100644
index 0000000..a0d778b
--- /dev/null
+++ b/drivers/clk/qcom/clk-generic.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/msm-clk-generic.h>
+
+
+/* ==================== Mux clock ==================== */
+
+static int mux_set_parent(struct clk_hw *hw, u8 sel)
+{
+	struct mux_clk *mux = to_mux_clk(hw);
+
+	if (mux->parent_map)
+		sel = mux->parent_map[sel];
+
+	return mux->ops->set_mux_sel(mux, sel);
+}
+
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+	struct mux_clk *mux = to_mux_clk(hw);
+	int num_parents = __clk_get_num_parents(hw->clk);
+	int i;
+	u8 sel;
+
+	sel = mux->ops->get_mux_sel(mux);
+	if (mux->parent_map) {
+		for (i = 0; i < num_parents; i++)
+			if (sel == mux->parent_map[i])
+				return i;
+		WARN(1, "Can't find parent\n");
+		return -EINVAL;
+	}
+
+	return sel;
+}
+
+static int mux_enable(struct clk_hw *hw)
+{
+	struct mux_clk *mux = to_mux_clk(hw);
+	if (mux->ops->enable)
+		return mux->ops->enable(mux);
+	return 0;
+}
+
+static void mux_disable(struct clk_hw *hw)
+{
+	struct mux_clk *mux = to_mux_clk(hw);
+	if (mux->ops->disable)
+		return mux->ops->disable(mux);
+}
+
+static struct clk *mux_get_safe_parent(struct clk_hw *hw)
+{
+	int i;
+	struct mux_clk *mux = to_mux_clk(hw);
+	int num_parents = __clk_get_num_parents(hw->clk);
+
+	if (!mux->has_safe_parent)
+		return NULL;
+
+	i = mux->safe_sel;
+	if (mux->parent_map)
+		for (i = 0; i < num_parents; i++)
+			if (mux->safe_sel == mux->parent_map[i])
+				break;
+
+	return clk_get_parent_by_index(hw->clk, i);
+}
+
+const struct clk_ops clk_ops_gen_mux = {
+	.enable = mux_enable,
+	.disable = mux_disable,
+	.set_parent = mux_set_parent,
+	.get_parent = mux_get_parent,
+	.determine_rate = __clk_mux_determine_rate,
+	.get_safe_parent = mux_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(clk_ops_gen_mux);
+
+/* ==================== Divider clock ==================== */
+
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+	struct clk *parent, unsigned int *best_div, unsigned long *best_prate,
+	bool set_parent)
+{
+	unsigned int div, min_div, max_div, _best_div = 1;
+	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+	unsigned int numer;
+
+	rate = max(rate, 1UL);
+
+	min_div = max(data->min_div, 1U);
+	max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));
+
+	/*
+	 * div values are doubled for half dividers.
+	 * Adjust for that by picking a numerator (numer) of 2.
+	 */
+	numer = data->is_half_divider ? 2 : 1;
+
+	if (!set_parent) {
+		prate = *best_prate * numer;
+		div = DIV_ROUND_UP(prate, rate);
+		div = clamp(div, 1U, max_div);
+		if (best_div)
+			*best_div = div;
+		return mult_frac(*best_prate, numer, div);
+	}
+
+	for (div = min_div; div <= max_div; div++) {
+		req_prate = mult_frac(rate, div, numer);
+		prate = __clk_round_rate(parent, req_prate);
+		if (IS_ERR_VALUE(prate))
+			break;
+
+		actual_rate = mult_frac(prate, numer, div);
+		if (is_better_rate(rate, rrate, actual_rate)) {
+			rrate = actual_rate;
+			_best_div = div;
+			_best_prate = prate;
+		}
+
+		/*
+		 * Trying higher dividers is only going to ask the parent for
+		 * a higher rate. If it can't even output a rate higher than
+		 * the one we request for this divider, the parent is not
+		 * going to be able to output an even higher rate required
+		 * for a higher divider. So, stop trying higher dividers.
+		 */
+		if (actual_rate < rate)
+			break;
+
+		if (rrate <= rate)
+			break;
+	}
+
+	if (!rrate)
+		return -EINVAL;
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+
+	return rrate;
+}
+
+static long div_round_rate(struct clk_hw *hw, unsigned long rate,
+	unsigned long *parent_rate)
+{
+	struct div_clk *d = to_div_clk(hw);
+	bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
+
+	return __div_round_rate(&d->data, rate, __clk_get_parent(hw->clk),
+			NULL, parent_rate, set_parent);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
+		parent_rate)
+{
+	struct div_clk *d = to_div_clk(hw);
+	int div, rc = 0;
+	struct div_data *data = &d->data;
+
+	div = parent_rate / rate;
+	if (div != data->div)
+		rc = d->ops->set_div(d, div);
+	data->div = div;
+
+	return rc;
+}
+
+static int div_enable(struct clk_hw *hw)
+{
+	struct div_clk *d = to_div_clk(hw);
+	if (d->ops && d->ops->enable)
+		return d->ops->enable(d);
+	return 0;
+}
+
+static void div_disable(struct clk_hw *hw)
+{
+	struct div_clk *d = to_div_clk(hw);
+	if (d->ops && d->ops->disable)
+		return d->ops->disable(d);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+	struct div_clk *d = to_div_clk(hw);
+	unsigned int div = d->data.div;
+
+	if (d->ops && d->ops->get_div)
+		div = max(d->ops->get_div(d), 1);
+	div = max(div, 1U);
+
+	if (!d->ops || !d->ops->set_div)
+		d->data.min_div = d->data.max_div = div;
+	d->data.div = div;
+
+	return prate / div;
+}
+
+const struct clk_ops clk_ops_div = {
+	.enable = div_enable,
+	.disable = div_disable,
+	.round_rate = div_round_rate,
+	.set_rate = div_set_rate,
+	.recalc_rate = div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_ops_div);
+
+/* ==================== Mux_div clock ==================== */
+
+static int mux_div_clk_enable(struct clk_hw *hw)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+
+	if (md->ops->enable)
+		return md->ops->enable(md);
+	return 0;
+}
+
+static void mux_div_clk_disable(struct clk_hw *hw)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+
+	if (md->ops->disable)
+		return md->ops->disable(md);
+}
+
+static long __mux_div_round_rate(struct clk_hw *hw, unsigned long rate,
+	struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	unsigned int i;
+	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+	struct clk *_best_parent = 0;
+	int num_parents = __clk_get_num_parents(hw->clk);
+	bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
+
+	for (i = 0; i < num_parents; i++) {
+		int div;
+		unsigned long prate;
+		struct clk *p = clk_get_parent_by_index(hw->clk, i);
+
+		rrate = __div_round_rate(&md->data, rate, p, &div, &prate,
+				set_parent);
+
+		if (is_better_rate(rate, best, rrate)) {
+			best = rrate;
+			_best_div = div;
+			_best_prate = prate;
+			_best_parent = p;
+		}
+
+		if (rate <= rrate)
+			break;
+	}
+
+	if (best_div)
+		*best_div = _best_div;
+	if (best_prate)
+		*best_prate = _best_prate;
+	if (best_parent)
+		*best_parent = _best_parent;
+
+	if (best)
+		return best;
+	return -EINVAL;
+}
+
+static long mux_div_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+	unsigned long *parent_rate)
+{
+	return __mux_div_round_rate(hw, rate, NULL, NULL, parent_rate);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, u8 src_sel, u32 div)
+{
+	int rc;
+
+	rc = md->ops->set_src_div(md, src_sel, div);
+	if (!rc) {
+		md->data.div = div;
+		md->src_sel = src_sel;
+	}
+
+	return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk_hw *hw)
+{
+	unsigned long rrate;
+	u32 best_div;
+	struct clk *best_parent;
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+
+	if (IS_ERR(md->safe_parent))
+		return -EINVAL;
+	if (!md->safe_freq || md->safe_parent)
+		return 0;
+
+	rrate = __mux_div_round_rate(hw, md->safe_freq, &best_parent,
+			&best_div, NULL);
+
+	if (rrate == md->safe_freq) {
+		md->safe_div = best_div;
+		md->safe_parent = best_parent;
+	} else {
+		md->safe_parent = ERR_PTR(-EINVAL);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+__mux_div_clk_set_rate_and_parent(struct clk_hw *hw, u8 index, u32 div)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	int rc;
+
+	rc = safe_parent_init_once(hw);
+	if (rc)
+		return rc;
+
+	return __set_src_div(md, index, div);
+}
+
+static int mux_div_clk_set_rate_and_parent(struct clk_hw *hw,
+	unsigned long rate, unsigned long parent_rate, u8 index)
+{
+	return __mux_div_clk_set_rate_and_parent(hw, index, parent_rate / rate);
+}
+
+static int mux_div_clk_set_rate(struct clk_hw *hw,
+	unsigned long rate, unsigned long parent_rate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	return __mux_div_clk_set_rate_and_parent(hw, md->src_sel,
+			parent_rate / rate);
+}
+
+static int mux_div_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	return __mux_div_clk_set_rate_and_parent(hw, md->parent_map[index],
+			md->data.div);
+}
+
+static u8 mux_div_clk_get_parent(struct clk_hw *hw)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	int num_parents = __clk_get_num_parents(hw->clk);
+	u32 i, div, sel;
+
+	md->ops->get_src_div(md, &sel, &div);
+	md->src_sel = sel;
+
+	for (i = 0; i < num_parents; i++)
+		if (sel == md->parent_map[i])
+			return i;
+	WARN(1, "Can't find parent\n");
+	return -EINVAL;
+}
+
+static unsigned long
+mux_div_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+	struct mux_div_clk *md = to_mux_div_clk(hw);
+	u32 div, sel;
+
+	md->ops->get_src_div(md, &sel, &div);
+
+	return prate / div;
+}
+
+const struct clk_ops clk_ops_mux_div_clk = {
+	.enable = mux_div_clk_enable,
+	.disable = mux_div_clk_disable,
+	.set_rate_and_parent = mux_div_clk_set_rate_and_parent,
+	.set_rate = mux_div_clk_set_rate,
+	.set_parent = mux_div_clk_set_parent,
+	.round_rate = mux_div_clk_round_rate,
+	.get_parent = mux_div_clk_get_parent,
+	.recalc_rate = mux_div_clk_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_ops_mux_div_clk);
diff --git a/include/linux/clk/msm-clk-generic.h b/include/linux/clk/msm-clk-generic.h
new file mode 100644
index 0000000..cee3863
--- /dev/null
+++ b/include/linux/clk/msm-clk-generic.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_GENERIC_H__
+#define __QCOM_CLK_GENERIC_H__
+
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+	unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+/* ==================== Mux clock ==================== */
+
+struct mux_clk;
+
+struct clk_mux_ops {
+	int (*set_mux_sel)(struct mux_clk *clk, int sel);
+	int (*get_mux_sel)(struct mux_clk *clk);
+
+	/* Optional */
+	bool (*is_enabled)(struct mux_clk *clk);
+	int (*enable)(struct mux_clk *clk);
+	void (*disable)(struct mux_clk *clk);
+};
+
+struct mux_clk {
+	/* Parents in decreasing order of preference for obtaining rates. */
+	u8 *parent_map;
+	bool has_safe_parent;
+	u8 safe_sel;
+	const struct clk_mux_ops *ops;
+
+	/* Fields not used by helper function. */
+	void __iomem *base;
+	u32 offset;
+	u32 en_offset;
+	int en_reg;
+	u32 mask;
+	u32 shift;
+	u32 en_mask;
+	void *priv;
+
+	struct clk_hw hw;
+};
+
+static inline struct mux_clk *to_mux_clk(struct clk_hw *hw)
+{
+	return container_of(hw, struct mux_clk, hw);
+}
+
+extern const struct clk_ops clk_ops_gen_mux;
+
+/* ==================== Divider clock ==================== */
+
+struct div_clk;
+
+struct clk_div_ops {
+	int (*set_div)(struct div_clk *clk, int div);
+	int (*get_div)(struct div_clk *clk);
+	bool (*is_enabled)(struct div_clk *clk);
+	int (*enable)(struct div_clk *clk);
+	void (*disable)(struct div_clk *clk);
+};
+
+struct div_data {
+	unsigned int div;
+	unsigned int min_div;
+	unsigned int max_div;
+	/*
+	 * Indicates whether this divider clock supports half-integer
+	 * dividers. If it does, min_div and max_div are doubled, i.e.
+	 * they hold twice the real divider value.
+	 */
+	bool is_half_divider;
+};
+
+struct div_clk {
+	struct div_data data;
+
+	/* Optional */
+	const struct clk_div_ops *ops;
+
+	/* Fields not used by helper function. */
+	void __iomem *base;
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 en_mask;
+	void *priv;
+	struct clk_hw hw;
+};
+
+static inline struct div_clk *to_div_clk(struct clk_hw *hw)
+{
+	return container_of(hw, struct div_clk, hw);
+}
+
+extern const struct clk_ops clk_ops_div;
+
+#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = {	\
+	.data = {				\
+		.max_div = _div,		\
+		.min_div = _div,		\
+		.div = _div,			\
+	},					\
+	.hw.init = &(struct clk_init_data){	\
+		.parent_names = (const char *[]){ _parent },	\
+		.num_parents = 1,		\
+		.name = #clk_name,		\
+		.ops = &clk_ops_div,		\
+		.flags = CLK_SET_RATE_PARENT,	\
+	}					\
+}
+
+/* ==================== Mux Div clock ==================== */
+
+struct mux_div_clk;
+
+/*
+ * struct mux_div_ops
+ * the enable and disable ops are optional.
+ */
+
+struct mux_div_ops {
+	int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
+	void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
+	int (*enable)(struct mux_div_clk *);
+	void (*disable)(struct mux_div_clk *);
+	bool (*is_enabled)(struct mux_div_clk *);
+};
+
+/*
+ * struct mux_div_clk - combined mux/divider clock
+ * @priv
+		parameters needed by ops
+ * @safe_freq
+		when switching rates from A to B, the mux div clock will
+		instead switch from A -> safe_freq -> B. This allows the
+		mux_div clock to change rates while enabled, even if this
+		behavior is not supported by the parent clocks.
+
+		If changing the rate of parent A also causes the rate of
+		parent B to change, then safe_freq must be defined.
+
+		safe_freq is expected to have a source clock which is always
+		on and runs at only one rate.
+ * @parents
+		list of parents and mux indices
+ * @ops
+		function pointers for hw specific operations
+ * @src_sel
+		the mux index which will be used if the clock is enabled.
+ */
+
+struct mux_div_clk {
+	/* Required parameters */
+	const struct mux_div_ops *ops;
+	struct div_data data;
+	u8 *parent_map;
+
+	struct clk_hw hw;
+
+	/* Internal */
+	u32 src_sel;
+
+	/* Optional parameters */
+	void *priv;
+	void __iomem *base;
+	u32 div_mask;
+	u32 div_offset;
+	u32 div_shift;
+	u32 src_mask;
+	u32 src_offset;
+	u32 src_shift;
+	u32 en_mask;
+	u32 en_offset;
+
+	u32 safe_div;
+	struct clk *safe_parent;
+	unsigned long safe_freq;
+};
+
+static inline struct mux_div_clk *to_mux_div_clk(struct clk_hw *hw)
+{
+	return container_of(hw, struct mux_div_clk, hw);
+}
+
+extern const struct clk_ops clk_ops_mux_div_clk;
+
+#endif
-- 
1.7.10.4

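For completeness, a sketch of a combined mux/divider backed by
clk_ops_mux_div_clk. The single-register layout and all example_* names
are assumptions, not part of the patch; set_src_div()/get_src_div() are
the only mandatory mux_div_ops. Setting safe_freq makes rate changes
detour through an always-on source: safe_parent_init_once() succeeds
only if some parent/divider combination yields exactly safe_freq, so
the value must match a fixed-rate parent such as the crystal oscillator.

#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clk/msm-clk-generic.h>

static int example_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
{
	u32 regval;

	/* assumes one register holds both the source and divider fields */
	regval = readl_relaxed(md->base + md->div_offset);
	regval &= ~(md->div_mask << md->div_shift);
	regval |= (div & md->div_mask) << md->div_shift;
	regval &= ~(md->src_mask << md->src_shift);
	regval |= (src_sel & md->src_mask) << md->src_shift;
	writel_relaxed(regval, md->base + md->div_offset);

	return 0;
}

static void example_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
{
	u32 regval = readl_relaxed(md->base + md->div_offset);

	*div = (regval >> md->div_shift) & md->div_mask;
	*src_sel = (regval >> md->src_shift) & md->src_mask;
}

static const struct mux_div_ops example_md_ops = {
	.set_src_div = example_set_src_div,
	.get_src_div = example_get_src_div,
};

static u8 example_md_parent_map[] = { 2, 0 };

static struct mux_div_clk example_mux_div = {
	.ops = &example_md_ops,
	.data = {
		.min_div = 1,
		.max_div = 16,
	},
	.parent_map = example_md_parent_map,
	.safe_freq = 25000000,		/* always-on XO rate, illustrative */
	.div_mask = 0xf,
	.div_shift = 3,
	.src_mask = 0x7,
	.src_shift = 0,
	.hw.init = &(struct clk_init_data){
		.name = "example_mux_div",
		.parent_names = (const char *[]){ "example_pll", "example_xo" },
		.num_parents = 2,
		.ops = &clk_ops_mux_div_clk,
		.flags = CLK_SET_RATE_PARENT,
	},
};

With CLK_SET_RATE_PARENT set, mux_div_clk_round_rate() asks every parent
for its best rate at each candidate divider and picks the combination
closest to the request, which is what makes CPU frequency scaling across
a PLL and a fixed source work with a single clk_set_rate() call.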