kernel: bump 5.10 to 5.10.111
[openwrt/staging/chunkeey.git] / target / linux / bcm27xx / patches-5.10 / 950-0537-clk-Introduce-a-clock-request-API.patch
1 From d937a5c25139dd919d857a8e4a6491917b568176 Mon Sep 17 00:00:00 2001
2 From: Maxime Ripard <maxime@cerno.tech>
3 Date: Tue, 13 Apr 2021 11:00:01 +0200
4 Subject: [PATCH] clk: Introduce a clock request API
5
6 It's not unusual to find clocks being shared across multiple devices
7 that need to change the rate depending on what the device is doing at a
8 given time.
9
10 The SoC found on the RaspberryPi4 (BCM2711) is in such a situation
11 between its two HDMI controllers that share a clock that needs to be
12 raised depending on the output resolution of each controller.
13
14 The current clk_set_rate API doesn't really allow us to support that case
15 since there's really no synchronisation between multiple users, it's
16 essentially a fire-and-forget solution.
17
18 clk_set_min_rate does allow for such a synchronisation, but has another
19 drawback: it doesn't allow us to reduce the clock rate once the work is
20 over.
21
22 In our previous example, this means that if we were to raise the
23 resolution of one HDMI controller to the largest resolution and then
24 changing to a smaller one, we would still have the clock running at the
25 largest resolution rate resulting in a poor power-efficiency.
26
27 In order to address both issues, let's create an API that allows users to
28 create temporary requests to increase the rate to a minimum, before
29 going back to the initial rate once the request is done.
30
31 This introduces mainly two side-effects:
32
33 * There's an interaction between clk_set_rate and requests. This has
34 been addressed by having clk_set_rate increasing the rate if it's
35 greater than what the requests asked for, and in any case changing
36 the rate the clock will return to once all the requests are done.
37
38 * Similarly, clk_round_rate has been adjusted to take the requests
39 into account and return a rate that will be greater than or equal to the
40 requested rates.
41
42 Signed-off-by: Maxime Ripard <maxime@cerno.tech>
43 ---
44 drivers/clk/clk.c | 121 ++++++++++++++++++++++++++++++++++++++++++++
45 include/linux/clk.h | 4 ++
46 2 files changed, 125 insertions(+)
47
48 --- a/drivers/clk/clk.c
49 +++ b/drivers/clk/clk.c
50 @@ -77,12 +77,14 @@ struct clk_core {
51 unsigned int protect_count;
52 unsigned long min_rate;
53 unsigned long max_rate;
54 + unsigned long default_request_rate;
55 unsigned long accuracy;
56 int phase;
57 struct clk_duty duty;
58 struct hlist_head children;
59 struct hlist_node child_node;
60 struct hlist_head clks;
61 + struct list_head pending_requests;
62 unsigned int notifier_count;
63 #ifdef CONFIG_DEBUG_FS
64 struct dentry *dentry;
65 @@ -105,6 +107,12 @@ struct clk {
66 struct hlist_node clks_node;
67 };
68
69 +struct clk_request {
70 + struct list_head list;
71 + struct clk *clk;
72 + unsigned long rate;
73 +};
74 +
75 /*** runtime pm ***/
76 static int clk_pm_runtime_get(struct clk_core *core)
77 {
78 @@ -1431,10 +1439,14 @@ unsigned long clk_hw_round_rate(struct c
79 {
80 int ret;
81 struct clk_rate_request req;
82 + struct clk_request *clk_req;
83
84 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
85 req.rate = rate;
86
87 + list_for_each_entry(clk_req, &hw->core->pending_requests, list)
88 + req.min_rate = max(clk_req->rate, req.min_rate);
89 +
90 ret = clk_core_round_rate_nolock(hw->core, &req);
91 if (ret)
92 return 0;
93 @@ -1455,6 +1467,7 @@ EXPORT_SYMBOL_GPL(clk_hw_round_rate);
94 long clk_round_rate(struct clk *clk, unsigned long rate)
95 {
96 struct clk_rate_request req;
97 + struct clk_request *clk_req;
98 int ret;
99
100 if (!clk)
101 @@ -1468,6 +1481,9 @@ long clk_round_rate(struct clk *clk, uns
102 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
103 req.rate = rate;
104
105 + list_for_each_entry(clk_req, &clk->core->pending_requests, list)
106 + req.min_rate = max(clk_req->rate, req.min_rate);
107 +
108 ret = clk_core_round_rate_nolock(clk->core, &req);
109
110 if (clk->exclusive_count)
111 @@ -1935,6 +1951,7 @@ static struct clk_core *clk_calc_new_rat
112 unsigned long new_rate;
113 unsigned long min_rate;
114 unsigned long max_rate;
115 + struct clk_request *req;
116 int p_index = 0;
117 long ret;
118
119 @@ -1949,6 +1966,9 @@ static struct clk_core *clk_calc_new_rat
120
121 clk_core_get_boundaries(core, &min_rate, &max_rate);
122
123 + list_for_each_entry(req, &core->pending_requests, list)
124 + min_rate = max(req->rate, min_rate);
125 +
126 /* find the closest rate and parent clk/rate */
127 if (clk_core_can_round(core)) {
128 struct clk_rate_request req;
129 @@ -2153,6 +2173,7 @@ static unsigned long clk_core_req_round_
130 {
131 int ret, cnt;
132 struct clk_rate_request req;
133 + struct clk_request *clk_req;
134
135 lockdep_assert_held(&prepare_lock);
136
137 @@ -2167,6 +2188,9 @@ static unsigned long clk_core_req_round_
138 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
139 req.rate = req_rate;
140
141 + list_for_each_entry(clk_req, &core->pending_requests, list)
142 + req.min_rate = max(clk_req->rate, req.min_rate);
143 +
144 ret = clk_core_round_rate_nolock(core, &req);
145
146 /* restore the protection */
147 @@ -2260,6 +2284,9 @@ int clk_set_rate(struct clk *clk, unsign
148
149 ret = clk_core_set_rate_nolock(clk->core, rate);
150
151 + if (!list_empty(&clk->core->pending_requests))
152 + clk->core->default_request_rate = rate;
153 +
154 if (clk->exclusive_count)
155 clk_core_rate_protect(clk->core);
156
157 @@ -2426,6 +2453,99 @@ int clk_set_max_rate(struct clk *clk, un
158 EXPORT_SYMBOL_GPL(clk_set_max_rate);
159
160 /**
161 + * clk_request_start - Request a rate to be enforced temporarily
162 + * @clk: the clk to act on
163 + * @rate: the new rate asked for
164 + *
165 + * This function will create a request to temporarily increase the rate
166 + * of the clock to a certain minimum.
167 + *
168 + * This is meant as a best effort mechanism and while the rate of the
169 + * clock will be guaranteed to be equal or higher than the requested
170 + * rate, there's no guarantee on what the actual rate will be due to other
171 + * factors (other requests previously set, clock boundaries, etc.).
172 + *
173 + * Once the request is marked as done through clk_request_done(), the
174 + * rate will be reverted back to what the rate was before the request.
175 + *
176 + * The reported boundaries of the clock will also be adjusted so that
177 + * clk_round_rate() take those requests into account. A call to
178 + * clk_set_rate() during a request will affect the rate the clock will
179 + * return to after the requests on that clock are done.
180 + *
181 + * Returns a pointer to the request on success, an ERR_PTR otherwise.
182 + */
183 +struct clk_request *clk_request_start(struct clk *clk, unsigned long rate)
184 +{
185 + struct clk_request *req;
186 + int ret;
187 +
188 + if (!clk)
189 + return ERR_PTR(-EINVAL);
190 +
191 + req = kzalloc(sizeof(*req), GFP_KERNEL);
192 + if (!req)
193 + return ERR_PTR(-ENOMEM);
194 +
195 + clk_prepare_lock();
196 +
197 + req->clk = clk;
198 + req->rate = rate;
199 +
200 + if (list_empty(&clk->core->pending_requests))
201 + clk->core->default_request_rate = clk_core_get_rate_recalc(clk->core);
202 +
203 + ret = clk_core_set_rate_nolock(clk->core, rate);
204 + if (ret) {
205 + clk_prepare_unlock();
206 + kfree(req);
207 + return ERR_PTR(ret);
208 + }
209 +
210 + list_add_tail(&req->list, &clk->core->pending_requests);
211 + clk_prepare_unlock();
212 +
213 + return req;
214 +}
215 +EXPORT_SYMBOL_GPL(clk_request_start);
216 +
217 +/**
218 + * clk_request_done - Mark a clk_request as done
219 + * @req: the request to mark done
220 + *
221 + * This function will remove the rate request from the clock and adjust
222 + * the clock rate back to either to what it was before the request
223 + * started, or if there's any other request on that clock to a proper
224 + * rate for them.
225 + */
226 +void clk_request_done(struct clk_request *req)
227 +{
228 + struct clk_core *core = req->clk->core;
229 +
230 + clk_prepare_lock();
231 +
232 + list_del(&req->list);
233 +
234 + if (list_empty(&core->pending_requests)) {
235 + clk_core_set_rate_nolock(core, core->default_request_rate);
236 + core->default_request_rate = 0;
237 + } else {
238 + struct clk_request *cur_req;
239 + unsigned long new_rate = 0;
240 +
241 + list_for_each_entry(cur_req, &core->pending_requests, list)
242 + new_rate = max(new_rate, cur_req->rate);
243 +
244 + clk_core_set_rate_nolock(core, new_rate);
245 + }
246 +
247 + clk_prepare_unlock();
248 +
249 + kfree(req);
250 +}
251 +EXPORT_SYMBOL_GPL(clk_request_done);
252 +
253 +/**
254 * clk_get_parent - return the parent of a clk
255 * @clk: the clk whose parent gets returned
256 *
257 @@ -3875,6 +3995,7 @@ __clk_register(struct device *dev, struc
258 goto fail_parents;
259
260 INIT_HLIST_HEAD(&core->clks);
261 + INIT_LIST_HEAD(&core->pending_requests);
262
263 /*
264 * Don't call clk_hw_create_clk() here because that would pin the
265 --- a/include/linux/clk.h
266 +++ b/include/linux/clk.h
267 @@ -15,6 +15,7 @@
268
269 struct device;
270 struct clk;
271 +struct clk_request;
272 struct device_node;
273 struct of_phandle_args;
274
275 @@ -743,6 +744,9 @@ int clk_save_context(void);
276 */
277 void clk_restore_context(void);
278
279 +struct clk_request *clk_request_start(struct clk *clk, unsigned long rate);
280 +void clk_request_done(struct clk_request *req);
281 +
282 #else /* !CONFIG_HAVE_CLK */
283
284 static inline struct clk *clk_get(struct device *dev, const char *id)