[openwrt/staging/svanheule.git] target/linux/realtek/files-5.10/drivers/clk/realtek/clk-rtl83xx.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Realtek RTL83XX clock driver
4 * Copyright (C) 2022 Markus Stockhausen <markus.stockhausen@gmx.de>
5 *
6 * This driver provides basic clock support for the central core clock unit (CCU) and its PLLs
7 * inside the RTL838X and RTL839X SOCs. Currently CPU, memory and LXB clock information can be
8 * accessed. To make use of the driver add the following devices and configurations at the
9 * appropriate locations to the DT.
10 *
11 * #include <dt-bindings/clock/rtl83xx-clk.h>
12 *
13 * sram0: sram@9f000000 {
14 * compatible = "mmio-sram";
15 * reg = <0x9f000000 0x18000>;
16 * #address-cells = <1>;
17 * #size-cells = <1>;
18 * ranges = <0 0x9f000000 0x18000>;
19 * };
20 *
21 * osc: oscillator {
22 * compatible = "fixed-clock";
23 * #clock-cells = <0>;
24 * clock-frequency = <25000000>;
25 * };
26 *
27 * ccu: clock-controller {
28 * compatible = "realtek,rtl8380-clock";
29 * #clock-cells = <1>;
30 * clocks = <&osc>;
31 * clock-names = "ref_clk";
32 * };
33 *
34 *
35 * The SRAM node is needed to be able to set clock rates. When changing clocks the code must not run
36 * from DRAM, otherwise the system might freeze. Take care to adjust the CCU compatible string, SRAM address
37 * and size to the target SOC device. Afterwards one can access/identify the clocks in the other
38 * DT devices with <&ccu CLK_CPU>, <&ccu CLK_MEM> or <&ccu CLK_LXB>. Additionally the clocks can
39 * be used inside the kernel with
40 *
41 * cpu_clk = clk_get(NULL, "cpu_clk");
42 * mem_clk = clk_get(NULL, "mem_clk");
43 * lxb_clk = clk_get(NULL, "lxb_clk");
44 *
45 * This driver can be used directly by the DT based cpufreq driver (CONFIG_CPUFREQ_DT) if the CPU
46 * node references the right clock and sane operating points (OPP) are provided. E.g.
47 *
48 * cpu@0 {
49 * compatible = "mips,mips4KEc";
50 * reg = <0>;
51 * clocks = <&ccu CLK_CPU>;
52 * operating-points-v2 = <&cpu_opp_table>;
53 * };
54 *
55 * cpu_opp_table: opp-table-0 {
56 * compatible = "operating-points-v2";
57 * opp-shared;
58 * opp00 {
59 * opp-hz = /bits/ 64 <425000000>;
60 * };
61 * ...
62 * }
63 */
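
/*
 * Usage sketch (not part of the original documentation): once the clkdev entries are registered,
 * kernel code could query and retune the CPU clock roughly as below. The function name and the
 * 500 MHz target are purely illustrative.
 *
 *	static int example_retune_cpu(void)
 *	{
 *		struct clk *cpu_clk = clk_get(NULL, "cpu_clk");
 *
 *		if (IS_ERR(cpu_clk))
 *			return PTR_ERR(cpu_clk);
 *
 *		pr_info("CPU clock at %lu Hz\n", clk_get_rate(cpu_clk));
 *		clk_set_rate(cpu_clk, 500000000);
 *		clk_put(cpu_clk);
 *		return 0;
 *	}
 */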
64
65 #include <asm/cacheflush.h>
66 #include <asm/mipsmtregs.h>
67 #include <dt-bindings/clock/rtl83xx-clk.h>
68 #include <linux/clk.h>
69 #include <linux/clk-provider.h>
70 #include <linux/clkdev.h>
71 #include <linux/cpu.h>
72 #include <linux/delay.h>
73 #include <linux/genalloc.h>
74 #include <linux/io.h>
75 #include <linux/ioport.h>
76 #include <linux/of_address.h>
77 #include <linux/of_platform.h>
78 #include <linux/platform_device.h>
79 #include <linux/slab.h>
80
81 #include "clk-rtl83xx.h"
82
83 #define read_sw(reg) ioread32(((void *)RTL_SW_CORE_BASE) + reg)
84 #define read_soc(reg) ioread32(((void *)RTL_SOC_BASE) + reg)
85
86 #define write_sw(val, reg) iowrite32(val, ((void *)RTL_SW_CORE_BASE) + reg)
87 #define write_soc(val, reg) iowrite32(val, ((void *)RTL_SOC_BASE) + reg)
88
89 /*
90 * some hardware specific definitions
91 */
92
93 #define RTCL_SOC838X 0
94 #define RTCL_SOC839X 1
95 #define RTCL_SOCCNT 2
96
97 #define RTCL_DDR1 1
98 #define RTCL_DDR2 2
99 #define RTCL_DDR3 3
100
101 #define REG_CTRL0 0
102 #define REG_CTRL1 1
103 #define REG_COUNT 2
104
105 #define RTCL_XTAL_RATE 25000000
106
107 static const int rtcl_regs[RTCL_SOCCNT][REG_COUNT][CLK_COUNT] = {
108 {
109 { RTL838X_PLL_CPU_CTRL0, RTL838X_PLL_MEM_CTRL0, RTL838X_PLL_LXB_CTRL0 },
110 { RTL838X_PLL_CPU_CTRL1, RTL838X_PLL_MEM_CTRL1, RTL838X_PLL_LXB_CTRL1 },
111 }, {
112 { RTL839X_PLL_CPU_CTRL0, RTL839X_PLL_MEM_CTRL0, RTL839X_PLL_LXB_CTRL0 },
113 { RTL839X_PLL_CPU_CTRL1, RTL839X_PLL_MEM_CTRL1, RTL839X_PLL_LXB_CTRL1 },
114 }
115 };
116
117 #define RTCL_REG_SET(_rate, _ctrl0, _ctrl1) \
118 { \
119 .rate = _rate, \
120 .ctrl0 = _ctrl0, \
121 .ctrl1 = _ctrl1, \
122 }
123
124 struct rtcl_reg_set {
125 unsigned int rate;
126 unsigned int ctrl0;
127 unsigned int ctrl1;
128 };
129
130 /*
131 * The following configuration tables are valid operating points for their corresponding PLLs. The
132 * magic numbers are precalculated multipliers and dividers that keep the driver simple. Note that the
133 * tables also contain rates outside the allowed physical specifications. E.g. DDR3 memory has a lower
134 * limit of 303 MHz and the CPU might get unstable if set to anything above its startup frequency.
135 * Additionally the Realtek SOCs tend to expect CPU speed > MEM speed > LXB speed. The caller or
136 * DT configuration must take care that only valid operating points are selected.
137 */
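
/*
 * Worked example (values taken from the tables below, arithmetic as used by rtcl_set_rate()): for
 * the RTL838X CPU PLL the round set has min = 300 MHz and step = 25 MHz, so a request for
 * 425000000 Hz gives tab_idx = (425000000 - 300000000) / 25000000 = 5, which selects the 425 MHz
 * entry of rtcl_838x_cpu_reg_set[] and programs its precalculated ctrl0/ctrl1 pair.
 */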
138
139 static const struct rtcl_reg_set rtcl_838x_cpu_reg_set[] = {
140 RTCL_REG_SET(300000000, 0x045c, 5),
141 RTCL_REG_SET(325000000, 0x0464, 5),
142 RTCL_REG_SET(350000000, 0x046c, 5),
143 RTCL_REG_SET(375000000, 0x0474, 5),
144 RTCL_REG_SET(400000000, 0x045c, 3),
145 RTCL_REG_SET(425000000, 0x0462, 3),
146 RTCL_REG_SET(450000000, 0x0468, 3),
147 RTCL_REG_SET(475000000, 0x046e, 3),
148 RTCL_REG_SET(500000000, 0x0474, 3),
149 RTCL_REG_SET(525000000, 0x047a, 3),
150 RTCL_REG_SET(550000000, 0x0480, 3),
151 RTCL_REG_SET(575000000, 0x0486, 3),
152 RTCL_REG_SET(600000000, 0x048c, 3),
153 RTCL_REG_SET(625000000, 0x0492, 3)
154 };
155
156 static const struct rtcl_reg_set rtcl_838x_mem_reg_set[] = {
157 RTCL_REG_SET(200000000, 0x041b, 5),
158 RTCL_REG_SET(225000000, 0x0417, 3),
159 RTCL_REG_SET(250000000, 0x041a, 3),
160 RTCL_REG_SET(275000000, 0x0412, 1),
161 RTCL_REG_SET(300000000, 0x0414, 1),
162 RTCL_REG_SET(325000000, 0x0416, 1),
163 RTCL_REG_SET(350000000, 0x0418, 1),
164 RTCL_REG_SET(375000000, 0x041a, 1)
165 };
166
167 static const struct rtcl_reg_set rtcl_838x_lxb_reg_set[] = {
168 RTCL_REG_SET(100000000, 0x043c, 0),
169 RTCL_REG_SET(125000000, 0x043c, 0),
170 RTCL_REG_SET(150000000, 0x0450, 5),
171 RTCL_REG_SET(175000000, 0x0450, 5),
172 RTCL_REG_SET(200000000, 0x047c, 0)
173 };
174
175 static const struct rtcl_reg_set rtcl_839x_cpu_reg_set[] = {
176 RTCL_REG_SET(400000000, 0x0414, 5),
177 RTCL_REG_SET(425000000, 0x041e, 6),
178 RTCL_REG_SET(450000000, 0x0417, 5),
179 RTCL_REG_SET(475000000, 0x0422, 6),
180 RTCL_REG_SET(500000000, 0x041a, 5),
181 RTCL_REG_SET(525000000, 0x0426, 6),
182 RTCL_REG_SET(550000000, 0x0412, 4),
183 RTCL_REG_SET(575000000, 0x042a, 6),
184 RTCL_REG_SET(600000000, 0x0414, 4),
185 RTCL_REG_SET(625000000, 0x042e, 6),
186 RTCL_REG_SET(650000000, 0x0416, 4),
187 RTCL_REG_SET(675000000, 0x0432, 6),
188 RTCL_REG_SET(700000000, 0x0418, 4),
189 RTCL_REG_SET(725000000, 0x0436, 6),
190 RTCL_REG_SET(750000000, 0x0438, 6),
191 RTCL_REG_SET(775000000, 0x043a, 6),
192 RTCL_REG_SET(800000000, 0x043c, 6),
193 RTCL_REG_SET(825000000, 0x043e, 6),
194 RTCL_REG_SET(850000000, 0x0440, 6)
195 };
196
197 static const struct rtcl_reg_set rtcl_839x_mem_reg_set[] = {
198 RTCL_REG_SET(125000000, 0x041a, 7),
199 RTCL_REG_SET(150000000, 0x0414, 6),
200 RTCL_REG_SET(175000000, 0x0418, 6),
201 RTCL_REG_SET(200000000, 0x041c, 6),
202 RTCL_REG_SET(225000000, 0x0417, 5),
203 RTCL_REG_SET(250000000, 0x041a, 5),
204 RTCL_REG_SET(275000000, 0x0412, 4),
205 RTCL_REG_SET(300000000, 0x0414, 4),
206 RTCL_REG_SET(325000000, 0x0416, 4),
207 RTCL_REG_SET(350000000, 0x0418, 4),
208 RTCL_REG_SET(375000000, 0x041a, 4),
209 RTCL_REG_SET(400000000, 0x041c, 4)
210 };
211
212 static const struct rtcl_reg_set rtcl_839x_lxb_reg_set[] = {
213 RTCL_REG_SET(50000000, 0x1414, 3),
214 RTCL_REG_SET(100000000, 0x0814, 3),
215 RTCL_REG_SET(150000000, 0x0414, 3),
216 RTCL_REG_SET(200000000, 0x0414, 7)
217 };
218
219 struct rtcl_rtab_set {
220 int count;
221 const struct rtcl_reg_set *rset;
222 };
223
224 #define RTCL_RTAB_SET(_rset) \
225 { \
226 .count = ARRAY_SIZE(_rset), \
227 .rset = _rset, \
228 }
229
230 static const struct rtcl_rtab_set rtcl_rtab_set[RTCL_SOCCNT][CLK_COUNT] = {
231 {
232 RTCL_RTAB_SET(rtcl_838x_cpu_reg_set),
233 RTCL_RTAB_SET(rtcl_838x_mem_reg_set),
234 RTCL_RTAB_SET(rtcl_838x_lxb_reg_set)
235 }, {
236 RTCL_RTAB_SET(rtcl_839x_cpu_reg_set),
237 RTCL_RTAB_SET(rtcl_839x_mem_reg_set),
238 RTCL_RTAB_SET(rtcl_839x_lxb_reg_set)
239 }
240 };
241
242 #define RTCL_ROUND_SET(_min, _max, _step) \
243 { \
244 .min = _min, \
245 .max = _max, \
246 .step = _step, \
247 }
248
249 struct rtcl_round_set {
250 unsigned long min;
251 unsigned long max;
252 unsigned long step;
253 };
254
255 static const struct rtcl_round_set rtcl_round_set[RTCL_SOCCNT][CLK_COUNT] = {
256 {
257 RTCL_ROUND_SET(300000000, 625000000, 25000000),
258 RTCL_ROUND_SET(200000000, 375000000, 25000000),
259 RTCL_ROUND_SET(100000000, 200000000, 25000000)
260 }, {
261 RTCL_ROUND_SET(400000000, 850000000, 25000000),
262 RTCL_ROUND_SET(100000000, 400000000, 25000000),
263 RTCL_ROUND_SET(50000000, 200000000, 50000000)
264 }
265 };
266
267 static const int rtcl_divn3[] = { 2, 3, 4, 6 };
268 static const int rtcl_xdiv[] = { 2, 4, 2 };
269
270 /*
271 * module data structures
272 */
273
274 #define RTCL_CLK_INFO(_idx, _name, _pname, _dname) \
275 { \
276 .idx = _idx, \
277 .name = _name, \
278 .parent_name = _pname, \
279 .display_name = _dname, \
280 }
281
282 struct rtcl_clk_info {
283 unsigned int idx;
284 const char *name;
285 const char *parent_name;
286 const char *display_name;
287 };
288
289 struct rtcl_clk {
290 struct clk_hw hw;
291 unsigned int idx;
292 unsigned long min;
293 unsigned long max;
294 unsigned long rate;
295 unsigned long startup;
296 };
297
298 static const struct rtcl_clk_info rtcl_clk_info[CLK_COUNT] = {
299 RTCL_CLK_INFO(CLK_CPU, "cpu_clk", "ref_clk", "CPU"),
300 RTCL_CLK_INFO(CLK_MEM, "mem_clk", "ref_clk", "MEM"),
301 RTCL_CLK_INFO(CLK_LXB, "lxb_clk", "ref_clk", "LXB")
302 };
303
304 struct rtcl_dram {
305 int type;
306 int buswidth;
307 };
308
309 struct rtcl_sram {
310 int *pmark;
311 unsigned long vbase;
312 };
313
314 struct rtcl_ccu {
315 spinlock_t lock;
316 unsigned int soc;
317 struct rtcl_sram sram;
318 struct rtcl_dram dram;
319 struct device_node *np;
320 struct platform_device *pdev;
321 struct rtcl_clk clks[CLK_COUNT];
322 };
323
324 struct rtcl_ccu *rtcl_ccu;
325
326 #define rtcl_hw_to_clk(_hw) container_of(_hw, struct rtcl_clk, hw)
327
328 /*
329 * SRAM relocatable assembler functions. The dram() parts point to normal kernel memory while
330 * the sram() parts are the same functions but relocated to SRAM.
331 */
332
333 extern void rtcl_838x_dram_start(void);
334 extern int rtcl_838x_dram_size;
335
336 extern void (*rtcl_838x_dram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
337 static void (*rtcl_838x_sram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
338
339 extern void rtcl_839x_dram_start(void);
340 extern int rtcl_839x_dram_size;
341
342 extern void (*rtcl_839x_dram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
343 static void (*rtcl_839x_sram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
344
345 /*
346 * clock setter/getter functions
347 */
348
349 static unsigned long rtcl_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
350 {
351 struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
352 unsigned int ctrl0, ctrl1, div1, div2, cmu_ncode_in;
353 unsigned int cmu_sel_prediv, cmu_sel_div4, cmu_divn2, cmu_divn2_selb, cmu_divn3_sel;
354
355 if ((clk->idx >= CLK_COUNT) || (!rtcl_ccu) || (rtcl_ccu->soc >= RTCL_SOCCNT))
356 return 0;
357
358 ctrl0 = read_sw(rtcl_regs[rtcl_ccu->soc][0][clk->idx]);
359 ctrl1 = read_sw(rtcl_regs[rtcl_ccu->soc][1][clk->idx]);
360
361 cmu_sel_prediv = 1 << RTL_PLL_CTRL0_CMU_SEL_PREDIV(ctrl0);
362 cmu_sel_div4 = RTL_PLL_CTRL0_CMU_SEL_DIV4(ctrl0) ? 4 : 1;
363 cmu_ncode_in = RTL_PLL_CTRL0_CMU_NCODE_IN(ctrl0) + 4;
364 cmu_divn2 = RTL_PLL_CTRL0_CMU_DIVN2(ctrl0) + 4;
365
366 switch (rtcl_ccu->soc) {
367 case RTCL_SOC838X:
368 if ((ctrl0 == 0) && (ctrl1 == 0) && (clk->idx == CLK_LXB))
369 return 200000000;
370
371 cmu_divn2_selb = RTL838X_PLL_CTRL1_CMU_DIVN2_SELB(ctrl1);
372 cmu_divn3_sel = rtcl_divn3[RTL838X_PLL_CTRL1_CMU_DIVN3_SEL(ctrl1)];
373 break;
374 case RTCL_SOC839X:
375 cmu_divn2_selb = RTL839X_PLL_CTRL1_CMU_DIVN2_SELB(ctrl1);
376 cmu_divn3_sel = rtcl_divn3[RTL839X_PLL_CTRL1_CMU_DIVN3_SEL(ctrl1)];
377 break;
378 }
379 div1 = cmu_divn2_selb ? cmu_divn3_sel : cmu_divn2;
380 div2 = rtcl_xdiv[clk->idx];
381
382 return (((parent_rate / 16) * cmu_ncode_in) / (div1 * div2)) *
383 cmu_sel_prediv * cmu_sel_div4 * 16;
384 }
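
/*
 * Worked instance of the formula above with purely hypothetical field values (not taken from a
 * real device): parent_rate = 25000000, cmu_ncode_in = 20, div1 = 2, div2 = 2 (CPU entry of
 * rtcl_xdiv[]), cmu_sel_prediv = 1, cmu_sel_div4 = 4. Then ((25000000 / 16) * 20) / (2 * 2) =
 * 7812500 and 7812500 * 1 * 4 * 16 = 500000000, i.e. 500 MHz.
 */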
385
386 static int rtcl_838x_set_rate(int clk_idx, const struct rtcl_reg_set *reg)
387 {
388 unsigned long irqflags;
389 /*
390 * Runtime of this function (including locking)
391 * CPU: up to 14000 cycles / up to 56 us at 250 MHz (half default speed)
392 */
393 spin_lock_irqsave(&rtcl_ccu->lock, irqflags);
394 rtcl_838x_sram_set_rate(clk_idx, reg->ctrl0, reg->ctrl1);
395 spin_unlock_irqrestore(&rtcl_ccu->lock, irqflags);
396
397 return 0;
398 }
399
400 static int rtcl_839x_set_rate(int clk_idx, const struct rtcl_reg_set *reg)
401 {
402 unsigned long vpflags;
403 unsigned long irqflags;
404 /*
405 * Runtime of this function (including locking)
406 * CPU: up to 31000 cycles / up to 89 us at 350 MHz (half default speed)
407 */
408 spin_lock_irqsave(&rtcl_ccu->lock, irqflags);
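/*
 * dvpe() presumably parks the other MT VPEs so that no sibling hardware thread keeps executing
 * from DRAM while the PLL is reprogrammed; evpe() below re-enables them.
 */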
409 vpflags = dvpe();
410 rtcl_839x_sram_set_rate(clk_idx, reg->ctrl0, reg->ctrl1);
411 evpe(vpflags);
412 spin_unlock_irqrestore(&rtcl_ccu->lock, irqflags);
413
414 return 0;
415 }
416
417 static int rtcl_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
418 {
419 int tab_idx;
420 struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
421 const struct rtcl_rtab_set *rtab = &rtcl_rtab_set[rtcl_ccu->soc][clk->idx];
422 const struct rtcl_round_set *round = &rtcl_round_set[rtcl_ccu->soc][clk->idx];
423
424 if ((parent_rate != RTCL_XTAL_RATE) || (!rtcl_ccu->sram.vbase))
425 return -EINVAL;
426 /*
427 * Currently we do not know if the SRAM content is stable on these devices. Someone might change memory
428 * in this region without caring about proper allocation. So check that the code is still in place.
429 */
430 if (unlikely(*rtcl_ccu->sram.pmark != RTL_SRAM_MARKER)) {
431 dev_err(&rtcl_ccu->pdev->dev, "SRAM code lost\n");
432 return -EINVAL;
433 }
434
435 tab_idx = (rate - round->min) / round->step;
436 if ((tab_idx < 0) || (tab_idx >= rtab->count) || (rtab->rset[tab_idx].rate != rate))
437 return -EINVAL;
438
439 rtcl_ccu->clks[clk->idx].rate = rate;
440
441 switch (rtcl_ccu->soc) {
442 case RTCL_SOC838X:
443 return rtcl_838x_set_rate(clk->idx, &rtab->rset[tab_idx]);
444 case RTCL_SOC839X:
445 return rtcl_839x_set_rate(clk->idx, &rtab->rset[tab_idx]);
446 }
447
448 return -ENXIO;
449 }
450
451 static long rtcl_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
452 {
453 struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
454 unsigned long rrate = max(clk->min, min(clk->max, rate));
455 const struct rtcl_round_set *round = &rtcl_round_set[rtcl_ccu->soc][clk->idx];
456
457 rrate = ((rrate + (round->step >> 1)) / round->step) * round->step;
458 rrate -= (rrate > clk->max) ? round->step : 0;
459 rrate += (rrate < clk->min) ? round->step : 0;
460
461 return rrate;
462 }
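
/*
 * Example (illustrative): for the RTL838X CPU clock (min 300 MHz, max 625 MHz, step 25 MHz) a
 * request for 410 MHz is clamped to [min, max] and rounded to the nearest step:
 * (410000000 + 12500000) / 25000000 = 16 and 16 * 25000000 = 400000000, so 400 MHz is returned.
 */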
463
464 /*
465 * Initialization functions to register the CCU and its clocks
466 */
467
468 #define RTCL_SRAM_FUNC(SOC, PBASE, FN) ({ \
469 rtcl_##SOC##_sram_##FN = ((void *)&rtcl_##SOC##_dram_##FN \
470 - (void *)&rtcl_##SOC##_dram_start) \
471 + (void *)PBASE; })
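
/*
 * The macro above rebases an entry point from the DRAM copy of the relocatable blob to its SRAM
 * copy: sram_fn = sram_pbase + (&dram_fn - &dram_start). As a hypothetical example, if the
 * set_rate entry sits 0x40 bytes into the blob and the blob was copied to SRAM at 0x9f000000,
 * the relocated entry point becomes 0x9f000040.
 */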
472
473 static const struct clk_ops rtcl_clk_ops = {
474 .set_rate = rtcl_set_rate,
475 .round_rate = rtcl_round_rate,
476 .recalc_rate = rtcl_recalc_rate,
477 };
478
479 static int rtcl_ccu_create(struct device_node *np)
480 {
481 int soc;
482
483 if (of_device_is_compatible(np, "realtek,rtl8380-clock"))
484 soc = RTCL_SOC838X;
485 else if (of_device_is_compatible(np, "realtek,rtl8390-clock"))
486 soc = RTCL_SOC839X;
487 else
488 return -ENXIO;
489
490 rtcl_ccu = kzalloc(sizeof(*rtcl_ccu), GFP_KERNEL);
491 if (!rtcl_ccu)
492 return -ENOMEM;
493
494 rtcl_ccu->np = np;
495 rtcl_ccu->soc = soc;
496 rtcl_ccu->dram.type = RTL_MC_MCR_DRAMTYPE(read_soc(RTL_MC_MCR));
497 rtcl_ccu->dram.buswidth = RTL_MC_DCR_BUSWIDTH(read_soc(RTL_MC_DCR));
498 spin_lock_init(&rtcl_ccu->lock);
499
500 return 0;
501 }
502
503 int rtcl_register_clkhw(int clk_idx)
504 {
505 int ret;
506 struct clk *clk;
507 struct clk_init_data hw_init = { };
508 struct rtcl_clk *rclk = &rtcl_ccu->clks[clk_idx];
509 struct clk_parent_data parent_data = { .fw_name = rtcl_clk_info[clk_idx].parent_name };
510
511 rclk->idx = clk_idx;
512 rclk->hw.init = &hw_init;
513
514 hw_init.num_parents = 1;
515 hw_init.ops = &rtcl_clk_ops;
516 hw_init.parent_data = &parent_data;
517 hw_init.name = rtcl_clk_info[clk_idx].name;
518
519 ret = of_clk_hw_register(rtcl_ccu->np, &rclk->hw);
520 if (ret)
521 return ret;
522
523 clk_hw_register_clkdev(&rclk->hw, rtcl_clk_info[clk_idx].name, NULL);
524
525 clk = clk_get(NULL, rtcl_clk_info[clk_idx].name);
526 rclk->startup = clk_get_rate(clk);
527 clk_put(clk);
528
529 switch (clk_idx) {
530 case CLK_CPU:
531 rclk->min = rtcl_round_set[rtcl_ccu->soc][clk_idx].min;
532 rclk->max = rtcl_round_set[rtcl_ccu->soc][clk_idx].max;
533 break;
534 default:
535 /*
536 * TODO: This driver supports PLL reclocking and nothing else. Additional required steps for non-CPU
537 * PLLs are missing. E.g. if we want to change memory clocks the right way we must adapt a lot of
538 * other settings like the MCR and DTRx timing registers (0xb8001000, 0xb8001008, ...) and initiate
539 * a DLL reset so that the hardware operates within the allowed limits. This is far too complex without
540 * official support. Avoid this for now.
541 */
542 rclk->min = rclk->max = rclk->startup;
543 break;
544 }
545
546 return 0;
547 }
548
549 static struct clk_hw *rtcl_get_clkhw(struct of_phandle_args *clkspec, void *prv)
550 {
551 unsigned int idx = clkspec->args[0];
552
553 if (idx >= CLK_COUNT) {
554 pr_err("%s: Invalid index %u\n", __func__, idx);
555 return ERR_PTR(-EINVAL);
556 }
557
558 return &rtcl_ccu->clks[idx].hw;
559 }
560
561 static int rtcl_ccu_register_clocks(void)
562 {
563 int clk_idx, ret;
564
565 for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
566 ret = rtcl_register_clkhw(clk_idx);
567 if (ret) {
568 pr_err("%s: Couldn't register %s clock\n",
569 __func__, rtcl_clk_info[clk_idx].display_name);
570 goto err_hw_unregister;
571 }
572 }
573
574 ret = of_clk_add_hw_provider(rtcl_ccu->np, rtcl_get_clkhw, rtcl_ccu);
575 if (ret) {
576 pr_err("%s: Couldn't register clock provider of %s\n",
577 __func__, of_node_full_name(rtcl_ccu->np));
578 goto err_hw_unregister;
579 }
580
581 return 0;
582
583 err_hw_unregister:
584 for (--clk_idx; clk_idx >= 0; --clk_idx)
585 clk_hw_unregister(&rtcl_ccu->clks[clk_idx].hw);
586
587 return ret;
588 }
589
590 int rtcl_init_sram(void)
591 {
592 struct gen_pool *sram_pool;
593 phys_addr_t sram_pbase;
594 unsigned long sram_vbase;
595 struct device_node *node;
596 struct platform_device *pdev = NULL;
597 void *dram_start;
598 int dram_size;
599 const char *wrn = ", rate setting disabled.\n";
600
601 switch (rtcl_ccu->soc) {
602 case RTCL_SOC838X:
603 dram_start = &rtcl_838x_dram_start;
604 dram_size = rtcl_838x_dram_size;
605 break;
606 case RTCL_SOC839X:
607 dram_start = &rtcl_839x_dram_start;
608 dram_size = rtcl_839x_dram_size;
609 break;
610 default:
611 return -ENXIO;
612 }
613
614 for_each_compatible_node(node, NULL, "mmio-sram") {
615 pdev = of_find_device_by_node(node);
616 if (pdev) {
617 of_node_put(node);
618 break;
619 }
620 }
621
622 if (!pdev) {
623 dev_warn(&rtcl_ccu->pdev->dev, "no SRAM device found%s", wrn);
624 return -ENXIO;
625 }
626
627 sram_pool = gen_pool_get(&pdev->dev, NULL);
628 if (!sram_pool) {
629 dev_warn(&rtcl_ccu->pdev->dev, "SRAM pool unavailable%s", wrn);
630 goto err_put_device;
631 }
632
633 sram_vbase = gen_pool_alloc(sram_pool, dram_size);
634 if (!sram_vbase) {
635 dev_warn(&rtcl_ccu->pdev->dev, "can not allocate SRAM%s", wrn);
636 goto err_put_device;
637 }
638
639 sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_vbase);
640 memcpy((void *)sram_pbase, dram_start, dram_size);
641 flush_icache_range((unsigned long)sram_pbase, (unsigned long)(sram_pbase + dram_size));
642
643 switch (rtcl_ccu->soc) {
644 case RTCL_SOC838X:
645 RTCL_SRAM_FUNC(838x, sram_pbase, set_rate);
646 break;
647 case RTCL_SOC839X:
648 RTCL_SRAM_FUNC(839x, sram_pbase, set_rate);
649 break;
650 }
651
652 rtcl_ccu->sram.pmark = (int *)((void *)sram_pbase + (dram_size - 4));
653 rtcl_ccu->sram.vbase = sram_vbase;
654
655 return 0;
656
657 err_put_device:
658 put_device(&pdev->dev);
659
660 return -ENXIO;
661 }
662
663 void rtcl_ccu_log_early(void)
664 {
665 int clk_idx;
666 char meminfo[80], clkinfo[255], msg[255] = "rtl83xx-clk: initialized";
667
668 sprintf(meminfo, " (%d Bit DDR%d)", rtcl_ccu->dram.buswidth, rtcl_ccu->dram.type);
669 for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
670 sprintf(clkinfo, ", %s %lu MHz", rtcl_clk_info[clk_idx].display_name,
671 rtcl_ccu->clks[clk_idx].startup / 1000000);
672 if (clk_idx == CLK_MEM)
673 strcat(clkinfo, meminfo);
674 strcat(msg, clkinfo);
675 }
676 pr_info("%s\n", msg);
677 }
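
/*
 * With the format strings above the early log line looks roughly like this (numbers are made up
 * for illustration):
 *
 *   rtl83xx-clk: initialized, CPU 500 MHz, MEM 300 MHz (16 Bit DDR2), LXB 200 MHz
 */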
678
679 void rtcl_ccu_log_late(void)
680 {
681 int clk_idx;
682 struct rtcl_clk *rclk;
683 bool overclock = false;
684 char clkinfo[80], msg[255] = "rate setting enabled";
685
686 for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
687 rclk = &rtcl_ccu->clks[clk_idx];
688 overclock |= rclk->max > rclk->startup;
689 sprintf(clkinfo, ", %s %lu-%lu MHz", rtcl_clk_info[clk_idx].display_name,
690 rclk->min / 1000000, rclk->max / 1000000);
691 strcat(msg, clkinfo);
692 }
693 if (overclock)
694 strcat(msg, ", OVERCLOCK AT OWN RISK");
695
696 dev_info(&rtcl_ccu->pdev->dev, "%s\n", msg);
697 }
698
699 /*
700 * Early registration: This module provides core startup clocks that are needed for generic SOC
701 * init and for further builtin devices (e.g. UART). Register asap via clock framework.
702 */
703
704 static void __init rtcl_probe_early(struct device_node *np)
705 {
706 if (rtcl_ccu_create(np))
707 return;
708
709 if (rtcl_ccu_register_clocks())
710 kfree(rtcl_ccu);
711 else
712 rtcl_ccu_log_early();
713 }
714
715 CLK_OF_DECLARE_DRIVER(rtl838x_clk, "realtek,rtl8380-clock", rtcl_probe_early);
716 CLK_OF_DECLARE_DRIVER(rtl839x_clk, "realtek,rtl8390-clock", rtcl_probe_early);
717
718 /*
719 * Late registration: Finally register as normal platform driver. At this point we can make use
720 * of other modules like SRAM.
721 */
722
723 static const struct of_device_id rtcl_dt_ids[] = {
724 { .compatible = "realtek,rtl8380-clock" },
725 { .compatible = "realtek,rtl8390-clock" },
726 {}
727 };
728
729 static int rtcl_probe_late(struct platform_device *pdev)
730 {
731 int ret;
732
733 if (!rtcl_ccu) {
734 dev_err(&pdev->dev, "early initialization not run\n");
735 return -ENXIO;
736 }
737 rtcl_ccu->pdev = pdev;
738 ret = rtcl_init_sram();
739 if (ret)
740 return ret;
741
742 rtcl_ccu_log_late();
743
744 return 0;
745 }
746
747 static struct platform_driver rtcl_platform_driver = {
748 .driver = {
749 .name = "rtl83xx-clk",
750 .of_match_table = rtcl_dt_ids,
751 },
752 .probe = rtcl_probe_late,
753 };
754
755 static int __init rtcl_init_subsys(void)
756 {
757 return platform_driver_register(&rtcl_platform_driver);
758 }
759
760 /*
761 * The driver does not know when the SRAM driver has finished loading. With an arch_initcall() we
762 * might run before SRAM initialization. Be polite and give the system a little more time.
763 */
764
765 subsys_initcall(rtcl_init_subsys);