realtek: clock driver: get away with register macros
[openwrt/staging/svanheule.git] target/linux/realtek/files-5.10/drivers/clk/realtek/clk-rtl83xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Realtek RTL83XX clock driver
 * Copyright (C) 2022 Markus Stockhausen <markus.stockhausen@gmx.de>
 *
 * This driver provides basic clock support for the central core clock unit (CCU) and its PLLs
 * inside the RTL838X and RTL839X SOCs. Currently CPU, memory and LXB clock information can be
 * accessed. To make use of the driver, add the following devices and configurations at the
 * appropriate locations to the DT.
 *
 * #include <dt-bindings/clock/rtl83xx-clk.h>
 *
 * sram0: sram@9f000000 {
 *   compatible = "mmio-sram";
 *   reg = <0x9f000000 0x18000>;
 *   #address-cells = <1>;
 *   #size-cells = <1>;
 *   ranges = <0 0x9f000000 0x18000>;
 * };
 *
 * osc: oscillator {
 *   compatible = "fixed-clock";
 *   #clock-cells = <0>;
 *   clock-frequency = <25000000>;
 * };
 *
 * ccu: clock-controller {
 *   compatible = "realtek,rtl8380-clock";
 *   #clock-cells = <1>;
 *   clocks = <&osc>;
 *   clock-names = "ref_clk";
 * };
 *
 *
 * The SRAM part is needed to be able to set clocks. When changing clocks the code must not run
 * from DRAM. Otherwise the system might freeze. Take care to adjust CCU compatibility, SRAM
 * address and size to the target SOC device. Afterwards one can access/identify the clocks in
 * the other DT devices with <&ccu CLK_CPU>, <&ccu CLK_MEM> or <&ccu CLK_LXB>. Additionally the
 * clocks can be used inside the kernel with
 *
 * cpu_clk = clk_get(NULL, "cpu_clk");
 * mem_clk = clk_get(NULL, "mem_clk");
 * lxb_clk = clk_get(NULL, "lxb_clk");
 *
 * This driver can be directly used by the DT based cpufreq driver (CONFIG_CPUFREQ_DT) if the
 * CPU node references the right clock and sane operating points (OPP) are provided. E.g.
 *
 * cpu@0 {
 *   compatible = "mips,mips4KEc";
 *   reg = <0>;
 *   clocks = <&ccu CLK_CPU>;
 *   operating-points-v2 = <&cpu_opp_table>;
 * };
 *
 * cpu_opp_table: opp-table-0 {
 *   compatible = "operating-points-v2";
 *   opp-shared;
 *   opp00 {
 *     opp-hz = /bits/ 64 <425000000>;
 *   };
 *   ...
 * };
 */

#include <asm/cacheflush.h>
#include <asm/mipsmtregs.h>
#include <dt-bindings/clock/rtl83xx-clk.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "clk-rtl83xx.h"

/*
 * some hardware-specific definitions
 */

#define RTCL_SOC838X	0
#define RTCL_SOC839X	1
#define RTCL_SOCCNT	2

#define RTCL_DDR1	1
#define RTCL_DDR2	2
#define RTCL_DDR3	3

#define REG_CTRL0	0
#define REG_CTRL1	1
#define REG_COUNT	2

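/*
 * Rate of the external "ref_clk" crystal. rtcl_set_rate() below refuses to reprogram a PLL if
 * the parent clock runs at anything else.
 */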
#define RTCL_XTAL_RATE	25000000

static const int rtcl_regs[RTCL_SOCCNT][REG_COUNT][CLK_COUNT] = {
	{
		{
			RTL_SW_CORE_BASE + RTL838X_PLL_CPU_CTRL0,
			RTL_SW_CORE_BASE + RTL838X_PLL_MEM_CTRL0,
			RTL_SW_CORE_BASE + RTL838X_PLL_LXB_CTRL0,
		}, {
			RTL_SW_CORE_BASE + RTL838X_PLL_CPU_CTRL1,
			RTL_SW_CORE_BASE + RTL838X_PLL_MEM_CTRL1,
			RTL_SW_CORE_BASE + RTL838X_PLL_LXB_CTRL1
		}
	}, {
		{
			RTL_SW_CORE_BASE + RTL839X_PLL_CPU_CTRL0,
			RTL_SW_CORE_BASE + RTL839X_PLL_MEM_CTRL0,
			RTL_SW_CORE_BASE + RTL839X_PLL_LXB_CTRL0
		}, {
			RTL_SW_CORE_BASE + RTL839X_PLL_CPU_CTRL1,
			RTL_SW_CORE_BASE + RTL839X_PLL_MEM_CTRL1,
			RTL_SW_CORE_BASE + RTL839X_PLL_LXB_CTRL1
		}
	}
};

#define RTCL_REG_SET(_rate, _ctrl0, _ctrl1)	\
	{					\
		.rate = _rate,			\
		.ctrl0 = _ctrl0,		\
		.ctrl1 = _ctrl1,		\
	}

struct rtcl_reg_set {
	unsigned int rate;
	unsigned int ctrl0;
	unsigned int ctrl1;
};

/*
 * The following configuration tables are valid operation points for their corresponding PLLs.
 * The magic numbers are precalculated multipliers and dividers to keep the driver simple. They
 * also include rates outside the allowed physical specifications. E.g. DDR3 memory has a lower
 * limit of 303 MHz or the CPU might get unstable if set to anything above its startup frequency.
 * Additionally the Realtek SOCs tend to expect CPU speed > MEM speed > LXB speed. The caller or
 * DT configuration must take care that only valid operating points are selected.
 */
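
/*
 * For example, rtcl_set_rate() maps a requested rate to a table index via
 * (rate - min) / step from rtcl_round_set below. On the RTL838X CPU PLL (min 300 MHz,
 * step 25 MHz) a request for 425000000 Hz yields index (425 - 300) / 25 = 5, i.e. the
 * entry RTCL_REG_SET(425000000, 0x0462, 3) of rtcl_838x_cpu_reg_set[] gets written to
 * the PLL registers.
 */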

static const struct rtcl_reg_set rtcl_838x_cpu_reg_set[] = {
	RTCL_REG_SET(300000000, 0x045c, 5),
	RTCL_REG_SET(325000000, 0x0464, 5),
	RTCL_REG_SET(350000000, 0x046c, 5),
	RTCL_REG_SET(375000000, 0x0474, 5),
	RTCL_REG_SET(400000000, 0x045c, 3),
	RTCL_REG_SET(425000000, 0x0462, 3),
	RTCL_REG_SET(450000000, 0x0468, 3),
	RTCL_REG_SET(475000000, 0x046e, 3),
	RTCL_REG_SET(500000000, 0x0474, 3),
	RTCL_REG_SET(525000000, 0x047a, 3),
	RTCL_REG_SET(550000000, 0x0480, 3),
	RTCL_REG_SET(575000000, 0x0486, 3),
	RTCL_REG_SET(600000000, 0x048c, 3),
	RTCL_REG_SET(625000000, 0x0492, 3)
};

static const struct rtcl_reg_set rtcl_838x_mem_reg_set[] = {
	RTCL_REG_SET(200000000, 0x041b, 5),
	RTCL_REG_SET(225000000, 0x0417, 3),
	RTCL_REG_SET(250000000, 0x041a, 3),
	RTCL_REG_SET(275000000, 0x0412, 1),
	RTCL_REG_SET(300000000, 0x0414, 1),
	RTCL_REG_SET(325000000, 0x0416, 1),
	RTCL_REG_SET(350000000, 0x0418, 1),
	RTCL_REG_SET(375000000, 0x041a, 1)
};

static const struct rtcl_reg_set rtcl_838x_lxb_reg_set[] = {
	RTCL_REG_SET(100000000, 0x043c, 0),
	RTCL_REG_SET(125000000, 0x043c, 0),
	RTCL_REG_SET(150000000, 0x0450, 5),
	RTCL_REG_SET(175000000, 0x0450, 5),
	RTCL_REG_SET(200000000, 0x047c, 0)
};

static const struct rtcl_reg_set rtcl_839x_cpu_reg_set[] = {
	RTCL_REG_SET(400000000, 0x0414, 5),
	RTCL_REG_SET(425000000, 0x041e, 6),
	RTCL_REG_SET(450000000, 0x0417, 5),
	RTCL_REG_SET(475000000, 0x0422, 6),
	RTCL_REG_SET(500000000, 0x041a, 5),
	RTCL_REG_SET(525000000, 0x0426, 6),
	RTCL_REG_SET(550000000, 0x0412, 4),
	RTCL_REG_SET(575000000, 0x042a, 6),
	RTCL_REG_SET(600000000, 0x0414, 4),
	RTCL_REG_SET(625000000, 0x042e, 6),
	RTCL_REG_SET(650000000, 0x0416, 4),
	RTCL_REG_SET(675000000, 0x0432, 6),
	RTCL_REG_SET(700000000, 0x0418, 4),
	RTCL_REG_SET(725000000, 0x0436, 6),
	RTCL_REG_SET(750000000, 0x0438, 6),
	RTCL_REG_SET(775000000, 0x043a, 6),
	RTCL_REG_SET(800000000, 0x043c, 6),
	RTCL_REG_SET(825000000, 0x043e, 6),
	RTCL_REG_SET(850000000, 0x0440, 6)
};

static const struct rtcl_reg_set rtcl_839x_mem_reg_set[] = {
	RTCL_REG_SET(125000000, 0x041a, 7),
	RTCL_REG_SET(150000000, 0x0414, 6),
	RTCL_REG_SET(175000000, 0x0418, 6),
	RTCL_REG_SET(200000000, 0x041c, 6),
	RTCL_REG_SET(225000000, 0x0417, 5),
	RTCL_REG_SET(250000000, 0x041a, 5),
	RTCL_REG_SET(275000000, 0x0412, 4),
	RTCL_REG_SET(300000000, 0x0414, 4),
	RTCL_REG_SET(325000000, 0x0416, 4),
	RTCL_REG_SET(350000000, 0x0418, 4),
	RTCL_REG_SET(375000000, 0x041a, 4),
	RTCL_REG_SET(400000000, 0x041c, 4)
};

static const struct rtcl_reg_set rtcl_839x_lxb_reg_set[] = {
	RTCL_REG_SET(50000000, 0x1414, 3),
	RTCL_REG_SET(100000000, 0x0814, 3),
	RTCL_REG_SET(150000000, 0x0414, 3),
	RTCL_REG_SET(200000000, 0x0414, 7)
};

struct rtcl_rtab_set {
	int count;
	const struct rtcl_reg_set *rset;
};

#define RTCL_RTAB_SET(_rset)			\
	{					\
		.count = ARRAY_SIZE(_rset),	\
		.rset = _rset,			\
	}

static const struct rtcl_rtab_set rtcl_rtab_set[RTCL_SOCCNT][CLK_COUNT] = {
	{
		RTCL_RTAB_SET(rtcl_838x_cpu_reg_set),
		RTCL_RTAB_SET(rtcl_838x_mem_reg_set),
		RTCL_RTAB_SET(rtcl_838x_lxb_reg_set)
	}, {
		RTCL_RTAB_SET(rtcl_839x_cpu_reg_set),
		RTCL_RTAB_SET(rtcl_839x_mem_reg_set),
		RTCL_RTAB_SET(rtcl_839x_lxb_reg_set)
	}
};

#define RTCL_ROUND_SET(_min, _max, _step)	\
	{					\
		.min = _min,			\
		.max = _max,			\
		.step = _step,			\
	}

struct rtcl_round_set {
	unsigned long min;
	unsigned long max;
	unsigned long step;
};

static const struct rtcl_round_set rtcl_round_set[RTCL_SOCCNT][CLK_COUNT] = {
	{
		RTCL_ROUND_SET(300000000, 625000000, 25000000),
		RTCL_ROUND_SET(200000000, 375000000, 25000000),
		RTCL_ROUND_SET(100000000, 200000000, 25000000)
	}, {
		RTCL_ROUND_SET(400000000, 850000000, 25000000),
		RTCL_ROUND_SET(100000000, 400000000, 25000000),
		RTCL_ROUND_SET(50000000, 200000000, 50000000)
	}
};

static const int rtcl_divn3[] = { 2, 3, 4, 6 };
static const int rtcl_xdiv[] = { 2, 4, 2 };
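
/*
 * rtcl_divn3[] translates the CMU_DIVN3_SEL field of CTRL1 into its divider value, and
 * rtcl_xdiv[] holds the fixed per-clock output divider (presumably CPU = 2, MEM = 4, LXB = 2,
 * matching the CLK_* index order). Both feed into rtcl_recalc_rate() below.
 */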

/*
 * module data structures
 */

#define RTCL_CLK_INFO(_idx, _name, _pname, _dname)	\
	{						\
		.idx = _idx,				\
		.name = _name,				\
		.parent_name = _pname,			\
		.display_name = _dname,			\
	}

struct rtcl_clk_info {
	unsigned int idx;
	const char *name;
	const char *parent_name;
	const char *display_name;
};

struct rtcl_clk {
	struct clk_hw hw;
	unsigned int idx;
	unsigned long min;
	unsigned long max;
	unsigned long rate;
	unsigned long startup;
};

static const struct rtcl_clk_info rtcl_clk_info[CLK_COUNT] = {
	RTCL_CLK_INFO(CLK_CPU, "cpu_clk", "ref_clk", "CPU"),
	RTCL_CLK_INFO(CLK_MEM, "mem_clk", "ref_clk", "MEM"),
	RTCL_CLK_INFO(CLK_LXB, "lxb_clk", "ref_clk", "LXB")
};

struct rtcl_dram {
	int type;
	int buswidth;
};

struct rtcl_sram {
	int *pmark;
	unsigned long vbase;
};

struct rtcl_ccu {
	spinlock_t lock;
	unsigned int soc;
	struct rtcl_sram sram;
	struct rtcl_dram dram;
	struct device_node *np;
	struct platform_device *pdev;
	struct rtcl_clk clks[CLK_COUNT];
};

struct rtcl_ccu *rtcl_ccu;

#define rtcl_hw_to_clk(_hw) container_of(_hw, struct rtcl_clk, hw)

/*
 * SRAM relocatable assembler functions. The dram() parts point to normal kernel memory while
 * the sram() parts are the same functions but relocated to SRAM.
 */

extern void rtcl_838x_dram_start(void);
extern int rtcl_838x_dram_size;

extern void (*rtcl_838x_dram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
static void (*rtcl_838x_sram_set_rate)(int clk_idx, int ctrl0, int ctrl1);

extern void rtcl_839x_dram_start(void);
extern int rtcl_839x_dram_size;

extern void (*rtcl_839x_dram_set_rate)(int clk_idx, int ctrl0, int ctrl1);
static void (*rtcl_839x_sram_set_rate)(int clk_idx, int ctrl0, int ctrl1);

/*
 * clock setter/getter functions
 */

static unsigned long rtcl_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
	unsigned int ctrl0, ctrl1, div1, div2, cmu_ncode_in;
	unsigned int cmu_sel_prediv, cmu_sel_div4, cmu_divn2, cmu_divn2_selb, cmu_divn3_sel;

	if ((clk->idx >= CLK_COUNT) || (!rtcl_ccu) || (rtcl_ccu->soc >= RTCL_SOCCNT))
		return 0;

	ctrl0 = ioread32((void *)rtcl_regs[rtcl_ccu->soc][0][clk->idx]);
	ctrl1 = ioread32((void *)rtcl_regs[rtcl_ccu->soc][1][clk->idx]);

	cmu_sel_prediv = 1 << RTL_PLL_CTRL0_CMU_SEL_PREDIV(ctrl0);
	cmu_sel_div4 = RTL_PLL_CTRL0_CMU_SEL_DIV4(ctrl0) ? 4 : 1;
	cmu_ncode_in = RTL_PLL_CTRL0_CMU_NCODE_IN(ctrl0) + 4;
	cmu_divn2 = RTL_PLL_CTRL0_CMU_DIVN2(ctrl0) + 4;

	switch (rtcl_ccu->soc) {
	case RTCL_SOC838X:
		if ((ctrl0 == 0) && (ctrl1 == 0) && (clk->idx == CLK_LXB))
			return 200000000;

		cmu_divn2_selb = RTL838X_PLL_CTRL1_CMU_DIVN2_SELB(ctrl1);
		cmu_divn3_sel = rtcl_divn3[RTL838X_PLL_CTRL1_CMU_DIVN3_SEL(ctrl1)];
		break;
	case RTCL_SOC839X:
		cmu_divn2_selb = RTL839X_PLL_CTRL1_CMU_DIVN2_SELB(ctrl1);
		cmu_divn3_sel = rtcl_divn3[RTL839X_PLL_CTRL1_CMU_DIVN3_SEL(ctrl1)];
		break;
	}
	div1 = cmu_divn2_selb ? cmu_divn3_sel : cmu_divn2;
	div2 = rtcl_xdiv[clk->idx];

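	/*
	 * Effectively rate = parent_rate * cmu_ncode_in * cmu_sel_prediv * cmu_sel_div4 /
	 * (div1 * div2), ignoring integer truncation. Dividing by 16 up front and multiplying
	 * by 16 at the end presumably keeps the intermediate products within 32 bits.
	 */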
	return (((parent_rate / 16) * cmu_ncode_in) / (div1 * div2)) *
	       cmu_sel_prediv * cmu_sel_div4 * 16;
}

static int rtcl_838x_set_rate(int clk_idx, const struct rtcl_reg_set *reg)
{
	unsigned long irqflags;
	/*
	 * Runtime of this function (including locking)
	 * CPU: up to 14000 cycles / up to 56 us at 250 MHz (half default speed)
	 */
	spin_lock_irqsave(&rtcl_ccu->lock, irqflags);
	rtcl_838x_sram_set_rate(clk_idx, reg->ctrl0, reg->ctrl1);
	spin_unlock_irqrestore(&rtcl_ccu->lock, irqflags);

	return 0;
}

static int rtcl_839x_set_rate(int clk_idx, const struct rtcl_reg_set *reg)
{
	unsigned long vpflags;
	unsigned long irqflags;
	/*
	 * Runtime of this function (including locking)
	 * CPU: up to 31000 cycles / up to 89 us at 350 MHz (half default speed)
	 */
	spin_lock_irqsave(&rtcl_ccu->lock, irqflags);
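	/*
	 * dvpe()/evpe() from <asm/mipsmtregs.h> temporarily halt the other MT VPE so that
	 * nothing executes from DRAM while the relocated SRAM routine reprograms the PLL.
	 */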
	vpflags = dvpe();
	rtcl_839x_sram_set_rate(clk_idx, reg->ctrl0, reg->ctrl1);
	evpe(vpflags);
	spin_unlock_irqrestore(&rtcl_ccu->lock, irqflags);

	return 0;
}

static int rtcl_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
{
	int tab_idx;
	struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
	const struct rtcl_rtab_set *rtab = &rtcl_rtab_set[rtcl_ccu->soc][clk->idx];
	const struct rtcl_round_set *round = &rtcl_round_set[rtcl_ccu->soc][clk->idx];

	if ((parent_rate != RTCL_XTAL_RATE) || (!rtcl_ccu->sram.vbase))
		return -EINVAL;
	/*
	 * Currently we do not know if SRAM is stable on these devices. Someone might modify
	 * memory in this region without caring about proper allocation. So check if something
	 * might go wrong.
	 */
	if (unlikely(*rtcl_ccu->sram.pmark != RTL_SRAM_MARKER)) {
		dev_err(&rtcl_ccu->pdev->dev, "SRAM code lost\n");
		return -EINVAL;
	}

	tab_idx = (rate - round->min) / round->step;
	if ((tab_idx < 0) || (tab_idx >= rtab->count) || (rtab->rset[tab_idx].rate != rate))
		return -EINVAL;

	rtcl_ccu->clks[clk->idx].rate = rate;

	switch (rtcl_ccu->soc) {
	case RTCL_SOC838X:
		return rtcl_838x_set_rate(clk->idx, &rtab->rset[tab_idx]);
	case RTCL_SOC839X:
		return rtcl_839x_set_rate(clk->idx, &rtab->rset[tab_idx]);
	}

	return -ENXIO;
}

static long rtcl_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
{
	struct rtcl_clk *clk = rtcl_hw_to_clk(hw);
	unsigned long rrate = max(clk->min, min(clk->max, rate));
	const struct rtcl_round_set *round = &rtcl_round_set[rtcl_ccu->soc][clk->idx];

	rrate = ((rrate + (round->step >> 1)) / round->step) * round->step;
	rrate -= (rrate > clk->max) ? round->step : 0;
	rrate += (rrate < clk->min) ? round->step : 0;

	return rrate;
}
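
/*
 * Example: for the RTL838X CPU clock (min 300 MHz, max 625 MHz, step 25 MHz) a request for
 * 430 MHz stays within the limits and is rounded to the nearest step:
 * ((430000000 + 12500000) / 25000000) * 25000000 = 425000000 Hz.
 */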

/*
 * Initialization functions to register the CCU and its clocks
 */

#define RTCL_SRAM_FUNC(SOC, PBASE, FN) ({				\
	rtcl_##SOC##_sram_##FN = ((void *)&rtcl_##SOC##_dram_##FN	\
				- (void *)&rtcl_##SOC##_dram_start)	\
				+ (void *)PBASE; })
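
/*
 * The macro computes where a routine ended up after the copy to SRAM: the offset of the symbol
 * within the rtcl_<soc>_dram_start blob plus the SRAM base. For instance,
 * RTCL_SRAM_FUNC(838x, sram_pbase, set_rate) expands to
 * rtcl_838x_sram_set_rate = ((void *)&rtcl_838x_dram_set_rate -
 *                            (void *)&rtcl_838x_dram_start) + (void *)sram_pbase;
 */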

static const struct clk_ops rtcl_clk_ops = {
	.set_rate = rtcl_set_rate,
	.round_rate = rtcl_round_rate,
	.recalc_rate = rtcl_recalc_rate,
};

static int rtcl_ccu_create(struct device_node *np)
{
	int soc;

	if (of_device_is_compatible(np, "realtek,rtl8380-clock"))
		soc = RTCL_SOC838X;
	else if (of_device_is_compatible(np, "realtek,rtl8390-clock"))
		soc = RTCL_SOC839X;
	else
		return -ENXIO;

	rtcl_ccu = kzalloc(sizeof(*rtcl_ccu), GFP_KERNEL);
	if (!rtcl_ccu)
		return -ENOMEM;

	rtcl_ccu->np = np;
	rtcl_ccu->soc = soc;
	rtcl_ccu->dram.type = RTL_MC_MCR_DRAMTYPE(ioread32((void *)RTL_SOC_BASE + RTL_MC_MCR));
	rtcl_ccu->dram.buswidth = RTL_MC_DCR_BUSWIDTH(ioread32((void *)RTL_SOC_BASE + RTL_MC_DCR));
	spin_lock_init(&rtcl_ccu->lock);

	return 0;
}

int rtcl_register_clkhw(int clk_idx)
{
	int ret;
	struct clk *clk;
	struct clk_init_data hw_init = { };
	struct rtcl_clk *rclk = &rtcl_ccu->clks[clk_idx];
	struct clk_parent_data parent_data = { .fw_name = rtcl_clk_info[clk_idx].parent_name };

	rclk->idx = clk_idx;
	rclk->hw.init = &hw_init;

	hw_init.num_parents = 1;
	hw_init.ops = &rtcl_clk_ops;
	hw_init.parent_data = &parent_data;
	hw_init.name = rtcl_clk_info[clk_idx].name;

	ret = of_clk_hw_register(rtcl_ccu->np, &rclk->hw);
	if (ret)
		return ret;

	clk_hw_register_clkdev(&rclk->hw, rtcl_clk_info[clk_idx].name, NULL);

	clk = clk_get(NULL, rtcl_clk_info[clk_idx].name);
	rclk->startup = clk_get_rate(clk);
	clk_put(clk);

	switch (clk_idx) {
	case CLK_CPU:
		rclk->min = rtcl_round_set[rtcl_ccu->soc][clk_idx].min;
		rclk->max = rtcl_round_set[rtcl_ccu->soc][clk_idx].max;
		break;
	default:
		/*
		 * TODO: This driver supports PLL reclocking and nothing else. Additional required
		 * steps for non-CPU PLLs are missing. E.g. if we want to change memory clocks the
		 * right way we must adapt a lot of other settings like MCR and DTRx timing
		 * registers (0xb8001000, 0xb8001008, ...) and initiate a DLL reset so that the
		 * hardware operates within the allowed limits. This is far too complex without
		 * official support. Avoid this for now.
		 */
		rclk->min = rclk->max = rclk->startup;
		break;
	}

	return 0;
}

static struct clk_hw *rtcl_get_clkhw(struct of_phandle_args *clkspec, void *prv)
{
	unsigned int idx = clkspec->args[0];

	if (idx >= CLK_COUNT) {
		pr_err("%s: Invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return &rtcl_ccu->clks[idx].hw;
}

static int rtcl_ccu_register_clocks(void)
{
	int clk_idx, ret;

	for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
		ret = rtcl_register_clkhw(clk_idx);
		if (ret) {
			pr_err("%s: Couldn't register %s clock\n",
			       __func__, rtcl_clk_info[clk_idx].display_name);
			goto err_hw_unregister;
		}
	}

	ret = of_clk_add_hw_provider(rtcl_ccu->np, rtcl_get_clkhw, rtcl_ccu);
	if (ret) {
		pr_err("%s: Couldn't register clock provider of %s\n",
		       __func__, of_node_full_name(rtcl_ccu->np));
		goto err_hw_unregister;
	}

	return 0;

err_hw_unregister:
	for (--clk_idx; clk_idx >= 0; --clk_idx)
		clk_hw_unregister(&rtcl_ccu->clks[clk_idx].hw);

	return ret;
}

int rtcl_init_sram(void)
{
	struct gen_pool *sram_pool;
	phys_addr_t sram_pbase;
	unsigned long sram_vbase;
	struct device_node *node;
	struct platform_device *pdev = NULL;
	void *dram_start;
	int dram_size;
	const char *wrn = ", rate setting disabled.\n";

	switch (rtcl_ccu->soc) {
	case RTCL_SOC838X:
		dram_start = &rtcl_838x_dram_start;
		dram_size = rtcl_838x_dram_size;
		break;
	case RTCL_SOC839X:
		dram_start = &rtcl_839x_dram_start;
		dram_size = rtcl_839x_dram_size;
		break;
	default:
		return -ENXIO;
	}

	for_each_compatible_node(node, NULL, "mmio-sram") {
		pdev = of_find_device_by_node(node);
		if (pdev) {
			of_node_put(node);
			break;
		}
	}

	if (!pdev) {
		dev_warn(&rtcl_ccu->pdev->dev, "no SRAM device found%s", wrn);
		return -ENXIO;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		dev_warn(&rtcl_ccu->pdev->dev, "SRAM pool unavailable%s", wrn);
		goto err_put_device;
	}

	sram_vbase = gen_pool_alloc(sram_pool, dram_size);
	if (!sram_vbase) {
		dev_warn(&rtcl_ccu->pdev->dev, "cannot allocate SRAM%s", wrn);
		goto err_put_device;
	}

	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_vbase);
	memcpy((void *)sram_pbase, dram_start, dram_size);
	flush_icache_range((unsigned long)sram_pbase, (unsigned long)(sram_pbase + dram_size));

	switch (rtcl_ccu->soc) {
	case RTCL_SOC838X:
		RTCL_SRAM_FUNC(838x, sram_pbase, set_rate);
		break;
	case RTCL_SOC839X:
		RTCL_SRAM_FUNC(839x, sram_pbase, set_rate);
		break;
	}

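	/*
	 * The last word of the blob is expected to hold RTL_SRAM_MARKER. Remember its SRAM
	 * location so rtcl_set_rate() can detect an overwritten SRAM copy before using it.
	 */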
	rtcl_ccu->sram.pmark = (int *)((void *)sram_pbase + (dram_size - 4));
	rtcl_ccu->sram.vbase = sram_vbase;

	return 0;

err_put_device:
	put_device(&pdev->dev);

	return -ENXIO;
}

void rtcl_ccu_log_early(void)
{
	int clk_idx;
	char meminfo[80], clkinfo[255], msg[255] = "rtl83xx-clk: initialized";

	sprintf(meminfo, " (%d Bit DDR%d)", rtcl_ccu->dram.buswidth, rtcl_ccu->dram.type);
	for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
		sprintf(clkinfo, ", %s %lu MHz", rtcl_clk_info[clk_idx].display_name,
			rtcl_ccu->clks[clk_idx].startup / 1000000);
		if (clk_idx == CLK_MEM)
			strcat(clkinfo, meminfo);
		strcat(msg, clkinfo);
	}
	pr_info("%s\n", msg);
}

void rtcl_ccu_log_late(void)
{
	int clk_idx;
	struct rtcl_clk *rclk;
	bool overclock = false;
	char clkinfo[80], msg[255] = "rate setting enabled";

	for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
		rclk = &rtcl_ccu->clks[clk_idx];
		overclock |= rclk->max > rclk->startup;
		sprintf(clkinfo, ", %s %lu-%lu MHz", rtcl_clk_info[clk_idx].display_name,
			rclk->min / 1000000, rclk->max / 1000000);
		strcat(msg, clkinfo);
	}
	if (overclock)
		strcat(msg, ", OVERCLOCK AT OWN RISK");

	dev_info(&rtcl_ccu->pdev->dev, "%s\n", msg);
}

/*
 * Early registration: This module provides core startup clocks that are needed for generic SOC
 * init and for further builtin devices (e.g. UART). Register them as soon as possible via the
 * clock framework.
 */

static void __init rtcl_probe_early(struct device_node *np)
{
	if (rtcl_ccu_create(np))
		return;

	if (rtcl_ccu_register_clocks())
		kfree(rtcl_ccu);
	else
		rtcl_ccu_log_early();
}

CLK_OF_DECLARE_DRIVER(rtl838x_clk, "realtek,rtl8380-clock", rtcl_probe_early);
CLK_OF_DECLARE_DRIVER(rtl839x_clk, "realtek,rtl8390-clock", rtcl_probe_early);

/*
 * Late registration: Finally register as a normal platform driver. At this point we can make
 * use of other modules like SRAM.
 */

static const struct of_device_id rtcl_dt_ids[] = {
	{ .compatible = "realtek,rtl8380-clock" },
	{ .compatible = "realtek,rtl8390-clock" },
	{}
};

static int rtcl_probe_late(struct platform_device *pdev)
{
	int ret;

	if (!rtcl_ccu) {
		dev_err(&pdev->dev, "early initialization not run\n");
		return -ENXIO;
	}
	rtcl_ccu->pdev = pdev;
	ret = rtcl_init_sram();
	if (ret)
		return ret;

	rtcl_ccu_log_late();

	return 0;
}

static struct platform_driver rtcl_platform_driver = {
	.driver = {
		.name = "rtl83xx-clk",
		.of_match_table = rtcl_dt_ids,
	},
	.probe = rtcl_probe_late,
};

static int __init rtcl_init_subsys(void)
{
	return platform_driver_register(&rtcl_platform_driver);
}

/*
 * The driver does not know when the SRAM module has finally loaded. With an arch_initcall() we
 * might run ahead of the SRAM initialization. Be polite and give the system a little more time.
 */

subsys_initcall(rtcl_init_subsys);