kernel-5.4: bump to 5.4.102 and refresh patches
[openwrt/openwrt.git] / target / linux / layerscape / patches-5.4 / 303-core-0007-of-of_reserved_mem-Ensure-cma-reserved-region-not-cr.patch
1 From 6973ab95b9e0b0ce7b878c70c301bc5b3d11eca4 Mon Sep 17 00:00:00 2001
2 From: Jason Liu <jason.hui.liu@nxp.com>
3 Date: Mon, 11 Nov 2019 17:51:13 +0800
4 Subject: [PATCH] of: of_reserved_mem: Ensure cma reserved region not cross the
5 low/high memory
6
7 Need ensure the cma reserved region not cross the low/high memory boundary
8 when using the dynamic allocation method through device-tree, otherwise,
9 kernel will fail to boot up when cma reserved region cross low/high mem.
10
11 Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>
12 Cc: Laura Abbott <labbott@redhat.com>
13 Cc: Frank Rowand <frowand.list@gmail.com>
14 Cc: Rob Herring <robh+dt@kernel.org>
15 Cc: stable@vger.kernel.org
16 Signed-off-by: Arulpandiyan Vadivel <arulpandiyan_vadivel@mentor.com>
17 ---
18 drivers/of/of_reserved_mem.c | 33 ++++++++++++++++++++++++++-------
19 1 file changed, 26 insertions(+), 7 deletions(-)
20
21 --- a/drivers/of/of_reserved_mem.c
22 +++ b/drivers/of/of_reserved_mem.c
23 @@ -26,11 +26,12 @@
24 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
25 static int reserved_mem_count;
26
27 -static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
28 - phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
29 - phys_addr_t *res_base)
30 +static int __init early_init_dt_alloc_reserved_memory_arch(unsigned long node,
31 + phys_addr_t size, phys_addr_t align, phys_addr_t start,
32 + phys_addr_t end, bool nomap, phys_addr_t *res_base)
33 {
34 phys_addr_t base;
35 + phys_addr_t highmem_start = __pa(high_memory - 1) + 1;
36
37 end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
38 align = !align ? SMP_CACHE_BYTES : align;
39 @@ -38,6 +39,24 @@ static int __init early_init_dt_alloc_re
40 if (!base)
41 return -ENOMEM;
42
43 +
44 + /*
45 + * Sanity check for the cma reserved region: If the reserved region
46 + * crosses the low/high memory boundary, try to fix it up and then
47 + * fall back to allocate the cma region from the low memory space.
48 + */
49 +
50 + if (IS_ENABLED(CONFIG_CMA)
51 + && of_flat_dt_is_compatible(node, "shared-dma-pool")
52 + && of_get_flat_dt_prop(node, "reusable", NULL) && !nomap) {
53 + if (base < highmem_start && (base + size) > highmem_start) {
54 + base = memblock_find_in_range(start, highmem_start,
55 + size, align);
56 + if (!base)
57 + return -ENOMEM;
58 + }
59 + }
60 +
61 *res_base = base;
62 if (nomap)
63 return memblock_remove(base, size);
64 @@ -131,8 +150,8 @@ static int __init __reserved_mem_alloc_s
65 end = start + dt_mem_next_cell(dt_root_size_cells,
66 &prop);
67
68 - ret = early_init_dt_alloc_reserved_memory_arch(size,
69 - align, start, end, nomap, &base);
70 + ret = early_init_dt_alloc_reserved_memory_arch(node,
71 + size, align, start, end, nomap, &base);
72 if (ret == 0) {
73 pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
74 uname, &base,
75 @@ -143,8 +162,8 @@ static int __init __reserved_mem_alloc_s
76 }
77
78 } else {
79 - ret = early_init_dt_alloc_reserved_memory_arch(size, align,
80 - 0, 0, nomap, &base);
81 + ret = early_init_dt_alloc_reserved_memory_arch(node,
82 + size, align, 0, 0, nomap, &base);
83 if (ret == 0)
84 pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
85 uname, &base, (unsigned long)size / SZ_1M);