From 1fb65f4bc30fbadd0c89521985ff8142693c9631 Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Date: Wed, 11 Sep 2019 20:25:45 +0200
Subject: [PATCH] arm64: use both ZONE_DMA and ZONE_DMA32

commit 1a8e1cef7603e218339ac63cb3178b25554524e5 upstream.

So far all arm64 devices have supported 32 bit DMA masks for their
peripherals. This is no longer true for the Raspberry Pi 4, as most of
its peripherals can only address the first GB of memory out of a total
of up to 4 GB.

This goes against ZONE_DMA32's intent, as ZONE_DMA32 is expected to be
addressable with a 32 bit mask. So it was decided to re-introduce
ZONE_DMA in arm64.

ZONE_DMA will contain the lower 1G of memory, which is currently the
memory area addressable by any peripheral on an arm64 device.
ZONE_DMA32 will contain the rest of the 32 bit addressable memory.
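
As a rough illustration of the resulting split, here is a minimal
userspace sketch (not taken from this patch; the names, the zero DRAM
base and the 4 GB DRAM size are illustrative assumptions for a
Raspberry Pi 4-like layout) that mirrors the arithmetic of the
max_zone_phys() helper introduced below:

#include <stdint.h>
#include <stdio.h>

#define DRAM_BASE	0x0ULL		/* assumed start of DRAM */
#define DRAM_END	(4ULL << 30)	/* assumed 4 GB of DRAM */

/* Clear the low zone_bits bits of the DRAM base, add 2^zone_bits and
 * clamp the result to the end of DRAM. */
static uint64_t zone_limit(unsigned int zone_bits)
{
	uint64_t offset = DRAM_BASE & ~((1ULL << zone_bits) - 1);
	uint64_t limit = offset + (1ULL << zone_bits);

	return limit < DRAM_END ? limit : DRAM_END;
}

int main(void)
{
	/* Prints 0x40000000: ZONE_DMA covers the first 1 GB. */
	printf("ZONE_DMA   up to 0x%llx\n",
	       (unsigned long long)zone_limit(30));
	/* Prints 0x100000000: ZONE_DMA32 covers the rest below 4 GB. */
	printf("ZONE_DMA32 up to 0x%llx\n",
	       (unsigned long long)zone_limit(32));
	return 0;
}

With these assumed values ZONE_DMA ends at 1 GB and ZONE_DMA32 at 4 GB,
matching the layout described above.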

Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/Kconfig            |  4 +++
 arch/arm64/include/asm/page.h |  2 ++
 arch/arm64/mm/init.c          | 54 +++++++++++++++++++++++++----------
 3 files changed, 45 insertions(+), 15 deletions(-)

--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -267,6 +267,10 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
+config ZONE_DMA
+	bool "Support DMA zone" if EXPERT
+	default y
+
 config ZONE_DMA32
 	bool "Support DMA32 zone" if EXPERT
 	default y
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -38,4 +38,6 @@ extern int pfn_valid(unsigned long);
 
 #include <asm-generic/getorder.h>
 
+#define ARCH_ZONE_DMA_BITS	30
+
 #endif
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -56,6 +56,13 @@ EXPORT_SYMBOL(physvirt_offset);
 struct page *vmemmap __ro_after_init;
 EXPORT_SYMBOL(vmemmap);
 
+/*
+ * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
+ * memory as some devices, namely the Raspberry Pi 4, have peripherals with
+ * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
+ * bit addressable memory area.
+ */
+phys_addr_t arm64_dma_phys_limit __ro_after_init;
 phys_addr_t arm64_dma32_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
@@ -169,15 +176,16 @@ static void __init reserve_elfcorehdr(vo
 {
 }
 #endif /* CONFIG_CRASH_DUMP */
+
 /*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
+ * Return the maximum physical address for a zone with a given address size
+ * limit. It currently assumes that for memory starting above 4G, 32-bit
+ * devices will use a DMA offset.
  */
-static phys_addr_t __init max_zone_dma32_phys(void)
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
 {
-	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
-	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
+	return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
 }
 
 #ifdef CONFIG_NUMA
@@ -186,6 +194,9 @@ static void __init zone_sizes_init(unsig
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
 #endif
@@ -201,13 +212,18 @@ static void __init zone_sizes_init(unsig
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 	unsigned long max_dma32 = min;
+	unsigned long max_dma = min;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
-	/* 4GB maximum for 32-bit only capable devices */
+#ifdef CONFIG_ZONE_DMA
+	max_dma = PFN_DOWN(arm64_dma_phys_limit);
+	zone_size[ZONE_DMA] = max_dma - min;
+	max_dma32 = max_dma;
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
-	zone_size[ZONE_DMA32] = max_dma32 - min;
+	zone_size[ZONE_DMA32] = max_dma32 - max_dma;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma32;
 
@@ -219,11 +235,17 @@ static void __init zone_sizes_init(unsig
 
 		if (start >= max)
 			continue;
-
+#ifdef CONFIG_ZONE_DMA
+		if (start < max_dma) {
+			unsigned long dma_end = min_not_zero(end, max_dma);
+			zhole_size[ZONE_DMA] -= dma_end - start;
+		}
+#endif
 #ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma32) {
-			unsigned long dma_end = min(end, max_dma32);
-			zhole_size[ZONE_DMA32] -= dma_end - start;
+			unsigned long dma32_end = min(end, max_dma32);
+			unsigned long dma32_start = max(start, max_dma);
+			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
 		}
 #endif
 		if (end > max_dma32) {
@@ -418,9 +440,11 @@ void __init arm64_memblock_init(void)
 
 	early_init_fdt_scan_reserved_mem();
 
-	/* 4GB maximum for 32-bit only capable devices */
+	if (IS_ENABLED(CONFIG_ZONE_DMA))
+		arm64_dma_phys_limit = max_zone_phys(ARCH_ZONE_DMA_BITS);
+
 	if (IS_ENABLED(CONFIG_ZONE_DMA32))
-		arm64_dma32_phys_limit = max_zone_dma32_phys();
+		arm64_dma32_phys_limit = max_zone_phys(32);
 	else
 		arm64_dma32_phys_limit = PHYS_MASK + 1;
 
@@ -430,7 +454,7 @@ void __init arm64_memblock_init(void)
 
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
-	dma_contiguous_reserve(arm64_dma32_phys_limit);
+	dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
 }
 
 void __init bootmem_init(void)
@@ -534,7 +558,7 @@ static void __init free_unused_memmap(vo
 void __init mem_init(void)
 {
 	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
+	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
 		swiotlb_init(1);
 	else
 		swiotlb_force = SWIOTLB_NO_FORCE;