kernel: fix earlycon by keeping __earlycon_table
[openwrt/openwrt.git] / target / linux / generic / patches-4.9 / 220-gc_sections.patch
1 From: Felix Fietkau <nbd@nbd.name>
2
3 Use -ffunction-sections, -fdata-sections and --gc-sections
4
5 In combination with kernel symbol export stripping, this significantly reduces
6 the kernel image size. Used on both ARM and MIPS architectures.
7
8 Signed-off-by: Felix Fietkau <nbd@nbd.name>
9 Signed-off-by: Jonas Gorski <jogo@openwrt.org>
10 Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
11 ---
12
13 --- a/arch/mips/kernel/vmlinux.lds.S
14 +++ b/arch/mips/kernel/vmlinux.lds.S
15 @@ -71,7 +71,7 @@ SECTIONS
16 /* Exception table for data bus errors */
17 __dbe_table : {
18 __start___dbe_table = .;
19 - *(__dbe_table)
20 + KEEP(*(__dbe_table))
21 __stop___dbe_table = .;
22 }
23
24 @@ -121,7 +121,7 @@ SECTIONS
25 . = ALIGN(4);
26 .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
27 __mips_machines_start = .;
28 - *(.mips.machines.init)
29 + KEEP(*(.mips.machines.init))
30 __mips_machines_end = .;
31 }
32
33 --- a/include/asm-generic/vmlinux.lds.h
34 +++ b/include/asm-generic/vmlinux.lds.h
35 @@ -114,7 +114,7 @@
36 #ifdef CONFIG_KPROBES
37 #define KPROBE_BLACKLIST() . = ALIGN(8); \
38 VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
39 - *(_kprobe_blacklist) \
40 + KEEP(*(_kprobe_blacklist)) \
41 VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
42 #else
43 #define KPROBE_BLACKLIST()
44 @@ -123,10 +123,10 @@
45 #ifdef CONFIG_EVENT_TRACING
46 #define FTRACE_EVENTS() . = ALIGN(8); \
47 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
48 - *(_ftrace_events) \
49 + KEEP(*(_ftrace_events)) \
50 VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
51 VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
52 - *(_ftrace_enum_map) \
53 + KEEP(*(_ftrace_enum_map)) \
54 VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
55 #else
56 #define FTRACE_EVENTS()
57 @@ -147,7 +147,7 @@
58 #ifdef CONFIG_FTRACE_SYSCALLS
59 #define TRACE_SYSCALLS() . = ALIGN(8); \
60 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
61 - *(__syscalls_metadata) \
62 + KEEP(*(__syscalls_metadata)) \
63 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
64 #else
65 #define TRACE_SYSCALLS()
66 @@ -156,7 +156,7 @@
67 #ifdef CONFIG_SERIAL_EARLYCON
68 #define EARLYCON_TABLE() STRUCT_ALIGN(); \
69 VMLINUX_SYMBOL(__earlycon_table) = .; \
70 - *(__earlycon_table) \
71 + KEEP(*(__earlycon_table)) \
72 VMLINUX_SYMBOL(__earlycon_table_end) = .;
73 #else
74 #define EARLYCON_TABLE()
75 @@ -169,8 +169,8 @@
76 #define _OF_TABLE_1(name) \
77 . = ALIGN(8); \
78 VMLINUX_SYMBOL(__##name##_of_table) = .; \
79 - *(__##name##_of_table) \
80 - *(__##name##_of_table_end)
81 + KEEP(*(__##name##_of_table)) \
82 + KEEP(*(__##name##_of_table_end))
83
84 #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
85 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
86 @@ -193,7 +193,7 @@
87 #define KERNEL_DTB() \
88 STRUCT_ALIGN(); \
89 VMLINUX_SYMBOL(__dtb_start) = .; \
90 - *(.dtb.init.rodata) \
91 + KEEP(*(.dtb.init.rodata)) \
92 VMLINUX_SYMBOL(__dtb_end) = .;
93
94 /*
95 @@ -214,16 +214,17 @@
96 /* implement dynamic printk debug */ \
97 . = ALIGN(8); \
98 VMLINUX_SYMBOL(__start___jump_table) = .; \
99 - *(__jump_table) \
100 + KEEP(*(__jump_table)) \
101 VMLINUX_SYMBOL(__stop___jump_table) = .; \
102 . = ALIGN(8); \
103 VMLINUX_SYMBOL(__start___verbose) = .; \
104 - *(__verbose) \
105 + KEEP(*(__verbose)) \
106 VMLINUX_SYMBOL(__stop___verbose) = .; \
107 LIKELY_PROFILE() \
108 BRANCH_PROFILE() \
109 TRACE_PRINTKS() \
110 - TRACEPOINT_STR()
111 + TRACEPOINT_STR() \
112 + *(.data.[a-zA-Z_]*)
113
114 /*
115 * Data section helpers
116 @@ -291,35 +292,35 @@
117 /* PCI quirks */ \
118 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
119 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
120 - *(.pci_fixup_early) \
121 + KEEP(*(.pci_fixup_early)) \
122 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
123 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
124 - *(.pci_fixup_header) \
125 + KEEP(*(.pci_fixup_header)) \
126 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
127 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
128 - *(.pci_fixup_final) \
129 + KEEP(*(.pci_fixup_final)) \
130 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
131 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
132 - *(.pci_fixup_enable) \
133 + KEEP(*(.pci_fixup_enable)) \
134 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
135 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
136 - *(.pci_fixup_resume) \
137 + KEEP(*(.pci_fixup_resume)) \
138 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
139 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
140 - *(.pci_fixup_resume_early) \
141 + KEEP(*(.pci_fixup_resume_early)) \
142 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
143 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
144 - *(.pci_fixup_suspend) \
145 + KEEP(*(.pci_fixup_suspend)) \
146 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
147 VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
148 - *(.pci_fixup_suspend_late) \
149 + KEEP(*(.pci_fixup_suspend_late)) \
150 VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
151 } \
152 \
153 /* Built-in firmware blobs */ \
154 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
155 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
156 - *(.builtin_fw) \
157 + KEEP(*(.builtin_fw)) \
158 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
159 } \
160 \
161 @@ -397,7 +398,7 @@
162 \
163 /* Kernel symbol table: strings */ \
164 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
165 - KEEP(*(__ksymtab_strings)) \
166 + *(__ksymtab_strings) \
167 } \
168 \
169 /* __*init sections */ \
170 @@ -410,14 +411,14 @@
171 /* Built-in module parameters. */ \
172 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
173 VMLINUX_SYMBOL(__start___param) = .; \
174 - *(__param) \
175 + KEEP(*(__param)) \
176 VMLINUX_SYMBOL(__stop___param) = .; \
177 } \
178 \
179 /* Built-in module versions. */ \
180 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
181 VMLINUX_SYMBOL(__start___modver) = .; \
182 - *(__modver) \
183 + KEEP(*(__modver)) \
184 VMLINUX_SYMBOL(__stop___modver) = .; \
185 . = ALIGN((align)); \
186 VMLINUX_SYMBOL(__end_rodata) = .; \
187 @@ -482,7 +483,7 @@
188 #define ENTRY_TEXT \
189 ALIGN_FUNCTION(); \
190 VMLINUX_SYMBOL(__entry_text_start) = .; \
191 - *(.entry.text) \
192 + KEEP(*(.entry.text)) \
193 VMLINUX_SYMBOL(__entry_text_end) = .;
194
195 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
196 @@ -520,7 +521,7 @@
197 . = ALIGN(align); \
198 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
199 VMLINUX_SYMBOL(__start___ex_table) = .; \
200 - *(__ex_table) \
201 + KEEP(*(__ex_table)) \
202 VMLINUX_SYMBOL(__stop___ex_table) = .; \
203 }
204
205 @@ -536,9 +537,9 @@
206 #ifdef CONFIG_CONSTRUCTORS
207 #define KERNEL_CTORS() . = ALIGN(8); \
208 VMLINUX_SYMBOL(__ctors_start) = .; \
209 - *(.ctors) \
210 + KEEP(*(.ctors)) \
211 *(SORT(.init_array.*)) \
212 - *(.init_array) \
213 + KEEP(*(.init_array)) \
214 VMLINUX_SYMBOL(__ctors_end) = .;
215 #else
216 #define KERNEL_CTORS()
217 @@ -595,7 +596,7 @@
218 #define SBSS(sbss_align) \
219 . = ALIGN(sbss_align); \
220 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
221 - *(.sbss) \
222 + *(.sbss .sbss.*) \
223 *(.scommon) \
224 }
225
226 @@ -662,7 +663,7 @@
227 . = ALIGN(8); \
228 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
229 VMLINUX_SYMBOL(__start___bug_table) = .; \
230 - *(__bug_table) \
231 + KEEP(*(__bug_table)) \
232 VMLINUX_SYMBOL(__stop___bug_table) = .; \
233 }
234 #else
235 @@ -674,7 +675,7 @@
236 . = ALIGN(4); \
237 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
238 VMLINUX_SYMBOL(__tracedata_start) = .; \
239 - *(.tracedata) \
240 + KEEP(*(.tracedata)) \
241 VMLINUX_SYMBOL(__tracedata_end) = .; \
242 }
243 #else
244 @@ -691,7 +692,7 @@
245 #define INIT_SETUP(initsetup_align) \
246 . = ALIGN(initsetup_align); \
247 VMLINUX_SYMBOL(__setup_start) = .; \
248 - *(.init.setup) \
249 + KEEP(*(.init.setup)) \
250 VMLINUX_SYMBOL(__setup_end) = .;
251
252 #define INIT_CALLS_LEVEL(level) \
253 --- a/arch/arm/kernel/vmlinux.lds.S
254 +++ b/arch/arm/kernel/vmlinux.lds.S
255 @@ -17,7 +17,7 @@
256 #define PROC_INFO \
257 . = ALIGN(4); \
258 VMLINUX_SYMBOL(__proc_info_begin) = .; \
259 - *(.proc.info.init) \
260 + KEEP(*(.proc.info.init)) \
261 VMLINUX_SYMBOL(__proc_info_end) = .;
262
263 #define HYPERVISOR_TEXT \
264 @@ -28,11 +28,11 @@
265 #define IDMAP_TEXT \
266 ALIGN_FUNCTION(); \
267 VMLINUX_SYMBOL(__idmap_text_start) = .; \
268 - *(.idmap.text) \
269 + KEEP(*(.idmap.text)) \
270 VMLINUX_SYMBOL(__idmap_text_end) = .; \
271 . = ALIGN(PAGE_SIZE); \
272 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
273 - *(.hyp.idmap.text) \
274 + KEEP(*(.hyp.idmap.text)) \
275 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
276
277 #ifdef CONFIG_HOTPLUG_CPU
278 @@ -105,7 +105,7 @@ SECTIONS
279 _stext = .; /* Text and read-only data */
280 IDMAP_TEXT
281 __exception_text_start = .;
282 - *(.exception.text)
283 + KEEP(*(.exception.text))
284 __exception_text_end = .;
285 IRQENTRY_TEXT
286 SOFTIRQENTRY_TEXT
287 @@ -134,7 +134,7 @@ SECTIONS
288 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
289 __start___ex_table = .;
290 #ifdef CONFIG_MMU
291 - *(__ex_table)
292 + KEEP(*(__ex_table))
293 #endif
294 __stop___ex_table = .;
295 }
296 @@ -146,12 +146,12 @@ SECTIONS
297 . = ALIGN(8);
298 .ARM.unwind_idx : {
299 __start_unwind_idx = .;
300 - *(.ARM.exidx*)
301 + KEEP(*(.ARM.exidx*))
302 __stop_unwind_idx = .;
303 }
304 .ARM.unwind_tab : {
305 __start_unwind_tab = .;
306 - *(.ARM.extab*)
307 + KEEP(*(.ARM.extab*))
308 __stop_unwind_tab = .;
309 }
310 #endif
311 @@ -171,14 +171,14 @@ SECTIONS
312 */
313 __vectors_start = .;
314 .vectors 0xffff0000 : AT(__vectors_start) {
315 - *(.vectors)
316 + KEEP(*(.vectors))
317 }
318 . = __vectors_start + SIZEOF(.vectors);
319 __vectors_end = .;
320
321 __stubs_start = .;
322 .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
323 - *(.stubs)
324 + KEEP(*(.stubs))
325 }
326 . = __stubs_start + SIZEOF(.stubs);
327 __stubs_end = .;
328 @@ -194,24 +194,24 @@ SECTIONS
329 }
330 .init.arch.info : {
331 __arch_info_begin = .;
332 - *(.arch.info.init)
333 + KEEP(*(.arch.info.init))
334 __arch_info_end = .;
335 }
336 .init.tagtable : {
337 __tagtable_begin = .;
338 - *(.taglist.init)
339 + KEEP(*(.taglist.init))
340 __tagtable_end = .;
341 }
342 #ifdef CONFIG_SMP_ON_UP
343 .init.smpalt : {
344 __smpalt_begin = .;
345 - *(.alt.smp.init)
346 + KEEP(*(.alt.smp.init))
347 __smpalt_end = .;
348 }
349 #endif
350 .init.pv_table : {
351 __pv_table_begin = .;
352 - *(.pv_table)
353 + KEEP(*(.pv_table))
354 __pv_table_end = .;
355 }
356 .init.data : {
357 --- a/arch/arm/boot/compressed/Makefile
358 +++ b/arch/arm/boot/compressed/Makefile
359 @@ -102,6 +102,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
360 ORIG_CFLAGS := $(KBUILD_CFLAGS)
361 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
362 endif
363 +KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
364
365 # -fstack-protector-strong triggers protection checks in this code,
366 # but it is being used too early to link to meaningful stack_chk logic.
367 --- a/arch/arm/Kconfig
368 +++ b/arch/arm/Kconfig
369 @@ -81,6 +81,7 @@ config ARM
370 select HAVE_UID16
371 select HAVE_VIRT_CPU_ACCOUNTING_GEN
372 select IRQ_FORCED_THREADING
373 + select LD_DEAD_CODE_DATA_ELIMINATION
374 select MODULES_USE_ELF_REL
375 select NO_BOOTMEM
376 select OF_EARLY_FLATTREE if OF
377 --- a/arch/mips/Kconfig
378 +++ b/arch/mips/Kconfig
379 @@ -55,6 +55,7 @@ config MIPS
380 select CLONE_BACKWARDS
381 select HAVE_DEBUG_STACKOVERFLOW
382 select HAVE_CC_STACKPROTECTOR
383 + select LD_DEAD_CODE_DATA_ELIMINATION
384 select CPU_PM if CPU_IDLE
385 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
386 select ARCH_BINFMT_ELF_STATE
387 --- a/Makefile
388 +++ b/Makefile
389 @@ -409,6 +409,11 @@ KBUILD_AFLAGS_MODULE := -DMODULE
390 KBUILD_CFLAGS_MODULE := -DMODULE
391 KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
392
393 +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
394 +KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,)
395 +KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,)
396 +endif
397 +
398 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
399 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
400 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
401 @@ -630,11 +635,6 @@ include arch/$(SRCARCH)/Makefile
402 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
403 KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
404
405 -ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
406 -KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
407 -KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
408 -endif
409 -
410 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
411 KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
412 else