From: Felix Fietkau <nbd@nbd.name>

Use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping, this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---

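Why the hunks below need KEEP(): with -ffunction-sections and -fdata-sections
the compiler emits each function and each data object into its own input
section, and --gc-sections lets the linker discard any section that is not
referenced from the entry point or another kept section. Tables that the
kernel only walks at run time through generated __start_*/__stop_* marker
symbols carry no such reference, so the linker script has to pin them
explicitly. A minimal illustrative sketch (the .my_table section name is a
made-up example, not taken from this patch):

    .my_table : {
            __start_my_table = .;
            KEEP(*(.my_table))      /* retained even with --gc-sections */
            __stop_my_table = .;
    }

Without KEEP() the linker would drop such input sections and the region
between the two marker symbols would be empty at boot; that is why the
initcall, ksymtab, PCI fixup and similar tables below are wrapped in KEEP().
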
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls

+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
#
# pass -msoft-float to GAS if it supports it. However on newer binutils
# (specifically newer than 2.24.51.20140728) we then also need to explicitly
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}

@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}

--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -89,7 +89,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -97,7 +97,7 @@

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -105,7 +105,7 @@

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- *(_ftrace_enum_map) \
+ KEEP(*(_ftrace_enum_map)) \
VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
@@ -134,7 +134,7 @@

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -147,7 +147,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -169,8 +169,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))

#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
@@ -200,16 +200,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)

/*
* Data section helpers
@@ -263,35 +264,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -300,49 +301,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -356,14 +357,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -382,14 +383,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -445,7 +446,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -473,7 +474,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}

@@ -489,9 +490,9 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
*(SORT(.init_array.*)) \
- *(.init_array) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -542,7 +543,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}

@@ -560,7 +561,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}

@@ -609,7 +610,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -621,7 +622,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -638,17 +639,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \

#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -662,21 +663,21 @@

#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -18,11 +18,16 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
+LDFLAGS_vmlinux += --gc-sections

OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe

+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)

--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -15,13 +15,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -102,7 +102,7 @@ SECTIONS
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -126,7 +126,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -138,12 +138,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -166,14 +166,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;

__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -187,24 +187,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))

ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE