kernel: enable --gc-sections on mips and arm (build with -ffunction-sections/-fdata-sections, KEEP() required tables in the linker scripts), and prevent the -f*-sections flags from leaking into the boot zImage wrapper on arm
[openwrt/svn-archive/archive.git] / target / linux / generic / patches-3.8 / 220-gc_sections.patch
1 --- a/arch/mips/Makefile
2 +++ b/arch/mips/Makefile
3 @@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
4 #
5 cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
6 cflags-y += -msoft-float
7 -LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
8 +LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
9 KBUILD_AFLAGS_MODULE += -mlong-calls
10 KBUILD_CFLAGS_MODULE += -mlong-calls
11
12 +ifndef CONFIG_FUNCTION_TRACER
13 +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
14 +endif
15 +
16 cflags-y += -ffreestanding
17
18 #
19 --- a/arch/mips/kernel/vmlinux.lds.S
20 +++ b/arch/mips/kernel/vmlinux.lds.S
21 @@ -67,7 +67,7 @@ SECTIONS
22 /* Exception table for data bus errors */
23 __dbe_table : {
24 __start___dbe_table = .;
25 - *(__dbe_table)
26 + KEEP(*(__dbe_table))
27 __stop___dbe_table = .;
28 }
29
30 @@ -112,7 +112,7 @@ SECTIONS
31 . = ALIGN(4);
32 .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
33 __mips_machines_start = .;
34 - *(.mips.machines.init)
35 + KEEP(*(.mips.machines.init))
36 __mips_machines_end = .;
37 }
38
39 --- a/include/asm-generic/vmlinux.lds.h
40 +++ b/include/asm-generic/vmlinux.lds.h
41 @@ -101,7 +101,7 @@
42 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
43 #define MCOUNT_REC() . = ALIGN(8); \
44 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
45 - *(__mcount_loc) \
46 + KEEP(*(__mcount_loc)) \
47 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
48 #else
49 #define MCOUNT_REC()
50 @@ -109,7 +109,7 @@
51
52 #ifdef CONFIG_TRACE_BRANCH_PROFILING
53 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
54 - *(_ftrace_annotated_branch) \
55 + KEEP(*(_ftrace_annotated_branch)) \
56 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
57 #else
58 #define LIKELY_PROFILE()
59 @@ -117,7 +117,7 @@
60
61 #ifdef CONFIG_PROFILE_ALL_BRANCHES
62 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
63 - *(_ftrace_branch) \
64 + KEEP(*(_ftrace_branch)) \
65 VMLINUX_SYMBOL(__stop_branch_profile) = .;
66 #else
67 #define BRANCH_PROFILE()
68 @@ -126,7 +126,7 @@
69 #ifdef CONFIG_EVENT_TRACING
70 #define FTRACE_EVENTS() . = ALIGN(8); \
71 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
72 - *(_ftrace_events) \
73 + KEEP(*(_ftrace_events)) \
74 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
75 #else
76 #define FTRACE_EVENTS()
77 @@ -134,7 +134,7 @@
78
79 #ifdef CONFIG_TRACING
80 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
81 - *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
82 + KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
83 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
84 #else
85 #define TRACE_PRINTKS()
86 @@ -143,7 +143,7 @@
87 #ifdef CONFIG_FTRACE_SYSCALLS
88 #define TRACE_SYSCALLS() . = ALIGN(8); \
89 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
90 - *(__syscalls_metadata) \
91 + KEEP(*(__syscalls_metadata)) \
92 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
93 #else
94 #define TRACE_SYSCALLS()
95 @@ -153,7 +153,7 @@
96 #define KERNEL_DTB() \
97 STRUCT_ALIGN(); \
98 VMLINUX_SYMBOL(__dtb_start) = .; \
99 - *(.dtb.init.rodata) \
100 + KEEP(*(.dtb.init.rodata)) \
101 VMLINUX_SYMBOL(__dtb_end) = .;
102
103 /* .data section */
104 @@ -173,15 +173,16 @@
105 /* implement dynamic printk debug */ \
106 . = ALIGN(8); \
107 VMLINUX_SYMBOL(__start___jump_table) = .; \
108 - *(__jump_table) \
109 + KEEP(*(__jump_table)) \
110 VMLINUX_SYMBOL(__stop___jump_table) = .; \
111 . = ALIGN(8); \
112 VMLINUX_SYMBOL(__start___verbose) = .; \
113 - *(__verbose) \
114 + KEEP(*(__verbose)) \
115 VMLINUX_SYMBOL(__stop___verbose) = .; \
116 LIKELY_PROFILE() \
117 BRANCH_PROFILE() \
118 - TRACE_PRINTKS()
119 + TRACE_PRINTKS() \
120 + *(.data.[a-zA-Z_]*)
121
122 /*
123 * Data section helpers
124 @@ -235,39 +236,39 @@
125 /* PCI quirks */ \
126 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
127 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
128 - *(.pci_fixup_early) \
129 + KEEP(*(.pci_fixup_early)) \
130 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
131 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
132 - *(.pci_fixup_header) \
133 + KEEP(*(.pci_fixup_header)) \
134 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
135 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
136 - *(.pci_fixup_final) \
137 + KEEP(*(.pci_fixup_final)) \
138 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
139 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
140 - *(.pci_fixup_enable) \
141 + KEEP(*(.pci_fixup_enable)) \
142 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
143 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
144 - *(.pci_fixup_resume) \
145 + KEEP(*(.pci_fixup_resume)) \
146 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
147 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
148 - *(.pci_fixup_resume_early) \
149 + KEEP(*(.pci_fixup_resume_early)) \
150 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
151 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
152 - *(.pci_fixup_suspend) \
153 + KEEP(*(.pci_fixup_suspend)) \
154 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
155 } \
156 \
157 /* Built-in firmware blobs */ \
158 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
159 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
160 - *(.builtin_fw) \
161 + KEEP(*(.builtin_fw)) \
162 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
163 } \
164 \
165 /* RapidIO route ops */ \
166 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
167 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
168 - *(.rio_switch_ops) \
169 + KEEP(*(.rio_switch_ops)) \
170 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
171 } \
172 \
173 @@ -276,49 +277,49 @@
174 /* Kernel symbol table: Normal symbols */ \
175 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
176 VMLINUX_SYMBOL(__start___ksymtab) = .; \
177 - *(SORT(___ksymtab+*)) \
178 + KEEP(*(SORT(___ksymtab+*))) \
179 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
180 } \
181 \
182 /* Kernel symbol table: GPL-only symbols */ \
183 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
184 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
185 - *(SORT(___ksymtab_gpl+*)) \
186 + KEEP(*(SORT(___ksymtab_gpl+*))) \
187 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
188 } \
189 \
190 /* Kernel symbol table: Normal unused symbols */ \
191 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
192 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
193 - *(SORT(___ksymtab_unused+*)) \
194 + KEEP(*(SORT(___ksymtab_unused+*))) \
195 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
196 } \
197 \
198 /* Kernel symbol table: GPL-only unused symbols */ \
199 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
200 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
201 - *(SORT(___ksymtab_unused_gpl+*)) \
202 + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
203 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
204 } \
205 \
206 /* Kernel symbol table: GPL-future-only symbols */ \
207 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
208 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
209 - *(SORT(___ksymtab_gpl_future+*)) \
210 + KEEP(*(SORT(___ksymtab_gpl_future+*))) \
211 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
212 } \
213 \
214 /* Kernel symbol table: Normal symbols */ \
215 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
216 VMLINUX_SYMBOL(__start___kcrctab) = .; \
217 - *(SORT(___kcrctab+*)) \
218 + KEEP(*(SORT(___kcrctab+*))) \
219 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
220 } \
221 \
222 /* Kernel symbol table: GPL-only symbols */ \
223 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
224 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
225 - *(SORT(___kcrctab_gpl+*)) \
226 + KEEP(*(SORT(___kcrctab_gpl+*))) \
227 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
228 } \
229 \
230 @@ -332,14 +333,14 @@
231 /* Kernel symbol table: GPL-only unused symbols */ \
232 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
233 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
234 - *(SORT(___kcrctab_unused_gpl+*)) \
235 + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
236 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
237 } \
238 \
239 /* Kernel symbol table: GPL-future-only symbols */ \
240 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
241 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
242 - *(SORT(___kcrctab_gpl_future+*)) \
243 + KEEP(*(SORT(___kcrctab_gpl_future+*))) \
244 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
245 } \
246 \
247 @@ -362,14 +363,14 @@
248 /* Built-in module parameters. */ \
249 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
250 VMLINUX_SYMBOL(__start___param) = .; \
251 - *(__param) \
252 + KEEP(*(__param)) \
253 VMLINUX_SYMBOL(__stop___param) = .; \
254 } \
255 \
256 /* Built-in module versions. */ \
257 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
258 VMLINUX_SYMBOL(__start___modver) = .; \
259 - *(__modver) \
260 + KEEP(*(__modver)) \
261 VMLINUX_SYMBOL(__stop___modver) = .; \
262 . = ALIGN((align)); \
263 VMLINUX_SYMBOL(__end_rodata) = .; \
264 @@ -429,7 +430,7 @@
265 #define ENTRY_TEXT \
266 ALIGN_FUNCTION(); \
267 VMLINUX_SYMBOL(__entry_text_start) = .; \
268 - *(.entry.text) \
269 + KEEP(*(.entry.text)) \
270 VMLINUX_SYMBOL(__entry_text_end) = .;
271
272 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
273 @@ -457,7 +458,7 @@
274 . = ALIGN(align); \
275 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
276 VMLINUX_SYMBOL(__start___ex_table) = .; \
277 - *(__ex_table) \
278 + KEEP(*(__ex_table)) \
279 VMLINUX_SYMBOL(__stop___ex_table) = .; \
280 }
281
282 @@ -473,7 +474,7 @@
283 #ifdef CONFIG_CONSTRUCTORS
284 #define KERNEL_CTORS() . = ALIGN(8); \
285 VMLINUX_SYMBOL(__ctors_start) = .; \
286 - *(.ctors) \
287 + KEEP(*(.ctors)) \
288 VMLINUX_SYMBOL(__ctors_end) = .;
289 #else
290 #define KERNEL_CTORS()
291 @@ -526,7 +527,7 @@
292 #define SBSS(sbss_align) \
293 . = ALIGN(sbss_align); \
294 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
295 - *(.sbss) \
296 + *(.sbss .sbss.*) \
297 *(.scommon) \
298 }
299
300 @@ -544,7 +545,7 @@
301 BSS_FIRST_SECTIONS \
302 *(.bss..page_aligned) \
303 *(.dynbss) \
304 - *(.bss) \
305 + *(.bss .bss.*) \
306 *(COMMON) \
307 }
308
309 @@ -593,7 +594,7 @@
310 . = ALIGN(8); \
311 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
312 VMLINUX_SYMBOL(__start___bug_table) = .; \
313 - *(__bug_table) \
314 + KEEP(*(__bug_table)) \
315 VMLINUX_SYMBOL(__stop___bug_table) = .; \
316 }
317 #else
318 @@ -605,7 +606,7 @@
319 . = ALIGN(4); \
320 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
321 VMLINUX_SYMBOL(__tracedata_start) = .; \
322 - *(.tracedata) \
323 + KEEP(*(.tracedata)) \
324 VMLINUX_SYMBOL(__tracedata_end) = .; \
325 }
326 #else
327 @@ -622,17 +623,17 @@
328 #define INIT_SETUP(initsetup_align) \
329 . = ALIGN(initsetup_align); \
330 VMLINUX_SYMBOL(__setup_start) = .; \
331 - *(.init.setup) \
332 + KEEP(*(.init.setup)) \
333 VMLINUX_SYMBOL(__setup_end) = .;
334
335 #define INIT_CALLS_LEVEL(level) \
336 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
337 - *(.initcall##level##.init) \
338 - *(.initcall##level##s.init) \
339 + KEEP(*(.initcall##level##.init)) \
340 + KEEP(*(.initcall##level##s.init)) \
341
342 #define INIT_CALLS \
343 VMLINUX_SYMBOL(__initcall_start) = .; \
344 - *(.initcallearly.init) \
345 + KEEP(*(.initcallearly.init)) \
346 INIT_CALLS_LEVEL(0) \
347 INIT_CALLS_LEVEL(1) \
348 INIT_CALLS_LEVEL(2) \
349 @@ -646,21 +647,21 @@
350
351 #define CON_INITCALL \
352 VMLINUX_SYMBOL(__con_initcall_start) = .; \
353 - *(.con_initcall.init) \
354 + KEEP(*(.con_initcall.init)) \
355 VMLINUX_SYMBOL(__con_initcall_end) = .;
356
357 #define SECURITY_INITCALL \
358 VMLINUX_SYMBOL(__security_initcall_start) = .; \
359 - *(.security_initcall.init) \
360 + KEEP(*(.security_initcall.init)) \
361 VMLINUX_SYMBOL(__security_initcall_end) = .;
362
363 #ifdef CONFIG_BLK_DEV_INITRD
364 #define INIT_RAM_FS \
365 . = ALIGN(4); \
366 VMLINUX_SYMBOL(__initramfs_start) = .; \
367 - *(.init.ramfs) \
368 + KEEP(*(.init.ramfs)) \
369 . = ALIGN(8); \
370 - *(.init.ramfs.info)
371 + KEEP(*(.init.ramfs.info))
372 #else
373 #define INIT_RAM_FS
374 #endif
375 --- a/arch/arm/Makefile
376 +++ b/arch/arm/Makefile
377 @@ -17,11 +17,16 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
378 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
379 LDFLAGS_vmlinux += --be8
380 endif
381 +LDFLAGS_vmlinux += --gc-sections
382
383 OBJCOPYFLAGS :=-O binary -R .comment -S
384 GZFLAGS :=-9
385 #KBUILD_CFLAGS +=-pipe
386
387 +ifndef CONFIG_FUNCTION_TRACER
388 +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
389 +endif
390 +
391 # Never generate .eh_frame
392 KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
393
394 --- a/arch/arm/kernel/vmlinux.lds.S
395 +++ b/arch/arm/kernel/vmlinux.lds.S
396 @@ -12,13 +12,13 @@
397 #define PROC_INFO \
398 . = ALIGN(4); \
399 VMLINUX_SYMBOL(__proc_info_begin) = .; \
400 - *(.proc.info.init) \
401 + KEEP(*(.proc.info.init)) \
402 VMLINUX_SYMBOL(__proc_info_end) = .;
403
404 #define IDMAP_TEXT \
405 ALIGN_FUNCTION(); \
406 VMLINUX_SYMBOL(__idmap_text_start) = .; \
407 - *(.idmap.text) \
408 + KEEP(*(.idmap.text)) \
409 VMLINUX_SYMBOL(__idmap_text_end) = .;
410
411 #ifdef CONFIG_HOTPLUG_CPU
412 @@ -93,7 +93,7 @@ SECTIONS
413 .text : { /* Real text segment */
414 _stext = .; /* Text and read-only data */
415 __exception_text_start = .;
416 - *(.exception.text)
417 + KEEP(*(.exception.text))
418 __exception_text_end = .;
419 IRQENTRY_TEXT
420 TEXT_TEXT
421 @@ -118,7 +118,7 @@ SECTIONS
422 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
423 __start___ex_table = .;
424 #ifdef CONFIG_MMU
425 - *(__ex_table)
426 + KEEP(*(__ex_table))
427 #endif
428 __stop___ex_table = .;
429 }
430 @@ -130,12 +130,12 @@ SECTIONS
431 . = ALIGN(8);
432 .ARM.unwind_idx : {
433 __start_unwind_idx = .;
434 - *(.ARM.exidx*)
435 + KEEP(*(.ARM.exidx*))
436 __stop_unwind_idx = .;
437 }
438 .ARM.unwind_tab : {
439 __start_unwind_tab = .;
440 - *(.ARM.extab*)
441 + KEEP(*(.ARM.extab*))
442 __stop_unwind_tab = .;
443 }
444 #endif
445 @@ -158,24 +158,24 @@ SECTIONS
446 }
447 .init.arch.info : {
448 __arch_info_begin = .;
449 - *(.arch.info.init)
450 + KEEP(*(.arch.info.init))
451 __arch_info_end = .;
452 }
453 .init.tagtable : {
454 __tagtable_begin = .;
455 - *(.taglist.init)
456 + KEEP(*(.taglist.init))
457 __tagtable_end = .;
458 }
459 #ifdef CONFIG_SMP_ON_UP
460 .init.smpalt : {
461 __smpalt_begin = .;
462 - *(.alt.smp.init)
463 + KEEP(*(.alt.smp.init))
464 __smpalt_end = .;
465 }
466 #endif
467 .init.pv_table : {
468 __pv_table_begin = .;
469 - *(.pv_table)
470 + KEEP(*(.pv_table))
471 __pv_table_end = .;
472 }
473 .init.data : {
474 --- a/arch/arm/boot/compressed/Makefile
475 +++ b/arch/arm/boot/compressed/Makefile
476 @@ -119,6 +119,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
477 ORIG_CFLAGS := $(KBUILD_CFLAGS)
478 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
479 endif
480 +KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
481
482 ccflags-y := -fpic -fno-builtin -I$(obj)
483 asflags-y := -Wa,-march=all -DZIMAGE