target/linux/generic/patches-3.10/220-gc_sections.patch
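
Build the kernel with -ffunction-sections/-fdata-sections (unless CONFIG_FUNCTION_TRACER is enabled), link vmlinux with --gc-sections on MIPS and ARM, and wrap the linker-script table input sections in KEEP() so that initcalls, exception tables, symbol tables and the other collected records survive section garbage collection. The ARM compressed-boot Makefile filters the -f*-sections flags back out of KBUILD_CFLAGS_KERNEL for the decompressor.
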
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
 #
 cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
 cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
 KBUILD_AFLAGS_MODULE += -mlong-calls
 KBUILD_CFLAGS_MODULE += -mlong-calls
 
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
 cflags-y += -ffreestanding
 
 #
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
 /* Exception table for data bus errors */
 __dbe_table : {
 __start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
 __stop___dbe_table = .;
 }
 
@@ -112,7 +112,7 @@ SECTIONS
 . = ALIGN(4);
 .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
 __mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
 __mips_machines_end = .;
 }
 
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -95,7 +95,7 @@
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 #define MCOUNT_REC() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
 #else
 #define MCOUNT_REC()
@@ -103,7 +103,7 @@
 
 #ifdef CONFIG_TRACE_BRANCH_PROFILING
 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
 #else
 #define LIKELY_PROFILE()
@@ -111,7 +111,7 @@
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
 VMLINUX_SYMBOL(__stop_branch_profile) = .;
 #else
 #define BRANCH_PROFILE()
@@ -120,7 +120,7 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
 #define FTRACE_EVENTS()
@@ -128,7 +128,7 @@
 
 #ifdef CONFIG_TRACING
 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
 #else
 #define TRACE_PRINTKS()
@@ -137,7 +137,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -146,8 +146,8 @@
 #ifdef CONFIG_CLKSRC_OF
 #define CLKSRC_OF_TABLES() . = ALIGN(8); \
 VMLINUX_SYMBOL(__clksrc_of_table) = .; \
- *(__clksrc_of_table) \
- *(__clksrc_of_table_end)
+ KEEP(*(__clksrc_of_table)) \
+ KEEP(*(__clksrc_of_table_end))
 #else
 #define CLKSRC_OF_TABLES()
 #endif
@@ -156,8 +156,8 @@
 #define IRQCHIP_OF_MATCH_TABLE() \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__irqchip_begin) = .; \
- *(__irqchip_of_table) \
- *(__irqchip_of_end)
+ KEEP(*(__irqchip_of_table)) \
+ KEEP(*(__irqchip_of_end))
 #else
 #define IRQCHIP_OF_MATCH_TABLE()
 #endif
@@ -165,8 +165,8 @@
 #ifdef CONFIG_COMMON_CLK
 #define CLK_OF_TABLES() . = ALIGN(8); \
 VMLINUX_SYMBOL(__clk_of_table) = .; \
- *(__clk_of_table) \
- *(__clk_of_table_end)
+ KEEP(*(__clk_of_table)) \
+ KEEP(*(__clk_of_table_end))
 #else
 #define CLK_OF_TABLES()
 #endif
@@ -174,7 +174,7 @@
 #define KERNEL_DTB() \
 STRUCT_ALIGN(); \
 VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
 VMLINUX_SYMBOL(__dtb_end) = .;
 
 /* .data section */
@@ -194,15 +194,16 @@
 /* implement dynamic printk debug */ \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
 VMLINUX_SYMBOL(__stop___jump_table) = .; \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
 VMLINUX_SYMBOL(__stop___verbose) = .; \
 LIKELY_PROFILE() \
 BRANCH_PROFILE() \
- TRACE_PRINTKS()
+ TRACE_PRINTKS() \
+ *(.data.[a-zA-Z_]*)
 
 /*
 * Data section helpers
@@ -256,39 +257,39 @@
 /* PCI quirks */ \
 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
 } \
 \
 /* Built-in firmware blobs */ \
 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
 } \
 \
 /* RapidIO route ops */ \
 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
- *(.rio_switch_ops) \
+ KEEP(*(.rio_switch_ops)) \
 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
 } \
 \
@@ -297,49 +298,49 @@
 /* Kernel symbol table: Normal symbols */ \
 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
 } \
 \
 /* Kernel symbol table: Normal unused symbols */ \
 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only unused symbols */ \
 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
 } \
 \
 /* Kernel symbol table: Normal symbols */ \
 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
 } \
 \
@@ -353,14 +354,14 @@
 /* Kernel symbol table: GPL-only unused symbols */ \
 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
 } \
 \
@@ -383,14 +384,14 @@
 /* Built-in module parameters. */ \
 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
 VMLINUX_SYMBOL(__stop___param) = .; \
 } \
 \
 /* Built-in module versions. */ \
 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
 VMLINUX_SYMBOL(__stop___modver) = .; \
 . = ALIGN((align)); \
 VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -450,7 +451,7 @@
 #define ENTRY_TEXT \
 ALIGN_FUNCTION(); \
 VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
 VMLINUX_SYMBOL(__entry_text_end) = .;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -478,7 +479,7 @@
 . = ALIGN(align); \
 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
 VMLINUX_SYMBOL(__stop___ex_table) = .; \
 }
 
@@ -494,7 +495,7 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
 VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -550,7 +551,7 @@
 #define SBSS(sbss_align) \
 . = ALIGN(sbss_align); \
 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
 *(.scommon) \
 }
 
@@ -568,7 +569,7 @@
 BSS_FIRST_SECTIONS \
 *(.bss..page_aligned) \
 *(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
 *(COMMON) \
 }
 
@@ -617,7 +618,7 @@
 . = ALIGN(8); \
 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
 VMLINUX_SYMBOL(__stop___bug_table) = .; \
 }
 #else
@@ -629,7 +630,7 @@
 . = ALIGN(4); \
 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
 VMLINUX_SYMBOL(__tracedata_end) = .; \
 }
 #else
@@ -646,17 +647,17 @@
 #define INIT_SETUP(initsetup_align) \
 . = ALIGN(initsetup_align); \
 VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
 VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level) \
 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
 
 #define INIT_CALLS \
 VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
 INIT_CALLS_LEVEL(0) \
 INIT_CALLS_LEVEL(1) \
 INIT_CALLS_LEVEL(2) \
@@ -670,21 +671,21 @@
 
 #define CON_INITCALL \
 VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
 VMLINUX_SYMBOL(__con_initcall_end) = .;
 
 #define SECURITY_INITCALL \
 VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
 VMLINUX_SYMBOL(__security_initcall_end) = .;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS \
 . = ALIGN(4); \
 VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
 . = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -17,11 +17,16 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
+LDFLAGS_vmlinux += --gc-sections
 
 OBJCOPYFLAGS :=-O binary -R .comment -S
 GZFLAGS :=-9
 #KBUILD_CFLAGS +=-pipe
 
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
 # Never generate .eh_frame
 KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
 
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,13 +12,13 @@
 #define PROC_INFO \
 . = ALIGN(4); \
 VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
 VMLINUX_SYMBOL(__proc_info_end) = .;
 
 #define IDMAP_TEXT \
 ALIGN_FUNCTION(); \
 VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
 VMLINUX_SYMBOL(__idmap_text_end) = .; \
 . = ALIGN(32); \
 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -97,7 +97,7 @@ SECTIONS
 .text : { /* Real text segment */
 _stext = .; /* Text and read-only data */
 __exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
 __exception_text_end = .;
 IRQENTRY_TEXT
 TEXT_TEXT
@@ -122,7 +122,7 @@ SECTIONS
 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
 __start___ex_table = .;
 #ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
 #endif
 __stop___ex_table = .;
 }
@@ -134,12 +134,12 @@ SECTIONS
 . = ALIGN(8);
 .ARM.unwind_idx : {
 __start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
 __stop_unwind_idx = .;
 }
 .ARM.unwind_tab : {
 __start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
 __stop_unwind_tab = .;
 }
 #endif
@@ -162,24 +162,24 @@ SECTIONS
 }
 .init.arch.info : {
 __arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
 __arch_info_end = .;
 }
 .init.tagtable : {
 __tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
 __tagtable_end = .;
 }
 #ifdef CONFIG_SMP_ON_UP
 .init.smpalt : {
 __smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
 __smpalt_end = .;
 }
 #endif
 .init.pv_table : {
 __pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
 __pv_table_end = .;
 }
 .init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -122,6 +122,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
 
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -Wa,-march=all -DZIMAGE