220-gc_sections.patch (16 KB)
  1. From: Felix Fietkau <[email protected]>
  2. use -ffunction-sections, -fdata-sections and --gc-sections
  3. In combination with kernel symbol export stripping this significantly reduces
  4. the kernel image size. Used on both ARM and MIPS architectures.
  5. Signed-off-by: Felix Fietkau <[email protected]>
  6. Signed-off-by: Jonas Gorski <[email protected]>
  7. Signed-off-by: Gabor Juhos <[email protected]>
  8. ---
  9. --- a/arch/mips/Makefile
  10. +++ b/arch/mips/Makefile
  11. @@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
  12. #
  13. cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
  14. cflags-y += -msoft-float
  15. -LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
  16. +LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
  17. KBUILD_AFLAGS_MODULE += -mlong-calls
  18. KBUILD_CFLAGS_MODULE += -mlong-calls
  19. +ifndef CONFIG_FUNCTION_TRACER
  20. +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
  21. +endif
  22. +
  23. cflags-y += -ffreestanding
  24. #
  25. --- a/arch/mips/kernel/vmlinux.lds.S
  26. +++ b/arch/mips/kernel/vmlinux.lds.S
  27. @@ -67,7 +67,7 @@ SECTIONS
  28. /* Exception table for data bus errors */
  29. __dbe_table : {
  30. __start___dbe_table = .;
  31. - *(__dbe_table)
  32. + KEEP(*(__dbe_table))
  33. __stop___dbe_table = .;
  34. }
  35. @@ -112,7 +112,7 @@ SECTIONS
  36. . = ALIGN(4);
  37. .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
  38. __mips_machines_start = .;
  39. - *(.mips.machines.init)
  40. + KEEP(*(.mips.machines.init))
  41. __mips_machines_end = .;
  42. }
  43. --- a/include/asm-generic/vmlinux.lds.h
  44. +++ b/include/asm-generic/vmlinux.lds.h
  45. @@ -95,7 +95,7 @@
  46. #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  47. #define MCOUNT_REC() . = ALIGN(8); \
  48. VMLINUX_SYMBOL(__start_mcount_loc) = .; \
  49. - *(__mcount_loc) \
  50. + KEEP(*(__mcount_loc)) \
  51. VMLINUX_SYMBOL(__stop_mcount_loc) = .;
  52. #else
  53. #define MCOUNT_REC()
  54. @@ -103,7 +103,7 @@
  55. #ifdef CONFIG_TRACE_BRANCH_PROFILING
  56. #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
  57. - *(_ftrace_annotated_branch) \
  58. + KEEP(*(_ftrace_annotated_branch)) \
  59. VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
  60. #else
  61. #define LIKELY_PROFILE()
  62. @@ -111,7 +111,7 @@
  63. #ifdef CONFIG_PROFILE_ALL_BRANCHES
  64. #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
  65. - *(_ftrace_branch) \
  66. + KEEP(*(_ftrace_branch)) \
  67. VMLINUX_SYMBOL(__stop_branch_profile) = .;
  68. #else
  69. #define BRANCH_PROFILE()
  70. @@ -120,7 +120,7 @@
  71. #ifdef CONFIG_EVENT_TRACING
  72. #define FTRACE_EVENTS() . = ALIGN(8); \
  73. VMLINUX_SYMBOL(__start_ftrace_events) = .; \
  74. - *(_ftrace_events) \
  75. + KEEP(*(_ftrace_events)) \
  76. VMLINUX_SYMBOL(__stop_ftrace_events) = .;
  77. #else
  78. #define FTRACE_EVENTS()
  79. @@ -128,7 +128,7 @@
  80. #ifdef CONFIG_TRACING
  81. #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
  82. - *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
  83. + KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
  84. VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
  85. #else
  86. #define TRACE_PRINTKS()
  87. @@ -137,7 +137,7 @@
  88. #ifdef CONFIG_FTRACE_SYSCALLS
  89. #define TRACE_SYSCALLS() . = ALIGN(8); \
  90. VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
  91. - *(__syscalls_metadata) \
  92. + KEEP(*(__syscalls_metadata)) \
  93. VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
  94. #else
  95. #define TRACE_SYSCALLS()
  96. @@ -146,8 +146,8 @@
  97. #ifdef CONFIG_CLKSRC_OF
  98. #define CLKSRC_OF_TABLES() . = ALIGN(8); \
  99. VMLINUX_SYMBOL(__clksrc_of_table) = .; \
  100. - *(__clksrc_of_table) \
  101. - *(__clksrc_of_table_end)
  102. + KEEP(*(__clksrc_of_table)) \
  103. + KEEP(*(__clksrc_of_table_end))
  104. #else
  105. #define CLKSRC_OF_TABLES()
  106. #endif
  107. @@ -156,8 +156,8 @@
  108. #define IRQCHIP_OF_MATCH_TABLE() \
  109. . = ALIGN(8); \
  110. VMLINUX_SYMBOL(__irqchip_begin) = .; \
  111. - *(__irqchip_of_table) \
  112. - *(__irqchip_of_end)
  113. + KEEP(*(__irqchip_of_table)) \
  114. + KEEP(*(__irqchip_of_end))
  115. #else
  116. #define IRQCHIP_OF_MATCH_TABLE()
  117. #endif
  118. @@ -165,8 +165,8 @@
  119. #ifdef CONFIG_COMMON_CLK
  120. #define CLK_OF_TABLES() . = ALIGN(8); \
  121. VMLINUX_SYMBOL(__clk_of_table) = .; \
  122. - *(__clk_of_table) \
  123. - *(__clk_of_table_end)
  124. + KEEP(*(__clk_of_table)) \
  125. + KEEP(*(__clk_of_table_end))
  126. #else
  127. #define CLK_OF_TABLES()
  128. #endif
  129. @@ -174,7 +174,7 @@
  130. #define KERNEL_DTB() \
  131. STRUCT_ALIGN(); \
  132. VMLINUX_SYMBOL(__dtb_start) = .; \
  133. - *(.dtb.init.rodata) \
  134. + KEEP(*(.dtb.init.rodata)) \
  135. VMLINUX_SYMBOL(__dtb_end) = .;
  136. /* .data section */
  137. @@ -194,15 +194,16 @@
  138. /* implement dynamic printk debug */ \
  139. . = ALIGN(8); \
  140. VMLINUX_SYMBOL(__start___jump_table) = .; \
  141. - *(__jump_table) \
  142. + KEEP(*(__jump_table)) \
  143. VMLINUX_SYMBOL(__stop___jump_table) = .; \
  144. . = ALIGN(8); \
  145. VMLINUX_SYMBOL(__start___verbose) = .; \
  146. - *(__verbose) \
  147. + KEEP(*(__verbose)) \
  148. VMLINUX_SYMBOL(__stop___verbose) = .; \
  149. LIKELY_PROFILE() \
  150. BRANCH_PROFILE() \
  151. - TRACE_PRINTKS()
  152. + TRACE_PRINTKS() \
  153. + *(.data.[a-zA-Z_]*)
  154. /*
  155. * Data section helpers
  156. @@ -256,39 +257,39 @@
  157. /* PCI quirks */ \
  158. .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
  159. VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
  160. - *(.pci_fixup_early) \
  161. + KEEP(*(.pci_fixup_early)) \
  162. VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
  163. VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
  164. - *(.pci_fixup_header) \
  165. + KEEP(*(.pci_fixup_header)) \
  166. VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
  167. VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
  168. - *(.pci_fixup_final) \
  169. + KEEP(*(.pci_fixup_final)) \
  170. VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
  171. VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
  172. - *(.pci_fixup_enable) \
  173. + KEEP(*(.pci_fixup_enable)) \
  174. VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
  175. VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
  176. - *(.pci_fixup_resume) \
  177. + KEEP(*(.pci_fixup_resume)) \
  178. VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
  179. VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
  180. - *(.pci_fixup_resume_early) \
  181. + KEEP(*(.pci_fixup_resume_early)) \
  182. VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
  183. VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
  184. - *(.pci_fixup_suspend) \
  185. + KEEP(*(.pci_fixup_suspend)) \
  186. VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
  187. } \
  188. \
  189. /* Built-in firmware blobs */ \
  190. .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
  191. VMLINUX_SYMBOL(__start_builtin_fw) = .; \
  192. - *(.builtin_fw) \
  193. + KEEP(*(.builtin_fw)) \
  194. VMLINUX_SYMBOL(__end_builtin_fw) = .; \
  195. } \
  196. \
  197. /* RapidIO route ops */ \
  198. .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
  199. VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
  200. - *(.rio_switch_ops) \
  201. + KEEP(*(.rio_switch_ops)) \
  202. VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
  203. } \
  204. \
  205. @@ -297,49 +298,49 @@
  206. /* Kernel symbol table: Normal symbols */ \
  207. __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
  208. VMLINUX_SYMBOL(__start___ksymtab) = .; \
  209. - *(SORT(___ksymtab+*)) \
  210. + KEEP(*(SORT(___ksymtab+*))) \
  211. VMLINUX_SYMBOL(__stop___ksymtab) = .; \
  212. } \
  213. \
  214. /* Kernel symbol table: GPL-only symbols */ \
  215. __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
  216. VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
  217. - *(SORT(___ksymtab_gpl+*)) \
  218. + KEEP(*(SORT(___ksymtab_gpl+*))) \
  219. VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
  220. } \
  221. \
  222. /* Kernel symbol table: Normal unused symbols */ \
  223. __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
  224. VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
  225. - *(SORT(___ksymtab_unused+*)) \
  226. + KEEP(*(SORT(___ksymtab_unused+*))) \
  227. VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
  228. } \
  229. \
  230. /* Kernel symbol table: GPL-only unused symbols */ \
  231. __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
  232. VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
  233. - *(SORT(___ksymtab_unused_gpl+*)) \
  234. + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
  235. VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
  236. } \
  237. \
  238. /* Kernel symbol table: GPL-future-only symbols */ \
  239. __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
  240. VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
  241. - *(SORT(___ksymtab_gpl_future+*)) \
  242. + KEEP(*(SORT(___ksymtab_gpl_future+*))) \
  243. VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
  244. } \
  245. \
  246. /* Kernel symbol table: Normal symbols */ \
  247. __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
  248. VMLINUX_SYMBOL(__start___kcrctab) = .; \
  249. - *(SORT(___kcrctab+*)) \
  250. + KEEP(*(SORT(___kcrctab+*))) \
  251. VMLINUX_SYMBOL(__stop___kcrctab) = .; \
  252. } \
  253. \
  254. /* Kernel symbol table: GPL-only symbols */ \
  255. __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
  256. VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
  257. - *(SORT(___kcrctab_gpl+*)) \
  258. + KEEP(*(SORT(___kcrctab_gpl+*))) \
  259. VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
  260. } \
  261. \
  262. @@ -353,14 +354,14 @@
  263. /* Kernel symbol table: GPL-only unused symbols */ \
  264. __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
  265. VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
  266. - *(SORT(___kcrctab_unused_gpl+*)) \
  267. + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
  268. VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
  269. } \
  270. \
  271. /* Kernel symbol table: GPL-future-only symbols */ \
  272. __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
  273. VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
  274. - *(SORT(___kcrctab_gpl_future+*)) \
  275. + KEEP(*(SORT(___kcrctab_gpl_future+*))) \
  276. VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
  277. } \
  278. \
  279. @@ -383,14 +384,14 @@
  280. /* Built-in module parameters. */ \
  281. __param : AT(ADDR(__param) - LOAD_OFFSET) { \
  282. VMLINUX_SYMBOL(__start___param) = .; \
  283. - *(__param) \
  284. + KEEP(*(__param)) \
  285. VMLINUX_SYMBOL(__stop___param) = .; \
  286. } \
  287. \
  288. /* Built-in module versions. */ \
  289. __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
  290. VMLINUX_SYMBOL(__start___modver) = .; \
  291. - *(__modver) \
  292. + KEEP(*(__modver)) \
  293. VMLINUX_SYMBOL(__stop___modver) = .; \
  294. . = ALIGN((align)); \
  295. VMLINUX_SYMBOL(__end_rodata) = .; \
  296. @@ -450,7 +451,7 @@
  297. #define ENTRY_TEXT \
  298. ALIGN_FUNCTION(); \
  299. VMLINUX_SYMBOL(__entry_text_start) = .; \
  300. - *(.entry.text) \
  301. + KEEP(*(.entry.text)) \
  302. VMLINUX_SYMBOL(__entry_text_end) = .;
  303. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  304. @@ -478,7 +479,7 @@
  305. . = ALIGN(align); \
  306. __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
  307. VMLINUX_SYMBOL(__start___ex_table) = .; \
  308. - *(__ex_table) \
  309. + KEEP(*(__ex_table)) \
  310. VMLINUX_SYMBOL(__stop___ex_table) = .; \
  311. }
  312. @@ -494,7 +495,7 @@
  313. #ifdef CONFIG_CONSTRUCTORS
  314. #define KERNEL_CTORS() . = ALIGN(8); \
  315. VMLINUX_SYMBOL(__ctors_start) = .; \
  316. - *(.ctors) \
  317. + KEEP(*(.ctors)) \
  318. VMLINUX_SYMBOL(__ctors_end) = .;
  319. #else
  320. #define KERNEL_CTORS()
  321. @@ -550,7 +551,7 @@
  322. #define SBSS(sbss_align) \
  323. . = ALIGN(sbss_align); \
  324. .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
  325. - *(.sbss) \
  326. + *(.sbss .sbss.*) \
  327. *(.scommon) \
  328. }
  329. @@ -568,7 +569,7 @@
  330. BSS_FIRST_SECTIONS \
  331. *(.bss..page_aligned) \
  332. *(.dynbss) \
  333. - *(.bss) \
  334. + *(.bss .bss.*) \
  335. *(COMMON) \
  336. }
  337. @@ -617,7 +618,7 @@
  338. . = ALIGN(8); \
  339. __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
  340. VMLINUX_SYMBOL(__start___bug_table) = .; \
  341. - *(__bug_table) \
  342. + KEEP(*(__bug_table)) \
  343. VMLINUX_SYMBOL(__stop___bug_table) = .; \
  344. }
  345. #else
  346. @@ -629,7 +630,7 @@
  347. . = ALIGN(4); \
  348. .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
  349. VMLINUX_SYMBOL(__tracedata_start) = .; \
  350. - *(.tracedata) \
  351. + KEEP(*(.tracedata)) \
  352. VMLINUX_SYMBOL(__tracedata_end) = .; \
  353. }
  354. #else
  355. @@ -646,17 +647,17 @@
  356. #define INIT_SETUP(initsetup_align) \
  357. . = ALIGN(initsetup_align); \
  358. VMLINUX_SYMBOL(__setup_start) = .; \
  359. - *(.init.setup) \
  360. + KEEP(*(.init.setup)) \
  361. VMLINUX_SYMBOL(__setup_end) = .;
  362. #define INIT_CALLS_LEVEL(level) \
  363. VMLINUX_SYMBOL(__initcall##level##_start) = .; \
  364. - *(.initcall##level##.init) \
  365. - *(.initcall##level##s.init) \
  366. + KEEP(*(.initcall##level##.init)) \
  367. + KEEP(*(.initcall##level##s.init)) \
  368. #define INIT_CALLS \
  369. VMLINUX_SYMBOL(__initcall_start) = .; \
  370. - *(.initcallearly.init) \
  371. + KEEP(*(.initcallearly.init)) \
  372. INIT_CALLS_LEVEL(0) \
  373. INIT_CALLS_LEVEL(1) \
  374. INIT_CALLS_LEVEL(2) \
  375. @@ -670,21 +671,21 @@
  376. #define CON_INITCALL \
  377. VMLINUX_SYMBOL(__con_initcall_start) = .; \
  378. - *(.con_initcall.init) \
  379. + KEEP(*(.con_initcall.init)) \
  380. VMLINUX_SYMBOL(__con_initcall_end) = .;
  381. #define SECURITY_INITCALL \
  382. VMLINUX_SYMBOL(__security_initcall_start) = .; \
  383. - *(.security_initcall.init) \
  384. + KEEP(*(.security_initcall.init)) \
  385. VMLINUX_SYMBOL(__security_initcall_end) = .;
  386. #ifdef CONFIG_BLK_DEV_INITRD
  387. #define INIT_RAM_FS \
  388. . = ALIGN(4); \
  389. VMLINUX_SYMBOL(__initramfs_start) = .; \
  390. - *(.init.ramfs) \
  391. + KEEP(*(.init.ramfs)) \
  392. . = ALIGN(8); \
  393. - *(.init.ramfs.info)
  394. + KEEP(*(.init.ramfs.info))
  395. #else
  396. #define INIT_RAM_FS
  397. #endif
  398. --- a/arch/arm/Makefile
  399. +++ b/arch/arm/Makefile
  400. @@ -17,11 +17,16 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
  401. ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
  402. LDFLAGS_vmlinux += --be8
  403. endif
  404. +LDFLAGS_vmlinux += --gc-sections
  405. OBJCOPYFLAGS :=-O binary -R .comment -S
  406. GZFLAGS :=-9
  407. #KBUILD_CFLAGS +=-pipe
  408. +ifndef CONFIG_FUNCTION_TRACER
  409. +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
  410. +endif
  411. +
  412. # Never generate .eh_frame
  413. KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
  414. --- a/arch/arm/kernel/vmlinux.lds.S
  415. +++ b/arch/arm/kernel/vmlinux.lds.S
  416. @@ -12,13 +12,13 @@
  417. #define PROC_INFO \
  418. . = ALIGN(4); \
  419. VMLINUX_SYMBOL(__proc_info_begin) = .; \
  420. - *(.proc.info.init) \
  421. + KEEP(*(.proc.info.init)) \
  422. VMLINUX_SYMBOL(__proc_info_end) = .;
  423. #define IDMAP_TEXT \
  424. ALIGN_FUNCTION(); \
  425. VMLINUX_SYMBOL(__idmap_text_start) = .; \
  426. - *(.idmap.text) \
  427. + KEEP(*(.idmap.text)) \
  428. VMLINUX_SYMBOL(__idmap_text_end) = .; \
  429. . = ALIGN(32); \
  430. VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
  431. @@ -97,7 +97,7 @@ SECTIONS
  432. .text : { /* Real text segment */
  433. _stext = .; /* Text and read-only data */
  434. __exception_text_start = .;
  435. - *(.exception.text)
  436. + KEEP(*(.exception.text))
  437. __exception_text_end = .;
  438. IRQENTRY_TEXT
  439. TEXT_TEXT
  440. @@ -122,7 +122,7 @@ SECTIONS
  441. __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
  442. __start___ex_table = .;
  443. #ifdef CONFIG_MMU
  444. - *(__ex_table)
  445. + KEEP(*(__ex_table))
  446. #endif
  447. __stop___ex_table = .;
  448. }
  449. @@ -134,12 +134,12 @@ SECTIONS
  450. . = ALIGN(8);
  451. .ARM.unwind_idx : {
  452. __start_unwind_idx = .;
  453. - *(.ARM.exidx*)
  454. + KEEP(*(.ARM.exidx*))
  455. __stop_unwind_idx = .;
  456. }
  457. .ARM.unwind_tab : {
  458. __start_unwind_tab = .;
  459. - *(.ARM.extab*)
  460. + KEEP(*(.ARM.extab*))
  461. __stop_unwind_tab = .;
  462. }
  463. #endif
  464. @@ -158,14 +158,14 @@ SECTIONS
  465. */
  466. __vectors_start = .;
  467. .vectors 0 : AT(__vectors_start) {
  468. - *(.vectors)
  469. + KEEP(*(.vectors))
  470. }
  471. . = __vectors_start + SIZEOF(.vectors);
  472. __vectors_end = .;
  473. __stubs_start = .;
  474. .stubs 0x1000 : AT(__stubs_start) {
  475. - *(.stubs)
  476. + KEEP(*(.stubs))
  477. }
  478. . = __stubs_start + SIZEOF(.stubs);
  479. __stubs_end = .;
  480. @@ -179,24 +179,24 @@ SECTIONS
  481. }
  482. .init.arch.info : {
  483. __arch_info_begin = .;
  484. - *(.arch.info.init)
  485. + KEEP(*(.arch.info.init))
  486. __arch_info_end = .;
  487. }
  488. .init.tagtable : {
  489. __tagtable_begin = .;
  490. - *(.taglist.init)
  491. + KEEP(*(.taglist.init))
  492. __tagtable_end = .;
  493. }
  494. #ifdef CONFIG_SMP_ON_UP
  495. .init.smpalt : {
  496. __smpalt_begin = .;
  497. - *(.alt.smp.init)
  498. + KEEP(*(.alt.smp.init))
  499. __smpalt_end = .;
  500. }
  501. #endif
  502. .init.pv_table : {
  503. __pv_table_begin = .;
  504. - *(.pv_table)
  505. + KEEP(*(.pv_table))
  506. __pv_table_end = .;
  507. }
  508. .init.data : {
  509. --- a/arch/arm/boot/compressed/Makefile
  510. +++ b/arch/arm/boot/compressed/Makefile
  511. @@ -123,6 +123,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
  512. ORIG_CFLAGS := $(KBUILD_CFLAGS)
  513. KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
  514. endif
  515. +KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
  516. ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  517. asflags-y := -DZIMAGE