220-gc_sections.patch (14 KB)
From e3d8676f5722b7622685581e06e8f53e6138e3ab Mon Sep 17 00:00:00 2001
From: Felix Fietkau <[email protected]>
Date: Sat, 15 Jul 2017 23:42:36 +0200
Subject: use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.

Signed-off-by: Felix Fietkau <[email protected]>
Signed-off-by: Jonas Gorski <[email protected]>
Signed-off-by: Gabor Juhos <[email protected]>
---
 Makefile                          | 10 +++----
 arch/arm/Kconfig                  |  1 +
 arch/arm/boot/compressed/Makefile |  1 +
 arch/arm/kernel/vmlinux.lds.S     | 26 ++++++++--------
 arch/mips/Kconfig                 |  1 +
 arch/mips/kernel/vmlinux.lds.S    |  4 +--
 include/asm-generic/vmlinux.lds.h | 63 ++++++++++++++++++++-------------------
 7 files changed, 55 insertions(+), 51 deletions(-)
--- a/Makefile
+++ b/Makefile
@@ -406,6 +406,11 @@ KBUILD_CFLAGS_MODULE := -DMODULE
 KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
 GCC_PLUGINS_CFLAGS :=
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,)
+endif
+
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
@@ -636,11 +641,6 @@ KBUILD_CFLAGS += $(call cc-disable-warni
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
 
-ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
-KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
-endif
-
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
 else
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -81,6 +81,7 @@ config ARM
 select HAVE_UID16
 select HAVE_VIRT_CPU_ACCOUNTING_GEN
 select IRQ_FORCED_THREADING
+select LD_DEAD_CODE_DATA_ELIMINATION
 select MODULES_USE_ELF_REL
 select NO_BOOTMEM
 select OF_EARLY_FLATTREE if OF
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -102,6 +102,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -17,7 +17,7 @@
 #define PROC_INFO \
 . = ALIGN(4); \
 VMLINUX_SYMBOL(__proc_info_begin) = .; \
-*(.proc.info.init) \
+KEEP(*(.proc.info.init)) \
 VMLINUX_SYMBOL(__proc_info_end) = .;
 
 #define HYPERVISOR_TEXT \
@@ -28,11 +28,11 @@
 #define IDMAP_TEXT \
 ALIGN_FUNCTION(); \
 VMLINUX_SYMBOL(__idmap_text_start) = .; \
-*(.idmap.text) \
+KEEP(*(.idmap.text)) \
 VMLINUX_SYMBOL(__idmap_text_end) = .; \
 . = ALIGN(PAGE_SIZE); \
 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
-*(.hyp.idmap.text) \
+KEEP(*(.hyp.idmap.text)) \
 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -105,7 +105,7 @@ SECTIONS
 _stext = .; /* Text and read-only data */
 IDMAP_TEXT
 __exception_text_start = .;
-*(.exception.text)
+KEEP(*(.exception.text))
 __exception_text_end = .;
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
@@ -134,7 +134,7 @@ SECTIONS
 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
 __start___ex_table = .;
 #ifdef CONFIG_MMU
-*(__ex_table)
+KEEP(*(__ex_table))
 #endif
 __stop___ex_table = .;
 }
@@ -146,12 +146,12 @@ SECTIONS
 . = ALIGN(8);
 .ARM.unwind_idx : {
 __start_unwind_idx = .;
-*(.ARM.exidx*)
+KEEP(*(.ARM.exidx*))
 __stop_unwind_idx = .;
 }
 .ARM.unwind_tab : {
 __start_unwind_tab = .;
-*(.ARM.extab*)
+KEEP(*(.ARM.extab*))
 __stop_unwind_tab = .;
 }
 #endif
@@ -171,14 +171,14 @@ SECTIONS
 */
 __vectors_start = .;
 .vectors 0xffff0000 : AT(__vectors_start) {
-*(.vectors)
+KEEP(*(.vectors))
 }
 . = __vectors_start + SIZEOF(.vectors);
 __vectors_end = .;
 
 __stubs_start = .;
 .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
-*(.stubs)
+KEEP(*(.stubs))
 }
 . = __stubs_start + SIZEOF(.stubs);
 __stubs_end = .;
@@ -194,24 +194,24 @@ SECTIONS
 }
 .init.arch.info : {
 __arch_info_begin = .;
-*(.arch.info.init)
+KEEP(*(.arch.info.init))
 __arch_info_end = .;
 }
 .init.tagtable : {
 __tagtable_begin = .;
-*(.taglist.init)
+KEEP(*(.taglist.init))
 __tagtable_end = .;
 }
 #ifdef CONFIG_SMP_ON_UP
 .init.smpalt : {
 __smpalt_begin = .;
-*(.alt.smp.init)
+KEEP(*(.alt.smp.init))
 __smpalt_end = .;
 }
 #endif
 .init.pv_table : {
 __pv_table_begin = .;
-*(.pv_table)
+KEEP(*(.pv_table))
 __pv_table_end = .;
 }
 .init.data : {
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -55,6 +55,7 @@ config MIPS
 select CLONE_BACKWARDS
 select HAVE_DEBUG_STACKOVERFLOW
 select HAVE_CC_STACKPROTECTOR
+select LD_DEAD_CODE_DATA_ELIMINATION
 select CPU_PM if CPU_IDLE
 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 select ARCH_BINFMT_ELF_STATE
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -71,7 +71,7 @@ SECTIONS
 /* Exception table for data bus errors */
 __dbe_table : {
 __start___dbe_table = .;
-*(__dbe_table)
+KEEP(*(__dbe_table))
 __stop___dbe_table = .;
 }
 
@@ -121,7 +121,7 @@ SECTIONS
 . = ALIGN(4);
 .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
 __mips_machines_start = .;
-*(.mips.machines.init)
+KEEP(*(.mips.machines.init))
 __mips_machines_end = .;
 }
 
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -105,7 +105,7 @@
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 #define MCOUNT_REC() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
-*(__mcount_loc) \
+KEEP(*(__mcount_loc)) \
 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
 #else
 #define MCOUNT_REC()
@@ -113,7 +113,7 @@
 
 #ifdef CONFIG_TRACE_BRANCH_PROFILING
 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
-*(_ftrace_annotated_branch) \
+KEEP(*(_ftrace_annotated_branch)) \
 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
 #else
 #define LIKELY_PROFILE()
@@ -121,7 +121,7 @@
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
-*(_ftrace_branch) \
+KEEP(*(_ftrace_branch)) \
 VMLINUX_SYMBOL(__stop_branch_profile) = .;
 #else
 #define BRANCH_PROFILE()
@@ -130,7 +130,7 @@
 #ifdef CONFIG_KPROBES
 #define KPROBE_BLACKLIST() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
-*(_kprobe_blacklist) \
+KEEP(*(_kprobe_blacklist)) \
 VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
 #else
 #define KPROBE_BLACKLIST()
@@ -139,10 +139,10 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
-*(_ftrace_events) \
+KEEP(*(_ftrace_events)) \
 VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
 VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
-*(_ftrace_enum_map) \
+KEEP(*(_ftrace_enum_map)) \
 VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
@@ -163,7 +163,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
-*(__syscalls_metadata) \
+KEEP(*(__syscalls_metadata)) \
 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -172,7 +172,7 @@
 #ifdef CONFIG_SERIAL_EARLYCON
 #define EARLYCON_TABLE() . = ALIGN(8); \
 VMLINUX_SYMBOL(__earlycon_table) = .; \
-*(__earlycon_table) \
+KEEP(*(__earlycon_table)) \
 VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
@@ -185,8 +185,8 @@
 #define _OF_TABLE_1(name) \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__##name##_of_table) = .; \
-*(__##name##_of_table) \
-*(__##name##_of_table_end)
+KEEP(*(__##name##_of_table)) \
+KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -209,7 +209,7 @@
 #define KERNEL_DTB() \
 STRUCT_ALIGN(); \
 VMLINUX_SYMBOL(__dtb_start) = .; \
-*(.dtb.init.rodata) \
+KEEP(*(.dtb.init.rodata)) \
 VMLINUX_SYMBOL(__dtb_end) = .;
 
 /*
@@ -227,16 +227,17 @@
 /* implement dynamic printk debug */ \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___jump_table) = .; \
-*(__jump_table) \
+KEEP(*(__jump_table)) \
 VMLINUX_SYMBOL(__stop___jump_table) = .; \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___verbose) = .; \
-*(__verbose) \
+KEEP(*(__verbose)) \
 VMLINUX_SYMBOL(__stop___verbose) = .; \
 LIKELY_PROFILE() \
 BRANCH_PROFILE() \
 TRACE_PRINTKS() \
-TRACEPOINT_STR()
+TRACEPOINT_STR() \
+*(.data.[a-zA-Z_]*)
 
 /*
 * Data section helpers
@@ -304,35 +305,35 @@
 /* PCI quirks */ \
 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
-*(.pci_fixup_early) \
+KEEP(*(.pci_fixup_early)) \
 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
-*(.pci_fixup_header) \
+KEEP(*(.pci_fixup_header)) \
 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
-*(.pci_fixup_final) \
+KEEP(*(.pci_fixup_final)) \
 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
-*(.pci_fixup_enable) \
+KEEP(*(.pci_fixup_enable)) \
 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
-*(.pci_fixup_resume) \
+KEEP(*(.pci_fixup_resume)) \
 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
-*(.pci_fixup_resume_early) \
+KEEP(*(.pci_fixup_resume_early)) \
 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
-*(.pci_fixup_suspend) \
+KEEP(*(.pci_fixup_suspend)) \
 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
 VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
-*(.pci_fixup_suspend_late) \
+KEEP(*(.pci_fixup_suspend_late)) \
 VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
 } \
 \
 /* Built-in firmware blobs */ \
 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
-*(.builtin_fw) \
+KEEP(*(.builtin_fw)) \
 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
 } \
 \
@@ -410,7 +411,7 @@
 \
 /* Kernel symbol table: strings */ \
 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
-KEEP(*(__ksymtab_strings)) \
+*(__ksymtab_strings) \
 } \
 \
 /* __*init sections */ \
@@ -423,14 +424,14 @@
 /* Built-in module parameters. */ \
 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___param) = .; \
-*(__param) \
+KEEP(*(__param)) \
 VMLINUX_SYMBOL(__stop___param) = .; \
 } \
 \
 /* Built-in module versions. */ \
 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___modver) = .; \
-*(__modver) \
+KEEP(*(__modver)) \
 VMLINUX_SYMBOL(__stop___modver) = .; \
 . = ALIGN((align)); \
 VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -496,7 +497,7 @@
 #define ENTRY_TEXT \
 ALIGN_FUNCTION(); \
 VMLINUX_SYMBOL(__entry_text_start) = .; \
-*(.entry.text) \
+KEEP(*(.entry.text)) \
 VMLINUX_SYMBOL(__entry_text_end) = .;
 
 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
@@ -534,7 +535,7 @@
 . = ALIGN(align); \
 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ex_table) = .; \
-*(__ex_table) \
+KEEP(*(__ex_table)) \
 VMLINUX_SYMBOL(__stop___ex_table) = .; \
 }
 
@@ -550,9 +551,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS() . = ALIGN(8); \
 VMLINUX_SYMBOL(__ctors_start) = .; \
-*(.ctors) \
+KEEP(*(.ctors)) \
 *(SORT(.init_array.*)) \
-*(.init_array) \
+KEEP(*(.init_array)) \
 VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -609,7 +610,7 @@
 #define SBSS(sbss_align) \
 . = ALIGN(sbss_align); \
 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
-*(.sbss) \
+*(.sbss .sbss.*) \
 *(.scommon) \
 }
 
@@ -676,7 +677,7 @@
 . = ALIGN(8); \
 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___bug_table) = .; \
-*(__bug_table) \
+KEEP(*(__bug_table)) \
 VMLINUX_SYMBOL(__stop___bug_table) = .; \
 }
 #else
@@ -688,7 +689,7 @@
 . = ALIGN(4); \
 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__tracedata_start) = .; \
-*(.tracedata) \
+KEEP(*(.tracedata)) \
 VMLINUX_SYMBOL(__tracedata_end) = .; \
 }
 #else
@@ -705,7 +706,7 @@
 #define INIT_SETUP(initsetup_align) \
 . = ALIGN(initsetup_align); \
 VMLINUX_SYMBOL(__setup_start) = .; \
-*(.init.setup) \
+KEEP(*(.init.setup)) \
 VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level) \