220-gc_sections.patch

From: Felix Fietkau <[email protected]>

use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping, this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.

Signed-off-by: Felix Fietkau <[email protected]>
Signed-off-by: Jonas Gorski <[email protected]>
Signed-off-by: Gabor Juhos <[email protected]>
---
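Why KEEP() everywhere: most of the tables touched below (__param, __ex_table, the PCI fixup tables, setup/initcall data, ...) are never referenced directly from C code; they are only walked at runtime through the __start_*/__stop_* bounds symbols defined in the linker script. With --gc-sections the linker would therefore see every entry as unreferenced and discard it, so each input section consumed this way has to be wrapped in KEEP(). A minimal sketch of the pattern, with made-up names (my_entry, MY_ENTRY, __my_table) purely for illustration:

  /* Hypothetical linker-table entry. __attribute__((used)) stops the
   * compiler from dropping it, but with --gc-sections the linker will
   * still discard the whole input section unless the linker script
   * wraps it in KEEP(*(__my_table)).
   */
  struct my_entry {
          const char *name;
          int (*init)(void);
  };

  #define MY_ENTRY(n, fn) \
          static const struct my_entry __my_entry_##n \
          __attribute__((used, section("__my_table"))) = { #n, fn }

  /* The only consumers are the bounds symbols emitted by the linker
   * script, e.g.:
   *
   *     __start___my_table = .;
   *     KEEP(*(__my_table))
   *     __stop___my_table = .;
   */
  extern const struct my_entry __start___my_table[];
  extern const struct my_entry __stop___my_table[];

  static void run_my_table(void)
  {
          const struct my_entry *e;

          for (e = __start___my_table; e < __stop___my_table; e++)
                  e->init();
  }

The one hunk that goes the other way, __ksymtab_strings, drops KEEP(): each name string there is referenced from its __ksymtab entry, so anything still exported keeps its string alive, while strings belonging to exports removed by the symbol stripping can now be garbage collected instead of being pinned by the linker script — presumably the point of combining this with the export stripping mentioned above.
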
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -71,7 +71,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -121,7 +121,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- *(_ftrace_enum_map) \
+ KEEP(*(_ftrace_enum_map)) \
VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
@@ -147,7 +147,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -156,7 +156,7 @@
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
- *(__earlycon_table) \
+ KEEP(*(__earlycon_table)) \
VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
@@ -169,8 +169,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -193,7 +193,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/*
@@ -214,16 +214,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -291,35 +292,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -397,7 +398,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- KEEP(*(__ksymtab_strings)) \
+ *(__ksymtab_strings) \
} \
\
/* __*init sections */ \
@@ -410,14 +411,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -482,7 +483,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
@@ -520,7 +521,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -536,9 +537,9 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
*(SORT(.init_array.*)) \
- *(.init_array) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -595,7 +596,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -662,7 +663,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -674,7 +675,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -691,7 +692,7 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -17,7 +17,7 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define HYPERVISOR_TEXT \
@@ -28,11 +28,11 @@
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
+ KEEP(*(.hyp.idmap.text)) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
#ifdef CONFIG_HOTPLUG_CPU
@@ -105,7 +105,7 @@ SECTIONS
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -134,7 +134,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -146,12 +146,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -171,14 +171,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -194,24 +194,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -102,6 +102,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
# -fstack-protector-strong triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -81,6 +81,7 @@ config ARM
select HAVE_UID16
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
+ select LD_DEAD_CODE_DATA_ELIMINATION
select MODULES_USE_ELF_REL
select NO_BOOTMEM
select OF_EARLY_FLATTREE if OF
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -55,6 +55,7 @@ config MIPS
select CLONE_BACKWARDS
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_CC_STACKPROTECTOR
+ select LD_DEAD_CODE_DATA_ELIMINATION
select CPU_PM if CPU_IDLE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_BINFMT_ELF_STATE
--- a/Makefile
+++ b/Makefile
@@ -409,6 +409,11 @@ KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,)
+endif
+
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
@@ -630,11 +635,6 @@ include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
-ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
-KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
-endif
-
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
else