220-optimize_inlining.patch
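Move CONFIG_OPTIMIZE_INLINING so that every architecture can let the compiler
uninline functions marked 'inline': the option is relocated from
arch/x86/Kconfig.debug to lib/Kconfig.debug, the x86-only
ARCH_SUPPORTS_OPTIMIZED_INLINING switch is dropped, and helpers that must not
be uninlined (or that are only referenced from __init code) are annotated
__always_inline or __init so they keep building once a plain 'inline' becomes
just a hint.

A minimal, self-contained sketch of why the __always_inline annotations are
needed (illustration only, not part of the patch; the helper name is made up):
an asm operand with an "i" (immediate) constraint compiles only when the value
is a compile-time constant, which is guaranteed only if the constant argument
is propagated into the helper by forced inlining. Build with optimization
enabled, e.g. gcc -O2.

	/* illustration only, not part of the patch */
	#define __always_inline inline __attribute__((always_inline))

	/*
	 * Hypothetical helper: 'ric' must reach the asm statement as an
	 * immediate. With OPTIMIZE_INLINING=y a plain 'inline' may be
	 * ignored, so force inlining, as the patch does for the real
	 * TLB and bitops helpers.
	 */
	static __always_inline void flush_with_ric(unsigned long ric)
	{
		asm volatile("nop" : : "i" (ric) : "memory");
	}

	int main(void)
	{
		flush_with_ric(2);	/* the constant reaches the "i" operand via inlining */
		return 0;
	}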

--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -5,7 +5,7 @@ void convert_to_tag_list(struct tag *tag
 const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
 	unsigned int machine_nr);
 #else
-static inline const struct machine_desc *
+static inline const struct machine_desc * __init __noreturn
 setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
 	early_print("no ATAGS support: can't continue\n");
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -366,7 +366,7 @@ static inline bool cpu_have_feature(unsi
 }
 
 /* System capability check for constant caps */
-static inline bool __cpus_have_const_cap(int num)
+static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -380,7 +380,7 @@ static inline bool cpus_have_cap(unsigne
 	return test_bit(num, cpu_hwcaps);
 }
 
-static inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)
 {
 	if (static_branch_likely(&arm64_const_caps_ready))
 		return __cpus_have_const_cap(num);
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -463,7 +463,7 @@ static inline void __clear_bit_unlock(un
  * Return the bit position (0..63) of the most significant 1 bit in a word
  * Returns -1 if no 1 bit exists
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
 	int num;
 
@@ -529,7 +529,7 @@ static inline unsigned long __fls(unsign
  * Returns 0..SZLONG-1
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
 	return __fls(word & -word);
 }
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -42,8 +42,8 @@ static inline void align_mod(const int a
 		: GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
 }
 
-static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-				     const int align, const int mod)
+static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
+					      const int align, const int mod)
 {
 	unsigned long flags;
 	int m1, m2;
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -498,14 +498,14 @@ static int __init prom_next_node(phandle
 	}
 }
 
-static inline int prom_getprop(phandle node, const char *pname,
-			       void *value, size_t valuelen)
+static inline int __init prom_getprop(phandle node, const char *pname,
+				      void *value, size_t valuelen)
 {
 	return call_prom("getprop", 4, 1, node, ADDR(pname),
 			 (u32)(unsigned long) value, (u32) valuelen);
 }
 
-static inline int prom_getproplen(phandle node, const char *pname)
+static inline int __init prom_getproplen(phandle node, const char *pname)
 {
 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
 }
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -90,8 +90,8 @@ void radix__tlbiel_all(unsigned int acti
 	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
-static inline void __tlbiel_pid(unsigned long pid, int set,
-				unsigned long ric)
+static __always_inline void __tlbiel_pid(unsigned long pid, int set,
+					 unsigned long ric)
 {
 	unsigned long rb,rs,prs,r;
 
@@ -106,7 +106,7 @@ static inline void __tlbiel_pid(unsigned
 	trace_tlbie(0, 1, rb, rs, ric, prs, r);
 }
 
-static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
 {
 	unsigned long rb,rs,prs,r;
 
@@ -136,7 +136,7 @@ static inline void __tlbiel_lpid(unsigne
 	trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
 }
 
-static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
 {
 	unsigned long rb,rs,prs,r;
 
@@ -239,7 +239,7 @@ static inline void fixup_tlbie_lpid(unsi
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
 	int set;
 
@@ -918,7 +918,7 @@ void radix__tlb_flush(struct mmu_gather
 	tlb->need_flush_all = 0;
 }
 
-static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 				unsigned long start, unsigned long end,
 				int psize, bool also_pwc)
 {
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -202,7 +202,7 @@ static inline int __cpacf_check_opcode(u
 	}
 }
 
-static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
 	if (__cpacf_check_opcode(opcode)) {
 		__cpacf_query(opcode, mask);
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -276,20 +276,6 @@ config CPA_DEBUG
 	---help---
 	  Do change_page_attr() self-tests every 30 seconds.
 
-config OPTIMIZE_INLINING
-	bool "Allow gcc to uninline functions marked 'inline'"
-	---help---
-	  This option determines if the kernel forces gcc to inline the functions
-	  developers have marked 'inline'. Doing so takes away freedom from gcc to
-	  do what it thinks is best, which is desirable for the gcc 3.x series of
-	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
-	  enabling this option will generate a smaller kernel there. Hopefully
-	  this algorithm is so good that allowing gcc 4.x and above to make the
-	  decision will become the default in the future. Until then this option
-	  is there to test gcc for this.
-
-	  If unsure, say N.
-
 config DEBUG_ENTRY
 	bool "Debug low-level entry code"
 	depends on DEBUG_KERNEL
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -373,7 +373,7 @@ static int vf610_nfc_cmd(struct nand_chi
 {
 	const struct nand_op_instr *instr;
 	struct vf610_nfc *nfc = chip_to_nfc(chip);
-	int op_id = -1, trfr_sz = 0, offset;
+	int op_id = -1, trfr_sz = 0, offset = 0;
 	u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
 	bool force8bit = false;
 
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -309,6 +309,20 @@ config HEADERS_CHECK
 	  exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
 	  your build tree), to make sure they're suitable.
 
+config OPTIMIZE_INLINING
+	bool "Allow compiler to uninline functions marked 'inline'"
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  enabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc 4.x and above to make the
+	  decision will become the default in the future. Until then this option
+	  is there to test gcc for this.
+
+	  If unsure, say N.
+
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
 	help
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -306,9 +306,6 @@ config ZONE_DMA32
 config AUDIT_ARCH
 	def_bool y if X86_64
 
-config ARCH_SUPPORTS_OPTIMIZED_INLINING
-	def_bool y
-
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -268,8 +268,7 @@ struct ftrace_likely_data {
  * of extern inline functions at link time.
  * A lot of inline functions can cause havoc with function tracing.
  */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
-	!defined(CONFIG_OPTIMIZE_INLINING)
+#if !defined(CONFIG_OPTIMIZE_INLINING)
 #define inline \
 	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else