100-private-libgcc.patch (5.3 KB)
  1. --- a/arch/arm/lib/Makefile
  2. +++ b/arch/arm/lib/Makefile
  3. @@ -26,7 +26,6 @@ include $(TOPDIR)/config.mk
  4. LIB = $(obj)lib$(ARCH).o
  5. LIBGCC = $(obj)libgcc.o
  6. -ifndef CONFIG_SPL_BUILD
  7. GLSOBJS += _ashldi3.o
  8. GLSOBJS += _ashrdi3.o
  9. GLSOBJS += _divsi3.o
  10. @@ -34,9 +33,11 @@ GLSOBJS += _lshrdi3.o
  11. GLSOBJS += _modsi3.o
  12. GLSOBJS += _udivsi3.o
  13. GLSOBJS += _umodsi3.o
  14. +GLSOBJS += uldivmod.o
  15. GLCOBJS += div0.o
  16. +ifndef CONFIG_SPL_BUILD
  17. COBJS-y += board.o
  18. COBJS-y += bootm.o
  19. COBJS-$(CONFIG_SYS_L2_PL310) += cache-pl310.o
  20. --- /dev/null
  21. +++ b/arch/arm/lib/uldivmod.S
  22. @@ -0,0 +1,249 @@
  23. +/*
  24. + * Copyright 2010, Google Inc.
  25. + *
  26. + * Brought in from coreboot uldivmod.S
  27. + *
  28. + * SPDX-License-Identifier: GPL-2.0
  29. + */
  30. +
  31. +#include <linux/linkage.h>
  32. +#include <asm/assembler.h>
  33. +
  34. +/*
  35. + * A, Q = r0 + (r1 << 32)
  36. + * B, R = r2 + (r3 << 32)
  37. + * A / B = Q ... R
  38. + */
  39. +
  40. +#define ARM(x...) x
  41. +#define THUMB(x...)
  42. +
  43. +A_0 .req r0
  44. +A_1 .req r1
  45. +B_0 .req r2
  46. +B_1 .req r3
  47. +C_0 .req r4
  48. +C_1 .req r5
  49. +D_0 .req r6
  50. +D_1 .req r7
  51. +
  52. +Q_0 .req r0
  53. +Q_1 .req r1
  54. +R_0 .req r2
  55. +R_1 .req r3
  56. +
  57. +THUMB(
  58. +TMP .req r8
  59. +)
  60. +
  61. +.pushsection .text.__aeabi_uldivmod, "ax"
  62. +ENTRY(__aeabi_uldivmod)
  63. +
  64. + stmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) lr}
  65. + @ Test if B == 0
  66. + orrs ip, B_0, B_1 @ Z set -> B == 0
  67. + beq L_div_by_0
  68. + @ Test if B is power of 2: (B & (B - 1)) == 0
  69. + subs C_0, B_0, #1
  70. + sbc C_1, B_1, #0
  71. + tst C_0, B_0
  72. + tsteq B_1, C_1
  73. + beq L_pow2
  74. + @ Test if A_1 == B_1 == 0
  75. + orrs ip, A_1, B_1
  76. + beq L_div_32_32
  77. +
  78. +L_div_64_64:
  79. +/* CLZ only exists in ARM architecture version 5 and above. */
  80. +#ifdef HAVE_CLZ
  81. + mov C_0, #1
  82. + mov C_1, #0
  83. + @ D_0 = clz A
  84. + teq A_1, #0
  85. + clz D_0, A_1
  86. + clzeq ip, A_0
  87. + addeq D_0, D_0, ip
  88. + @ D_1 = clz B
  89. + teq B_1, #0
  90. + clz D_1, B_1
  91. + clzeq ip, B_0
  92. + addeq D_1, D_1, ip
  93. + @ if clz B - clz A > 0
  94. + subs D_0, D_1, D_0
  95. + bls L_done_shift
  96. + @ B <<= (clz B - clz A)
  97. + subs D_1, D_0, #32
  98. + rsb ip, D_0, #32
  99. + movmi B_1, B_1, lsl D_0
  100. +ARM( orrmi B_1, B_1, B_0, lsr ip )
  101. +THUMB( lsrmi TMP, B_0, ip )
  102. +THUMB( orrmi B_1, B_1, TMP )
  103. + movpl B_1, B_0, lsl D_1
  104. + mov B_0, B_0, lsl D_0
  105. + @ C = 1 << (clz B - clz A)
  106. + movmi C_1, C_1, lsl D_0
  107. +ARM( orrmi C_1, C_1, C_0, lsr ip )
  108. +THUMB( lsrmi TMP, C_0, ip )
  109. +THUMB( orrmi C_1, C_1, TMP )
  110. + movpl C_1, C_0, lsl D_1
  111. + mov C_0, C_0, lsl D_0
  112. +L_done_shift:
  113. + mov D_0, #0
  114. + mov D_1, #0
  115. + @ C: current bit; D: result
  116. +#else
  117. + @ C: current bit; D: result
  118. + mov C_0, #1
  119. + mov C_1, #0
  120. + mov D_0, #0
  121. + mov D_1, #0
  122. +L_lsl_4:
  123. + cmp B_1, #0x10000000
  124. + cmpcc B_1, A_1
  125. + cmpeq B_0, A_0
  126. + bcs L_lsl_1
  127. + @ B <<= 4
  128. + mov B_1, B_1, lsl #4
  129. + orr B_1, B_1, B_0, lsr #28
  130. + mov B_0, B_0, lsl #4
  131. + @ C <<= 4
  132. + mov C_1, C_1, lsl #4
  133. + orr C_1, C_1, C_0, lsr #28
  134. + mov C_0, C_0, lsl #4
  135. + b L_lsl_4
  136. +L_lsl_1:
  137. + cmp B_1, #0x80000000
  138. + cmpcc B_1, A_1
  139. + cmpeq B_0, A_0
  140. + bcs L_subtract
  141. + @ B <<= 1
  142. + mov B_1, B_1, lsl #1
  143. + orr B_1, B_1, B_0, lsr #31
  144. + mov B_0, B_0, lsl #1
  145. + @ C <<= 1
  146. + mov C_1, C_1, lsl #1
  147. + orr C_1, C_1, C_0, lsr #31
  148. + mov C_0, C_0, lsl #1
  149. + b L_lsl_1
  150. +#endif
  151. +L_subtract:
  152. + @ if A >= B
  153. + cmp A_1, B_1
  154. + cmpeq A_0, B_0
  155. + bcc L_update
  156. + @ A -= B
  157. + subs A_0, A_0, B_0
  158. + sbc A_1, A_1, B_1
  159. + @ D |= C
  160. + orr D_0, D_0, C_0
  161. + orr D_1, D_1, C_1
  162. +L_update:
  163. + @ if A == 0: break
  164. + orrs ip, A_1, A_0
  165. + beq L_exit
  166. + @ C >>= 1
  167. + movs C_1, C_1, lsr #1
  168. + movs C_0, C_0, rrx
  169. + @ if C == 0: break
  170. + orrs ip, C_1, C_0
  171. + beq L_exit
  172. + @ B >>= 1
  173. + movs B_1, B_1, lsr #1
  174. + mov B_0, B_0, rrx
  175. + b L_subtract
  176. +L_exit:
  177. + @ Note: A, B & Q, R are aliases
  178. + mov R_0, A_0
  179. + mov R_1, A_1
  180. + mov Q_0, D_0
  181. + mov Q_1, D_1
  182. + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
  183. +
  184. +L_div_32_32:
  185. + @ Note: A_0 & r0 are aliases
  186. + @ Q_1 r1
  187. + mov r1, B_0
  188. + bl __aeabi_uidivmod
  189. + mov R_0, r1
  190. + mov R_1, #0
  191. + mov Q_1, #0
  192. + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
  193. +
  194. +L_pow2:
  195. +#ifdef HAVE_CLZ
  196. + @ Note: A, B and Q, R are aliases
  197. + @ R = A & (B - 1)
  198. + and C_0, A_0, C_0
  199. + and C_1, A_1, C_1
  200. + @ Q = A >> log2(B)
  201. + @ Note: B must not be 0 here!
  202. + clz D_0, B_0
  203. + add D_1, D_0, #1
  204. + rsbs D_0, D_0, #31
  205. + bpl L_1
  206. + clz D_0, B_1
  207. + rsb D_0, D_0, #31
  208. + mov A_0, A_1, lsr D_0
  209. + add D_0, D_0, #32
  210. +L_1:
  211. + movpl A_0, A_0, lsr D_0
  212. +ARM( orrpl A_0, A_0, A_1, lsl D_1 )
  213. +THUMB( lslpl TMP, A_1, D_1 )
  214. +THUMB( orrpl A_0, A_0, TMP )
  215. + mov A_1, A_1, lsr D_0
  216. + @ Move C back to R
  217. + mov R_0, C_0
  218. + mov R_1, C_1
  219. + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
  220. +#else
  221. + @ Note: A, B and Q, R are aliases
  222. + @ R = A & (B - 1)
  223. + and C_0, A_0, C_0
  224. + and C_1, A_1, C_1
  225. + @ Q = A >> log2(B)
  226. + @ Note: B must not be 0 here!
  227. + @ Count the leading zeroes in B.
  228. + mov D_0, #0
  229. + orrs B_0, B_0, B_0
  230. + @ If B is greater than 1 << 31, divide A and B by 1 << 32.
  231. + moveq A_0, A_1
  232. + moveq A_1, #0
  233. + moveq B_0, B_1
  234. + @ Count the remaining leading zeroes in B.
  235. + movs B_1, B_0, lsl #16
  236. + addeq D_0, #16
  237. + moveq B_0, B_0, lsr #16
  238. + tst B_0, #0xff
  239. + addeq D_0, #8
  240. + moveq B_0, B_0, lsr #8
  241. + tst B_0, #0xf
  242. + addeq D_0, #4
  243. + moveq B_0, B_0, lsr #4
  244. + tst B_0, #0x3
  245. + addeq D_0, #2
  246. + moveq B_0, B_0, lsr #2
  247. + tst B_0, #0x1
  248. + addeq D_0, #1
  249. + @ Shift A to the right by the appropriate amount.
  250. + rsb D_1, D_0, #32
  251. + mov Q_0, A_0, lsr D_0
  252. + ARM( orr Q_0, Q_0, A_1, lsl D_1 )
  253. + THUMB( lsl A_1, D_1 )
  254. + THUMB( orr Q_0, A_1 )
  255. + mov Q_1, A_1, lsr D_0
  256. + @ Move C to R
  257. + mov R_0, C_0
  258. + mov R_1, C_1
  259. + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
  260. +#endif
  261. +
  262. +L_div_by_0:
  263. + bl __div0
  264. + @ As wrong as it could be
  265. + mov Q_0, #0
  266. + mov Q_1, #0
  267. + mov R_0, #0
  268. + mov R_1, #0
  269. + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
  270. +ENDPROC(__aeabi_uldivmod)
  271. +.popsection