6fec563da4a97e156052f5bdb75d5968dc1603aa
[openwrt/openwrt.git] / package / boot / uboot-fritz4040 / patches / 100-private-libgcc.patch
1 --- a/arch/arm/lib/Makefile
2 +++ b/arch/arm/lib/Makefile
3 @@ -26,7 +26,6 @@ include $(TOPDIR)/config.mk
4 LIB = $(obj)lib$(ARCH).o
5 LIBGCC = $(obj)libgcc.o
6
7 -ifndef CONFIG_SPL_BUILD
8 GLSOBJS += _ashldi3.o
9 GLSOBJS += _ashrdi3.o
10 GLSOBJS += _divsi3.o
11 @@ -34,9 +33,11 @@ GLSOBJS += _lshrdi3.o
12 GLSOBJS += _modsi3.o
13 GLSOBJS += _udivsi3.o
14 GLSOBJS += _umodsi3.o
15 +GLSOBJS += uldivmod.o
16
17 GLCOBJS += div0.o
18
19 +ifndef CONFIG_SPL_BUILD
20 COBJS-y += board.o
21 COBJS-y += bootm.o
22 COBJS-$(CONFIG_SYS_L2_PL310) += cache-pl310.o
23 --- /dev/null
24 +++ b/arch/arm/lib/uldivmod.S
25 @@ -0,0 +1,249 @@
26 +/*
27 + * Copyright 2010, Google Inc.
28 + *
29 + * Brought in from coreboot uldivmod.S
30 + *
31 + * SPDX-License-Identifier: GPL-2.0
32 + */
33 +
34 +#include <linux/linkage.h>
35 +#include <asm/assembler.h>
36 +
37 +/*
38 + * A, Q = r0 + (r1 << 32)
39 + * B, R = r2 + (r3 << 32)
40 + * A / B = Q ... R
41 + */
42 +
43 +#define ARM(x...) x
44 +#define THUMB(x...)
45 +
46 +A_0 .req r0
47 +A_1 .req r1
48 +B_0 .req r2
49 +B_1 .req r3
50 +C_0 .req r4
51 +C_1 .req r5
52 +D_0 .req r6
53 +D_1 .req r7
54 +
55 +Q_0 .req r0
56 +Q_1 .req r1
57 +R_0 .req r2
58 +R_1 .req r3
59 +
60 +THUMB(
61 +TMP .req r8
62 +)
63 +
64 +.pushsection .text.__aeabi_uldivmod, "ax"
65 +ENTRY(__aeabi_uldivmod)
66 +
67 + stmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) lr}
68 + @ Test if B == 0
69 + orrs ip, B_0, B_1 @ Z set -> B == 0
70 + beq L_div_by_0
71 + @ Test if B is power of 2: (B & (B - 1)) == 0
72 + subs C_0, B_0, #1
73 + sbc C_1, B_1, #0
74 + tst C_0, B_0
75 + tsteq B_1, C_1
76 + beq L_pow2
77 + @ Test if A_1 == B_1 == 0
78 + orrs ip, A_1, B_1
79 + beq L_div_32_32
80 +
81 +L_div_64_64:
82 +/* CLZ only exists in ARM architecture version 5 and above. */
83 +#ifdef HAVE_CLZ
84 + mov C_0, #1
85 + mov C_1, #0
86 + @ D_0 = clz A
87 + teq A_1, #0
88 + clz D_0, A_1
89 + clzeq ip, A_0
90 + addeq D_0, D_0, ip
91 + @ D_1 = clz B
92 + teq B_1, #0
93 + clz D_1, B_1
94 + clzeq ip, B_0
95 + addeq D_1, D_1, ip
96 + @ if clz B - clz A > 0
97 + subs D_0, D_1, D_0
98 + bls L_done_shift
99 + @ B <<= (clz B - clz A)
100 + subs D_1, D_0, #32
101 + rsb ip, D_0, #32
102 + movmi B_1, B_1, lsl D_0
103 +ARM( orrmi B_1, B_1, B_0, lsr ip )
104 +THUMB( lsrmi TMP, B_0, ip )
105 +THUMB( orrmi B_1, B_1, TMP )
106 + movpl B_1, B_0, lsl D_1
107 + mov B_0, B_0, lsl D_0
108 + @ C = 1 << (clz B - clz A)
109 + movmi C_1, C_1, lsl D_0
110 +ARM( orrmi C_1, C_1, C_0, lsr ip )
111 +THUMB( lsrmi TMP, C_0, ip )
112 +THUMB( orrmi C_1, C_1, TMP )
113 + movpl C_1, C_0, lsl D_1
114 + mov C_0, C_0, lsl D_0
115 +L_done_shift:
116 + mov D_0, #0
117 + mov D_1, #0
118 + @ C: current bit; D: result
119 +#else
120 + @ C: current bit; D: result
121 + mov C_0, #1
122 + mov C_1, #0
123 + mov D_0, #0
124 + mov D_1, #0
125 +L_lsl_4:
126 + cmp B_1, #0x10000000
127 + cmpcc B_1, A_1
128 + cmpeq B_0, A_0
129 + bcs L_lsl_1
130 + @ B <<= 4
131 + mov B_1, B_1, lsl #4
132 + orr B_1, B_1, B_0, lsr #28
133 + mov B_0, B_0, lsl #4
134 + @ C <<= 4
135 + mov C_1, C_1, lsl #4
136 + orr C_1, C_1, C_0, lsr #28
137 + mov C_0, C_0, lsl #4
138 + b L_lsl_4
139 +L_lsl_1:
140 + cmp B_1, #0x80000000
141 + cmpcc B_1, A_1
142 + cmpeq B_0, A_0
143 + bcs L_subtract
144 + @ B <<= 1
145 + mov B_1, B_1, lsl #1
146 + orr B_1, B_1, B_0, lsr #31
147 + mov B_0, B_0, lsl #1
148 + @ C <<= 1
149 + mov C_1, C_1, lsl #1
150 + orr C_1, C_1, C_0, lsr #31
151 + mov C_0, C_0, lsl #1
152 + b L_lsl_1
153 +#endif
154 +L_subtract:
155 + @ if A >= B
156 + cmp A_1, B_1
157 + cmpeq A_0, B_0
158 + bcc L_update
159 + @ A -= B
160 + subs A_0, A_0, B_0
161 + sbc A_1, A_1, B_1
162 + @ D |= C
163 + orr D_0, D_0, C_0
164 + orr D_1, D_1, C_1
165 +L_update:
166 + @ if A == 0: break
167 + orrs ip, A_1, A_0
168 + beq L_exit
169 + @ C >>= 1
170 + movs C_1, C_1, lsr #1
171 + movs C_0, C_0, rrx
172 + @ if C == 0: break
173 + orrs ip, C_1, C_0
174 + beq L_exit
175 + @ B >>= 1
176 + movs B_1, B_1, lsr #1
177 + mov B_0, B_0, rrx
178 + b L_subtract
179 +L_exit:
180 + @ Note: A, B & Q, R are aliases
181 + mov R_0, A_0
182 + mov R_1, A_1
183 + mov Q_0, D_0
184 + mov Q_1, D_1
185 + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
186 +
187 +L_div_32_32:
188 + @ Note: A_0 & r0 are aliases
189 + @ Q_1 r1
190 + mov r1, B_0
191 + bl __aeabi_uidivmod
192 + mov R_0, r1
193 + mov R_1, #0
194 + mov Q_1, #0
195 + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
196 +
197 +L_pow2:
198 +#ifdef HAVE_CLZ
199 + @ Note: A, B and Q, R are aliases
200 + @ R = A & (B - 1)
201 + and C_0, A_0, C_0
202 + and C_1, A_1, C_1
203 + @ Q = A >> log2(B)
204 + @ Note: B must not be 0 here!
205 + clz D_0, B_0
206 + add D_1, D_0, #1
207 + rsbs D_0, D_0, #31
208 + bpl L_1
209 + clz D_0, B_1
210 + rsb D_0, D_0, #31
211 + mov A_0, A_1, lsr D_0
212 + add D_0, D_0, #32
213 +L_1:
214 + movpl A_0, A_0, lsr D_0
215 +ARM( orrpl A_0, A_0, A_1, lsl D_1 )
216 +THUMB( lslpl TMP, A_1, D_1 )
217 +THUMB( orrpl A_0, A_0, TMP )
218 + mov A_1, A_1, lsr D_0
219 +	@ Move C back to R
220 + mov R_0, C_0
221 + mov R_1, C_1
222 + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
223 +#else
224 + @ Note: A, B and Q, R are aliases
225 + @ R = A & (B - 1)
226 + and C_0, A_0, C_0
227 + and C_1, A_1, C_1
228 + @ Q = A >> log2(B)
229 + @ Note: B must not be 0 here!
230 + @ Count the leading zeroes in B.
231 + mov D_0, #0
232 + orrs B_0, B_0, B_0
233 + @ If B is greater than 1 << 31, divide A and B by 1 << 32.
234 + moveq A_0, A_1
235 + moveq A_1, #0
236 + moveq B_0, B_1
237 + @ Count the remaining leading zeroes in B.
238 + movs B_1, B_0, lsl #16
239 + addeq D_0, #16
240 + moveq B_0, B_0, lsr #16
241 + tst B_0, #0xff
242 + addeq D_0, #8
243 + moveq B_0, B_0, lsr #8
244 + tst B_0, #0xf
245 + addeq D_0, #4
246 + moveq B_0, B_0, lsr #4
247 + tst B_0, #0x3
248 + addeq D_0, #2
249 + moveq B_0, B_0, lsr #2
250 + tst B_0, #0x1
251 + addeq D_0, #1
252 + @ Shift A to the right by the appropriate amount.
253 + rsb D_1, D_0, #32
254 + mov Q_0, A_0, lsr D_0
255 + ARM( orr Q_0, Q_0, A_1, lsl D_1 )
256 + THUMB( lsl A_1, D_1 )
257 + THUMB( orr Q_0, A_1 )
258 + mov Q_1, A_1, lsr D_0
259 + @ Move C to R
260 + mov R_0, C_0
261 + mov R_1, C_1
262 + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
263 +#endif
264 +
265 +L_div_by_0:
266 + bl __div0
267 + @ As wrong as it could be
268 + mov Q_0, #0
269 + mov Q_1, #0
270 + mov R_0, #0
271 + mov R_1, #0
272 + ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
273 +ENDPROC(__aeabi_uldivmod)
274 +.popsection