author     Khem Raj <raj.khem@gmail.com>    2019-01-29 13:20:51 -0800
committer  Khem Raj <raj.khem@gmail.com>    2019-01-30 13:34:49 -0800
commit     6fd901de5a59c660a7e9e6d9abec000c7b849b3c (patch)
tree       69153c0695d82e8fce494347adb7d7704dcafb34
parent     21e3bb57ff6fbe5142ad5920430fe02951e343fc (diff)
download   meta-openembedded-6fd901de5a59c660a7e9e6d9abec000c7b849b3c.tar.gz
tinymembench: Fix build with clang assembler
Signed-off-by: Khem Raj <raj.khem@gmail.com>
-rw-r--r--  meta-oe/recipes-benchmark/tinymembench/tinymembench/0001-asm-Delete-.func-.endfunc-directives.patch  444
-rw-r--r--  meta-oe/recipes-benchmark/tinymembench/tinymembench_git.bb  4
2 files changed, 447 insertions(+), 1 deletion(-)
diff --git a/meta-oe/recipes-benchmark/tinymembench/tinymembench/0001-asm-Delete-.func-.endfunc-directives.patch b/meta-oe/recipes-benchmark/tinymembench/tinymembench/0001-asm-Delete-.func-.endfunc-directives.patch
new file mode 100644
index 0000000000..c090700763
--- /dev/null
+++ b/meta-oe/recipes-benchmark/tinymembench/tinymembench/0001-asm-Delete-.func-.endfunc-directives.patch
@@ -0,0 +1,444 @@
From b0a64ddebb517a1678c44d9baf24d8bbe39d02cd Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Tue, 29 Jan 2019 13:15:07 -0800
Subject: [PATCH] asm: Delete .func/.endfunc directives

These are useful only with stabs debug format, which is not used on
linux systems, gas ignores them silently, but clang assembler does not
and rightly so.

Signed-off-by: Khem Raj <raj.khem@gmail.com>
---
 aarch64-asm.S | 14 +-------------
 arm-neon.S    | 24 ------------------------
 mips-32.S     |  5 ++---
 x86-sse2.S    | 21 ++++++++++-----------
 4 files changed, 13 insertions(+), 51 deletions(-)

diff --git a/aarch64-asm.S b/aarch64-asm.S
index 842b9e2..165c8ac 100644
--- a/aarch64-asm.S
+++ b/aarch64-asm.S
@@ -31,8 +31,7 @@

.macro asm_function function_name
.global \function_name
- .type \function_name,%function
-.func \function_name
+ .type \function_name,%function
\function_name:
DST .req x0
SRC .req x1
@@ -54,7 +53,6 @@ asm_function aligned_block_copy_ldpstp_x_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ldpstp_q_aarch64
0:
@@ -67,7 +65,6 @@ asm_function aligned_block_copy_ldpstp_q_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ldpstp_q_pf32_l2strm_aarch64
0:
@@ -82,7 +79,6 @@ asm_function aligned_block_copy_ldpstp_q_pf32_l2strm_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ldpstp_q_pf64_l2strm_aarch64
0:
@@ -96,7 +92,6 @@ asm_function aligned_block_copy_ldpstp_q_pf64_l2strm_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ldpstp_q_pf32_l1keep_aarch64
0:
@@ -111,7 +106,6 @@ asm_function aligned_block_copy_ldpstp_q_pf32_l1keep_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ldpstp_q_pf64_l1keep_aarch64
0:
@@ -125,7 +119,6 @@ asm_function aligned_block_copy_ldpstp_q_pf64_l1keep_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_fill_stp_x_aarch64
0:
@@ -137,7 +130,6 @@ asm_function aligned_block_fill_stp_x_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_fill_stp_q_aarch64
0:
@@ -147,7 +139,6 @@ asm_function aligned_block_fill_stp_q_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_fill_stnp_x_aarch64
0:
@@ -159,7 +150,6 @@ asm_function aligned_block_fill_stnp_x_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_fill_stnp_q_aarch64
0:
@@ -169,7 +159,6 @@ asm_function aligned_block_fill_stnp_q_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

asm_function aligned_block_copy_ld1st1_aarch64
0:
@@ -180,6 +169,5 @@ asm_function aligned_block_copy_ld1st1_aarch64
subs SIZE, SIZE, #64
bgt 0b
ret
-.endfunc

#endif
diff --git a/arm-neon.S b/arm-neon.S
index 4db78ce..9631d82 100644
--- a/arm-neon.S
+++ b/arm-neon.S
@@ -32,7 +32,6 @@

.macro asm_function function_name
.global \function_name
-.func \function_name
\function_name:
DST .req r0
SRC .req r1
@@ -66,7 +65,6 @@ asm_function aligned_block_read_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

/* Actually this calculates a sum of 32-bit values */
asm_function aligned_block_read_pf32_neon
@@ -97,7 +95,6 @@ asm_function aligned_block_read_pf32_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

/* Actually this calculates a sum of 32-bit values */
asm_function aligned_block_read_pf64_neon
@@ -127,7 +124,6 @@ asm_function aligned_block_read_pf64_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

/* Actually this calculates a sum of 32-bit values */
asm_function aligned_block_read2_neon
@@ -156,7 +152,6 @@ asm_function aligned_block_read2_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

/* Actually this calculates a sum of 32-bit values */
asm_function aligned_block_read2_pf32_neon
@@ -187,7 +182,6 @@ asm_function aligned_block_read2_pf32_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

/* Actually this calculates a sum of 32-bit values */
asm_function aligned_block_read2_pf64_neon
@@ -217,7 +211,6 @@ asm_function aligned_block_read2_pf64_neon
vpadd.u32 d31, d31, d31
vmov.u32 r0, d31[0]
bx lr
-.endfunc

asm_function aligned_block_copy_neon
0:
@@ -226,7 +219,6 @@ asm_function aligned_block_copy_neon
subs SIZE, SIZE, #32
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_copy_unrolled_neon
vpush {d8-d15}
@@ -244,7 +236,6 @@ asm_function aligned_block_copy_unrolled_neon
bgt 0b
vpop {d8-d15}
bx lr
-.endfunc

asm_function aligned_block_copy_pf32_neon
0:
@@ -254,7 +245,6 @@ asm_function aligned_block_copy_pf32_neon
subs SIZE, SIZE, #32
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_copy_unrolled_pf32_neon
vpush {d8-d15}
@@ -280,7 +270,6 @@ asm_function aligned_block_copy_unrolled_pf32_neon
bgt 0b
vpop {d8-d15}
bx lr
-.endfunc

asm_function aligned_block_copy_pf64_neon
0:
@@ -292,7 +281,6 @@ asm_function aligned_block_copy_pf64_neon
subs SIZE, SIZE, #64
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_copy_unrolled_pf64_neon
vpush {d8-d15}
@@ -314,7 +302,6 @@ asm_function aligned_block_copy_unrolled_pf64_neon
bgt 0b
vpop {d8-d15}
bx lr
-.endfunc

asm_function aligned_block_copy_backwards_neon
add SRC, SRC, SIZE
@@ -328,7 +315,6 @@ asm_function aligned_block_copy_backwards_neon
subs SIZE, SIZE, #32
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_copy_backwards_pf32_neon
add SRC, SRC, SIZE
@@ -343,7 +329,6 @@ asm_function aligned_block_copy_backwards_pf32_neon
subs SIZE, SIZE, #32
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_copy_backwards_pf64_neon
add SRC, SRC, SIZE
@@ -360,7 +345,6 @@ asm_function aligned_block_copy_backwards_pf64_neon
subs SIZE, SIZE, #64
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_fill_neon
vld1.8 {d0, d1, d2, d3}, [SRC]!
@@ -370,7 +354,6 @@ asm_function aligned_block_fill_neon
subs SIZE, SIZE, #64
bgt 0b
bx lr
-.endfunc

asm_function aligned_block_fill_backwards_neon
add SRC, SRC, SIZE
@@ -383,7 +366,6 @@ asm_function aligned_block_fill_backwards_neon
subs SIZE, SIZE, #32
bgt 0b
bx lr
-.endfunc

/* some code for older ARM processors */

@@ -398,7 +380,6 @@ asm_function aligned_block_fill_stm4_armv4
subs SIZE, SIZE, #64
bgt 0b
pop {r4-r12, pc}
-.endfunc

asm_function aligned_block_fill_stm8_armv4
push {r4-r12, lr}
@@ -409,7 +390,6 @@ asm_function aligned_block_fill_stm8_armv4
subs SIZE, SIZE, #64
bgt 0b
pop {r4-r12, pc}
-.endfunc

asm_function aligned_block_fill_strd_armv5te
push {r4-r12, lr}
@@ -426,7 +406,6 @@ asm_function aligned_block_fill_strd_armv5te
subs SIZE, SIZE, #64
bgt 0b
pop {r4-r12, pc}
-.endfunc

asm_function aligned_block_copy_incr_armv5te
push {r4-r12, lr}
@@ -442,7 +421,6 @@ asm_function aligned_block_copy_incr_armv5te
stmia DST!, {r8-r11}
bgt 0b
pop {r4-r12, pc}
-.endfunc

asm_function aligned_block_copy_wrap_armv5te
push {r4-r12, lr}
@@ -458,7 +436,6 @@ asm_function aligned_block_copy_wrap_armv5te
stmia DST!, {r8-r11}
bgt 0b
pop {r4-r12, pc}
-.endfunc

asm_function aligned_block_copy_vfp
push {r4-r12, lr}
@@ -470,6 +447,5 @@ asm_function aligned_block_copy_vfp
bgt 0b
vpop {d8-d15}
pop {r4-r12, pc}
-.endfunc

#endif
diff --git a/mips-32.S b/mips-32.S
index 17b2b7f..4f7ddae 100644
--- a/mips-32.S
+++ b/mips-32.S
@@ -32,7 +32,6 @@
.macro asm_function function_name
.global \function_name
.type \function_name, @function
- .func \function_name
\function_name:
.endm

@@ -93,7 +92,7 @@ asm_function aligned_block_fill_pf32_mips32
2:
jr $ra
nop
-.endfunc
+

/*
* void aligned_block_copy_pf32_mips32(int64_t *dst, int64_t *src, int size)
@@ -178,6 +177,6 @@ asm_function aligned_block_copy_pf32_mips32
lw $s7, 28($sp)
jr $ra
addi $sp, $sp, 32
-.endfunc
+

#endif
diff --git a/x86-sse2.S b/x86-sse2.S
index d8840e4..409031b 100644
--- a/x86-sse2.S
+++ b/x86-sse2.S
@@ -30,7 +30,6 @@

.macro asm_function_helper function_name
.global \function_name
-.func \function_name
\function_name:
#ifdef __amd64__
#ifdef _WIN64
@@ -90,7 +89,7 @@ asm_function aligned_block_copy_movsb
pop3 edi esi ecx
#endif
ret
-.endfunc
+

asm_function aligned_block_copy_movsd
0:
@@ -110,7 +109,7 @@ asm_function aligned_block_copy_movsd
pop3 edi esi ecx
#endif
ret
-.endfunc
+

asm_function aligned_block_copy_sse2
0:
@@ -127,7 +126,7 @@ asm_function aligned_block_copy_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_copy_nt_sse2
0:
@@ -144,7 +143,7 @@ asm_function aligned_block_copy_nt_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_copy_pf32_sse2
0:
@@ -163,7 +162,7 @@ asm_function aligned_block_copy_pf32_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_copy_nt_pf32_sse2
0:
@@ -182,7 +181,7 @@ asm_function aligned_block_copy_nt_pf32_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_copy_pf64_sse2
0:
@@ -200,7 +199,7 @@ asm_function aligned_block_copy_pf64_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_copy_nt_pf64_sse2
0:
@@ -218,7 +217,7 @@ asm_function aligned_block_copy_nt_pf64_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_fill_sse2
movdqa xmm0, [SRC + 0]
@@ -231,7 +230,7 @@ asm_function aligned_block_fill_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

asm_function aligned_block_fill_nt_sse2
movdqa xmm0, [SRC + 0]
@@ -244,7 +243,7 @@ asm_function aligned_block_fill_nt_sse2
sub SIZE, 64
jg 0b
ret
-.endfunc
+

/*****************************************************************************/

diff --git a/meta-oe/recipes-benchmark/tinymembench/tinymembench_git.bb b/meta-oe/recipes-benchmark/tinymembench/tinymembench_git.bb
index 842644b8a1..2ce10f9c44 100644
--- a/meta-oe/recipes-benchmark/tinymembench/tinymembench_git.bb
+++ b/meta-oe/recipes-benchmark/tinymembench/tinymembench_git.bb
@@ -9,7 +9,9 @@ LIC_FILES_CHKSUM = "file://main.c;endline=22;md5=879b9bbb60851454885b5fa47eb6b34
 PV = "0.4.0+git${SRCPV}"

 SRCREV = "a2cf6d7e382e3aea1eb39173174d9fa28cad15f3"
-SRC_URI = "git://github.com/ssvb/tinymembench.git"
+SRC_URI = "git://github.com/ssvb/tinymembench.git \
+           file://0001-asm-Delete-.func-.endfunc-directives.patch \
+"

 S = "${WORKDIR}/git"

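Note: the assembler behaviour the patch works around can be checked in isolation. The sketch below is not taken from tinymembench; the file and symbol names are invented for illustration. It declares a function using only .global, .type and a label, which both gas and clang's integrated assembler accept; the stabs-only .func/.endfunc pair is exactly what is left out.

    /* demo.S -- illustrative AArch64 sketch, not part of the patch */
            .text
            .global demo_return
            .type   demo_return, %function   /* ELF symbol type; no .func/.endfunc needed */
    demo_return:
            ret                              /* plain AArch64 return */
            .size   demo_return, . - demo_return

Something like "clang --target=aarch64-linux-gnu -c demo.S" or "aarch64-linux-gnu-gcc -c demo.S" should assemble this cleanly. Wrapping the body in a bare .func demo_return / .endfunc pair is where clang's integrated assembler starts to reject the file (unless it is bypassed with -fno-integrated-as), while gas silently ignores the directives, which is the difference described in the patch's commit message.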