source: branches/libc-0.6/src/emx/include/386/builtin.h

Last change on this file was 2780, checked in by bird, 19 years ago

Corrected two unsigned returns which should've been signed.

  • Property cvs2svn:cvs-rev set to 1.13
  • Property svn:eol-style set to native
  • Property svn:executable set to *
File size: 17.8 KB
/* 386/builtin.h (emx+gcc) */
/** @file
 * EMX 0.9d-fix04
 */

#ifndef _I386_BUILTIN_H
#define _I386_BUILTIN_H

#include <sys/cdefs.h>
#include <stdint.h>

__BEGIN_DECLS


static __inline__ signed char __cxchg (__volatile__ signed char *p,
                                       signed char v)
{
  __asm__ __volatile__ ("xchgb %0, %1" : "=m"(*p), "=r"(v) : "1"(v));
  return v;
}

static __inline__ short __sxchg (__volatile__ short *p, short v)
{
  __asm__ __volatile__ ("xchgw %0, %1" : "=m"(*p), "=r"(v) : "1"(v));
  return v;
}

static __inline__ int __lxchg (__volatile__ int *p, int v)
{
  __asm__ __volatile__ ("xchgl %0, %1" : "=m"(*p), "=r"(v) : "1"(v));
  return v;
}

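/* Usage sketch (not part of the original header): a minimal spinlock built
   on __lxchg. An xchg with a memory operand is implicitly locked on x86, so
   the loop below is a classic test-and-set lock. The names `lock` and
   `__spin_*` are illustrative only.

     static __volatile__ int lock = 0;

     static __inline__ void __spin_acquire (void)
     {
       while (__lxchg (&lock, 1) != 0)
         ;                              // spin until the old value was 0
     }

     static __inline__ void __spin_release (void)
     {
       __lxchg (&lock, 0);              // publish the unlock
     }
*/
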
static __inline__ void __enable (void)
{
  __asm__ __volatile__ ("sti");
}

static __inline__ void __disable (void)
{
  __asm__ __volatile__ ("cli");
}


/**
 * Performs an atomic xchg on an unsigned int.
 * @returns old value.
 * @param   pu      Pointer to the value to update.
 * @param   u       The new value.
 */
static __inline__ unsigned __atomic_xchg(__volatile__ unsigned *pu, unsigned u)
{
    __asm__ __volatile__ ("xchgl %0, %1" : "=m" (*pu), "=r" (u) : "1" (u));
    return u;
}

/**
 * Performs an atomic xchg on a 16-bit unsigned integer.
 *
 * @returns old value.
 * @param   pu16    Pointer to the value to update.
 * @param   u16     The new value.
 */
static inline uint16_t __atomic_xchg_word(volatile uint16_t *pu16, uint16_t u16)
{
    __asm__ __volatile__ ("xchgw %0, %1" : "=m" (*pu16), "=r" (u16) : "1" (u16));
    return u16;
}

/**
 * Atomically sets a bit and returns the old value.
 *
 * @returns non-zero if the bit was set, 0 if it was clear.
 * @param   pv      Pointer to base of bitmap.
 * @param   uBit    Bit in question.
 */
static __inline__ int __atomic_set_bit(__volatile__ void *pv, unsigned uBit)
{
    __asm__ __volatile__("lock; btsl %2, %1\n\t"
                         "sbbl %0,%0"
                         : "=r" (uBit),
                           "=m" (*(__volatile__ unsigned *)pv)
                         : "0" (uBit)
                         : "memory");
    return uBit;
}


/**
 * Atomically clears a bit.
 *
 * @param   pv      Pointer to base of bitmap.
 * @param   uBit    Bit in question.
 */
static __inline__ void __atomic_clear_bit(__volatile__ void *pv, unsigned uBit)
{
    __asm__ __volatile__("lock; btrl %1, %0"
                         : "=m" (*(__volatile__ unsigned *)pv)
                         : "r" (uBit));
}


/**
 * Tests if a bit is set (a plain read; no lock prefix, so not truly atomic).
 *
 * @returns non-zero if the bit was set.
 * @returns 0 if the bit was clear.
 * @param   pv      Pointer to base of bitmap.
 * @param   uBit    Bit in question.
 */
static __inline__ int __atomic_test_bit(const __volatile__ void *pv, unsigned uBit)
{
    __asm__ __volatile__("btl %0, %1\n\t"
                         "sbbl %0, %0\n\t"
                         : "=r" (uBit)
                         : "m" (*(const __volatile__ unsigned *)pv),
                           "0" (uBit));
    return uBit;
}

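/* How the bt/sbb idiom above works (explanatory note, not original text):
   btl copies the selected bit into the carry flag, and sbbl %0,%0 computes
   uBit - uBit - CF, i.e. 0 when the bit was clear and -1 (all ones) when it
   was set. That is why these routines document a non-zero rather than a "1"
   return. Illustrative use, with a hypothetical bitmap variable:

     static __volatile__ unsigned bitmap[2] = {0};

     // ...
     if (!__atomic_set_bit (&bitmap, 5))
       ;  // we won the race: bit 5 was previously clear
     __atomic_clear_bit (&bitmap, 5);
*/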

/**
 * Atomically adds a 32-bit unsigned value to another.
 *
 * @param   pu      Pointer to the value to add to.
 * @param   uAdd    The value to add to *pu.
 */
static __inline__ void __atomic_add(__volatile__ unsigned *pu, const unsigned uAdd)
{
    __asm__ __volatile__("lock; addl %1, %0"
                         : "=m" (*pu)
                         : "nr" (uAdd),
                           "m" (*pu));
}

/**
 * Atomically subtracts a 32-bit unsigned value from another.
 *
 * @param   pu      Pointer to the value to subtract from.
 * @param   uSub    The value to subtract from *pu.
 */
static __inline__ void __atomic_sub(__volatile__ unsigned *pu, const unsigned uSub)
{
    __asm__ __volatile__("lock; subl %1, %0"
                         : "=m" (*pu)
                         : "nr" (uSub),
                           "m" (*pu));
}

/**
 * Atomically increments a 32-bit unsigned value.
 *
 * @param   pu      Pointer to the value to increment.
 */
static __inline__ void __atomic_increment(__volatile__ unsigned *pu)
{
    __asm__ __volatile__("lock; incl %0"
                         : "=m" (*pu)
                         : "m" (*pu));
}

/**
 * Atomically increments a 32-bit unsigned value.
 *
 * @returns The new value.
 * @param   pu32    Pointer to the value to increment.
 */
static __inline__ uint32_t __atomic_increment_u32(uint32_t __volatile__ *pu32)
{
    uint32_t u32;
    __asm__ __volatile__("lock; xadd %0, %1\n\t"
                         "incl %0\n\t"
                         : "=r" (u32),
                           "=m" (*pu32)
                         : "0" (1)
                         : "memory");
    return u32;
}

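/* Usage sketch (illustrative only): xadd leaves the *old* value in %0 and
   the trailing incl turns it into the new value, so this pair is a natural
   fit for reference counting. The struct and function names are
   hypothetical:

     typedef struct { uint32_t __volatile__ cRefs; } refobj;

     static __inline__ uint32_t refobj_retain (refobj *p)
     {
       return __atomic_increment_u32 (&p->cRefs);
     }

     static __inline__ int refobj_release (refobj *p)
     {
       return __atomic_decrement_u32 (&p->cRefs) == 0;  // 1 when last ref
     }
*/
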
/**
 * Atomically increments a 32-bit signed value.
 *
 * @returns The new value.
 * @param   pi32    Pointer to the value to increment.
 */
static __inline__ int32_t __atomic_increment_s32(int32_t __volatile__ *pi32)
{
    int32_t i32;
    __asm__ __volatile__("lock; xadd %0, %1\n\t"
                         "incl %0\n\t"
                         : "=r" (i32),
                           "=m" (*pi32)
                         : "0" (1)
                         : "memory");
    return i32;
}

/**
 * Atomically increments a 16-bit unsigned value.
 *
 * @param   pu16    Pointer to the value to increment.
 */
static __inline__ void __atomic_increment_u16(uint16_t __volatile__ *pu16)
{
    __asm__ __volatile__("lock; incw %0"
                         : "=m" (*pu16)
                         : "m" (*pu16));
}

/**
 * Atomically decrements a 32-bit unsigned value.
 *
 * @param   pu      Pointer to the value to decrement.
 */
static __inline__ void __atomic_decrement(__volatile__ unsigned *pu)
{
    __asm__ __volatile__("lock; decl %0"
                         : "=m" (*pu)
                         : "m" (*pu));
}

/**
 * Atomically decrements a 32-bit unsigned value.
 *
 * @returns The new value.
 * @param   pu32    Pointer to the value to decrement.
 */
static __inline__ uint32_t __atomic_decrement_u32(__volatile__ uint32_t *pu32)
{
    uint32_t u32;
    __asm__ __volatile__("lock; xadd %0, %1\n\t"
                         "decl %0\n\t"
                         : "=r" (u32),
                           "=m" (*pu32)
                         : "0" (-1)
                         : "memory");
    return u32;
}

/**
 * Atomically decrements a 32-bit signed value.
 *
 * @returns The new value.
 * @param   pi32    Pointer to the value to decrement.
 */
static __inline__ int32_t __atomic_decrement_s32(__volatile__ int32_t *pi32)
{
    int32_t i32;
    __asm__ __volatile__("lock; xadd %0, %1\n\t"
                         "decl %0\n\t"
                         : "=r" (i32),
                           "=m" (*pi32)
                         : "0" (-1)
                         : "memory");
    return i32;
}

/**
 * Atomically decrements a 16-bit unsigned value.
 *
 * @param   pu16    Pointer to the value to decrement.
 */
static __inline__ void __atomic_decrement_u16(uint16_t __volatile__ *pu16)
{
    __asm__ __volatile__("lock; decw %0"
                         : "=m" (*pu16)
                         : "m" (*pu16));
}

/**
 * Atomically increments a 32-bit unsigned value if less than max.
 *
 * @returns 0 if incremented.
 * @returns The current value (>= uMax) when not updated.
 * @param   pu      Pointer to the value to increment.
 * @param   uMax    *pu must not be above this value.
 */
static __inline__ int __atomic_increment_max(__volatile__ unsigned *pu, const unsigned uMax)
{
    unsigned rc = 0;
    __asm__ __volatile__("movl %2, %%eax\n\t"
                         "1:\n\t"
                         "movl %%eax, %0\n\t"
                         "cmpl %3, %0\n\t"
                         "jb 2f\n\t"
                         "jmp 4f\n"
                         "2:\n\t"
                         "incl %0\n\t"
                         "lock; cmpxchgl %0, %1\n\t"
                         "jz 3f\n\t"
                         "jmp 1b\n"
                         "3:\n\t"
                         "xorl %0, %0\n\t"
                         "4:"
                         : "=b" (rc),
                           "=m" (*pu)
                         : "m" (*pu),
                           "nd" (uMax),
                           "0" (rc)
                         : "%eax");
    return rc;
}


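/* C-level equivalent of the cmpxchg retry loop above (a sketch, assuming
   the __atomic_cmpxchg32 helper defined further down; illustrative only):

     unsigned cur = *pu;
     for (;;)
     {
       if (cur >= uMax)
         return cur;                      // not updated
       // try to replace cur with cur + 1; reload and retry on failure
       if (__atomic_cmpxchg32 ((volatile uint32_t *)pu, cur + 1, cur))
         return 0;                        // incremented
       cur = *pu;
     }
*/
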
/**
 * Atomically increments a 16-bit unsigned value if less than max.
 *
 * @returns New value.
 * @returns Current value | 0xffff0000 if the current value is greater than
 *          or equal to u16Max.
 * @param   pu16    Pointer to the value to increment.
 * @param   u16Max  *pu16 must not be above this value after the increment.
 */
static inline unsigned __atomic_increment_word_max(volatile uint16_t *pu16, const uint16_t u16Max)
{
    unsigned rc = 0;
    __asm__ __volatile__("movw %2, %%ax\n\t"
                         "1:\n\t"
                         "movw %%ax, %w0\n\t"
                         "cmpw %w3, %%ax\n\t"
                         "jb 2f\n\t"
                         "orl $0xffff0000, %0\n\t"
                         "jmp 3f\n"
                         "2:\n\t"
                         "incw %w0\n\t"
                         "lock; cmpxchgw %w0, %1\n\t"
                         "jz 3f\n\t"
                         "jmp 1b\n\t"
                         "3:"
                         : "=r" (rc),
                           "=m" (*pu16)
                         : "m" (*pu16),
                           "nr" (u16Max),
                           "0" (rc)
                         : "%eax");
    return rc;
}


/**
 * Atomically decrements a 32-bit unsigned value if greater than a min.
 *
 * @returns 0 if decremented.
 * @returns The current value (<= uMin) when not updated.
 * @param   pu      Pointer to the value to decrement.
 * @param   uMin    *pu must not be below this value.
 */
static __inline__ int __atomic_decrement_min(__volatile__ unsigned *pu, const unsigned uMin)
{
    unsigned rc = 0;
    __asm__ __volatile__("movl %2, %%eax\n"
                         "1:\n\t"
                         "movl %%eax, %0\n\t"
                         "cmpl %3, %0\n\t"
                         "ja 2f\n\t"
                         "jmp 4f\n"
                         "2:\n\t"
                         "decl %0\n\t"
                         "lock; cmpxchgl %0, %1\n\t"
                         "jz 3f\n\t"
                         "jmp 1b\n"
                         "3:\n\t"
                         "xorl %0, %0\n\t"
                         "4:"
                         : "=b" (rc),
                           "=m" (*pu)
                         : "m" (*pu),
                           "nr" (uMin),
                           "0" (rc)
                         : "%eax");
    return rc;
}


/**
 * Atomically decrements a 16-bit unsigned value if greater than a min.
 *
 * @returns New value.
 * @returns Current value | 0xffff0000 if the current value is less than or
 *          equal to u16Min.
 * @param   pu16    Pointer to the value to decrement.
 * @param   u16Min  *pu16 must not be below this value after the decrement.
 */
static inline unsigned __atomic_decrement_word_min(volatile uint16_t *pu16, const uint16_t u16Min)
{
    unsigned rc = 0;
    __asm__ __volatile__("movw %2, %%ax\n\t"
                         "1:\n\t"
                         "movw %%ax, %w0\n\t"
                         "cmpw %w3, %%ax\n\t"
                         "ja 2f\n\t"
                         "orl $0xffff0000, %0\n\t"
                         "jmp 3f\n"
                         "2:\n\t"
                         "decw %w0\n\t"
                         "lock; cmpxchgw %w0, %1\n\t"
                         "jz 3f\n\t"
                         "jmp 1b\n"
                         "3:"
                         : "=b" (rc),
                           "=m" (*pu16)
                         : "m" (*pu16),
                           "nr" (u16Min),
                           "0" (rc)
                         : "%eax");
    return rc;
}


/**
 * Atomically compare and exchange a 32-bit word.
 *
 * @returns 1 if changed, 0 if unchanged (i.e. boolean).
 * @param   pu32    Pointer to the value to compare & exchange.
 * @param   u32New  The new value.
 * @param   u32Old  The current value. Only update if *pu32 equals this one.
 */
static inline unsigned __atomic_cmpxchg32(volatile uint32_t *pu32, uint32_t u32New, uint32_t u32Old)
{
    __asm__ __volatile__("lock; cmpxchgl %2, %1\n\t"
                         "setz %%al\n\t"
                         "movzx %%al, %%eax\n\t"
                         : "=a" (u32Old),
                           "=m" (*pu32)
                         : "r" (u32New),
                           "0" (u32Old));
    return (unsigned)u32Old;
}


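/* Usage sketch (illustrative only): the canonical compare-and-swap update
   loop. Read the current value, compute a new one, and retry when another
   thread changed *pu32 in between. The function name is hypothetical:

     static __inline__ uint32_t saturating_add_u32 (volatile uint32_t *pu32,
                                                    uint32_t uAdd)
     {
       uint32_t u32Old, u32New;
       do
       {
         u32Old = *pu32;
         u32New = u32Old + uAdd < u32Old ? ~0U : u32Old + uAdd;  // clamp
       } while (!__atomic_cmpxchg32 (pu32, u32New, u32Old));
       return u32New;
     }
*/
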
#define __ROTATE_FUN(F,I,T) \
  static __inline__ T F (T value, int shift) \
  { \
    __asm__ (I " %b2, %0" : "=g"(value) : "0"(value), "c"(shift) : "cc"); \
    return value; \
  } \
  static __inline__ T F##1 (T value) \
  { \
    __asm__ (I " $1, %0" : "=g"(value) : "0"(value) : "cc"); \
    return value; \
  }

#define __ROTATE(V,S,F) ((__builtin_constant_p (S) && (int)(S) == 1) \
                         ? F##1 (V) : F (V, S))

__ROTATE_FUN (__crotr, "rorb", unsigned char)
__ROTATE_FUN (__srotr, "rorw", unsigned short)
__ROTATE_FUN (__lrotr, "rorl", unsigned long)

__ROTATE_FUN (__crotl, "rolb", unsigned char)
__ROTATE_FUN (__srotl, "rolw", unsigned short)
__ROTATE_FUN (__lrotl, "roll", unsigned long)

#define _crotr(V,S) __ROTATE (V, S, __crotr)
#define _srotr(V,S) __ROTATE (V, S, __srotr)
#define _lrotr(V,S) __ROTATE (V, S, __lrotr)
#define _crotl(V,S) __ROTATE (V, S, __crotl)
#define _srotl(V,S) __ROTATE (V, S, __srotl)
#define _lrotl(V,S) __ROTATE (V, S, __lrotl)

#define _rotr(V,S) _lrotr (V, S)
#define _rotl(V,S) _lrotl (V, S)

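/* Example (illustrative only): __ROTATE routes a compile-time shift of 1 to
   the rotate-by-one form (which needs no CL register) and everything else
   to the variable form:

     unsigned long a = _lrotl (0x80000001UL, 1);         // 0x00000003
     unsigned long b = _lrotr (0x00000003UL, 1);         // 0x80000001
     unsigned char c = _crotl ((unsigned char)0x81, 1);  // 0x03
*/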

static __inline__ int __fls (int v)
{
  int r;

  __asm__ __volatile__ ("bsrl %1, %0;"
                        "jnz 1f;"
                        "movl $-1, %0;"
                        ".align 2, 0x90;"
                        "1:"
                        : "=r"(r) : "r"(v) : "cc");
  return r + 1;
}

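/* Behavior note with examples (not original text): bsrl yields the index of
   the highest set bit and leaves ZF set for a zero input, in which case r
   is forced to -1. So __fls returns the 1-based position of the most
   significant set bit, or 0 for zero input:

     __fls (0)  == 0
     __fls (1)  == 1   // bit 0 set
     __fls (8)  == 4   // bit 3 set
*/
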
/* Quick routines similar to div() and friends, but inline */

static __inline__ long __ldivmod (long num, long den, long *rem)
{
  long q, r;
  __asm__ ("cltd; idivl %2"
           : "=a" (q), "=&d" (r)
           : "r?m" (den), "a" (num));
  *rem = r;
  return q;
}

static __inline__ unsigned long __uldivmod (unsigned long num,
                                            unsigned long den, unsigned long *rem)
{
  unsigned long q, r;
  __asm__ ("xorl %%edx,%%edx; divl %2"
           : "=a" (q), "=&d" (r)
           : "r?m" (den), "a" (num));
  *rem = r;
  return q;
}

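/* Usage sketch (illustrative only): both routines return the quotient and
   store the remainder through the out parameter, mirroring div()/ldiv() in
   a single idivl/divl instruction:

     long r;
     long q = __ldivmod (-7, 2, &r);      // q == -3, r == -1 (truncation)

     unsigned long ur;
     unsigned long uq = __uldivmod (10, 3, &ur);   // uq == 3, ur == 1
*/
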
/*
  Divide a 64-bit integer by a 32-bit one:

   A*2^32 + B    A            (A mod C)*2^32 + B
   ---------- = --- * 2^32 + --------------------
       C         C                    C
*/
static __inline__ long long __lldivmod (long long num, long den, long *rem)
{
  long long q;
  long r;
  __asm__ (" movl %%eax,%%esi;"
           " movl %%edx,%%eax;"
           " pushl %%edx;"
           " cltd;"
           " idivl %2;"
           " ;"
/* Now ensure the remainder is the smaller of the two possible values
   (negative and positive). For this we compare the remainder with positive
   and negative denominator/2; if it is smaller than one and greater than
   the other we consider it optimal, otherwise it can be made smaller by
   adding or subtracting the denominator. This is done to ensure no overflow
   will occur in the next division. */
           " movl %2,%%ecx;"
           " sarl $1,%%ecx;"      /* ecx = den/2 */
           " cmpl %%ecx,%%edx;"
           " setl %%bl;"
           " negl %%ecx;"
           " cmpl %%ecx,%%edx;"
           " setl %%bh;"
           " xorb %%bh,%%bl;"
           " jnz 1f;"             /* Remainder is between -den/2...den/2 */
           " ;"
/* If the remainder has the same sign as the denominator, we have to do
   r -= den; q++; otherwise we have to do r += den; q--; */
           " movl %2,%%ebx;"      /* ebx = den */
           " xorl %%edx,%%ebx;"   /* r ^ den */
           " js 0f;"              /* Different signs */
           " subl %2,%%edx;"      /* r -= den */
           " addl $1,%%eax;"      /* q++ */
           " adcl $0,%%edx;"
           " jmp 1f;"
           " ;"
           "0: addl %2,%%edx;"    /* r += den */
           " subl $1,%%eax;"      /* q-- */
           " sbbl $0,%%edx;"
           " ;"
           "1: xchgl %%eax,%%esi;"
           " idivl %2;"
           " ;"
           " movl %%edx,%1;"
           " cltd;"
           " addl %%esi,%%edx;"
           " ;"
/* Check if the numerator has the same sign as the remainder; if they have
   different signs we should make the remainder have the same sign as the
   numerator to comply with the ANSI standard, which says we should always
   truncate the quotient towards zero. */
           " popl %%ebx;"         /* ebx = num >> 32 */
           " xorl %1,%%ebx;"      /* sign(r) ^ sign(num) */
           " jns 3f;"             /* jump if same sign */
           " ;"
/* If the remainder has the same sign as the denominator, we have to do
   r -= den; q++; otherwise we have to do r += den; q--; */
           " movl %2,%%ebx;"
           " xorl %1,%%ebx;"      /* r ^ den */
           " js 2f;"              /* Different signs */
           " subl %2,%1;"         /* r -= den */
           " addl $1,%%eax;"      /* q++ */
           " adcl $0,%%edx;"
           " jmp 3f;"
           " ;"
           "2: addl %2,%1;"       /* r += den */
           " subl $1,%%eax;"      /* q-- */
           " sbbl $0,%%edx;"
           " ;"
           "3: ;"
           : "=A" (q), "=&c" (r)
           : "r" (den), "A" (num)
           : "ebx", "esi");
  *rem = r;
  return q;
}

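/* Worked example of the identity above (not original text): dividing
   A*2^32 + B = 10*2^32 + 5 by C = 4 gives A/C = 2 for the high word, and
   (A mod C)*2^32 + B = 2*2^32 + 5 for the second division, whose quotient
   0x80000001 combines with the high word into the full 64-bit quotient
   (10*2^32 + 5) / 4 = 0x280000001, remainder 1. */
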
/*
  Same as __lldivmod except that if A < C, we can do just one division
  instead of two because the result is always a 32-bit integer.
*/
static __inline__ unsigned long long __ulldivmod (unsigned long long num,
                                                  unsigned long den, unsigned long *rem)
{
  unsigned long long q;
  unsigned long r;
  __asm__ (" movl %%eax,%1;"
           " movl %%edx,%%eax;"
           " xorl %%edx,%%edx;"
           " divl %2;"
           " xchgl %%eax,%%ecx;"
           " divl %2;"
           " xchgl %%edx,%1;"
           : "=A" (q), "=c" (r)
           : "r?m" (den), "A" (num));
  *rem = r;
  return q;
}

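/* Usage sketch (illustrative only): splitting a 64-bit millisecond count
   into seconds and milliseconds with a single call:

     unsigned long ms;
     unsigned long long secs = __ulldivmod (123456789ULL, 1000, &ms);
     // secs == 123456, ms == 789
*/
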
__END_DECLS
#endif /* not _I386_BUILTIN_H */