/*
 * div_u64() compat
 */

#ifndef MATH64_COMPAT_H
#define MATH64_COMPAT_H

8 | #if BITS_PER_LONG >= 64
|
---|
9 |
|
---|
10 | static inline u64 div_u64_rem(u64 n, u32 div, u32 *rem)
|
---|
11 | {
|
---|
12 | *rem = n % div;
|
---|
13 | return n / div;
|
---|
14 | }
|
---|
15 |
|
---|
16 | static inline u64 div_u64(u64 n, u32 div)
|
---|
17 | {
|
---|
18 | return n / div;
|
---|
19 | }
|
---|
20 |
|
---|
21 | #elif defined(i386)
|
---|
22 |
|
---|
23 | static inline u64 div_u64_rem(u64 n, u32 div, u32 *rem)
|
---|
24 | {
|
---|
25 | u32 low, high;
|
---|
26 |
|
---|
27 | low = (u32)n;
|
---|
28 | high = n >> 32;
|
---|
29 | if (high) {
|
---|
30 | u32 high1 = high % div;
|
---|
31 | high /= div;
|
---|
32 | asm("divl %2" : "=a" (low), "=d" (*rem) : \
|
---|
33 | "rm" (div), "a" (low), "d" (high1));
|
---|
34 | return (u64)high << 32 | low;
|
---|
35 | } else {
|
---|
36 | *rem = low % div;
|
---|
37 | return low / div;
|
---|
38 | }
|
---|
39 | }
|
---|
40 |
|
---|
41 | static inline u64 div_u64(u64 n, u32 div)
|
---|
42 | {
|
---|
43 | u32 low, high;
|
---|
44 |
|
---|
45 | low = (u32)n;
|
---|
46 | high = n >> 32;
|
---|
47 | if (high) {
|
---|
48 | u32 high1 = high % div;
|
---|
49 | high /= div;
|
---|
50 | asm("divl %2" : "=a" (low) : \
|
---|
51 | "rm" (div), "a" (low), "d" (high1));
|
---|
52 | return (u64)high << 32 | low;
|
---|
53 | } else
|
---|
54 | return low / div;
|
---|
55 | }
|
---|
56 |
|
---|
57 | #else
|
---|
58 |
|
---|
59 | static inline void divl(u32 high, u32 low, u32 div, u32 *q, u32 *r)
|
---|
60 | {
|
---|
61 | u64 n = (u64)high << 32 | low;
|
---|
62 | u64 d = (u64)div << 31;
|
---|
63 | u32 q1 = 0;
|
---|
64 | int c = 32;
|
---|
65 | while (n > 0xffffffffUi64) {
|
---|
66 | q1 <<= 1;
|
---|
67 | if (n >= d) {
|
---|
68 | n -= d;
|
---|
69 | q1 |= 1;
|
---|
70 | }
|
---|
71 | d >>= 1;
|
---|
72 | c--;
|
---|
73 | }
|
---|
74 | q1 <<= c;
|
---|
75 | if (n) {
|
---|
76 | low = n;
|
---|
77 | *q = q1 | (low / div);
|
---|
78 | *r = low % div;
|
---|
79 | } else {
|
---|
80 | *r = 0;
|
---|
81 | *q = q1;
|
---|
82 | }
|
---|
83 | return;
|
---|
84 | }
|
---|
85 |
|
---|
86 | static inline u64 div_u64_rem(u64 n, u32 div, u32 *rem)
|
---|
87 | {
|
---|
88 | u32 low, high;
|
---|
89 | low = (u32)n;
|
---|
90 | high = n >> 32;
|
---|
91 | if (high) {
|
---|
92 | u32 high1 = high % div;
|
---|
93 | u32 low1 = low;
|
---|
94 | high /= div;
|
---|
95 | divl(high1, low1, div, &low, rem);
|
---|
96 | return (u64)high << 32 | low;
|
---|
97 | } else {
|
---|
98 | *rem = low % div;
|
---|
99 | return low / div;
|
---|
100 | }
|
---|
101 | }
|
---|
102 |
|
---|
103 | static inline u64 div_u64(u64 n, u32 div)
|
---|
104 | {
|
---|
105 | u32 low, high, rem;
|
---|
106 | low = (u32)n;
|
---|
107 | high = n >> 32;
|
---|
108 | if (high) {
|
---|
109 | u32 high1 = high % div;
|
---|
110 | u32 low1 = low;
|
---|
111 | high /= div;
|
---|
112 | divl(high1, low1, div, &low, &rem);
|
---|
113 | return (u64)high << 32 | low;
|
---|
114 | } else {
|
---|
115 | return low / div;
|
---|
116 | }
|
---|
117 | }
|
---|
118 |
|
---|
119 | #endif
|
---|
120 |
|
---|
121 | #endif /* MATH64_COMPAT_H */
|
---|