1 |
|
---|
2 | /* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
|
---|
3 | /* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
|
---|
4 | /* Eliminated some memory leaks, gsw@agere.com */
|
---|
5 |
|
---|
6 | #include <windows.h>
|
---|
7 | #include <limits.h>
|
---|
8 | #ifdef HAVE_PROCESS_H
|
---|
9 | #include <process.h>
|
---|
10 | #endif
|
---|
11 |
|
---|
/* Non-recursive mutex: a thread must not re-acquire a lock it already
   holds.  Built from an interlocked counter plus an auto-reset event
   used to block/wake waiters. */
typedef struct NRMUTEX {
	LONG owned ;       /* -1 when free; otherwise (owners + waiters - 1) */
	DWORD thread_id ;  /* id of the owning thread, 0 when unowned */
	HANDLE hevent ;    /* auto-reset event: wakes exactly one waiter */
} NRMUTEX, *PNRMUTEX ;

/* Signature of the InterlockedCompareExchange API, so we can fall back
   to an emulation on platforms (Win95) that lack it. */
typedef PVOID WINAPI interlocked_cmp_xchg_t(PVOID *dest, PVOID exc, PVOID comperand) ;
|
---|
19 |
|
---|
/* Sorry mate, but we haven't got InterlockedCompareExchange in Win95! */
/* Emulation of InterlockedCompareExchange, serialized by a global
   spinlock: atomically compare *dest with comperand and, if equal,
   store exc.  Returns the original value of *dest. */
static PVOID WINAPI
interlocked_cmp_xchg(PVOID *dest, PVOID exc, PVOID comperand)
{
	static LONG spinlock = 0 ;
	PVOID result ;
	DWORD dwSleep = 0;

	/* Acquire spinlock (yielding control to other threads if we can't
	   acquire it at once). */
	while (InterlockedExchange(&spinlock, 1))
	{
		/* Sleep(0) only yields to a ready thread of the SAME priority,
		   so a pure Sleep(0) loop risks priority inversion: a
		   low-priority lock holder may never be scheduled while a
		   medium-priority thread is always runnable.  Sleep(1)
		   unconditionally yields the CPU, but a full millisecond is a
		   long time if we could run again sooner (~100,000
		   instructions).  So alternate: 0, 1, 0, 1, ... */
		Sleep(dwSleep);
		dwSleep = !dwSleep;
	}
	result = *dest ;
	if (result == comperand)
		*dest = exc ;
	/* Release spinlock.  Use InterlockedExchange rather than a plain
	   "spinlock = 0" store: the interlocked operation acts as a full
	   memory barrier, guaranteeing the update to *dest is visible
	   before the lock appears free. */
	InterlockedExchange(&spinlock, 0) ;
	return result ;
}
/* (fixed: the original had a stray ';' after the function body) */
|
---|
59 |
|
---|
/* Pointer to the real InterlockedCompareExchange, or to the emulation
   above when the API is unavailable (Win95). */
static interlocked_cmp_xchg_t *ixchg;

/* Initialize *mutex to the unowned state and create its wake-up event.
   Also resolves InterlockedCompareExchange on first call.
   Returns TRUE iff the event (and hence the mutex) was created.
   NOTE(review): the lazy init of 'ixchg' is itself unsynchronized;
   presumably the first call happens before any threads exist — verify
   against callers. */
BOOL
InitializeNonRecursiveMutex(PNRMUTEX mutex)
{
	if (!ixchg)
	{
		/* Sorely, Win95 has no InterlockedCompareExchange API (Win98 has), so we have to use emulation */
		HANDLE kernel = GetModuleHandle("kernel32.dll") ;
		if (!kernel || (ixchg = (interlocked_cmp_xchg_t *)GetProcAddress(kernel, "InterlockedCompareExchange")) == NULL)
			ixchg = interlocked_cmp_xchg ;
	}

	mutex->owned = -1 ;  /* No threads have entered NonRecursiveMutex */
	mutex->thread_id = 0 ;
	/* Auto-reset event, initially non-signaled: a release wakes exactly
	   one waiting thread. */
	mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ;
	return mutex->hevent != NULL ;  /* TRUE if the mutex is created */
}
|
---|
78 |
|
---|
/* From here on, route InterlockedCompareExchange through the resolved
   function pointer (real API or Win95 emulation). */
#ifdef InterlockedCompareExchange
#undef InterlockedCompareExchange
#endif
#define InterlockedCompareExchange(dest,exchange,comperand) (ixchg((dest), (exchange), (comperand)))
|
---|
83 |
|
---|
/* Tear down the event backing *mutex.  The caller is responsible for
   ensuring the mutex is no longer in use — nothing is checked here. */
VOID
DeleteNonRecursiveMutex(PNRMUTEX mutex)
{
	/* No in-use check */
	CloseHandle(mutex->hevent);
	/* Defensive: make use-after-delete fail loudly. */
	mutex->hevent = NULL;
}
|
---|
91 |
|
---|
/* Acquire *mutex.
   wait == FALSE: try-lock — returns WAIT_TIMEOUT immediately if the
   mutex is owned, WAIT_OBJECT_0 if acquired.
   wait != FALSE: blocks until the mutex is free; returns the result of
   the wait (WAIT_OBJECT_0 on success). */
DWORD
EnterNonRecursiveMutex(PNRMUTEX mutex, BOOL wait)
{
	/* Assume that the thread waits successfully */
	DWORD ret ;

	/* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
	if (!wait)
	{
		/* Try-lock: atomically move owned from -1 (free) to 0 (one
		   owner).  If it wasn't -1, somebody else holds it — bail. */
		if (InterlockedCompareExchange((PVOID *)&mutex->owned, (PVOID)0, (PVOID)-1) != (PVOID)-1)
			return WAIT_TIMEOUT ;
		ret = WAIT_OBJECT_0 ;
	}
	else
		ret = InterlockedIncrement(&mutex->owned) ?
			/* Some thread owns the mutex, let's wait... */
			WaitForSingleObject(mutex->hevent, INFINITE) : WAIT_OBJECT_0 ;

	mutex->thread_id = GetCurrentThreadId() ; /* We own it */
	return ret ;
}
|
---|
113 |
|
---|
/* Release *mutex.  Decrements the owner/waiter count; if the result is
   still >= 0 threads are waiting, so the event is signaled to wake
   exactly one of them (note the short-circuit: SetEvent runs only when
   the decrement did NOT reach -1).  Returns TRUE on success; FALSE only
   if SetEvent fails. */
BOOL
LeaveNonRecursiveMutex(PNRMUTEX mutex)
{
	/* We don't own the mutex */
	mutex->thread_id = 0 ;
	return
		InterlockedDecrement(&mutex->owned) < 0 ||
		SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */
}
|
---|
123 |
|
---|
/* Heap-allocate and initialize a fresh NRMUTEX.
   Returns NULL on allocation or initialization failure; the partially
   constructed object is freed, so no memory leaks. */
PNRMUTEX
AllocNonRecursiveMutex(void)
{
	PNRMUTEX mutex = (PNRMUTEX)malloc(sizeof(NRMUTEX));
	if (mutex == NULL)
		return NULL;
	if (!InitializeNonRecursiveMutex(mutex)) {
		free(mutex);
		return NULL;
	}
	return mutex;
}
|
---|
135 |
|
---|
/* Destroy and free a mutex created by AllocNonRecursiveMutex().
   Safe to call with NULL (no-op). */
void
FreeNonRecursiveMutex(PNRMUTEX mutex)
{
	if (mutex == NULL)
		return;
	DeleteNonRecursiveMutex(mutex);
	free(mutex);
}
|
---|
145 |
|
---|
/* Forward declaration; defined later in this file. */
long PyThread_get_thread_ident(void);

/*
 * Initialization of the C package, should not be needed.
 */
static void
PyThread__init_thread(void)
{
	/* Intentionally empty: the Win32 thread package needs no
	   per-process setup. */
}
|
---|
155 |
|
---|
/*
 * Thread support.
 */

/* Argument bundle handed from the spawning thread to the bootstrap
   function of a new thread. */
typedef struct {
	void (*func)(void*);  /* user entry point */
	void *arg;            /* argument passed to func */
	long id;              /* filled in by the child: its thread id (-1 on failure) */
	HANDLE done;          /* semaphore: child signals once 'id' is valid */
} callobj;
|
---|
166 |
|
---|
/* Entry point for threads created by PyThread_start_new_thread():
   publish the child's thread id, signal the parent, then run the user
   function.  The ordering below is critical. */
static int
bootstrap(void *call)
{
	callobj *obj = (callobj*)call;
	/* copy callobj since other thread might free it before we're done
	   (the parent's callobj lives on its stack and is released as soon
	   as the semaphore is signaled) */
	void (*func)(void*) = obj->func;
	void *arg = obj->arg;

	obj->id = PyThread_get_thread_ident();
	ReleaseSemaphore(obj->done, 1, NULL);  /* unblocks the parent */
	func(arg);
	return 0;
}
|
---|
180 |
|
---|
/* Start a new thread running func(arg).
   Returns the new thread's id, or -1 on failure (semaphore creation
   fails, or _beginthread fails — e.g. EAGAIN, "too many threads").
   Blocks briefly until the child has published its id. */
long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
	Py_uintptr_t rv;
	callobj obj;

	dprintf(("%ld: PyThread_start_new_thread called\n",
		 PyThread_get_thread_ident()));
	if (!initialized)
		PyThread_init_thread();

	obj.id = -1;	/* guilty until proved innocent */
	obj.func = func;
	obj.arg = arg;
	/* Semaphore (max count 1) the child signals once obj.id is set. */
	obj.done = CreateSemaphore(NULL, 0, 1, NULL);
	if (obj.done == NULL)
		return -1;

	rv = _beginthread(bootstrap, _pythread_stacksize, &obj);
	if (rv == (Py_uintptr_t)-1) {
		/* I've seen errno == EAGAIN here, which means "there are
		 * too many threads".
		 */
		dprintf(("%ld: PyThread_start_new_thread failed: %p errno %d\n",
			 PyThread_get_thread_ident(), rv, errno));
		obj.id = -1;
	}
	else {
		dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n",
			 PyThread_get_thread_ident(), rv));
		/* wait for thread to initialize, so we can get its id */
		WaitForSingleObject(obj.done, INFINITE);
		assert(obj.id != -1);
	}
	CloseHandle((HANDLE)obj.done);
	return obj.id;
}
|
---|
218 |
|
---|
/*
 * Return the thread Id instead of an handle. The Id is said to uniquely identify the
 * thread in the system
 */
long
PyThread_get_thread_ident(void)
{
	if (!initialized)
		PyThread_init_thread();

	return GetCurrentThreadId();
}
|
---|
231 |
|
---|
/* Terminate the calling thread via _endthread().  If the package was
   never initialized there is nothing to unwind, so exit the whole
   process instead: _exit() (skip C-runtime cleanup) when no_cleanup is
   nonzero, exit() otherwise. */
static void
do_PyThread_exit_thread(int no_cleanup)
{
	dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
	/* Braces added: the original nested, unbraced if/else relied on
	   dangling-else binding; make the intended pairing explicit. */
	if (!initialized) {
		if (no_cleanup)
			_exit(0);
		else
			exit(0);
	}
	_endthread();
}
|
---|
243 |
|
---|
/* Exit the current thread with normal (cleanup) semantics. */
void
PyThread_exit_thread(void)
{
	do_PyThread_exit_thread(0);
}
|
---|
249 |
|
---|
/* Exit the current thread without cleanup (the _exit() path). */
void
PyThread__exit_thread(void)
{
	do_PyThread_exit_thread(1);
}
|
---|
255 |
|
---|
#ifndef NO_EXIT_PROG
/* Exit the whole program with the given status — but only if the
   thread package was never initialized; otherwise this is a no-op.
   _exit() skips C-runtime cleanup when no_cleanup is nonzero. */
static void
do_PyThread_exit_prog(int status, int no_cleanup)
{
	dprintf(("PyThread_exit_prog(%d) called\n", status));
	/* Braces added: the original nested, unbraced if/else relied on
	   dangling-else binding; make the intended pairing explicit. */
	if (!initialized) {
		if (no_cleanup)
			_exit(status);
		else
			exit(status);
	}
}

/* Exit the program with normal (cleanup) semantics. */
void
PyThread_exit_prog(int status)
{
	do_PyThread_exit_prog(status, 0);
}

/* Exit the program without cleanup (the _exit() path). */
void
PyThread__exit_prog(int status)
{
	do_PyThread_exit_prog(status, 1);
}
#endif /* NO_EXIT_PROG */
|
---|
280 |
|
---|
/*
 * Lock support. It has too be implemented as semaphores.
 * I [Dag] tried to implement it with mutex but I could find a way to
 * tell whether a thread already own the lock or not.
 */
/* Allocate a new lock.  Returns NULL if allocation or initialization
   of the underlying NRMUTEX fails. */
PyThread_type_lock
PyThread_allocate_lock(void)
{
	PNRMUTEX aLock;

	dprintf(("PyThread_allocate_lock called\n"));
	if (!initialized)
		PyThread_init_thread();

	aLock = AllocNonRecursiveMutex() ;

	dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock));

	return (PyThread_type_lock) aLock;
}
|
---|
301 |
|
---|
/* Free a lock allocated by PyThread_allocate_lock().
   NULL is tolerated (FreeNonRecursiveMutex checks). */
void
PyThread_free_lock(PyThread_type_lock aLock)
{
	dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock));

	FreeNonRecursiveMutex(aLock) ;
}
|
---|
309 |
|
---|
/*
 * Return 1 on success if the lock was acquired
 *
 * and 0 if the lock was not acquired. This means a 0 is returned
 * if the lock has already been acquired by this thread!
 */
int
PyThread_acquire_lock(PyThread_type_lock aLock, int waitflag)
{
	int success ;

	dprintf(("%ld: PyThread_acquire_lock(%p, %d) called\n", PyThread_get_thread_ident(),aLock, waitflag));

	/* Note: the second argument of EnterNonRecursiveMutex is a BOOL;
	   INFINITE is simply a nonzero "wait" value here. */
	success = aLock && EnterNonRecursiveMutex((PNRMUTEX) aLock, (waitflag ? INFINITE : 0)) == WAIT_OBJECT_0 ;

	dprintf(("%ld: PyThread_acquire_lock(%p, %d) -> %d\n", PyThread_get_thread_ident(),aLock, waitflag, success));

	return success;
}
|
---|
329 |
|
---|
/* Release a lock previously acquired with PyThread_acquire_lock().
   On failure (NULL lock or LeaveNonRecursiveMutex failing) only a
   debug message is emitted. */
void
PyThread_release_lock(PyThread_type_lock aLock)
{
	dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock));

	if (!(aLock && LeaveNonRecursiveMutex((PNRMUTEX) aLock)))
		/* Fixed format string: "%l" is not a valid conversion spec;
		   GetLastError() returns a DWORD (unsigned long) -> "%lu". */
		dprintf(("%ld: Could not PyThread_release_lock(%p) error: %lu\n", PyThread_get_thread_ident(), aLock, GetLastError()));
}
|
---|
338 |
|
---|
339 | /* minimum/maximum thread stack sizes supported */
|
---|
340 | #define THREAD_MIN_STACKSIZE 0x8000 /* 32kB */
|
---|
341 | #define THREAD_MAX_STACKSIZE 0x10000000 /* 256MB */
|
---|
342 |
|
---|
343 | /* set the thread stack size.
|
---|
344 | * Return 0 if size is valid, -1 otherwise.
|
---|
345 | */
|
---|
346 | static int
|
---|
347 | _pythread_nt_set_stacksize(size_t size)
|
---|
348 | {
|
---|
349 | /* set to default */
|
---|
350 | if (size == 0) {
|
---|
351 | _pythread_stacksize = 0;
|
---|
352 | return 0;
|
---|
353 | }
|
---|
354 |
|
---|
355 | /* valid range? */
|
---|
356 | if (size >= THREAD_MIN_STACKSIZE && size < THREAD_MAX_STACKSIZE) {
|
---|
357 | _pythread_stacksize = size;
|
---|
358 | return 0;
|
---|
359 | }
|
---|
360 |
|
---|
361 | return -1;
|
---|
362 | }
|
---|
363 |
|
---|
364 | #define THREAD_SET_STACKSIZE(x) _pythread_nt_set_stacksize(x)
|
---|