source: trunk/gcc/boehm-gc/irix_threads.c@3232

Last change on this file since 3232 was 1392, checked in by bird, 21 years ago

This commit was generated by cvs2svn to compensate for changes in r1391,
which included commits to RCS files with non-trunk default branches.

  • Property cvs2svn:cvs-rev set to 1.1.1.2
  • Property svn:eol-style set to native
  • Property svn:executable set to *
File size: 21.4 KB
/*
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Irix (>=6.2) Pthreads.  This relies on properties
 * not guaranteed by the Pthread standard.  It may or may not be portable
 * to other implementations.
 *
 * This now also includes an initial attempt at thread support for
 * HP/UX 11.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */

# if defined(GC_IRIX_THREADS)

# include "private/gc_priv.h"
# include <pthread.h>
# include <semaphore.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>

#undef pthread_create
#undef pthread_sigmask
#undef pthread_join
#undef pthread_detach

void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif

/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and    */
/* joins.  We never actually create detached threads.  We allocate    */
/* all new thread stacks ourselves.  These allow us to maintain this  */
/* data structure.                                                    */
/* Protected by GC_thr_lock.                                          */
/* Some of this should be declared volatile, but that's inconsistent  */
/* with some library routine declarations.                            */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word stop;
#       define NOT_STOPPED 0
#       define PLEASE_STOP 1
#       define STOPPED 2
    word flags;
#       define FINISHED 1       /* Thread has exited.                 */
#       define DETACHED 2       /* Thread is intended to be detached. */
#       define CLIENT_OWNS_STACK 4
                                /* Stack was supplied by client.      */
    ptr_t stack;
    ptr_t stack_ptr;            /* Valid only when stopped.           */
                                /* But must be within stack region at */
                                /* all times.                         */
    size_t stack_size;          /* 0 for original thread.             */
    void * status;              /* Used only to avoid premature       */
                                /* reclamation of any data it might   */
                                /* reference.                         */
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);

/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  Unfortunately, this means we have to reserve
 * a signal, and intercept client calls to change the signal mask.
 * We use SIG_SUSPEND, defined in gc_priv.h.
 */

pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;

void GC_suspend_handler(int sig)
{
    int dummy;
    GC_thread me;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
    me = GC_lookup_thread(pthread_self());
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
    pthread_mutex_lock(&GC_suspend_lock);
    if (PLEASE_STOP != me -> stop) {
        /* Misdirected signal.  */
        pthread_mutex_unlock(&GC_suspend_lock);
        return;
    }
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stop = STOPPED;
    pthread_cond_signal(&GC_suspend_ack_cv);
    pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
}



GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

# define N_FREE_LISTS 25
ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
                /* GC_stack_free_lists[i] is free list for stacks of  */
                /* size GC_min_stack_sz*2**i.                         */
                /* Free lists are linked through first word.          */

/* Return a stack of size at least *stack_size.  *stack_size is  */
/* replaced by the actual stack size.                            */
/* Caller holds allocation lock.                                 */
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;     /* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        GC_stack_free_lists[index] = *(ptr_t *)result;
    } else {
        result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_size);
        result = (ptr_t)(((word)result + GC_page_size) & ~(GC_page_size - 1));
        /* Protect hottest page to detect overflow. */
#       ifdef STACK_GROWS_UP
          /* mprotect(result + search_sz, GC_page_size, PROT_NONE); */
#       else
          /* mprotect(result, GC_page_size, PROT_NONE); */
          result += GC_page_size;
#       endif
    }
    *stack_size = search_sz;
    return(result);
}

/* Caller holds allocation lock. */
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;

    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");
    *(ptr_t *)stack = GC_stack_free_lists[index];
    GC_stack_free_lists[index] = stack;
}
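
/* An illustrative sketch (disabled, in the spirit of the #if 0 block  */
/* above) of the size-class computation shared by GC_stack_alloc and   */
/* GC_stack_free: a request of sz bytes maps to the smallest index i   */
/* with GC_min_stack_sz*2**i >= sz.  GC_stack_size_index is a          */
/* hypothetical helper, not part of this file.                         */
#if 0
static int GC_stack_size_index(size_t sz)
{
    size_t search_sz = GC_min_stack_sz;
    int index = 0;

    while (search_sz < sz) {
        search_sz *= 2;
        index++;
    }
    /* If GC_min_stack_sz were 4096: sz == 4096 -> 0, sz == 4097 -> 1, */
    /* sz == 65536 -> 4.  GC_stack_free requires an exact power-of-two */
    /* multiple, so sizes returned by GC_stack_alloc round-trip.       */
    return index;
}
#endif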



# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire the allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    /* result -> stop = 0;  */
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* If a thread has been joined, but we have not yet         */
/* been notified, then there may be more than one thread    */
/* in the table with the same pthread id.                   */
/* This is OK, but we need a way to delete a specific one.  */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* Return a GC_thread corresponding to a given thread_t.  */
/* Returns 0 if it's not there.                           */
/* Caller holds allocation lock or otherwise inhibits     */
/* updates.                                               */
/* If there is more than one thread with the given id we  */
/* return the most recent one.                            */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
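
/* Usage sketch (disabled; GC_register_self is hypothetical): because  */
/* GC_new_thread links new entries at the head of their hash chain, a  */
/* lookup immediately after registration yields the new entry even if  */
/* a joined-but-unregistered thread left a stale entry with the same   */
/* (recycled) pthread id.                                              */
#if 0
static void GC_register_self(void)
{
    GC_thread me;

    /* Caller holds the allocation lock. */
    me = GC_new_thread(pthread_self());
    if (me != GC_lookup_thread(pthread_self()))
        ABORT("New thread not at head of hash chain");
    /* Stale duplicates are removed individually via            */
    /* GC_delete_gc_thread(id, gc_id), as GC_pthread_join does. */
}
#endif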


/* Caller holds allocation lock. */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int result;
    struct timespec timeout;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> id != my_thread) {
                if (p -> flags & FINISHED) {
                    p -> stop = STOPPED;
                    continue;
                }
                p -> stop = PLEASE_STOP;
                result = pthread_kill(p -> id, SIG_SUSPEND);
                /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
                switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    p -> stop = STOPPED;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
                }
            }
        }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            while (p -> id != my_thread && p -> stop != STOPPED) {
                clock_gettime(CLOCK_REALTIME, &timeout);
                timeout.tv_nsec += 50000000;    /* 50 msecs */
                if (timeout.tv_nsec >= 1000000000) {
                    timeout.tv_nsec -= 1000000000;
                    ++timeout.tv_sec;
                }
                result = pthread_cond_timedwait(&GC_suspend_ack_cv,
                                                &GC_suspend_lock,
                                                &timeout);
                if (result == ETIMEDOUT) {
                    /* Signal was lost or misdirected.  Try again. */
                    /* Duplicate signals should be benign.         */
                    result = pthread_kill(p -> id, SIG_SUSPEND);
                }
            }
        }
    }
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
}

/* Caller holds allocation lock. */
void GC_start_world()
{
    GC_thread p;
    unsigned i;

    /* GC_printf0("World starting\n"); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            p -> stop = NOT_STOPPED;
        }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    /* All other threads are at pthread_cond_wait in signal handler. */
    /* Otherwise we couldn't have acquired the lock.                 */
    pthread_mutex_unlock(&GC_suspend_lock);
    pthread_cond_broadcast(&GC_continue_cv);
}
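
/* A sketch (disabled; GC_stop_start_example is hypothetical) of how   */
/* the collector proper is expected to bracket a collection with the   */
/* primitives above; the real call sequence lives in the               */
/* machine-independent GC, not in this file.                           */
#if 0
static void GC_stop_start_example(void)
{
    /* Caller holds the allocation lock. */
    GC_stop_world();        /* every other registered thread is now  */
                            /* parked in GC_suspend_handler, and its */
                            /* stack_ptr field is valid              */
    GC_push_all_stacks();   /* mark from all thread stacks           */
    GC_start_world();       /* resets the stop fields, then          */
                            /* broadcasts GC_continue_cv to release  */
                            /* the handlers                          */
}
#endif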

# ifdef MMAP_STACKS
--> not really supported yet.
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> stack_size != 0) {
                if (p -> stack <= addr &&
                    addr < p -> stack + p -> stack_size)
                    return 1;
            }
        }
    }
    return 0;
}
# endif

/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t hot, cold;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> flags & FINISHED) continue;
            if (pthread_equal(p -> id, me)) {
                hot = GC_approx_sp();
            } else {
                hot = p -> stack_ptr;
            }
            if (p -> stack_size != 0) {
#               ifdef STACK_GROWS_UP
                  cold = p -> stack;
#               else
                  cold = p -> stack + p -> stack_size;
#               endif
            } else {
                /* The original stack. */
                cold = GC_stackbottom;
            }
#           ifdef STACK_GROWS_UP
              GC_push_all_stack(cold, hot);
#           else
              GC_push_all_stack(hot, cold);
#           endif
        }
    }
}


/* We hold the allocation lock. */
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = HBLKSIZE;
    (void) sigaction(SIG_SUSPEND, 0, &act);
    if (act.sa_handler != SIG_DFL)
        ABORT("Previously installed SIG_SUSPEND handler");
    /* Install handler. */
    act.sa_handler = GC_suspend_handler;
    act.sa_flags = SA_RESTART;
    (void) sigemptyset(&act.sa_mask);
    if (0 != sigaction(SIG_SUSPEND, &act, 0))
        ABORT("Failed to install SIG_SUSPEND handler");
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
    t -> stack_size = 0;
    t -> stack_ptr = (ptr_t)(&t);
    t -> flags = DETACHED;
}

int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
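
/* Client code never calls the wrapper above directly: gc.h's pthread  */
/* redirection macros (hence the #undef lines near the top of this     */
/* file) rewrite pthread_sigmask into GC_pthread_sigmask.  A sketch    */
/* (disabled; GC_sigmask_example is hypothetical) of the client-side   */
/* effect:                                                             */
#if 0
static void GC_sigmask_example(void)
{
    sigset_t all;

    sigfillset(&all);
    /* In a program compiled against gc.h this call is really         */
    /* GC_pthread_sigmask, so SIG_SUSPEND is quietly dropped from the */
    /* requested set and the world can still be stopped.              */
    pthread_sigmask(SIG_BLOCK, &all, NULL);
}
#endif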

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    ptr_t stack;
    size_t stack_size;
    sem_t registered;       /* 1 ==> in our thread table, but  */
                            /* parent hasn't yet noticed.      */
};

void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    UNLOCK();
}

int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = pthread_join(thread, retval);
    /* Some versions of the Irix pthreads library can erroneously */
    /* return EINTR when the call succeeds.                       */
    if (EINTR == result) result = 0;
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}

int GC_pthread_detach(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = pthread_detach(thread);
    if (result == 0) {
        LOCK();
        thread_gc_id -> flags |= DETACHED;
        /* Here the pthread thread id may have been recycled. */
        if (thread_gc_id -> flags & FINISHED) {
            GC_delete_gc_thread(thread, thread_gc_id);
        }
        UNLOCK();
    }
    return result;
}

void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    /* If a GC occurs before the thread is registered, that GC will     */
    /* ignore this thread.  That's fine, since it will block trying to  */
    /* acquire the allocation lock, and won't yet hold interesting      */
    /* pointers.                                                        */
    LOCK();
    /* We register the thread here instead of in the parent, so that    */
    /* we don't need to hold the allocation lock during pthread_create. */
    /* Holding the allocation lock there would make REDIRECT_MALLOC     */
    /* impossible.  It probably still doesn't work, but we're a little  */
    /* closer ...                                                       */
    /* This unfortunately means that we have to be careful the parent   */
    /* doesn't try to do a pthread_join before we're registered.        */
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack = si -> stack;
    me -> stack_size = si -> stack_size;
    me -> stack_ptr = (ptr_t)si -> stack + si -> stack_size - sizeof(word);
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, 0);
    result = (*start)(start_arg);
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* This involves acquiring the lock, ensuring that we can't exit  */
    /* while a collection that thinks we're alive is trying to stop   */
    /* us.                                                            */
    return(result);
}

# define copy_attr(pa_ptr, source) *(pa_ptr) = *(source)

int
GC_pthread_create(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    if (0 != sem_init(&(si -> registered), 0, 0)) {
        ABORT("sem_init failed");
    }
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_is_initialized) GC_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        copy_attr(&new_attr, attr);
        pthread_attr_getstackaddr(&new_attr, &stack);
    }
    pthread_attr_getstacksize(&new_attr, &stacksize);
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (stacksize < GC_min_stack_sz) ABORT("Stack too small");
    if (0 == stack) {
        stack = (void *)GC_stack_alloc(&stacksize);
        if (0 == stack) {
            UNLOCK();
            return(ENOMEM);
        }
        pthread_attr_setstackaddr(&new_attr, stack);
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    si -> stack = stack;
    si -> stack_size = stacksize;
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    if (0 != result && !(my_flags & CLIENT_OWNS_STACK)) {
        /* Thread creation failed; reclaim the GC-allocated stack. */
        GC_stack_free(stack, stacksize);
    }
    UNLOCK();
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    while (0 != sem_wait(&(si -> registered))) {
        if (errno != EINTR) {
            GC_printf1("Sem_wait: errno = %ld\n", (unsigned long) errno);
            ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    pthread_attr_destroy(&new_attr);    /* Probably unnecessary under Irix */
    return(result);
}
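
/* With the gc.h redirection in effect, client code creates threads    */
/* exactly as usual; the wrapper above substitutes a GC-visible stack  */
/* and blocks on si -> registered until the child has entered          */
/* GC_threads.  Usage sketch (disabled; GC_worker and                  */
/* GC_create_example are hypothetical):                                */
#if 0
static void * GC_worker(void * arg)
{
    return GC_malloc(16);       /* safe: this thread is registered */
}

static void GC_create_example(void)
{
    pthread_t t;
    void * result;

    /* Really GC_pthread_create/GC_pthread_join under gc.h. */
    if (0 != pthread_create(&t, NULL, GC_worker, NULL))
        ABORT("pthread_create failed");
    if (0 != pthread_join(t, &result))
        ABORT("pthread_join failed");
}
#endif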

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and  */
                        /* holding the allocation lock for an      */
                        /* extended period.                        */

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.                                                */

#define SLEEP_THRESHOLD 3

unsigned long GC_allocate_lock = 0;
# define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock)
# define GC_LOCK_TAKEN GC_allocate_lock

void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000   /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (GC_TRY_LOCK()) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_LOCK_TAKEN) {
            PAUSE;
            continue;
        }
        if (GC_TRY_LOCK()) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (GC_TRY_LOCK()) {
            return;
        }
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
            /* Don't wait for more than about 60 msecs, even */
            /* under extreme contention.                     */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
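
/* Backoff arithmetic for the loop above: the i-th sleeping pass naps  */
/* for 2**i nanoseconds, and i is capped at 26, so the longest single  */
/* nap is 2**26 ns = 67,108,864 ns, roughly the "about 60 msecs" in    */
/* the comment; the first SLEEP_THRESHOLD passes merely sched_yield(). */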

# else

#ifndef LINT
  int GC_no_Irix_threads;
#endif

# endif /* GC_IRIX_THREADS */
