source: trunk/gcc/boehm-gc/include/private/gc_locks.h

Last change on this file was 1392, checked in by bird, 21 years ago

This commit was generated by cvs2svn to compensate for changes in r1391,
which included commits to RCS files with non-trunk default branches.

  • Property cvs2svn:cvs-rev set to 1.1.1.2
  • Property svn:eol-style set to native
  • Property svn:executable set to *
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H
/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 *
 */
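
/* Illustrative only (an editorial sketch, not part of the original  */
/* interface): the calling pattern the comment above implies.  The   */
/* function name is hypothetical; only the macros are assumed to be  */
/* supplied by this header.                                          */
#if 0
void example_allocate_with_fast_lock(void)
{
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        FASTUNLOCK();   /* called whether or not FASTLOCK succeeded */
        LOCK();         /* fall back to the full allocation lock    */
        /* ... critical section ... */
        UNLOCK();
        return;
    }
    /* ... a few instructions that must not be restarted ... */
    FASTUNLOCK();
}
#endif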
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's */
#    include "th/PCR_Th.h"
#    include "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#    define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
       /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
       PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK() {\
       if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif

/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release.  We need this for correct operation of the */
/* incremental GC.                                                     */
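
/* An editorial sketch, not part of the original header: the spin-lock */
/* protocol these primitives support.  GC_test_and_set returns the     */
/* previous value (nonzero if the lock was already held); GC_clear     */
/* releases it.  The names below are hypothetical.                     */
#  if 0
     static volatile unsigned int example_spin_lock = 0;

     static void example_acquire(void)
     {
       while (GC_test_and_set(&example_spin_lock)) {
         /* spin, or yield to the scheduler after a bounded number of tries */
       }
     }

     static void example_release(void)
     {
       GC_clear(&example_spin_lock);
     }
#  endif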
#  ifdef __GNUC__
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* Note: the "xchg" instruction does not need a "lock" prefix */
         __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(IA64)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         long oldval, n = 1;
         __asm__ __volatile__("xchg4 %0=%1,%2"
                : "=r"(oldval), "=m"(*addr)
                : "r"(n), "1"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
       /* Should this handle post-increment addressing?? */
       inline static void GC_clear(volatile unsigned int *addr) {
         __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
       }
#      define GC_CLEAR_DEFINED
#    endif
#    ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
                : "=r"(oldval), "=m"(*addr)
                : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was */
       /* tested. */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         char oldval; /* this must be no longer than 8 bits */

         /* The return value is semi-phony. */
         /* 'tas' sets bit 7 while the return */
         /* value pretends bit 0 was set */
         __asm__ __volatile__(
                "tas %1@; sne %0; negb %0"
                : "=d" (oldval)
                : "a" (addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(POWERPC)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         int temp = 1;                  // locked value

         __asm__ __volatile__(
                "1:\tlwarx %0,0,%3\n"   // load and reserve
                "\tcmpwi %0, 0\n"       // if load is
                "\tbne 2f\n"            // non-zero, return already set
                "\tstwcx. %2,0,%1\n"    // else store conditional
                "\tbne- 1b\n"           // retry if lost reservation
                "2:\t\n"                // oldval is zero if we set
                : "=&r"(oldval), "=p"(addr)
                : "r"(temp), "1"(addr)
                : "memory");
         return (int)oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
       inline static void GC_clear(volatile unsigned int *addr) {
         __asm__ __volatile__("eieio" : : : "memory");
         *(addr) = 0;
       }
#      define GC_CLEAR_DEFINED
#    endif
#    if defined(ALPHA)
       inline static int GC_test_and_set(volatile unsigned int * addr)
       {
         unsigned long oldvalue;
         unsigned long temp;

         __asm__ __volatile__(
                "1:     ldl_l %0,%1\n"
                "       and %0,%3,%2\n"
                "       bne %2,2f\n"
                "       xor %0,%3,%0\n"
                "       stl_c %0,%1\n"
                "       beq %0,3f\n"
                "       mb\n"
                "2:\n"
                ".section .text2,\"ax\"\n"
                "3:     br 1b\n"
                ".previous"
                : "=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                : "Ir" (1), "m" (*addr)
                : "memory");

         return oldvalue;
       }
#      define GC_TEST_AND_SET_DEFINED
       /* Should probably also define GC_clear, since it needs */
       /* a memory barrier ?? */
#    endif /* ALPHA */
#    ifdef ARM32
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
          * bus because there are no SMP ARM machines.  If/when there are,
          * this code will likely need to be updated. */
         /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
         __asm__ __volatile__("swp %0, %1, [%2]"
                : "=r"(oldval)
                : "r"(1), "r"(addr)
                : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif /* ARM32 */
#    ifdef S390
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int ret;
         __asm__ __volatile__ (
                " l %0,0(%2)\n"
                "0: cs %0,%1,0(%2)\n"
                " jl 0b"
                : "=&d" (ret)
                : "d" (1), "a" (addr)
                : "cc", "memory");
         return ret;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#  endif /* __GNUC__ */
#  if (defined(ALPHA) && !defined(__GNUC__))
#    define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
         || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#      define GC_test_and_set(addr) test_and_set(addr, 1)
#    else
#      define GC_test_and_set(addr) __test_and_set(addr,1)
#      define GC_clear(addr) __lock_release(addr);
#      define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from */
     /* user mode.  Since multithreaded incremental collection doesn't */
     /* work anyway on HP_PA, this shouldn't be a major loss. */

     /* "set" means 0 and "clear" means 1 here. */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr);
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
       /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
       inline static void GC_clear(volatile unsigned int *addr) {
         /* Try to discourage gcc from moving anything past this. */
         __asm__ __volatile__(" " : : : "memory");
         *(addr) = 0;
       }
#    else
       /* The function call in the following should prevent the */
       /* compiler from moving assignments to below the UNLOCK. */
#      define GC_clear(addr) GC_noop1((word)(addr)); \
         *((volatile unsigned int *)(addr)) = 0;
#    endif
#    define GC_CLEAR_DEFINED
#  endif /* !GC_CLEAR_DEFINED */

#  if !defined(GC_TEST_AND_SET_DEFINED)
#    define USE_PTHREAD_LOCKS
#  endif

#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
      && !defined(GC_IRIX_THREADS)
#    define NO_THREAD (pthread_t)(-1)
#    include <pthread.h>
#    if defined(PARALLEL_MARK)
       /* We need compare-and-swap to update mark bits, where it's */
       /* performance critical.  If USE_MARK_BYTES is defined, it is */
       /* no longer needed for this purpose.  However we use it in */
       /* either case to implement atomic fetch-and-add, though that's */
       /* less performance critical, and could perhaps be done with */
       /* a lock. */
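       /* An editorial sketch, not part of the original header, of the */
       /* lock-free mark-bit update that motivates                     */
       /* GC_compare_and_exchange: reread and retry until the          */
       /* compare-and-swap succeeds.  The helper name is hypothetical. */
#      if 0
         static void example_atomic_or(volatile GC_word *addr, GC_word bits)
         {
           GC_word old;
           do {
             old = *addr;
             if ((old & bits) == bits) return;  /* bits already set */
           } while (!GC_compare_and_exchange(addr, old, old | bits));
         }
#      endif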
#      if defined(GENERIC_COMPARE_AND_SWAP)
         /* Probably not useful, except for debugging. */
         /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
         /* minimize its use. */
         extern pthread_mutex_t GC_compare_and_swap_lock;

         /* Note that if GC_word updates are not atomic, a concurrent */
         /* reader should acquire GC_compare_and_swap_lock.  On */
         /* currently supported platforms, such updates are atomic. */
         extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                GC_word old, GC_word new_val);
#      endif /* GENERIC_COMPARE_AND_SWAP */
#      if defined(I386)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
           /* Returns TRUE if the comparison succeeded. */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old,
                                                         GC_word new_val)
           {
             char result;
             __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                    : "=m"(*(addr)), "=r"(result)
                    : "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
             return (GC_bool) result;
           }
#        endif /* !GENERIC_COMPARE_AND_SWAP */
         inline static void GC_memory_write_barrier()
         {
           /* We believe the processor ensures at least processor */
           /* consistent ordering.  Thus a compiler barrier */
           /* should suffice. */
           __asm__ __volatile__("" : : : "memory");
         }
#      endif /* I386 */
#      if defined(IA64)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             unsigned long oldval;
             __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
                    : "=r"(oldval), "=m"(*addr)
                    : "r"(new_val), "1"(*addr), "r"(old) : "memory");
             return (oldval == old);
           }
#        endif /* !GENERIC_COMPARE_AND_SWAP */
#        if 0
           /* Shouldn't be needed; we use volatile stores instead. */
           inline static void GC_memory_write_barrier()
           {
             __asm__ __volatile__("mf" : : : "memory");
           }
#        endif /* 0 */
#      endif /* IA64 */
#      if defined(S390)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             int retval;
             __asm__ __volatile__ (
#          ifndef __s390x__
                    " cs %1,%2,0(%3)\n"
#          else
                    " csg %1,%2,0(%3)\n"
#          endif
                    " ipm %0\n"
                    " srl %0,28\n"
                    : "=&d" (retval), "+d" (old)
                    : "d" (new_val), "a" (addr)
                    : "cc", "memory");
             return retval == 0;
           }
#        endif
#      endif
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns the original value of *addr. */
         inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                             GC_word how_much)
         {
           GC_word old;
           do {
             old = *addr;
           } while (!GC_compare_and_exchange(addr, old, old+how_much));
           return old;
         }
#      else /* GENERIC_COMPARE_AND_SWAP */
         /* So long as a GC_word can be atomically updated, it should */
         /* be OK to read *addr without a lock. */
         extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#      endif /* GENERIC_COMPARE_AND_SWAP */
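       /* A usage sketch, not part of the original header: since       */
       /* GC_atomic_add returns the value *addr held before the        */
       /* addition, distinct indices can be handed out without a lock. */
       /* The names below are hypothetical.                            */
#      if 0
         static volatile GC_word example_next_slot = 0;

         static GC_word example_claim_slot(void)
         {
           return GC_atomic_add(&example_next_slot, (GC_word)1);
         }
#      endif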

#    endif /* PARALLEL_MARK */

#    if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
       /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
       /* be held for long periods, if it is held at all.  Thus spinning */
       /* and sleeping for fixed periods are likely to result in */
       /* significant wasted time.  We thus rely mostly on queued locks. */
#      define USE_SPIN_LOCK
       extern volatile unsigned int GC_allocate_lock;
       extern void GC_lock(void);
         /* Allocation lock holder.  Only set if acquired by client through */
         /* GC_call_with_alloc_lock. */
#      ifdef GC_ASSERTIONS
#        define LOCK() \
           { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
             SET_LOCK_HOLDER(); }
#        define UNLOCK() \
           { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
             GC_clear(&GC_allocate_lock); }
#      else
#        define LOCK() \
           { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#        define UNLOCK() \
           GC_clear(&GC_allocate_lock)
#      endif /* !GC_ASSERTIONS */
#      if 0
         /* Another alternative for OSF1 might be: */
#        include <sys/mman.h>
         extern msemaphore GC_allocate_semaphore;
#        define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                             != 0) GC_lock(); else GC_allocate_lock = 1; }
         /* The following is INCORRECT, since the memory model is too weak. */
         /* Is this true?  Presumably msem_unlock has the right semantics? */
         /*              - HB */
#        define UNLOCK() { GC_allocate_lock = 0; \
                           msem_unlock(&GC_allocate_semaphore, 0); }
#      endif /* 0 */
#    else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#      ifndef USE_PTHREAD_LOCKS
#        define USE_PTHREAD_LOCKS
#      endif
#    endif /* THREAD_LOCAL_ALLOC */
#    ifdef USE_PTHREAD_LOCKS
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      ifdef GC_ASSERTIONS
#        define LOCK() \
           { GC_lock(); \
             SET_LOCK_HOLDER(); }
#        define UNLOCK() \
           { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
             pthread_mutex_unlock(&GC_allocate_ml); }
#      else /* !GC_ASSERTIONS */
#        define LOCK() \
           { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#      endif /* !GC_ASSERTIONS */
#    endif /* USE_PTHREAD_LOCKS */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
     extern VOLATILE GC_bool GC_collecting;
#    define ENTER_GC() GC_collecting = 1;
#    define EXIT_GC() GC_collecting = 0;
     extern void GC_lock(void);
     extern pthread_t GC_lock_holder;
#    ifdef GC_ASSERTIONS
       extern pthread_t GC_mark_lock_holder;
#    endif
#  endif /* GC_PTHREADS with linux_threads.c implementation */
#  if defined(GC_IRIX_THREADS)
#    include <pthread.h>
     /* This probably should never be included, but I can't test */
     /* on Irix anymore. */
#    include <mutex.h>

     extern unsigned long GC_allocate_lock;
       /* This is not a mutex because mutexes that obey the (optional) */
       /* POSIX scheduling rules are subject to convoys in high contention */
       /* applications.  This is basically a spin lock. */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
       /* Allocation lock holder.  Only set if acquired by client through */
       /* GC_call_with_alloc_lock. */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#    define UNLOCK() GC_clear(&GC_allocate_lock);
     extern VOLATILE GC_bool GC_collecting;
#    define ENTER_GC() \
       { \
         GC_collecting = 1; \
       }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* GC_IRIX_THREADS */
#  ifdef GC_WIN32_THREADS
#    include <windows.h>
     GC_API CRITICAL_SECTION GC_allocate_ml;
#    define LOCK() EnterCriticalSection(&GC_allocate_ml);
#    define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#  endif
#  ifndef SET_LOCK_HOLDER
#    define SET_LOCK_HOLDER()
#    define UNSET_LOCK_HOLDER()
#    define I_HOLD_LOCK() FALSE
       /* Used on platforms where locks can be reacquired, */
       /* so it doesn't matter if we lie. */
#  endif
# else /* !THREADS */
#   define LOCK()
#   define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
      /* Used on platforms where locks can be reacquired, */
      /* so it doesn't matter if we lie. */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

#endif /* GC_LOCKS_H */