/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#if defined(GC_LINUX_THREADS)

#include "private/gc_priv.h" /* For GC_compare_and_exchange, GC_memory_barrier */
#include "private/specific.h"

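/* Overview (summarizing the code below): each key (tsd) holds a small */
/* hash table of thread-specific entries (tse), indexed by pthread id, */
/* plus a cache indexed by "quick thread id" (qtid).  Readers never    */
/* lock: lookups probe the cache first and fall back to                */
/* slow_getspecific() on a miss; writers serialize on the per-key      */
/* mutex.  The tsd, tse, HASH, and qtid machinery is defined in        */
/* private/specific.h.                                                 */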
static tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
                /* A thread-specific data entry which will never     */
                /* appear valid to a reader.  Used to fill in empty  */
                /* cache entries to avoid a check for 0.             */

int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *)) {
    int i;
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof (tsd));

    /* A quick alignment check, since we need atomic stores */
    GC_ASSERT((unsigned long)(&invalid_tse.next) % sizeof(tse *) == 0);
    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), NULL);
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
        result -> cache[i] = &invalid_tse;
    }
#   ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i] == 0);
      }
#   endif
    *key_ptr = result;
    return 0;
}

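/* Illustrative usage (hypothetical client code; the error handling  */
/* and the value datum are placeholders, not part of this file):      */
/*                                                                    */
/*     tsd *my_key;                                                   */
/*     if (PREFIXED(key_create)(&my_key, 0) != 0) return ENOMEM;      */
/*     PREFIXED(setspecific)(my_key, datum);   -- once per thread     */
/*     datum = PREFIXED(getspecific)(my_key);  -- lock-free read      */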
int PREFIXED(setspecific) (tsd * key, void * value) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));

    GC_ASSERT(self != INVALID_THREADID);
    if (0 == entry) return ENOMEM;
    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here. */
    entry -> next = key -> hash[hash_val];
    entry -> thread = self;
    entry -> value = value;
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be  */
    /* atomic with respect to concurrent readers.                    */
    *(volatile tse **)(key -> hash + hash_val) = entry;
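    /* The preceding field stores must become visible to readers no  */
    /* later than this pointer store; on weakly ordered hardware a   */
    /* release barrier (e.g. the GC_memory_barrier mentioned in the  */
    /* gc_priv.h include comment above) would presumably be needed   */
    /* before publishing the entry.                                  */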
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}

/* Remove thread-specific data for this thread.  Should be called on */
/* thread exit.                                                      */
void PREFIXED(remove_specific) (tsd * key) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry;
    tse **link = key -> hash + hash_val;

    pthread_mutex_lock(&(key -> lock));
    entry = *link;
    while (entry != NULL && entry -> thread != self) {
        link = &(entry -> next);
        entry = *link;
    }
    /* Invalidate qtid field, since qtids may be reused, and a later  */
    /* cache lookup could otherwise find this entry.  Check for NULL  */
    /* first: the entry is absent if this thread never called         */
    /* setspecific() on this key.                                     */
    if (entry != NULL) {
        entry -> qtid = INVALID_QTID;
        *link = entry -> next;
        /* Atomic! concurrent accesses still work.     */
        /* They must, since readers don't lock.        */
        /* We shouldn't need a volatile access here,   */
        /* since both this and the preceding write     */
        /* should become visible no later than         */
        /* the pthread_mutex_unlock() call.            */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear   */
    /* any cache entries pointing to it.  That probably requires        */
    /* additional synchronization, since we can't prevent a concurrent  */
    /* cache lookup, which might still be examining deallocated memory. */
    /* This can only happen if the concurrent access is from another    */
    /* thread, and hence has missed the cache, but still...             */

    /* With GC, we're done, since the pointers from the cache will  */
    /* be overwritten, all local pointers to the entries will be    */
    /* dropped, and the entry will then be reclaimed.               */
    pthread_mutex_unlock(&(key -> lock));
}

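/* The corresponding lock-free fast path lives in private/specific.h. */
/* Roughly (a paraphrase, not the exact header text), getspecific     */
/* hashes the quick thread id into key -> cache and does:             */
/*                                                                     */
/*     tse *entry = *entry_ptr;       -- read the cache slot once      */
/*     if (entry -> qtid == qtid) return entry -> value;   -- hit      */
/*     return slow_getspecific(key, qtid, entry_ptr);      -- miss     */
/*                                                                     */
/* invalid_tse above guarantees the qtid comparison is safe even for   */
/* never-filled cache slots.                                           */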
/* Note that even the slow path doesn't lock. */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_ptr) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val];

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && entry -> thread != self) {
        entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set cache_entry. */
    entry -> qtid = qtid;
        /* It's safe to do this asynchronously.  Either value  */
        /* is safe, though may produce spurious misses.        */
        /* We're replacing one qtid with another one for the   */
        /* same thread.                                        */
    *cache_ptr = entry;
        /* Again this is safe since pointer assignments are    */
        /* presumed atomic, and either pointer is valid.       */
    return entry -> value;
}

#endif /* GC_LINUX_THREADS */