source: trunk/gcc/libjava/java/lang/natObject.cc

// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#include <config.h>
#include <platform.h>

#include <string.h>

#pragma implementation "Object.h"

#include <gcj/cni.h>
#include <jvm.h>
#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>

#ifdef LOCK_DEBUG
# include <stdio.h>
#endif


// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};


jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}

jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}

jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
        {
          r = _Jv_NewPrimArray (comp, array->length);
          eltsize = comp->size();
        }
      else
        {
          r = _Jv_NewObjectArray (array->length, comp, NULL);
          eltsize = sizeof (jobject);
        }
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
              + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
        throw new CloneNotSupportedException;

      size = klass->size();
      r = JvAllocObject (klass, size);
    }

  memcpy ((void *) r, (void *) this, size);
  return r;
}
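
// To make the size computation above concrete: if the element data of,
// say, a jdouble array must start on an 8 byte boundary, padding may
// follow the __JArray header, so the copy size has to be measured from
// the array base to the first element, plus length * eltsize.  A
// minimal sketch of that arithmetic (illustrative only, not used by
// the runtime; the parameter names are made up):
#if 0
static jint
example_array_copy_size (char *array_base, char *first_element,
                         jint length, jint eltsize)
{
  // Header bytes up to the (possibly padded) element data, plus the
  // element payload itself.
  return (first_element - array_base) + length * eltsize;
}
#endif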

void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}


//
// Synchronization code.
//

#ifndef JV_HASH_SYNCHRONIZATION
// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;

// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
# define INIT_NEEDED(Obj) (! (Obj)->sync_info \
                           || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
# define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif

#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif

// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}

void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
    case _JV_NOT_OWNER:
      throw new IllegalMonitorStateException (JvNewStringLatin1
                                              ("current thread not owner"));
    case _JV_INTERRUPTED:
      if (Thread::interrupted ())
        throw new InterruptedException;
    }
}

//
// Some runtime code.
//

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifndef HANDLE_SEGV
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;
#endif
  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}

void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (obj);
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}

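// For reference, a sketch of how these entry points are used (an
// assumption about the compiler's lowering, not code from this file):
// a Java `synchronized (obj) { body; }' block behaves roughly like the
// following, with the exit also running on the exception path.
#if 0
void
example_synchronized_block (jobject obj)
{
  _Jv_MonitorEnter (obj);     // May throw NullPointerException.
  try
    {
      // ... body of the synchronized block ...
    }
  catch (...)
    {
      _Jv_MonitorExit (obj);  // Release on the exception path.
      throw;
    }
  _Jv_MonitorExit (obj);      // Release on normal completion.
}
#endif
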
#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#else
# include "gc.h"
#endif

// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.

// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm (Hans_Boehm@hp.com, boehm@acm.org)

#include <assert.h>
#include <limits.h>
#include <unistd.h>  // for usleep, sysconf.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>

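// The code below leans on a few primitives from sysdep/locks.h:
// compare_and_swap, release_set, and compare_and_swap_release.  As a
// rough sketch of their assumed semantics (the real versions are
// per-architecture assembly; this illustration uses GCC __sync
// builtins, which the actual implementations predate):
#if 0
// Atomically replace *addr with new_val if it still holds old,
// returning true on success; acts as a full memory barrier.
inline static bool
compare_and_swap (volatile obj_addr_t *addr,
                  obj_addr_t old, obj_addr_t new_val)
{
  return __sync_bool_compare_and_swap (addr, old, new_val);
}

// Store new_val into *addr with release semantics: writes issued
// before the store become visible no later than the store itself.
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __sync_synchronize ();
  *addr = new_val;
}

// compare_and_swap_release is the same exchange as compare_and_swap,
// but only needs release ordering rather than a full barrier.
#endif
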
315// Try to determine whether we are on a multiprocessor, i.e. whether
316// spinning may be profitable.
317// This should really use a suitable autoconf macro.
318// False is the conservative answer, though the right one is much better.
319static bool
320is_mp()
321{
322#ifdef _SC_NPROCESSORS_ONLN
323 long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
324 return (nprocs > 1);
325#else
326 return false;
327#endif
328}
329
330// A call to keep_live(p) forces p to be accessible to the GC
331// at this point.
332inline static void
333keep_live(obj_addr_t p)
334{
335 __asm__ __volatile__("" : : "rm"(p) : "memory");
336}
337
338// Each hash table entry holds a single preallocated "lightweight" lock.
339// In addition, it holds a chain of "heavyweight" locks. Lightweight
340// locks do not support Object.wait(), and are converted to heavyweight
341// status in response to contention. Unlike the SGI scheme, both
342// ligtweight and heavyweight locks in one hash entry can be simultaneously
343// in use. (The SGI scheme requires that we be able to acquire a heavyweight
344// lock on behalf of another thread, and can thus convert a lock we don't
345// hold to heavyweight status. Here we don't insist on that, and thus
346// let the original holder of the lighweight lock keep it.)
347
348struct heavy_lock {
349 void * reserved_for_gc;
350 struct heavy_lock *next; // Hash chain link.
351 // Traced by GC.
352 void * old_client_data; // The only other field traced by GC.
353 GC_finalization_proc old_finalization_proc;
354 obj_addr_t address; // Object to which this lock corresponds.
355 // Should not be traced by GC.
356 // Cleared as heavy_lock is destroyed.
357 // Together with the rest of the hevy lock
358 // chain, this is protected by the lock
359 // bit in the hash table entry to which
360 // the chain is attached.
361 _Jv_SyncInfo si;
362 // The remaining fields save prior finalization info for
363 // the object, which we needed to replace in order to arrange
364 // for cleanup of the lock structure.
365};
366
367#ifdef LOCK_DEBUG
368void
369print_hl_list(heavy_lock *hl)
370{
371 heavy_lock *p = hl;
372 for (; 0 != p; p = p->next)
373 fprintf (stderr, "(hl = %p, addr = %p)", p, (void *)(p -> address));
374}
375#endif /* LOCK_DEBUG */
376
377#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
378// If we have to run a destructor for a sync_info member, then this
379// function could be registered as a finalizer for the sync_info.
380// In fact, we now only invoke it explicitly.
381static inline void
382heavy_lock_finalization_proc (heavy_lock *hl)
383{
384#if defined (_Jv_HaveCondDestroy)
385 _Jv_CondDestroy (&hl->si.condition);
386#endif
387#if defined (_Jv_HaveMutexDestroy)
388 _Jv_MutexDestroy (&hl->si.mutex);
389#endif
390 hl->si.init = false;
391}
392#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */
393
394// We convert the lock back to lightweight status when
395// we exit, so that a single contention episode doesn't doom the lock
396// forever. But we also need to make sure that lock structures for dead
397// objects are eventually reclaimed. We do that in a an additional
398// finalizer on the underlying object.
399// Note that if the corresponding object is dead, it is safe to drop
400// the heavy_lock structure from its list. It is not necessarily
401// safe to deallocate it, since the unlock code could still be running.
402
403struct hash_entry {
404 volatile obj_addr_t address; // Address of object for which lightweight
405 // k is held.
406 // We assume the 3 low order bits are zero.
407 // With the Boehm collector and bitmap
408 // allocation, objects of size 4 bytes are
409 // broken anyway. Thus this is primarily
410 // a constraint on statically allocated
411 // objects used for synchronization.
412 // This allows us to use the low order
413 // bits as follows:
414# define LOCKED 1 // This hash entry is locked, and its
415 // state may be invalid.
416 // The lock protects both the hash_entry
417 // itself (except for the light_count
418 // and light_thr_id fields, which
419 // are protected by the lightweight
420 // lock itself), and any heavy_monitor
421 // structures attached to it.
422# define HEAVY 2 // There may be heavyweight locks
423 // associated with this cache entry.
424 // The lightweight entry is still valid,
425 // if the leading bits of the address
426 // field are nonzero.
427 // Set if heavy_count is > 0 .
428 // Stored redundantly so a single
429 // compare-and-swap works in the easy case.
430# define REQUEST_CONVERSION 4 // The lightweight lock is held. But
431 // one or more other threads have tried
432 // to acquire the lock, and hence request
433 // conversion to heavyweight status.
434# define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
435 volatile _Jv_ThreadId_t light_thr_id;
436 // Thr_id of holder of lightweight lock.
437 // Only updated by lightweight lock holder.
438 // Must be recognizably invalid if the
439 // lightweight lock is not held.
440# define INVALID_THREAD_ID 0 // Works for Linux?
441 // If zero doesn't work, we have to
442 // initialize lock table.
443 volatile unsigned short light_count;
444 // Number of times the lightweight lock
445 // is held minus one. Zero if lightweight
446 // lock is not held.
447 unsigned short heavy_count; // Total number of times heavyweight locks
448 // associated with this hash entry are held
449 // or waiting to be acquired.
450 // Threads in wait() are included eventhough
451 // they have temporarily released the lock.
452 struct heavy_lock * heavy_locks;
453 // Chain of heavy locks. Protected
454 // by lockbit for he. Locks may
455 // remain allocated here even if HEAVY
456 // is not set and heavy_count is 0.
457 // If a lightweight and heavyweight lock
458 // correspond to the same address, the
459 // lightweight lock is the right one.
460};
461
462#ifndef JV_SYNC_TABLE_SZ
463# define JV_SYNC_TABLE_SZ 2048
464#endif
465
466hash_entry light_locks[JV_SYNC_TABLE_SZ];
467
468#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) % JV_SYNC_TABLE_SZ)
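
// A worked example of the hash (illustrative numbers): for
// p = 0x0804b008, (long)p >> 10 is 0x2012c, the XOR yields 0x0806b124,
// and % JV_SYNC_TABLE_SZ (2048, a power of two) keeps the low 11 bits,
// selecting entry 0x124 = 292.  XORing in the shifted value mixes
// higher address bits into the index, so objects whose addresses
// differ only above the low bits don't all collide; for nonnegative
// values the % reduces to a simple mask.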

// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security hole.

#ifdef LOCK_DEBUG
  void print_he(hash_entry *he)
  {
    fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
            "\tlight_thr_id = 0x%lx, light_count = %d, "
            "heavy_count = %d\n\theavy_locks:", he,
            he - light_locks, he -> address, he -> light_thr_id,
            he -> light_count, he -> heavy_count);
    print_hl_list(he -> heavy_locks);
    fprintf(stderr, "\n");
  }
#endif /* LOCK_DEBUG */

static bool mp = false; // Known multiprocessor.

// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001; // Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
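
// For example, with the constants above: on a uniprocessor
// (spin_limit = 0, yield_limit = 4), calls with n < 4 just yield, and
// n = 4, 5, 6, ... sleep roughly 2ms, 4ms, 8ms, ..., doubling up to
// the 200ms cap.  On a multiprocessor the first 10 rounds busy-wait
// (30 << n empty iterations) before the same yield/sleep ladder.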

// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}

// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}

// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}

// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

// This only addresses misalignment of statics, not heap objects.  It
// works only because registering statics for finalization is a noop,
// no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED ))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  assert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
        GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc, cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;        // Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Make sure lock is not held and then destroy condvar and mutex.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
}

// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  assert(he -> heavy_count == 0);
  assert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately,
  // that creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      assert(0 != obj);         // If this was previously finalized, it should
                                // no longer appear on our list.
      hl -> address = 0;        // Finalization proc might still see it after
                                // we finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
        // Remove our finalization procedure.
        // Reregister the clients if applicable.
        GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                       old_client_data, 0, 0);
        // Note that our old finalization procedure may have been
        // previously determined to be runnable, and may still run.
        // FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Wait for a possible lock holder to finish unlocking it.
        // This is only an issue if we have to explicitly destroy the mutex
        // or possibly if we have to destroy a condition variable that is
        // still being notified.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}

// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}

// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl->si.init = true;  // needed?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
                          (void *)addr, heavy_lock_obj_finalization_proc,
                          hl, &hl->old_finalization_proc,
                          &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}

// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable branch on a register value is probably cheaper
  // than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  assert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
                                        0, addr), true))
    {
      assert(he -> light_thr_id == INVALID_THREAD_ID);
      assert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right
          // address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != (address & ~LOCKED)) goto retry;
              spin(i);
            }
          address &= ~LOCKED;
          if (!compare_and_swap(&(he -> address), address, address | LOCKED ))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          assert(he -> address == (address | LOCKED));
          release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
                                // release lock on he
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
            }
          keep_live(addr);
                // Guarantee that hl doesn't get unlinked by finalizer.
                // This is only an issue if the client fails to release
                // the lock, which is unlikely.
          assert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, (address | LOCKED )))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      assert(!(address & REQUEST_CONVERSION));
        // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy? find_heavy(addr, he) : 0);
      if (0 == hl)
        {
          // It is OK to use the lightweight lock, since no heavyweight
          // lock for this address exists or is in use.  Future threads
          // trying to acquire the lock will see the lightweight
          // one first and use that.
          he -> light_thr_id = self;  // OK, since nobody else can hold
                                      // light lock or do this at the same time.
          assert(he -> light_count == 0);
          assert(was_heavy == (he -> address & HEAVY));
          release_set(&(he -> address), (addr | was_heavy));
        }
      else
        {
          // Must use heavy lock.
          ++ (he -> heavy_count);
          assert(0 == (address & ~HEAVY));
          release_set(&(he -> address), HEAVY);
          _Jv_MutexLock(&(hl->si.mutex));
          keep_live(addr);
        }
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
    {
      assert(0 == he -> heavy_count || (address & HEAVY));
      heavy_lock *hl = get_heavy(addr, he);
      ++ (he -> heavy_count);
      release_set(&(he -> address), address | HEAVY);
      _Jv_MutexLock(&(hl->si.mutex));
      keep_live(addr);
    }
}


void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0.
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
        {
          if (count != 0)
            {
              // We held the lightweight lock all along.  Thus the values
              // we saw for light_thr_id and light_count must have been valid.
              he -> light_count = count - 1;
              return;
            }
          else
            {
              // We hold the lightweight lock exactly once.
              he -> light_thr_id = INVALID_THREAD_ID;
              if (compare_and_swap_release(&(he -> address), address,
                                           address & HEAVY))
                return;
              else
                {
                  he -> light_thr_id = light_thr_id; // Undo prior damage.
                  goto retry;
                }
            }
        }
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
        throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Lightweight lock held by other thread\n\t"
                    "light_thr_id = 0x%lx, self = 0x%lx, "
                    "address = 0x%lx, pid = %d\n",
                    light_thr_id, self, address, getpid());
            print_he(he);
            for(;;) {}
#         endif
          // Someone holds the lightweight lock for this object, and
          // it can't be us.
          throw new java::lang::IllegalMonitorStateException(
            JvNewStringLatin1("current thread not owner"));
        }
      else
        count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.
  //
  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
        {
          // Defer conversion until we exit completely.
          he -> light_count = count - 1;
          return;
        }
      assert(he -> light_thr_id == self);
      assert(address & REQUEST_CONVERSION);
      // Conversion requested.  Convert now.
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
        goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      assert (0 != hl);                 // Requestor created it.
      he -> light_count = 0;
      assert(he -> heavy_count > 0);    // Was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
      // Release the he lock after acquiring the mutex.
      // Otherwise we can accidentally
      // notify a thread that has already seen a heavyweight
      // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
                                // lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // Lightweight lock not for this object.
  assert(!(address & LOCKED));
  assert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (NULL == hl)
    {
#     ifdef LOCK_DEBUG
        fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
                " pid = %d\n", addr, getpid());
        print_he(he);
        for(;;) {}
#     endif
      throw new java::lang::IllegalMonitorStateException(
        JvNewStringLatin1("current thread not owner"));
    }
  assert(address & HEAVY);
  count = he -> heavy_count;
  assert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;    // Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter % test_freq == 0)
        {
          // Randomize the interval length a bit.
          counter = my_counter + (my_counter >> 4) % (test_freq/2);
          // Unlock mutex first, to avoid self-deadlock, or worse.
          _Jv_MutexUnlock(&(hl->si.mutex));
          maybe_remove_all_heavy(he, address & ~HEAVY);
                                // Release lock bit, preserving
                                // REQUEST_CONVERSION and object address.
        }
      else
        {
          release_set(&(he -> address), address & ~HEAVY);
          _Jv_MutexUnlock(&(hl->si.mutex));
                        // Unlock after releasing the lock bit, so that
                        // we don't switch to another thread prematurely.
        }
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  keep_live(addr);
}
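
// For example, with test_freq = 16 above, roughly one in sixteen
// "final" unlocks (heavy_count dropping to 0) pays for a walk of this
// entry's heavy-lock chain, and maybe_remove_all_heavy only reclaims
// the chain once it holds five or more locks, so the common unlock
// path stays short.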

// The rest of these are moderately thin veneers on _Jv_Cond ops.
// The current version of notify might be able to make the pthread
// call AFTER releasing the lock, thus saving some context switches?

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address does not have the lock bit set.  We hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
      if (he -> light_thr_id != self)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Found wrong lightweight lock owner in wait "
                    "address = 0x%lx pid = %d\n", address, getpid());
            print_he(he);
            for(;;) {}
#         endif
          release_set(&(he -> address), address);
          throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
        }
      count = he -> light_count;
      hl = get_heavy(addr, he);
      he -> light_count = 0;
      he -> heavy_count += count + 1;
      for (unsigned i = 0; i <= count; ++i)
        _Jv_MutexLock(&(hl->si.mutex));
      // Again release the he lock after acquiring the mutex.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);  // lightweight lock now unused.
      if (address & REQUEST_CONVERSION)
        _Jv_CondNotify (&(hl->si.condition), &(hl->si.mutex));
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Couldn't find heavy lock in wait "
                    "addr = 0x%lx pid = %d\n", addr, getpid());
            print_he(he);
            for(;;) {}
#         endif
          throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
        }
      assert(address & HEAVY);
    }
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
    case _JV_NOT_OWNER:
      throw new IllegalMonitorStateException (JvNewStringLatin1
                      ("current thread not owner"));
    case _JV_INTERRUPTED:
      if (Thread::interrupted ())
        throw new InterruptedException;
    }
}

void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold the lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);  // unlock
      return;
    }
  hl = find_heavy(addr, he);
  // hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // terribly well.
  release_set(&(he -> address), address);  // unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold the lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);  // unlock
      return;
    }
  release_set(&(he -> address), address);  // unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION.
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
                                         ("internal error: sync_init"));
}

// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */
