// locks.h - Thread synchronization primitives. X86 implementation.

/* Copyright (C) 2002  Free Software Foundation

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

typedef size_t obj_addr_t;      /* Integer type big enough for object   */
                                /* address.                             */

// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
                 obj_addr_t old,
                 obj_addr_t new_val)
{
  char result;
  // LOCK CMPXCHG compares EAX (constraint "a" = old) against *addr; if
  // they match, new_val is stored into *addr.  SETZ then captures the
  // zero flag (success) into a byte register ("=q").  The "memory"
  // clobber stops gcc from caching values across the operation, which
  // together with the implied full barrier of a locked instruction
  // provides the acquire semantics promised above.
  // NOTE(review): cmpxchgl is the 32-bit form, matching obj_addr_t
  // (size_t) on the 32-bit x86 targets this header is written for.
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                       : "+m"(*(addr)), "=q"(result)
                       : "r" (new_val), "a"(old)
                       : "memory");
  return (bool) result;
}

// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this
// assignment.
// On X86, the hardware shouldn't reorder reads and writes,
// so we just have to convince gcc not to do it either.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  // Compiler-only barrier: an empty asm with a "memory" clobber keeps
  // gcc from sinking earlier loads/stores below this point.  No fence
  // instruction is emitted; the store itself needs none on x86.
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}

46 | // Compare_and_swap with release semantics instead of acquire semantics.
|
---|
47 | // On many architecture, the operation makes both guarantees, so the
|
---|
48 | // implementation can be the same.
|
---|
49 | inline static bool
|
---|
50 | compare_and_swap_release(volatile obj_addr_t *addr,
|
---|
51 | obj_addr_t old,
|
---|
52 | obj_addr_t new_val)
|
---|
53 | {
|
---|
54 | return compare_and_swap(addr, old, new_val);
|
---|
55 | }
|
---|
56 |
|
---|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On X86, the hardware ensures that reads are properly ordered.
inline static void
read_barrier()
{
  // Intentionally empty: x86 does not reorder loads with other loads,
  // so neither a fence instruction nor a compiler barrier is needed.
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  // x86 does not reorder stores with other stores, so no fence
  // instruction is required here -- only a compiler-level barrier
  // (empty asm with a "memory" clobber) so gcc does not move stores
  // across this point either.
  __asm__ volatile (" " : : : "memory");
}
#endif