/* locks.h - Thread synchronization primitives. x86-64 implementation.

   Copyright (C) 2002  Free Software Foundation

   Contributed by Bo Thorsen <bo@suse.de>.

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

#include <stddef.h>	/* for size_t, used by obj_addr_t below */

typedef size_t obj_addr_t;	/* Integer type big enough for an object
				   address.  */

// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
{
  char result;
#ifdef __x86_64__
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		       : "+m"(*(addr)), "=q"(result)
		       : "r" (new_val), "a"(old)
		       : "memory");
#else
  /* Compiled in 32-bit mode: object addresses are 32 bits wide, so use
     the 32-bit cmpxchg.  */
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		       : "+m"(*(addr)), "=q"(result)
		       : "r" (new_val), "a"(old)
		       : "memory");
#endif
  return (bool) result;
}
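
// Illustrative usage sketch, not part of the original header: the
// classic compare_and_swap retry loop, here adding a value to a counter
// held in an obj_addr_t-sized word.  The name `atomic_add_example` is
// made up for this sketch.
inline static void
atomic_add_example(volatile obj_addr_t *counter, obj_addr_t delta)
{
  obj_addr_t seen;
  do
    seen = *counter;		// snapshot the current value
  while (!compare_and_swap(counter, seen, seen + delta));
  // A failed swap means another thread changed the word first;
  // the loop rereads and retries.
}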
// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this
// assignment.
// On x86-64, the hardware does not move a store ahead of earlier
// loads or stores, so we just have to convince gcc not to do it either.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}
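
// Illustrative usage sketch, not part of the original header: a minimal
// spinlock built from compare_and_swap and release_set.  The names
// `spin_acquire`/`spin_release` are invented here; 0 means the lock is
// free, 1 means it is held.
inline static void
spin_acquire(volatile obj_addr_t *lock)
{
  // Acquire semantics keep the critical section's memory operations
  // from being hoisted above the lock acquisition.
  while (!compare_and_swap(lock, 0, 1))
    ;				// spin until the word flips 0 -> 1
}

inline static void
spin_release(volatile obj_addr_t *lock)
{
  // Release semantics make the critical section's stores visible
  // before the lock word is cleared.
  release_set(lock, 0);
}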
// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
			 obj_addr_t old,
			 obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}
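
// Illustrative usage sketch, not part of the original header: a fast
// unlock path that only succeeds while the lock word still holds the
// uncontended value 1.  The name and the 0/1 encoding are invented; a
// failed swap would send the caller to a hypothetical slow path that
// wakes waiters.
inline static bool
try_fast_unlock_example(volatile obj_addr_t *lock)
{
  // Release semantics: every store made while holding the lock is
  // visible before the word returns to 0.
  return compare_and_swap_release(lock, 1, 0);
}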
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On x86-64, the hardware ensures that reads are properly ordered.
inline static void
read_barrier()
{
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  /* x86-64 does not reorder writes.  We just need to ensure that gcc
     also doesn't.  */
  __asm__ __volatile__(" " : : : "memory");
}
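
// Illustrative usage sketch, not part of the original header: a one-word
// message handoff pairing write_barrier on the producer side with
// read_barrier on the consumer side.  `publish_example` and
// `consume_example` are invented names.
inline static void
publish_example(volatile obj_addr_t *data, volatile obj_addr_t *ready,
		obj_addr_t value)
{
  *data = value;
  write_barrier();		// payload must be visible before the flag
  *ready = 1;
}

inline static obj_addr_t
consume_example(volatile obj_addr_t *data, volatile obj_addr_t *ready)
{
  while (*ready == 0)
    ;				// wait for the producer's flag
  read_barrier();		// don't let the payload read move earlier
  return *data;
}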
#endif