// locks.h - Thread synchronization primitives. PowerPC implementation.

/* Copyright (C) 2002  Free Software Foundation

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

#ifdef __powerpc64__
#define _LARX "ldarx "
#define _STCX "stdcx. "
#else
#define _LARX "lwarx "
#ifdef __PPC405__
#define _STCX "sync; stwcx. "
#else
#define _STCX "stwcx. "
#endif
#endif
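
/* _LARX/_STCX pair PowerPC's load-and-reserve with store-conditional:
   the store succeeds only if no other processor has written the
   reserved word (doubleword on powerpc64) in between, so the sequences
   below retry until they complete atomically.  The PPC405 variant
   prefixes "stwcx." with "sync", apparently to work around an erratum
   in that core's store-conditional handling.  */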

typedef size_t obj_addr_t;	/* Integer type big enough for object
				   address.  */

inline static bool
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
		  obj_addr_t new_val)
{
  obj_addr_t ret;	/* Must be pointer-sized: the xor below yields a
			   full-width difference on powerpc64.  */

  __asm__ __volatile__ (
	"0:    " _LARX "%0,0,%1 ;"
	"      xor. %0,%3,%0;"
	"      bne 1f;"
	"      " _STCX "%2,0,%1;"
	"      bne- 0b;"
	"1:    "
	: "=&r" (ret)
	: "r" (addr), "r" (new_val), "r" (old)
	: "cr0", "memory");

  /* This version of compare_and_swap is to be used when acquiring
     a lock, so we don't need to worry about whether other memory
     operations have completed, but we do need to be sure that any loads
     after this point really occur after we have acquired the lock.  */
  __asm__ __volatile__ ("isync" : : : "memory");
  return ret == 0;
}
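
/* A minimal usage sketch (not part of the original header): acquiring a
   spin lock with the acquire-semantics CAS above.  The lock word and
   function names are hypothetical, and the block is compiled out.  */
#if 0
static volatile obj_addr_t example_lock;  /* 0 = free, 1 = held */

static inline void
example_lock_acquire (void)
{
  /* Retry until the lock word atomically changes from 0 to 1.  The
     trailing isync in compare_and_swap keeps later loads from seeing
     pre-acquisition data.  */
  while (! compare_and_swap (&example_lock, 0, 1))
    ;
}
#endif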

// Set *addr to new_val with release semantics: all prior stores are
// made visible to other processors first.
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__ ("sync" : : : "memory");
  *addr = new_val;
}
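
/* Companion sketch to the acquire example above (hypothetical names,
   compiled out): releasing the lock.  The "sync" inside release_set
   publishes every store made in the critical section before the lock
   word is cleared.  */
#if 0
static inline void
example_lock_release (void)
{
  release_set (&example_lock, 0);
}
#endif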

// Compare-and-swap with release semantics: the "sync" ahead of the
// atomic sequence ensures prior memory operations complete first.
inline static bool
compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
			  obj_addr_t new_val)
{
  obj_addr_t ret;

  __asm__ __volatile__ ("sync" : : : "memory");

  __asm__ __volatile__ (
	"0:    " _LARX "%0,0,%1 ;"
	"      xor. %0,%3,%0;"
	"      bne 1f;"
	"      " _STCX "%2,0,%1;"
	"      bne- 0b;"
	"1:    "
	: "=&r" (ret)
	: "r" (addr), "r" (new_val), "r" (old)
	: "cr0", "memory");

  return ret == 0;
}
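
/* Design note: there is no trailing "isync" here, so a successful
   compare_and_swap_release orders everything before it (release
   semantics) but makes no promise about loads that follow.  */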

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier ()
{
  __asm__ __volatile__ ("isync" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier ()
{
  __asm__ __volatile__ ("sync" : : : "memory");
}
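
/* A minimal sketch (hypothetical names, compiled out) of a
   producer/consumer handoff built from these barriers: the producer
   publishes the value before the ready flag, and the consumer orders
   the flag check before the value load.  */
#if 0
static volatile obj_addr_t example_value;
static volatile obj_addr_t example_ready;

static inline void
example_publish (obj_addr_t v)
{
  example_value = v;
  write_barrier ();	/* value is visible to other processors ... */
  example_ready = 1;	/* ... before the flag is raised */
}

static inline bool
example_consume (obj_addr_t *out)
{
  if (! example_ready)	/* load plus dependent branch ... */
    return false;
  read_barrier ();	/* ... plus isync keeps the value load after it */
  *out = example_value;
  return true;
}
#endif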

#endif