/* Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the CRITICAL construct.  */

#include "libgomp.h"
#include <stdlib.h>


static gomp_mutex_t default_lock;

void
GOMP_critical_start (void)
{
  /* There is an implicit flush on entry to a critical region.  */
  __atomic_thread_fence (MEMMODEL_RELEASE);
  gomp_mutex_lock (&default_lock);
}

void
GOMP_critical_end (void)
{
  gomp_mutex_unlock (&default_lock);
}

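/* Illustrative sketch, not part of libgomp: for an unnamed critical
   construct the compiler is expected to bracket the protected statements
   with calls to the two entry points above, so that

       #pragma omp critical
       counter++;

   is lowered roughly as

       GOMP_critical_start ();
       counter++;
       GOMP_critical_end ();

   where "counter" stands for some shared variable and is used here only
   for illustration.  */
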
#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif

void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
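          /* If another thread published its lock first, the CAS above
             returns that thread's pointer; in that case discard the mutex
             we just created and use the existing one instead.  */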
          if (plock != NULL)
            {
              gomp_mutex_destroy (nlock);
              free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
}

void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}

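/* Illustrative sketch, not part of libgomp: for a named critical construct
   the compiler is expected to reserve one pointer-sized, zero-initialized
   variable per name and pass its address to the two functions above.  Using
   the hypothetical variable name "critical_io_lock", the construct

       #pragma omp critical (io)
       printf ("hello\n");

   would be lowered roughly as

       GOMP_critical_name_start (&critical_io_lock);
       printf ("hello\n");
       GOMP_critical_name_end (&critical_io_lock);

   so each distinct name gets its own lock, created lazily on first use
   whenever a gomp_mutex_t does not fit in the pointer itself.  */
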
#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
#endif