PAPI  5.6.0.0
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
linux-lock.h
Go to the documentation of this file.
/*
 * linux-lock.h (Doxygen listing): PAPI low-level lock primitives.
 * Defines _papi_hwd_lock()/_papi_hwd_unlock() over the global lock table
 * _papi_hwd_lock_data[], either as pthread mutexes (this branch) or as
 * per-architecture spinlocks (the #else branch below).
 */
1 #ifndef _LINUX_LOCK_H
2 #define _LINUX_LOCK_H
3 
4 #include "mb.h"
5 
6 /* Locking functions */
7 
/* Pthread-based implementation: one mutex per lock id. */
8 #if defined(USE_PTHREAD_MUTEXES)
9 
10 #include <pthread.h>
11 
/* Global lock table, indexed 0 .. PAPI_MAX_LOCK-1; defined elsewhere. */
12 extern pthread_mutex_t _papi_hwd_lock_data[PAPI_MAX_LOCK];
13 
/* Block until lock `lck` is acquired.
   NOTE(review): the pthread_mutex_lock return value is ignored. */
14 #define _papi_hwd_lock(lck) \
15 do \
16 { \
17  pthread_mutex_lock (&_papi_hwd_lock_data[lck]); \
18 } while(0)
/* Release lock `lck` (return value likewise ignored). */
19 #define _papi_hwd_unlock(lck) \
20 do \
21 { \
22  pthread_mutex_unlock(&_papi_hwd_lock_data[lck]); \
23 } while(0)
24 
25 
26 #else
27 
/* Spinlock implementations: each lock is a word that is MUTEX_OPEN (0)
   when free and MUTEX_CLOSED (1) when held. */
28 extern volatile unsigned int _papi_hwd_lock_data[PAPI_MAX_LOCK];
29 #define MUTEX_OPEN 0
30 #define MUTEX_CLOSED 1
31 
32 /********/
33 /* ia64 */
34 /********/
35 
36 #if defined(__ia64__)
37 #ifdef __INTEL_COMPILER
/* icc intrinsics: spin until the acquire compare-exchange swaps
   MUTEX_OPEN -> MUTEX_CLOSED; unlock stores MUTEX_OPEN back. */
38 #define _papi_hwd_lock(lck) { while(_InterlockedCompareExchange_acq(&_papi_hwd_lock_data[lck],MUTEX_CLOSED,MUTEX_OPEN) != MUTEX_OPEN) { ; } }
39 #define _papi_hwd_unlock(lck) { _InterlockedExchange((volatile int *)&_papi_hwd_lock_data[lck], MUTEX_OPEN); }
40 #else /* GCC */
/* gcc: ar.ccv holds the expected value (MUTEX_OPEN); cmpxchg4.acq spins
   until it swaps in MUTEX_CLOSED.  Unlock is a release store (st4.rel). */
41 #define _papi_hwd_lock(lck) \
42  { int res = 0; \
43  do { \
44  __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "r"(MUTEX_OPEN)); \
45  __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv" : "=r"(res) : "r"(&_papi_hwd_lock_data[lck]), "r"(MUTEX_CLOSED) : "memory"); \
46  } while (res != MUTEX_OPEN); }
47 
48 #define _papi_hwd_unlock(lck) { __asm__ __volatile__ ("st4.rel [%0]=%1" : : "r"(&_papi_hwd_lock_data[lck]), "r"(MUTEX_OPEN) : "memory"); }
49 #endif
50 
51 /***********/
52 /* x86 */
53 /***********/
54 
55 #elif defined(__i386__)||defined(__x86_64__)
/* Spin on `lock cmpxchg`: the "0" constraint seeds eax/res with
   MUTEX_OPEN; the swap to MUTEX_CLOSED succeeds only when the lock word
   still equals MUTEX_OPEN, otherwise res gets the observed value and we
   retry. */
56 #define _papi_hwd_lock(lck) \
57 do \
58 { \
59  unsigned int res = 0; \
60  do { \
61  __asm__ __volatile__ ("lock ; " "cmpxchg %1,%2" : "=a"(res) : "q"(MUTEX_CLOSED), "m"(_papi_hwd_lock_data[lck]), "0"(MUTEX_OPEN) : "memory"); \
62  } while(res != (unsigned int)MUTEX_OPEN); \
63 } while(0)
/* Release via xchg, which is implicitly locked on x86.
   NOTE(review): the lock word appears only as an input "m" operand even
   though xchg writes it; the code relies on the "memory" clobber to
   inform the compiler -- confirm against upstream PAPI. */
64 #define _papi_hwd_unlock(lck) \
65 do \
66 { \
67  unsigned int res = 0; \
68  __asm__ __volatile__ ("xchg %0,%1" : "=r"(res) : "m"(_papi_hwd_lock_data[lck]), "0"(MUTEX_OPEN) : "memory"); \
69 } while(0)
70 
71 /***************/
72 /* power */
73 /***************/
74 
75 #elif defined(__powerpc__)
76 
77 /*
78  * These functions are slight modifications of the functions in
79  * /usr/include/asm-ppc/system.h.
80  *
81  * We can't use the ones in system.h directly because they are defined
82  * only when __KERNEL__ is defined.
83  */
84 
/* Atomically exchange the 32-bit word at *p with `val`, returning the
   previous value.  The lwarx/stwcx. pair retries (bne- 1b) until the
   store-conditional succeeds; `sync` before and `isync` after act as
   memory barriers around the exchange. */
85 static __inline__ unsigned long
86 papi_xchg_u32( volatile void *p, unsigned long val )
87 {
88  unsigned long prev;
89 
90  __asm__ __volatile__( "\n\
91  sync \n\
92 1: lwarx %0,0,%2 \n\
93  stwcx. %3,0,%2 \n\
94  bne- 1b \n\
95  isync":"=&r"( prev ), "=m"( *( volatile unsigned long * ) p )
96  :"r"( p ), "r"( val ),
97  "m"( *( volatile unsigned long * ) p )
98  :"cc", "memory" );
99 
100  return prev;
101 }
102 
/* Acquire: swap MUTEX_CLOSED into the lock word until the previous
   value observed was MUTEX_OPEN (i.e. we were the one who closed it). */
103 #define _papi_hwd_lock(lck) \
104 do { \
105  unsigned int retval; \
106  do { \
107  retval = papi_xchg_u32(&_papi_hwd_lock_data[lck],MUTEX_CLOSED); \
108  } while(retval != (unsigned int)MUTEX_OPEN); \
109 } while(0)
/* Release: swap MUTEX_OPEN in, looping until the previous value was
   MUTEX_CLOSED.  NOTE(review): per this loop condition, unlocking a
   lock that is already open spins forever -- callers must only unlock
   a lock they hold. */
110 #define _papi_hwd_unlock(lck) \
111 do { \
112  unsigned int retval; \
113  do { \
114  retval = papi_xchg_u32(&_papi_hwd_lock_data[lck],MUTEX_OPEN); \
115  } while(retval != (unsigned int)MUTEX_CLOSED); \
116 } while (0)
117 
118 /*****************/
119 /* SPARC */
120 /*****************/
121 
122 #elif defined(__sparc__)
/* Test-and-set spinlock: ldstub atomically reads the lock byte and sets
   it to 0xff.  A non-zero result means the lock was already held, so we
   branch to a cold spin loop (.subsection 2) that re-reads the byte
   until it clears, then retry the ldstub. */
123 static inline void
124 __raw_spin_lock( volatile unsigned int *lock )
125 {
126  __asm__ __volatile__( "\n1:\n\t" "ldstub [%0], %%g2\n\t" "orcc %%g2, 0x0, %%g0\n\t" "bne,a 2f\n\t" " ldub [%0], %%g2\n\t" ".subsection 2\n" "2:\n\t" "orcc %%g2, 0x0, %%g0\n\t" "bne,a 2b\n\t" " ldub [%0], %%g2\n\t" "b,a 1b\n\t" ".previous\n": /* no outputs */
127  :"r"( lock )
128  :"g2", "memory", "cc" );
129 }
/* Release: store byte zero (%g0) back to the lock. */
130 static inline void
131 __raw_spin_unlock( volatile unsigned int *lock )
132 {
133  __asm__ __volatile__( "stb %%g0, [%0]"::"r"( lock ):"memory" );
134 }
135 
/* Map the generic lock API onto the sparc spinlock helpers.
   Fixed: removed the stray trailing semicolon from _papi_hwd_lock; it
   made every use expand to two statements, which breaks constructs like
   `if (c) _papi_hwd_lock(l); else ...` and was inconsistent with
   _papi_hwd_unlock below. */
136 #define _papi_hwd_lock(lck) __raw_spin_lock(&_papi_hwd_lock_data[lck])
137 #define _papi_hwd_unlock(lck) __raw_spin_unlock(&_papi_hwd_lock_data[lck])
138 
139 /*******************/
140 /* ARM */
141 /*******************/
142 
143 #elif defined(__arm__)
144 
/* Older, disabled implementation kept for reference only. */
145 #if 0
146 
147 /* OLD CODE FROM VINCE BELOW */
148 
149 /* FIXME */
150 /* not sure if this even works */
151 /* also the various flavors of ARM */
152 /* have differing levels of atomic */
153 /* instruction support. A proper */
154 /* implementation needs to handle this :( */
155 
156 #warning "WARNING! Verify mutexes work on ARM!"
157 
158 /*
159  * For arm/gcc, 0 is clear, 1 is set.
160  */
161 #define MUTEX_SET(tsl) ({ \
162  int __r; \
163  asm volatile( \
164  "swpb %0, %1, [%2]\n\t" \
165  "eor %0, %0, #1\n\t" \
166  : "=&r" (__r) \
167  : "r" (1), "r" (tsl) \
168  ); \
169  __r & 1; \
170  })
171 
172 #define _papi_hwd_lock(lck) MUTEX_SET(lck)
173 #define _papi_hwd_unlock(lck) (*(volatile int *)(lck) = 0)
174 #endif
175 
176 /* NEW CODE FROM PHIL */
177 
/* Test-and-set spinlock: `swp` atomically swaps 1 into *lock and yields
   the old value in val; spin while the old value was non-zero.  Always
   returns 0.
   NOTE(review): the swp instruction is deprecated/unavailable on newer
   ARM cores (ldrex/strex preferred on ARMv6+) -- confirm the targeted
   architecture set. */
178 static inline int __arm_papi_spin_lock (volatile unsigned int *lock)
179 {
180  unsigned int val;
181 
182  do
183  asm volatile ("swp %0, %1, [%2]"
184  : "=r" (val)
185  : "0" (1), "r" (lock)
186  : "memory");
187  while (val != 0);
188 
189  return 0;
190 }
/* rmb() (from mb.h, included above) is issued before and after both the
   acquire and the release; unlock is a plain store of 0. */
191 #define _papi_hwd_lock(lck) { rmb(); __arm_papi_spin_lock(&_papi_hwd_lock_data[lck]); rmb(); }
192 #define _papi_hwd_unlock(lck) { rmb(); _papi_hwd_lock_data[lck] = 0; rmb(); }
193 
194 #elif defined(__mips__)
/* ll/sc spinlock: load-linked the lock word, spin (bnez 1b) while it is
   non-zero, then store-conditional 1; beqzl retries if the store lost
   the reservation.  The trailing `sync` orders the critical section
   after the acquire. */
195 static inline void __raw_spin_lock(volatile unsigned int *lock)
196 {
197  unsigned int tmp;
198  __asm__ __volatile__(
199  " .set noreorder # __raw_spin_lock \n"
200  "1: ll %1, %2 \n"
201  " bnez %1, 1b \n"
202  " li %1, 1 \n"
203  " sc %1, %0 \n"
204  " beqzl %1, 1b \n"
205  " nop \n"
206  " sync \n"
207  " .set reorder \n"
208  : "=m" (*lock), "=&r" (tmp)
209  : "m" (*lock)
210  : "memory");
211 }
212 
/* Release: `sync` to drain prior accesses, then store zero ($0) to the
   lock word. */
213 static inline void __raw_spin_unlock(volatile unsigned int *lock)
214 {
215  __asm__ __volatile__(
216  " .set noreorder # __raw_spin_unlock \n"
217  " sync \n"
218  " sw $0, %0 \n"
219  " .set\treorder \n"
220  : "=m" (*lock)
221  : "m" (*lock)
222  : "memory");
223 }
/* Map the generic lock API onto the mips spinlock helpers.
   Fixed: removed the stray trailing semicolon from _papi_hwd_lock; it
   made every use expand to two statements (breaking
   `if (c) _papi_hwd_lock(l); else ...`) and was inconsistent with
   _papi_hwd_unlock. */
224 #define _papi_hwd_lock(lck) __raw_spin_lock(&_papi_hwd_lock_data[lck])
225 #define _papi_hwd_unlock(lck) __raw_spin_unlock(&_papi_hwd_lock_data[lck])
226 #else
227 
228 #error "_papi_hwd_lock/unlock undefined!"
229 #endif
230 
/* Closes the spinlock (#else) branch of the USE_PTHREAD_MUTEXES test. */
231 #endif /* defined(USE_PTHREAD_MUTEXES) */
232 
/* Fixed: this final #endif closes the include guard, not the
   USE_PTHREAD_MUTEXES conditional (that one is closed just above). */
233 #endif /* _LINUX_LOCK_H */
int val
Definition: libbif.c:235
#define PAPI_MAX_LOCK
Definition: papi_lock.h:18
volatile unsigned int _papi_hwd_lock_data[PAPI_MAX_LOCK]
Definition: darwin-common.c:32
atomic_p lock[]
Definition: aix.c:25
long long tmp
Definition: iozone.c:12031