[pulseaudio] / src / pulsecore / atomic.h
Change return value of cmpxchg atomic op to pa_bool_t
#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <pulsecore/macro.h>

/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t),
 * although that is very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we use
 * libatomic_ops.
 */

#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif
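
/*
 * Usage sketch (illustrative only): these primitives are typically used
 * for things like reference counting. The names refcount, ref(), unref()
 * and destroy() below are hypothetical.
 *
 *     static pa_atomic_t refcount = PA_ATOMIC_INIT(1);
 *
 *     static void ref(void) {
 *         pa_atomic_inc(&refcount);
 *     }
 *
 *     static void unref(void) {
 *         // pa_atomic_dec() returns the previous value, so 1 means
 *         // this call dropped the last reference
 *         if (pa_atomic_dec(&refcount) == 1)
 *             destroy();
 *     }
 */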

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns TRUE when the operation was successful. */
static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
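
/*
 * Sketch of a typical compare-and-swap retry loop built on the primitive
 * above (illustrative only; "a" is a pa_atomic_t* and compute_next() is a
 * hypothetical pure update function):
 *
 *     int old_v, new_v;
 *     do {
 *         old_v = pa_atomic_load(a);
 *         new_v = compute_next(old_v);
 *     } while (!pa_atomic_cmpxchg(a, old_v, new_v));
 */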

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
}

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

    return result;
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    /* cmpxchgl leaves the previous value in %eax; the swap happened
     * iff that value equals the expected one. */
    return result == old_i;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/
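
/*
 * Background on the technique used below: "ldrex" loads a word and marks
 * its address for exclusive access, and "strex" stores to that address
 * only if the exclusive mark is still held, writing 0 to its result
 * register on success and 1 on failure. Looping until the store succeeds
 * therefore makes each load-modify-store sequence atomic.
 */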

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
#endif
}

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "add %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "sub %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        /* Load the current value, compare it with old_i, and store new_i
         * only if they were equal ("strexeq"); retry when the exclusive
         * store failed but the value still matched. */
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        /* Same ldrex/strexeq sequence as pa_atomic_cmpxchg() above,
         * operating on a pointer-sized word. */
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or have an obsolete Linux kernel.
*/
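
/*
 * Background note: the fixed addresses used below point into the ARM
 * "kuser helpers" page that the kernel maps near the top of every
 * process's address space, so calling through them runs kernel-provided
 * code matched to the actual CPU.
 */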
/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* This is just to get rid of all warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns TRUE when the operation was successful. */
static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
        /* __kernel_cmpxchg() can fail spuriously (e.g. when the thread is
         * preempted between its load and store), so retry for as long as
         * the current value still matches the expected one. */
    } while (failed && a->value == old_i);
    return !failed;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    pa_bool_t failed;
    do {
        /* Same retry rationale as in pa_atomic_cmpxchg() above. */
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while (failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */
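
/*
 * Note: the _full variants of the AO_*() operations used below include
 * full memory barriers, matching the "only full memory barriers for now"
 * policy stated at the top of this file.
 */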

#include <atomic_ops.h>

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif

#endif