#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <pulsecore/macro.h>
/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t),
 * although it is very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we use
 * libatomic_ops.
 */

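/*
 * A minimal usage sketch (the "object" type and object_unref() below are
 * hypothetical, not part of PulseAudio): every backend in this header
 * implements the same API, and a typical consumer is a reference count
 * that relies on pa_atomic_inc()/pa_atomic_dec() returning the previous
 * value:
 *
 *     typedef struct object {
 *         pa_atomic_t ref;
 *     } object;
 *
 *     static void object_unref(object *o) {
 *         // pa_atomic_dec() returns the old count, so a return value of
 *         // 1 means this call dropped the last reference.
 *         if (pa_atomic_dec(&o->ref) == 1)
 *             free(o);
 *     }
 */
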
#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns TRUE when the operation was successful. */
static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}

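/*
 * Sketch of the intended use of pa_atomic_cmpxchg() (any backend): retry
 * the compare-and-swap until no other thread has raced us. This
 * hypothetical helper atomically raises the stored value to at least i:
 *
 *     static inline void pa_atomic_max(pa_atomic_t *a, int i) {
 *         int v;
 *         do {
 *             v = pa_atomic_load(a);
 *             if (v >= i)
 *                 return;
 *         } while (!pa_atomic_cmpxchg(a, v, i));
 *     }
 */
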
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
}

#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)

/* NetBSD 5.0+ atomic_ops(3) implementation */

#include <sys/atomic.h>

typedef struct pa_atomic {
    volatile unsigned int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    membar_sync();
    return (int) a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = (unsigned int) i;
    membar_sync();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, i);
    return nv - i;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, -i);
    return nv + i;
}

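/*
 * Worked example for the conversion above: NetBSD's atomic_add_int_nv()
 * returns the *new* value, while this API promises the *previous* one.
 * With a->value == 5 and i == 3, atomic_add_int_nv() returns 8 and
 * 8 - 3 == 5 recovers the old value; pa_atomic_sub() undoes the negated
 * addend the same way with nv + i.
 */
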
/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    int nv = (int) atomic_inc_uint_nv(&a->value);
    return nv - 1;
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    int nv = (int) atomic_dec_uint_nv(&a->value);
    return nv + 1;
}

/* Returns TRUE when the operation was successful. */
static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
    return (int) r == old_i;
}

typedef struct pa_atomic_ptr {
    volatile void *value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    membar_sync();
    return (void *) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = p;
    membar_sync();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *r = atomic_cas_ptr(&a->value, old_p, new_p);
    return r == old_p;
}

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#warning "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "=r" (result), "=m" (a->value)
                          : "0" (i), "m" (a->value));

    return result;
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;
}

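/*
 * What the cmpxchgl instruction above does, written out as a sketch (the
 * LOCK prefix makes the whole sequence atomic):
 *
 *     // result starts out as old_i (constraint "0" ties it to EAX)
 *     result = a->value;
 *     if (result == old_i)
 *         a->value = new_i;   // only stored when the comparison matched
 *     // result now holds the value that was in memory, so
 *     // result == old_i exactly when the swap happened
 */
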
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
#endif
}

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "add %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while(not_exclusive);
    pa_memory_barrier();

    return old_val;
}

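/*
 * Sketch of the ldrex/strex pair used above: ldrex loads the value and
 * marks the address for exclusive access; strex stores only if nothing
 * else has touched that address in between, and writes 0 (stored) or 1
 * (lost exclusivity) into not_exclusive. Conceptually:
 *
 *     do {
 *         old_val = a->value;          // ldrex: load + mark exclusive
 *         new_val = old_val + i;       // add
 *         // strex: not_exclusive = 0 on success, 1 if we must retry
 *     } while (not_exclusive);
 */
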
/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "sub %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while(not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while(not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while(not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See the file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or have an obsolete Linux kernel.
*/
/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

static inline void pa_memory_barrier(void) {
#ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* An unsigned-long-typed variant of the same helper, to avoid cast warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while(__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while(__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns TRUE when the operation was successful. */
static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while(failed && a->value == old_i);
    return !failed;
}

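/*
 * Note on the retry loop above: __kernel_cmpxchg() may fail spuriously
 * (on ARMv6+ it is built on ldrex/strex, which can lose exclusivity
 * without a real conflict), so a failure is only final once the value
 * genuinely differs from old_i. The loop is equivalent to:
 *
 *     for (;;) {
 *         if (__kernel_cmpxchg(old_i, new_i, &a->value) == 0)
 *             return TRUE;    // swap performed
 *         if (a->value != old_i)
 *             return FALSE;   // real mismatch, give up
 *     }
 */
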
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while(failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */

#include <atomic_ops.h>

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif

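/*
 * A closing usage sketch that works with any of the backends above (the
 * "item" type and push() are hypothetical, not part of PulseAudio):
 * pa_atomic_ptr_cmpxchg() is the building block for simple lock-free
 * structures such as a Treiber-style stack push:
 *
 *     struct item {
 *         struct item *next;
 *     };
 *
 *     static void push(pa_atomic_ptr_t *head, struct item *it) {
 *         void *old;
 *         do {
 *             old = pa_atomic_ptr_load(head);
 *             it->next = (struct item *) old;
 *         } while (!pa_atomic_ptr_cmpxchg(head, old, it));
 *     }
 */
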
#endif