#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <pulsecore/macro.h>
/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is,
 * however, not guaranteed that sizeof(AO_t) == sizeof(size_t), although
 * that is very likely the case.
 *
 * For now we implement only full memory barriers. Eventually we might
 * want to support more elaborate memory barriers, in which case we will
 * add suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we fall
 * back to libatomic_ops.
 */
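
/*
 * A minimal usage sketch (illustrative only; "foo" and foo_free() are
 * hypothetical, while the pa_atomic_* names are the API defined below):
 * the typical consumer treats pa_atomic_t as an opaque counter, e.g. for
 * reference counting:
 *
 *     typedef struct foo {
 *         pa_atomic_t ref;
 *     } foo;
 *
 *     static void foo_ref(foo *f) {
 *         pa_atomic_inc(&f->ref);
 *     }
 *
 *     static void foo_unref(foo *f) {
 *         if (pa_atomic_dec(&f->ref) <= 1)  // dec returns the old value
 *             foo_free(f);
 *     }
 */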

#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
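
/*
 * Illustrative sketch (an assumption about typical use, not part of this
 * header): cmpxchg is normally driven from a retry loop that re-reads the
 * current value until the swap succeeds, e.g. an "increment only if
 * positive" (inc_if_positive is hypothetical):
 *
 *     static bool inc_if_positive(pa_atomic_t *a) {
 *         for (;;) {
 *             int v = pa_atomic_load(a);
 *             if (v <= 0)
 *                 return false;
 *             if (pa_atomic_cmpxchg(a, v, v + 1))
 *                 return true;
 *         }
 *     }
 */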

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
}

#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)

/* NetBSD 5.0+ atomic_ops(3) implementation */

#include <sys/atomic.h>

typedef struct pa_atomic {
    volatile unsigned int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    membar_sync();
    return (int) a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = (unsigned int) i;
    membar_sync();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, i);
    return nv - i;
}
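
/* Note: the NetBSD *_nv primitives return the *new* value, so the previous
 * value is recovered arithmetically here and in the functions below; e.g.
 * adding 5 to a counter holding 10 gives nv == 15, and nv - i == 10 is
 * returned. */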

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, -i);
    return nv + i;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    int nv = (int) atomic_inc_uint_nv(&a->value);
    return nv - 1;
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    int nv = (int) atomic_dec_uint_nv(&a->value);
    return nv + 1;
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
    return (int) r == old_i;
}

typedef struct pa_atomic_ptr {
    volatile void *value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    membar_sync();
    return (void *) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = p;
    membar_sync();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *r = atomic_cas_ptr(&a->value, old_p, new_p);
    return r == old_p;
}

#elif defined(__FreeBSD__)

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <machine/atomic.h>

#if __FreeBSD_version < 600000
#if defined(__i386__) || defined(__amd64__)
#if defined(__amd64__)
#define atomic_load_acq_64 atomic_load_acq_long
#endif
static inline u_int atomic_fetchadd_int(volatile u_int *p, u_int v) {
    __asm __volatile(
        " " __XSTRING(MPLOCKED) " "
        " xaddl %0, %1 ; "
        "# atomic_fetchadd_int"
        : "+r" (v),
          "=m" (*p)
        : "m" (*p));

    return (v);
}
#elif defined(__sparc64__)
#define atomic_load_acq_64 atomic_load_acq_long
#define atomic_fetchadd_int atomic_add_int
#elif defined(__ia64__)
#define atomic_load_acq_64 atomic_load_acq_long
static inline uint32_t
atomic_fetchadd_int(volatile uint32_t *p, uint32_t v) {
    uint32_t value;

    do {
        value = *p;
    } while (!atomic_cmpset_32(p, value, value + v));
    return (value);
}
#endif
#endif

typedef struct pa_atomic {
    volatile unsigned long value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) atomic_load_acq_int((unsigned int *) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    atomic_store_rel_int((unsigned int *) &a->value, i);
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -(i));
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -1);
}

static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return atomic_cmpset_int((unsigned int *) &a->value, old_i, new_i);
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
#ifdef atomic_load_acq_64
    return (void*) atomic_load_acq_ptr((unsigned long *) &a->value);
#else
    return (void*) atomic_load_acq_ptr((unsigned int *) &a->value);
#endif
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
#ifdef atomic_load_acq_64
    atomic_store_rel_ptr(&a->value, (unsigned long) p);
#else
    atomic_store_rel_ptr((unsigned int *) &a->value, (unsigned int) p);
#endif
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
#ifdef atomic_load_acq_64
    return atomic_cmpset_ptr(&a->value, (unsigned long) old_p, (unsigned long) new_p);
#else
    return atomic_cmpset_ptr((unsigned int *) &a->value, (unsigned int) old_p, (unsigned int) new_p);
#endif
}

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#warning "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

    return result;
}
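
/* "lock; xaddl" atomically exchanges the register with the memory operand
 * while adding them, so after the instruction the register holds the
 * previous value of a->value, which is what is returned above. */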

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
#endif
}

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "add %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "sub %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}
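
/* How the ldrex/strex sequence above works: ldrex loads a->value and takes
 * an exclusive reservation on it, subs compares it against old_i (setting
 * the condition flags), and strexeq attempts the store only on equality.
 * strex writes 1 to its status register when the reservation was lost in
 * the meantime, so the loop retries on such spurious failures but exits as
 * soon as the values genuinely differ. */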

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See the file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or have an obsolete Linux kernel.
*/
/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

static inline void pa_memory_barrier(void) {
#ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* This is just to get rid of all warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
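
/* These helpers live at fixed addresses in the vector page that the kernel
 * maps into every process, hence the calls through casted constants rather
 * than linked symbols. __kernel_cmpxchg returns 0 if it atomically replaced
 * *ptr (which held oldval) with newval, and non-zero otherwise. */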

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    bool failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while (failed && a->value == old_i);
    return !failed;
}
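
/* Note the loop condition above: __kernel_cmpxchg may fail spuriously (e.g.
 * when the operation is interrupted), so a failure only counts as real once
 * a->value no longer holds old_i. */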

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    bool failed;
    do {
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while (failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */

#include <atomic_ops.h>

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif
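
/*
 * Illustrative sketch (an assumption about typical use, not part of this
 * header): the pointer variant supports the same compare-and-swap idiom
 * regardless of which implementation was selected above, e.g. a lock-free
 * push onto a singly-linked list ("item" is hypothetical):
 *
 *     typedef struct item {
 *         struct item *next;
 *     } item;
 *
 *     static void push(pa_atomic_ptr_t *head, item *i) {
 *         void *old;
 *         do {
 *             old = pa_atomic_ptr_load(head);
 *             i->next = (item*) old;
 *         } while (!pa_atomic_ptr_cmpxchg(head, old, i));
 *     }
 */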

#endif