/* code.delx.au - pulseaudio/blob - src/pulsecore/atomic.h */
1 #ifndef foopulseatomichfoo
2 #define foopulseatomichfoo
/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
26 #include <pulsecore/macro.h>
/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed however, that sizeof(AO_t) == sizeof(size_t).
 * however very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions. otherwise we use
 * libatomic_ops.
 */
42 #error "Please include config.h before including this file!"
45 #ifdef HAVE_ATOMIC_BUILTINS
47 /* __sync based implementation */
49 typedef struct pa_atomic
{
53 #define PA_ATOMIC_INIT(v) { .value = (v) }
55 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
60 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
65 /* Returns the previously set value */
66 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
67 return __sync_fetch_and_add (& a
-> value
, i
);
70 /* Returns the previously set value */
71 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
72 return __sync_fetch_and_sub (& a
-> value
, i
);
75 /* Returns the previously set value */
76 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
77 return pa_atomic_add ( a
, 1 );
80 /* Returns the previously set value */
81 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
82 return pa_atomic_sub ( a
, 1 );
85 /* Returns true when the operation was successful. */
86 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
87 return __sync_bool_compare_and_swap (& a
-> value
, old_i
, new_i
);
90 typedef struct pa_atomic_ptr
{
91 volatile unsigned long value
;
94 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
96 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
98 return ( void *) a
-> value
;
101 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
102 a
-> value
= ( unsigned long ) p
;
103 __sync_synchronize ();
106 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
107 return __sync_bool_compare_and_swap (& a
-> value
, ( long ) old_p
, ( long ) new_p
);
110 #elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)
112 /* NetBSD 5.0+ atomic_ops(3) implementation */
114 #include <sys/atomic.h>
116 typedef struct pa_atomic
{
117 volatile unsigned int value
;
120 #define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }
122 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
124 return ( int ) a
-> value
;
127 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
128 a
-> value
= ( unsigned int ) i
;
132 /* Returns the previously set value */
133 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
134 int nv
= ( int ) atomic_add_int_nv (& a
-> value
, i
);
138 /* Returns the previously set value */
139 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
140 int nv
= ( int ) atomic_add_int_nv (& a
-> value
, - i
);
144 /* Returns the previously set value */
145 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
146 int nv
= ( int ) atomic_inc_uint_nv (& a
-> value
);
150 /* Returns the previously set value */
151 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
152 int nv
= ( int ) atomic_dec_uint_nv (& a
-> value
);
156 /* Returns true when the operation was successful. */
157 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
158 unsigned int r
= atomic_cas_uint (& a
-> value
, ( unsigned int ) old_i
, ( unsigned int ) new_i
);
159 return ( int ) r
== old_i
;
162 typedef struct pa_atomic_ptr
{
163 volatile void * value
;
166 #define PA_ATOMIC_PTR_INIT(v) { .value = (v) }
168 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
170 return ( void *) a
-> value
;
173 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
178 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
179 void * r
= atomic_cas_ptr (& a
-> value
, old_p
, new_p
);
183 #elif defined(__FreeBSD__)
185 #include <sys/cdefs.h>
186 #include <sys/types.h>
187 #include <sys/param.h>
188 #include <machine/atomic.h>
190 #if __FreeBSD_version < 600000
191 #if defined(__i386__) || defined(__amd64__)
192 #if defined(__amd64__)
193 #define atomic_load_acq_64 atomic_load_acq_long
195 static inline u_int
atomic_fetchadd_int ( volatile u_int
* p
, u_int v
) {
197 " " __XSTRING ( MPLOCKED
) " "
199 "# atomic_fetchadd_int"
206 #elif defined(__sparc64__)
207 #define atomic_load_acq_64 atomic_load_acq_long
208 #define atomic_fetchadd_int atomic_add_int
209 #elif defined(__ia64__)
210 #define atomic_load_acq_64 atomic_load_acq_long
211 static inline uint32_t
212 atomic_fetchadd_int ( volatile uint32_t * p
, uint32_t v
) {
217 } while (! atomic_cmpset_32 ( p
, value
, value
+ v
));
223 typedef struct pa_atomic
{
224 volatile unsigned long value
;
227 #define PA_ATOMIC_INIT(v) { .value = (v) }
229 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
230 return ( int ) atomic_load_acq_int (( unsigned int *) & a
-> value
);
233 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
234 atomic_store_rel_int (( unsigned int *) & a
-> value
, i
);
237 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
238 return atomic_fetchadd_int (( unsigned int *) & a
-> value
, i
);
241 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
242 return atomic_fetchadd_int (( unsigned int *) & a
-> value
, -( i
));
245 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
246 return atomic_fetchadd_int (( unsigned int *) & a
-> value
, 1 );
249 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
250 return atomic_fetchadd_int (( unsigned int *) & a
-> value
, - 1 );
253 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
254 return atomic_cmpset_int (( unsigned int *) & a
-> value
, old_i
, new_i
);
257 typedef struct pa_atomic_ptr
{
258 volatile unsigned long value
;
261 #define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
263 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
264 #ifdef atomic_load_acq_64
265 return ( void *) atomic_load_acq_ptr (( unsigned long *) & a
-> value
);
267 return ( void *) atomic_load_acq_ptr (( unsigned int *) & a
-> value
);
271 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
272 #ifdef atomic_load_acq_64
273 atomic_store_rel_ptr (& a
-> value
, ( unsigned long ) p
);
275 atomic_store_rel_ptr (( unsigned int *) & a
-> value
, ( unsigned int ) p
);
279 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
280 #ifdef atomic_load_acq_64
281 return atomic_cmpset_ptr (& a
-> value
, ( unsigned long ) old_p
, ( unsigned long ) new_p
);
283 return atomic_cmpset_ptr (( unsigned int *) & a
-> value
, ( unsigned int ) old_p
, ( unsigned int ) new_p
);
287 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
289 #warn "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
291 /* Adapted from glibc */
293 typedef struct pa_atomic
{
297 #define PA_ATOMIC_INIT(v) { .value = (v) }
299 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
303 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
307 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
310 __asm
__volatile ( "lock; xaddl %0, %1"
311 : "=r" ( result
), "=m" ( a
-> value
)
312 : "0" ( i
), "m" ( a
-> value
));
317 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
318 return pa_atomic_add ( a
, - i
);
321 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
322 return pa_atomic_add ( a
, 1 );
325 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
326 return pa_atomic_sub ( a
, 1 );
329 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
332 __asm__
__volatile__ ( "lock; cmpxchgl %2, %1"
333 : "=a" ( result
), "=m" ( a
-> value
)
334 : "r" ( new_i
), "m" ( a
-> value
), "0" ( old_i
));
336 return result
== old_i
;
339 typedef struct pa_atomic_ptr
{
340 volatile unsigned long value
;
343 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
345 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
346 return ( void *) a
-> value
;
349 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
350 a
-> value
= ( unsigned long ) p
;
353 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
356 __asm__
__volatile__ ( "lock; cmpxchgq %q2, %1"
357 : "=a" ( result
), "=m" ( a
-> value
)
358 : "r" ( new_p
), "m" ( a
-> value
), "0" ( old_p
));
360 return result
== old_p
;
363 #elif defined(ATOMIC_ARM_INLINE_ASM)
366 These should only be enabled if we have ARMv6 or better.
369 typedef struct pa_atomic
{
373 #define PA_ATOMIC_INIT(v) { .value = (v) }
375 static inline void pa_memory_barrier ( void ) {
376 #ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
377 asm volatile ( "mcr p15, 0, r0, c7, c10, 5 @ dmb" );
381 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
386 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
391 /* Returns the previously set value */
392 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
393 unsigned long not_exclusive
;
394 int new_val
, old_val
;
398 asm volatile ( "ldrex %0, [%3] \n "
400 "strex %1, %2, [%3] \n "
401 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
402 : "r" (& a
-> value
), "Ir" ( i
)
404 } while ( not_exclusive
);
410 /* Returns the previously set value */
411 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
412 unsigned long not_exclusive
;
413 int new_val
, old_val
;
417 asm volatile ( "ldrex %0, [%3] \n "
419 "strex %1, %2, [%3] \n "
420 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
421 : "r" (& a
-> value
), "Ir" ( i
)
423 } while ( not_exclusive
);
429 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
430 return pa_atomic_add ( a
, 1 );
433 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
434 return pa_atomic_sub ( a
, 1 );
437 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
438 unsigned long not_equal
, not_exclusive
;
442 asm volatile ( "ldrex %0, [%2] \n "
445 "strexeq %0, %4, [%2] \n "
446 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
447 : "r" (& a
-> value
), "Ir" ( old_i
), "r" ( new_i
)
449 } while ( not_exclusive
&& ! not_equal
);
455 typedef struct pa_atomic_ptr
{
456 volatile unsigned long value
;
459 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
461 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
463 return ( void *) a
-> value
;
466 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
467 a
-> value
= ( unsigned long ) p
;
471 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
472 unsigned long not_equal
, not_exclusive
;
476 asm volatile ( "ldrex %0, [%2] \n "
479 "strexeq %0, %4, [%2] \n "
480 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
481 : "r" (& a
-> value
), "Ir" ( old_p
), "r" ( new_p
)
483 } while ( not_exclusive
&& ! not_equal
);
489 #elif defined(ATOMIC_ARM_LINUX_HELPERS)
491 /* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
492 information about these functions. The arm kernel helper functions first
494 Apply --disable-atomic-arm-linux-helpers flag to configure if you prefer
495 inline asm implementation or you have an obsolete Linux kernel.
498 typedef void ( __kernel_dmb_t
)( void );
499 #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
501 static inline void pa_memory_barrier ( void ) {
502 #ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
507 /* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
508 typedef int ( __kernel_cmpxchg_t
)( int oldval
, int newval
, volatile int * ptr
);
509 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
511 /* This is just to get rid of all warnings */
512 typedef int ( __kernel_cmpxchg_u_t
)( unsigned long oldval
, unsigned long newval
, volatile unsigned long * ptr
);
513 #define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
515 typedef struct pa_atomic
{
519 #define PA_ATOMIC_INIT(v) { .value = (v) }
521 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
526 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
531 /* Returns the previously set value */
532 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
536 } while ( __kernel_cmpxchg ( old_val
, old_val
+ i
, & a
-> value
));
540 /* Returns the previously set value */
541 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
545 } while ( __kernel_cmpxchg ( old_val
, old_val
- i
, & a
-> value
));
549 /* Returns the previously set value */
550 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
551 return pa_atomic_add ( a
, 1 );
554 /* Returns the previously set value */
555 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
556 return pa_atomic_sub ( a
, 1 );
559 /* Returns true when the operation was successful. */
560 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
563 failed
= !! __kernel_cmpxchg ( old_i
, new_i
, & a
-> value
);
564 } while ( failed
&& a
-> value
== old_i
);
568 typedef struct pa_atomic_ptr
{
569 volatile unsigned long value
;
572 #define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
574 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
576 return ( void *) a
-> value
;
579 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
580 a
-> value
= ( unsigned long ) p
;
584 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
587 failed
= !! __kernel_cmpxchg_u (( unsigned long ) old_p
, ( unsigned long ) new_p
, & a
-> value
);
588 } while ( failed
&& a
-> value
== ( unsigned long ) old_p
);
594 /* libatomic_ops based implementation */
596 #include <atomic_ops.h>
598 typedef struct pa_atomic
{
602 #define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }
604 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
605 return ( int ) AO_load_full (( AO_t
*) & a
-> value
);
608 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
609 AO_store_full (& a
-> value
, ( AO_t
) i
);
612 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
613 return ( int ) AO_fetch_and_add_full (& a
-> value
, ( AO_t
) i
);
616 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
617 return ( int ) AO_fetch_and_add_full (& a
-> value
, ( AO_t
) - i
);
620 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
621 return ( int ) AO_fetch_and_add1_full (& a
-> value
);
624 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
625 return ( int ) AO_fetch_and_sub1_full (& a
-> value
);
628 static inline bool pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
629 return AO_compare_and_swap_full (& a
-> value
, ( unsigned long ) old_i
, ( unsigned long ) new_i
);
632 typedef struct pa_atomic_ptr
{
636 #define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
638 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
639 return ( void *) AO_load_full (( AO_t
*) & a
-> value
);
642 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
643 AO_store_full (& a
-> value
, ( AO_t
) p
);
646 static inline bool pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
647 return AO_compare_and_swap_full (& a
-> value
, ( AO_t
) old_p
, ( AO_t
) new_p
);