/* src/pulsecore/atomic.h */
#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <pulsecore/macro.h>

/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed however, that sizeof(AO_t) == sizeof(size_t). It is
 * however very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions. Otherwise we use
 * libatomic_ops.
 */
#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif
45 #if HAVE_ATOMIC_BUILTINS
47 /* __sync based implementation */
49 typedef struct pa_atomic
{
53 #define PA_ATOMIC_INIT(v) { .value = (v) }
55 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
60 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
65 /* Returns the previously set value */
66 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
67 return __sync_fetch_and_add (& a
-> value
, i
);
70 /* Returns the previously set value */
71 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
72 return __sync_fetch_and_sub (& a
-> value
, i
);
75 /* Returns the previously set value */
76 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
77 return pa_atomic_add ( a
, 1 );
80 /* Returns the previously set value */
81 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
82 return pa_atomic_sub ( a
, 1 );
85 /* Returns TRUE when the operation was successful. */
86 static inline pa_bool_t
pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
87 return __sync_bool_compare_and_swap (& a
-> value
, old_i
, new_i
);
90 typedef struct pa_atomic_ptr
{
91 volatile unsigned long value
;
94 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
96 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
98 return ( void *) a
-> value
;
101 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
102 a
-> value
= ( unsigned long ) p
;
103 __sync_synchronize ();
106 static inline pa_bool_t
pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
107 return __sync_bool_compare_and_swap (& a
-> value
, ( long ) old_p
, ( long ) new_p
);
110 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
112 #warn "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
114 /* Addapted from glibc */
116 typedef struct pa_atomic
{
120 #define PA_ATOMIC_INIT(v) { .value = (v) }
122 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
126 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
130 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
133 __asm
__volatile ( "lock; xaddl %0, %1"
134 : "=r" ( result
), "=m" ( a
-> value
)
135 : "0" ( i
), "m" ( a
-> value
));
140 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
141 return pa_atomic_add ( a
, - i
);
144 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
145 return pa_atomic_add ( a
, 1 );
148 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
149 return pa_atomic_sub ( a
, 1 );
152 static inline pa_bool_t
pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
155 __asm__
__volatile__ ( "lock; cmpxchgl %2, %1"
156 : "=a" ( result
), "=m" ( a
-> value
)
157 : "r" ( new_i
), "m" ( a
-> value
), "0" ( old_i
));
159 return result
== old_i
;
162 typedef struct pa_atomic_ptr
{
163 volatile unsigned long value
;
166 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
168 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
169 return ( void *) a
-> value
;
172 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
173 a
-> value
= ( unsigned long ) p
;
176 static inline pa_bool_t
pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
179 __asm__
__volatile__ ( "lock; cmpxchgq %q2, %1"
180 : "=a" ( result
), "=m" ( a
-> value
)
181 : "r" ( new_p
), "m" ( a
-> value
), "0" ( old_p
));
183 return result
== old_p
;
186 #elif defined(ATOMIC_ARM_INLINE_ASM)
189 These should only be enabled if we have ARMv6 or better.
192 typedef struct pa_atomic
{
196 #define PA_ATOMIC_INIT(v) { .value = (v) }
198 static inline void pa_memory_barrier ( void ) {
199 #ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
200 asm volatile ( "mcr p15, 0, r0, c7, c10, 5 @ dmb" );
204 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
209 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
214 /* Returns the previously set value */
215 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
216 unsigned long not_exclusive
;
217 int new_val
, old_val
;
221 asm volatile ( "ldrex %0, [%3] \n "
223 "strex %1, %2, [%3] \n "
224 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
225 : "r" (& a
-> value
), "Ir" ( i
)
227 } while ( not_exclusive
);
233 /* Returns the previously set value */
234 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
235 unsigned long not_exclusive
;
236 int new_val
, old_val
;
240 asm volatile ( "ldrex %0, [%3] \n "
242 "strex %1, %2, [%3] \n "
243 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
244 : "r" (& a
-> value
), "Ir" ( i
)
246 } while ( not_exclusive
);
252 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
253 return pa_atomic_add ( a
, 1 );
256 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
257 return pa_atomic_sub ( a
, 1 );
260 static inline pa_bool_t
pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
261 unsigned long not_equal
, not_exclusive
;
265 asm volatile ( "ldrex %0, [%2] \n "
268 "strexeq %0, %4, [%2] \n "
269 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
270 : "r" (& a
-> value
), "Ir" ( old_i
), "r" ( new_i
)
272 } while ( not_exclusive
&& ! not_equal
);
278 typedef struct pa_atomic_ptr
{
279 volatile unsigned long value
;
282 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
284 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
286 return ( void *) a
-> value
;
289 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
290 a
-> value
= ( unsigned long ) p
;
294 static inline pa_bool_t
pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
295 unsigned long not_equal
, not_exclusive
;
299 asm volatile ( "ldrex %0, [%2] \n "
302 "strexeq %0, %4, [%2] \n "
303 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
304 : "r" (& a
-> value
), "Ir" ( old_p
), "r" ( new_p
)
306 } while ( not_exclusive
&& ! not_equal
);
312 #elif defined(ATOMIC_ARM_LINUX_HELPERS)
314 /* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
315 information about these functions. The arm kernel helper functions first
317 Apply --disable-atomic-arm-linux-helpers flag to confugure if you prefere
318 inline asm implementation or you have an obsolete Linux kernel.
321 typedef void ( __kernel_dmb_t
)( void );
322 #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
324 static inline void pa_memory_barrier ( void ) {
325 #ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
330 /* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
331 typedef int ( __kernel_cmpxchg_t
)( int oldval
, int newval
, volatile int * ptr
);
332 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
334 /* This is just to get rid of all warnings */
335 typedef int ( __kernel_cmpxchg_u_t
)( unsigned long oldval
, unsigned long newval
, volatile unsigned long * ptr
);
336 #define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
338 typedef struct pa_atomic
{
342 #define PA_ATOMIC_INIT(v) { .value = (v) }
344 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
349 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
354 /* Returns the previously set value */
355 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
359 } while ( __kernel_cmpxchg ( old_val
, old_val
+ i
, & a
-> value
));
363 /* Returns the previously set value */
364 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
368 } while ( __kernel_cmpxchg ( old_val
, old_val
- i
, & a
-> value
));
372 /* Returns the previously set value */
373 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
374 return pa_atomic_add ( a
, 1 );
377 /* Returns the previously set value */
378 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
379 return pa_atomic_sub ( a
, 1 );
382 /* Returns TRUE when the operation was successful. */
383 static inline pa_bool_t
pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
386 failed
= !! __kernel_cmpxchg ( old_i
, new_i
, & a
-> value
);
387 } while ( failed
&& a
-> value
== old_i
);
391 typedef struct pa_atomic_ptr
{
392 volatile unsigned long value
;
395 #define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
397 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
399 return ( void *) a
-> value
;
402 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
403 a
-> value
= ( unsigned long ) p
;
407 static inline pa_bool_t
pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
410 failed
= !! __kernel_cmpxchg_u (( unsigned long ) old_p
, ( unsigned long ) new_p
, & a
-> value
);
411 } while ( failed
&& a
-> value
== ( unsigned long ) old_p
);
417 /* libatomic_ops based implementation */
419 #include <atomic_ops.h>
421 typedef struct pa_atomic
{
425 #define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }
427 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
428 return ( int ) AO_load_full (( AO_t
*) & a
-> value
);
431 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
432 AO_store_full (& a
-> value
, ( AO_t
) i
);
435 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
436 return ( int ) AO_fetch_and_add_full (& a
-> value
, ( AO_t
) i
);
439 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
440 return ( int ) AO_fetch_and_add_full (& a
-> value
, ( AO_t
) - i
);
443 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
444 return ( int ) AO_fetch_and_add1_full (& a
-> value
);
447 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
448 return ( int ) AO_fetch_and_sub1_full (& a
-> value
);
451 static inline pa_bool_t
pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
452 return AO_compare_and_swap_full (& a
-> value
, ( unsigned long ) old_i
, ( unsigned long ) new_i
);
455 typedef struct pa_atomic_ptr
{
459 #define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
461 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
462 return ( void *) AO_load_full (( AO_t
*) & a
-> value
);
465 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
466 AO_store_full (& a
-> value
, ( AO_t
) p
);
469 static inline pa_bool_t
pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
470 return AO_compare_and_swap_full (& a
-> value
, ( AO_t
) old_p
, ( AO_t
) new_p
);