#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t), though
 * that is very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we use
 * libatomic_ops.
 */
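
/*
 * Illustrative usage sketch (an addition, not part of the original file):
 * every implementation below provides the same API. The counter name is
 * hypothetical; add/sub/inc/dec all return the previously set value.
 *
 *     static pa_atomic_t counter = PA_ATOMIC_INIT(0);
 *
 *     int prev = pa_atomic_inc(&counter);       // prev == 0, counter == 1
 *     if (pa_atomic_dec(&counter) == 1)
 *         ;                                     // we dropped it back to 0
 *     pa_atomic_cmpxchg(&counter, 0, 5);        // 0 -> 5, non-zero on success
 */
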
#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
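
/*
 * Hedged usage sketch (an addition, not part of the original file): since
 * pa_atomic_cmpxchg() only succeeds while the current value still equals
 * old_i, read-modify-write operations beyond add/sub can be built from a
 * retry loop. The helper name pa_atomic_max is hypothetical.
 *
 *     static inline void pa_atomic_max(pa_atomic_t *a, int i) {
 *         int cur;
 *         do {
 *             cur = pa_atomic_load(a);
 *             if (cur >= i)
 *                 return;
 *         } while (!pa_atomic_cmpxchg(a, cur, i));
 *     }
 */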

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
}

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

    return result;
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    /* The "memory" clobber keeps the compiler from reordering memory
       accesses across the hardware barrier. */
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb" : : : "memory");
#endif
}

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "add %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "sub %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or you have an obsolete Linux
   kernel.
*/

/* Needed for pa_bool_t used below; added here on the assumption that, as
   elsewhere in this tree, pa_bool_t is defined in pulsecore/macro.h. */
#include <pulsecore/macro.h>

/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* This is just to get rid of all warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

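/*
 * Hedged sketch (an addition, not in the original file): further primitives
 * can be layered on __kernel_cmpxchg() the same way pa_atomic_add() below
 * is; the helper returns 0 when the store succeeded. For example, a
 * hypothetical atomic swap:
 *
 *     static inline int pa_atomic_xchg(pa_atomic_t *a, int i) {
 *         int old_val;
 *         do {
 *             old_val = a->value;
 *         } while (__kernel_cmpxchg(old_val, i, &a->value));
 *         return old_val;
 *     }
 */
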
typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while (failed && a->value == old_i);
    return !failed;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while (failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */

#include <atomic_ops.h>

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif
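
/*
 * Hedged usage sketch (an addition, not part of the original file):
 * whichever implementation was selected above, pa_atomic_ptr_cmpxchg()
 * supports the classic lock-free push. The node type and list head below
 * are hypothetical.
 *
 *     struct node { struct node *next; };
 *     static pa_atomic_ptr_t head = PA_ATOMIC_PTR_INIT(NULL);
 *
 *     static void push(struct node *n) {
 *         void *old;
 *         do {
 *             old = pa_atomic_ptr_load(&head);
 *             n->next = (struct node *) old;
 *         } while (!pa_atomic_ptr_cmpxchg(&head, old, n));
 *     }
 */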

#endif