/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <assert.h>

#include <pulsecore/atomic.h>
#include <pulsecore/core-util.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulse/xmalloc.h>

#include "flist.h"

/* The algorithm is not perfect: in a few corner cases it will fail to
 * pop from the flist although it isn't empty, and fail to push into
 * the flist although it isn't full.
 *
 * We keep a fixed-size array of entries. Each item is marked either
 * UNUSED, USED or BUSY and contains a user data pointer. When pushing
 * into the queue we look for an UNUSED cell and mark it BUSY with a
 * CAS operation. If successful we use it and mark it USED, otherwise
 * we go on and look for the next UNUSED cell. The algorithm for
 * popping an item from the queue is practically the inverse: look for
 * a USED cell and mark it BUSY with a CAS operation, and after reading
 * from it mark it UNUSED again.
 *
 * To accelerate finding used/unused cells we maintain a read and a
 * write index which are used like a ring buffer. After each push we
 * increase the write index and after each pop we increase the read
 * index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two, so that the truncation can be done by applying a simple
 * bitwise AND on read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write index and might thus be a few cells off the real value.
 * To deal with this we always scan N_EXTRA_SCAN extra cells when
 * pushing/popping entries.
 *
 * It might make sense to replace this implementation with a linked
 * list stack or queue, which however requires DCAS to be simple.
 * Patches welcome.
 *
 * Please note that this algorithm is home grown. */

#define FLIST_SIZE 128
#define N_EXTRA_SCAN 2

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif
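
/* As a rough usage sketch (not part of the original file; the guard
 * macro FLIST_USAGE_EXAMPLE and the helper below are purely
 * illustrative), a caller would typically use an flist to recycle
 * objects between threads without taking a lock: */
#ifdef FLIST_USAGE_EXAMPLE
static void flist_usage_example(void) {
    pa_flist *free_buffers = pa_flist_new(0); /* 0 picks the default FLIST_SIZE */
    void *buf;

    /* Prefer reusing a previously released buffer over allocating a new one. */
    if (!(buf = pa_flist_pop(free_buffers)))
        buf = pa_xmalloc(1024);

    /* ... use buf ... */

    /* Hand the buffer back; if the flist happens to be full, free it instead. */
    if (pa_flist_push(free_buffers, buf) < 0)
        pa_xfree(buf);

    /* Release anything still cached, then the list itself. */
    pa_flist_free(free_buffers, pa_xfree);
}
#endif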

enum {
    STATE_UNUSED,
    STATE_USED,
    STATE_BUSY
};

struct cell {
    pa_atomic_t state;
    void *data;
};

struct pa_flist {
    unsigned size;
    pa_atomic_t length;
    pa_atomic_t read_idx;
    pa_atomic_t write_idx;
};

/* The cell array is allocated directly after the pa_flist header, see
 * pa_flist_new(). */
#define PA_FLIST_CELLS(x) ((struct cell*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))

pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    assert(pa_is_power_of_two(size));

    /* Allocate the header and the cell array in one block. */
    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(struct cell) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}
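
/* Truncate a monotonically increasing index to a valid cell offset.
 * Because the size is a power of two, a bitwise AND with size - 1 is
 * equivalent to taking the index modulo size (e.g. for size 128,
 * reduce(l, 130) yields 2). */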
static int reduce(pa_flist *l, int value) {
    return value & (unsigned) (l->size - 1);
}

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    assert(l);

    if (free_cb) {
        struct cell *cells;
        int len, idx;

        cells = PA_FLIST_CELLS(l);

        /* Walk the ring from the read index and release every cell
         * that still holds user data. */
        idx = reduce(l, pa_atomic_load(&l->read_idx));
        len = pa_atomic_load(&l->length);

        for (; len > 0; len--) {

            if (pa_atomic_load(&cells[idx].state) == STATE_USED)
                free_cb(cells[idx].data);

            idx = reduce(l, idx + 1);
        }
    }

    pa_xfree(l);
}
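
/* Push p onto the flist. Returns 0 on success and a negative value if
 * no free cell could be claimed. Each candidate cell is claimed with a
 * single CAS that moves it from UNUSED to BUSY; only the winner of
 * that race writes the data pointer and publishes the cell as USED. */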
int pa_flist_push(pa_flist *l, void *p) {
    int idx, n, len;
    struct cell *cells;

    assert(l);
    assert(p);

    cells = PA_FLIST_CELLS(l);

    /* Scan at most all currently unused cells, plus a few extra to
     * compensate for the length counter lagging behind. */
    n = len = (int) l->size - pa_atomic_load(&l->length) + N_EXTRA_SCAN;

    idx = reduce(l, pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {
        _Y;

        /* Claim the cell by flipping it from UNUSED to BUSY. */
        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_UNUSED, STATE_BUSY)) {
            _Y;
            pa_atomic_inc(&l->write_idx);
            _Y;
            cells[idx].data = p;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_USED);
            _Y;
            pa_atomic_inc(&l->length);
            return 0;
        }

        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log("WARNING: Didn't find free cell after %u iterations.", len);
#endif

    return -1;
}
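
/* Pop one entry from the flist, or return NULL if no used cell could
 * be claimed. The cell is claimed with a CAS from USED to BUSY, its
 * data pointer is read out, and only then is it recycled as UNUSED. */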
void* pa_flist_pop(pa_flist *l) {
    int idx, len, n;
    struct cell *cells;
    void *p;

    assert(l);

    cells = PA_FLIST_CELLS(l);

    /* Scan at most all currently used cells, plus a few extra to
     * compensate for the length counter lagging behind. */
    n = len = pa_atomic_load(&l->length) + N_EXTRA_SCAN;

    idx = reduce(l, pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        _Y;

        /* Claim the cell by flipping it from USED to BUSY. */
        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_USED, STATE_BUSY)) {
            _Y;
            pa_atomic_inc(&l->read_idx);
            _Y;
            p = cells[idx].data;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_UNUSED);
            _Y;
            pa_atomic_dec(&l->length);
            return p;
        }

        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log("WARNING: Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}