/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <stdint.h>

#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/core-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
/* Algorithm is not perfect; in a few corner cases it will fail to pop
 * from the flist although it isn't empty, and fail to push into the
 * flist although it isn't full.
 *
 * We keep a fixed size array of entries, each item is either marked
 * UNUSED, USED or BUSY and contains a user data pointer. When pushing
 * into the queue we look for an UNUSED cell and mark it BUSY with a
 * CAS operation. If successful we use it and mark it USED, otherwise
 * we go on and look for the next UNUSED cell. The algorithm for
 * popping an item from the queue is practically inverse: look for a
 * USED cell and mark it BUSY with a CAS operation; after reading
 * from it mark it UNUSED again.
 *
 * To accelerate finding of used/unused cells we maintain a read and a
 * write index which is used like a ring buffer. After each push we
 * increase the write index and after each pop we increase the read
 * index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two and that the truncation can thus be done with a simple
 * bitwise AND when the index is read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write index and might thus be a few cells off the real
 * value. To deal with this we always look for N_EXTRA_SCAN extra
 * cells when pushing/popping entries.
 *
 * It might make sense to replace this implementation with a linked
 * list stack or queue, which however requires DCAS to be simple.
 * Patches for that are welcome.
 *
 * Please note that this algorithm is home grown. */
74 #define FLIST_SIZE 128
75 #define N_EXTRA_SCAN 2
77 /* For debugging purposes we can define _Y to put and extra thread
78 * yield between each operation. */
81 #define _Y pa_thread_yield()
83 #define _Y do { } while(0)
100 pa_atomic_t read_idx
;
101 pa_atomic_t write_idx
;
104 #define PA_FLIST_CELLS(x) ((struct cell*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))
106 pa_flist
*pa_flist_new(unsigned size
) {
112 pa_assert(pa_is_power_of_two(size
));
114 l
= pa_xmalloc0(PA_ALIGN(sizeof(pa_flist
)) + (sizeof(struct cell
) * size
));
118 pa_atomic_store(&l
->read_idx
, 0);
119 pa_atomic_store(&l
->write_idx
, 0);
120 pa_atomic_store(&l
->length
, 0);
125 static int reduce(pa_flist
*l
, int value
) {
126 return value
& (unsigned) (l
->size
- 1);
129 void pa_flist_free(pa_flist
*l
, pa_free_cb_t free_cb
) {
136 cells
= PA_FLIST_CELLS(l
);
138 idx
= reduce(l
, pa_atomic_load(&l
->read_idx
));
139 len
= pa_atomic_load(&l
->length
);
141 for (; len
> 0; len
--) {
143 if (pa_atomic_load(&cells
[idx
].state
) == STATE_USED
)
144 free_cb(cells
[idx
].data
);
146 idx
= reduce(l
, idx
+ 1);
153 int pa_flist_push(pa_flist
*l
, void *p
) {
160 cells
= PA_FLIST_CELLS(l
);
162 n
= len
= (int) l
->size
- pa_atomic_load(&l
->length
) + N_EXTRA_SCAN
;
164 idx
= reduce(l
, pa_atomic_load(&l
->write_idx
));
166 for (; n
> 0 ; n
--) {
169 if (pa_atomic_cmpxchg(&cells
[idx
].state
, STATE_UNUSED
, STATE_BUSY
)) {
171 pa_atomic_inc(&l
->write_idx
);
175 pa_atomic_store(&cells
[idx
].state
, STATE_USED
);
177 pa_atomic_inc(&l
->length
);
182 idx
= reduce(l
, idx
+ 1);
186 if (len
> N_EXTRA_SCAN
)
187 pa_log_warn("Didn't find free cell after %u iterations.", len
);
193 void* pa_flist_pop(pa_flist
*l
) {
199 cells
= PA_FLIST_CELLS(l
);
201 n
= len
= pa_atomic_load(&l
->length
) + N_EXTRA_SCAN
;
203 idx
= reduce(l
, pa_atomic_load(&l
->read_idx
));
205 for (; n
> 0 ; n
--) {
208 if (pa_atomic_cmpxchg(&cells
[idx
].state
, STATE_USED
, STATE_BUSY
)) {
211 pa_atomic_inc(&l
->read_idx
);
215 pa_atomic_store(&cells
[idx
].state
, STATE_UNUSED
);
218 pa_atomic_dec(&l
->length
);
223 idx
= reduce(l
, idx
+1);
227 if (len
> N_EXTRA_SCAN
)
228 pa_log_warn("Didn't find used cell after %u iterations.", len
);