/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>

#include "flist.h"

/* The algorithm is not perfect: in a few corner cases it will fail to
 * pop from the flist although it isn't empty, and fail to push into
 * the flist although it isn't full.
 *
 * We keep a fixed-size array of entries; each item is either marked
 * UNUSED, USED or BUSY and contains a user data pointer. When pushing
 * into the queue we look for an UNUSED cell and mark it BUSY with a
 * CAS operation. If successful we use it and mark it USED, otherwise
 * we go on and look for the next UNUSED cell. The algorithm for
 * popping an item from the queue is practically the inverse: look for
 * a USED cell and mark it BUSY with a CAS operation; after reading
 * from it, mark it UNUSED again.
 *
 * To accelerate the search for used/unused cells we maintain a read
 * and a write index which are used like a ring buffer. After each
 * push we increase the write index and after each pop we increase the
 * read index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two, so that the truncation can be done by applying a simple AND
 * on read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write index and might thus be a few cells off the real value.
 * To deal with this we always scan N_EXTRA_SCAN extra cells when
 * pushing/popping entries.
 *
 * It might make sense to replace this implementation with a
 * linked-list stack or queue; however, doing that simply would
 * require DCAS. Patches welcome.
 *
 * Please note that this algorithm is home-grown. */
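
/* To illustrate the intended use, here is a minimal sketch of a full
 * round trip through the list ("item" is a hypothetical user pointer;
 * pa_flist_new(), pa_flist_push(), pa_flist_pop() and pa_flist_free()
 * are all defined below):
 *
 *     pa_flist *fl = pa_flist_new(0);   // 0 falls back to FLIST_SIZE
 *
 *     if (pa_flist_push(fl, item) < 0)
 *         ;                             // "full": caller keeps the item
 *
 *     void *p = pa_flist_pop(fl);       // NULL when "empty"
 *
 *     pa_flist_free(fl, NULL);          // or pass a pa_free_cb_t to
 *                                       // release any leftover items
 */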

#define FLIST_SIZE 128
#define N_EXTRA_SCAN 2

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif

enum {
    STATE_UNUSED,
    STATE_USED,
    STATE_BUSY
};
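
/* A sketch of the legal state transitions, as described above (the
 * UNUSED -> BUSY and USED -> BUSY steps are always taken with a CAS
 * via pa_atomic_cmpxchg()):
 *
 *     push: UNUSED -> BUSY -> USED
 *     pop:  USED -> BUSY -> UNUSED
 */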

struct cell {
    pa_atomic_t state;
    void *data;
};

struct pa_flist {
    unsigned size;
    pa_atomic_t length;
    pa_atomic_t read_idx;
    pa_atomic_t write_idx;
};

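/* The cell array is not a separate allocation: it lives in the same
 * memory block, directly after the (alignment-padded) header, which is
 * what the pa_xmalloc0() call in pa_flist_new() below sets up:
 *
 *     [ struct pa_flist | padding | cell 0 | cell 1 | ... | cell size-1 ]
 */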
#define PA_FLIST_CELLS(x) ((struct cell*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))

pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    pa_assert(pa_is_power_of_two(size));

    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(struct cell) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}

/* Truncate an ever-growing index to a valid cell index by masking with
 * size - 1 (valid because size is a power of two). */
static int reduce(pa_flist *l, int value) {
    return value & (unsigned) (l->size - 1);
}
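
/* For example, with the default size of 128 the mask is 127 (0x7f), so
 * reduce() maps an unbounded index of, say, 130 to cell 2, exactly as
 * 130 % 128 would, but without a division. */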

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        struct cell *cells;
        int len, idx;

        cells = PA_FLIST_CELLS(l);

        idx = reduce(l, pa_atomic_load(&l->read_idx));
        len = pa_atomic_load(&l->length);

        for (; len > 0; len--) {

            if (pa_atomic_load(&cells[idx].state) == STATE_USED)
                free_cb(cells[idx].data);

            idx = reduce(l, idx + 1);
        }
    }

    pa_xfree(l);
}

int pa_flist_push(pa_flist *l, void *p) {
    int idx, len, n;
    struct cell *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    n = len = (int) l->size - pa_atomic_load(&l->length) + N_EXTRA_SCAN;
    _Y;
    idx = reduce(l, pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {
        _Y;

        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_UNUSED, STATE_BUSY)) {
            _Y;
            pa_atomic_inc(&l->write_idx);
            _Y;
            cells[idx].data = p;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_USED);
            _Y;
            pa_atomic_inc(&l->length);
            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a free cell after %i iterations.", len);
#endif

    return -1;
}

void* pa_flist_pop(pa_flist *l) {
    int idx, len, n;
    struct cell *cells;

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = len = pa_atomic_load(&l->length) + N_EXTRA_SCAN;
    _Y;
    idx = reduce(l, pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        _Y;

        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_USED, STATE_BUSY)) {
            void *p;
            _Y;
            pa_atomic_inc(&l->read_idx);
            _Y;
            p = cells[idx].data;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_UNUSED);
            _Y;

            pa_atomic_dec(&l->length);
            return p;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a used cell after %i iterations.", len);
#endif

    return NULL;
}