/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>

#include "flist.h"

/* The algorithm is not perfect: in a few corner cases it will fail
 * to pop from the flist even though it isn't empty, and fail to push
 * into the flist even though it isn't full.
 *
 * We keep a fixed-size array of entries; each item is an atomic
 * pointer.
 *
 * To accelerate finding used/unused cells we maintain a read and a
 * write index, which are used like ring buffer indexes. After each
 * push we increase the write index and after each pop we increase
 * the read index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two, so that truncation can be done with a simple AND on read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write index and might thus be a few cells off the real
 * value. To deal with this we always scan N_EXTRA_SCAN extra cells
 * when pushing/popping entries.
 *
 * It might make sense to replace this implementation with a
 * linked-list stack or queue, which would however require DCAS to be
 * simple to do. Patches welcome.
 *
 * Please note that this algorithm is home-grown. */
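
/* A minimal usage sketch (illustrative only, not part of the original
 * file; the "struct item" type and the variable names are
 * hypothetical). A pa_flist is typically used as a lock-free cache of
 * preallocated objects:
 *
 *     pa_flist *cache = pa_flist_new(0);   // 0 picks the default FLIST_SIZE
 *     struct item *i;
 *
 *     if (!(i = pa_flist_pop(cache)))      // try to reuse a cached item...
 *         i = pa_xnew0(struct item, 1);    // ...or allocate a fresh one
 *
 *     if (pa_flist_push(cache, i) < 0)     // hand the item back to the cache
 *         pa_xfree(i);                     // cache full: just free it
 *
 *     pa_flist_free(cache, pa_xfree);      // frees any items still cached
 */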

#define FLIST_SIZE 128
#define N_EXTRA_SCAN 3

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif

struct pa_flist {
    unsigned size;         /* number of cells; always a power of two */
    pa_atomic_t length;    /* approximate number of used cells */
    pa_atomic_t read_idx;  /* monotonically increasing; reduce() masks it */
    pa_atomic_t write_idx; /* monotonically increasing; reduce() masks it */
};

/* The cell array is placed directly after the aligned header, so the
 * whole flist is a single contiguous allocation. */
#define PA_FLIST_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))
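
/* For illustration (not in the original source): for pa_flist_new(4)
 * the allocation looks roughly like this, with the exact padding
 * depending on PA_ALIGN and the platform:
 *
 *     [ struct pa_flist | padding | cells[0] cells[1] cells[2] cells[3] ]
 *
 * PA_FLIST_CELLS(l) returns the address of cells[0]. */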

pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    pa_assert(pa_is_power_of_two(size));

    /* Allocate header and cell array in one zeroed block */
    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(pa_atomic_ptr_t) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}

static unsigned reduce(pa_flist *l, unsigned value) {
    /* size is a power of two, so this mask is value % l->size */
    return value & (l->size - 1);
}
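
/* Worked example (for illustration): with the default size of 128 the
 * mask is 127 (0x7F), so reduce(l, 130) == 130 & 127 == 2: the
 * unbounded indexes wrap cleanly around the 128-cell array. */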

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        pa_atomic_ptr_t *cells;
        unsigned idx;

        cells = PA_FLIST_CELLS(l);

        /* Free whatever is still stored in the list */
        for (idx = 0; idx < l->size; idx++) {
            void *p;

            if ((p = pa_atomic_ptr_load(&cells[idx])))
                free_cb(p);
        }
    }

    pa_xfree(l);
}

int pa_flist_push(pa_flist *l, void *p) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    /* Scan at most the supposedly free cells, plus N_EXTRA_SCAN in
     * case the length counter lags behind */
    n = l->size + N_EXTRA_SCAN - (unsigned) pa_atomic_load(&l->length);

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {

        _Y;

        /* Claim the cell: the CAS succeeds only if it is still NULL */
        if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {

            _Y;
            pa_atomic_inc(&l->write_idx);

            _Y;
            pa_atomic_inc(&l->length);

            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find free cell after %u iterations.", len);
#endif

    return -1;
}

void* pa_flist_pop(pa_flist *l) {
    unsigned idx, n;
    pa_atomic_ptr_t *cells;
#ifdef PROFILE
    unsigned len;
#endif

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    /* Scan at most the supposedly used cells, plus N_EXTRA_SCAN in
     * case the length counter lags behind */
    n = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

#ifdef PROFILE
    len = n;
#endif

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            /* Another thread may pop the same cell first; if the CAS
             * fails, retry this cell */
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}