src/pulsecore/flist.c
/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>

#include "flist.h"

/* The algorithm is not perfect: in a few corner cases it will fail
 * to pop from the flist although it isn't empty, or fail to push
 * into the flist although it isn't full.
 *
 * We keep a fixed-size array of entries; each item is an atomic
 * pointer.
 *
 * To accelerate finding used/unused cells we maintain a read and a
 * write index, used like ring buffer indexes. After each push we
 * increase the write index and after each pop we increase the read
 * index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two, so that the truncation can be done by applying a simple
 * AND on read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write index and might thus be a few cells off the real
 * value. To deal with this we always scan N_EXTRA_SCAN extra cells
 * when pushing/popping entries.
 *
 * It might make sense to replace this implementation with a linked
 * list stack or queue, which however requires DCAS to be simple.
 * Patches welcome.
 *
 * Please note that this algorithm is home-grown. */
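
/* A hypothetical interleaving that produces the drift described
 * above: a pusher CASes its pointer into a cell but is preempted
 * before it increments the length counter; a concurrent popper then
 * reads a length that is one lower than the number of cells actually
 * in use. Scanning N_EXTRA_SCAN extra cells absorbs such small,
 * transient discrepancies. */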

#define FLIST_SIZE 128
#define N_EXTRA_SCAN 3

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif

struct pa_flist {
    unsigned size;
    pa_atomic_t length;
    pa_atomic_t read_idx;
    pa_atomic_t write_idx;
};

#define PA_FLIST_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))
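
/* The cells live in the same allocation as the list header, starting
 * at the first properly aligned address after the struct; see the
 * allocation in pa_flist_new() below. Layout, for a list of size n:
 *
 *   [ struct pa_flist | padding | cell 0 | cell 1 | ... | cell n-1 ]
 */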

pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    pa_assert(pa_is_power_of_two(size));

    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(pa_atomic_ptr_t) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}
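
/* A minimal usage sketch (hypothetical caller, not part of this
 * file): push may fail and pop may return NULL, so a caller has to
 * handle both outcomes itself:
 *
 *   pa_flist *fl = pa_flist_new(0);      // 0 picks the default FLIST_SIZE
 *   void *obj = pa_xmalloc(64);
 *   if (pa_flist_push(fl, obj) < 0)
 *       pa_xfree(obj);                   // list full (or appeared full)
 *   void *q;
 *   if ((q = pa_flist_pop(fl)))
 *       pa_xfree(q);                     // we own popped entries again
 *   pa_flist_free(fl, pa_xfree);         // frees any remaining entries
 */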

static unsigned reduce(pa_flist *l, unsigned value) {
    return value & (l->size - 1);
}
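
/* Example: with the default size of 128, reduce() masks with 127, so
 * an unwrapped index of 130 maps to cell 130 & 127 == 2. */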

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        pa_atomic_ptr_t *cells;
        unsigned idx;

        cells = PA_FLIST_CELLS(l);

        for (idx = 0; idx < l->size; idx++) {
            void *p;

            if ((p = pa_atomic_ptr_load(&cells[idx])))
                free_cb(p);
        }
    }

    pa_xfree(l);
}

int pa_flist_push(pa_flist *l, void *p) {
    unsigned idx, n, len;
    pa_atomic_ptr_t *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    n = len = l->size + N_EXTRA_SCAN - (unsigned) pa_atomic_load(&l->length);

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {

        _Y;

        if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {

            _Y;
            pa_atomic_inc(&l->write_idx);

            _Y;
            pa_atomic_inc(&l->length);

            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a free cell after %u iterations.", len);
#endif

    return -1;
}
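
/* Note that a failed push does not take ownership of p; it is up to
 * the caller to dispose of the object, e.g. by freeing it directly
 * instead of recycling it through the flist. */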

void *pa_flist_pop(pa_flist *l) {
    unsigned idx, len, n;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = len = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {

            _Y;
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a used cell after %u iterations.", len);
#endif

    return NULL;
}
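
/* As noted at the top of this file, a NULL return does not
 * necessarily mean the list is empty; it only means no used cell was
 * found within the scan window. Callers that use an flist as an
 * object cache therefore typically fall back to a fresh allocation
 * when pop returns NULL. */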