/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <pulse/xmalloc.h>
27
28 #include <pulsecore/atomic.h>
29 #include <pulsecore/log.h>
30 #include <pulsecore/thread.h>
31 #include <pulsecore/macro.h>
32 #include <pulsecore/core-util.h>
33 #include <pulsecore/macro.h>
34
35 #include "flist.h"
36
/* The algorithm is not perfect: in a few corner cases it will fail to
 * pop from the flist although it isn't empty, or fail to push into
 * the flist although it isn't full.
 *
 * We keep a fixed-size array of entries. Each item is marked either
 * UNUSED, USED or BUSY and contains a user data pointer. When pushing
 * into the queue we look for an UNUSED cell and mark it BUSY with a
 * CAS operation. If successful we use it and mark it USED; otherwise
 * we go on and look for the next UNUSED cell. The algorithm for
 * popping an item from the queue is practically the inverse: look for
 * a USED cell and mark it BUSY with a CAS operation, and after
 * reading from it mark it UNUSED again.
 *
 * To accelerate finding used/unused cells we maintain a read and a
 * write index which are used like a ring buffer. After each push we
 * increase the write index and after each pop we increase the read
 * index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two, so that the truncation can be done by applying a simple AND
 * on read.
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells. From
 * this value the number of unused cells is easily calculated. Please
 * note that the length value is not updated atomically with the read
 * and write indexes and might thus be a few cells off the real
 * value. To deal with this we always scan N_EXTRA_SCAN extra cells
 * when pushing/popping entries.
 *
 * It might make sense to replace this implementation with a linked
 * list stack or queue, which however requires DCAS to be simple.
 * Patches welcome.
 *
 * Please note that this algorithm is home-grown. */
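
/* As a usage sketch (hypothetical caller, not part of this file): a
 * typical consumer treats the flist as a lock-free cache of recycled
 * objects, falling back on the heap when the list runs empty and
 * freeing for real when it runs full. Only pa_flist_new(),
 * pa_flist_push() and pa_flist_pop() from this API are relied on;
 * "cache" and "struct entry" are made up for illustration:
 *
 *     static pa_flist *cache;           // assume pa_flist_new(0) ran at startup
 *
 *     struct entry *e;
 *     if (!(e = pa_flist_pop(cache)))   // reuse a recycled entry if we can
 *         e = pa_xnew(struct entry, 1); // cache empty: allocate fresh
 *     ...
 *     if (pa_flist_push(cache, e) < 0)  // hand it back when done with it
 *         pa_xfree(e);                  // cache full: free for real
 */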

#define FLIST_SIZE 128
#define N_EXTRA_SCAN 2

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif

enum {
    STATE_UNUSED,
    STATE_USED,
    STATE_BUSY
};
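
/* Each cell walks a small state machine: a push takes it
 * UNUSED -> BUSY -> USED, a pop takes it USED -> BUSY -> UNUSED. BUSY
 * marks the window in which exactly one thread owns the cell's data
 * pointer, so the CAS into BUSY is what serializes concurrent pushers
 * and poppers on the same cell. */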

struct cell {
    pa_atomic_t state;
    void *data;
};

struct pa_flist {
    unsigned size;
    pa_atomic_t length;
    pa_atomic_t read_idx;
    pa_atomic_t write_idx;
};

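/* The cell array is not stored in struct pa_flist itself:
 * pa_flist_new() allocates the header and the cells in one block,
 * with the array beginning at the first aligned address past the
 * header. This macro recovers a pointer to that array. */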
#define PA_FLIST_CELLS(x) ((struct cell*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))

pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    pa_assert(pa_is_power_of_two(size));

    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(struct cell) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}

static int reduce(pa_flist *l, int value) {
    return value & (unsigned) (l->size - 1);
}
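
/* For example, with the default size of 128 the mask is 127 (0x7f),
 * so the monotonically increasing indexes wrap around the array:
 * reduce() maps 127 -> 127, 128 -> 0 and 130 -> 2. This is why
 * pa_flist_new() asserts that the size is a power of two. */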

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        struct cell *cells;
        int len, idx;

        cells = PA_FLIST_CELLS(l);

        idx = reduce(l, pa_atomic_load(&l->read_idx));
        len = pa_atomic_load(&l->length);

        for (; len > 0; len--) {

            if (pa_atomic_load(&cells[idx].state) == STATE_USED)
                free_cb(cells[idx].data);

            idx = reduce(l, idx + 1);
        }
    }

    pa_xfree(l);
}

int pa_flist_push(pa_flist *l, void *p) {
    int idx, len, n;
    struct cell *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    n = len = (int) l->size - pa_atomic_load(&l->length) + N_EXTRA_SCAN;
    _Y;
    idx = reduce(l, pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {
        _Y;

        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_UNUSED, STATE_BUSY)) {
            _Y;
            pa_atomic_inc(&l->write_idx);
            _Y;
            cells[idx].data = p;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_USED);
            _Y;
            pa_atomic_inc(&l->length);
            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a free cell after %i iterations.", len);
#endif

    return -1;
}

void* pa_flist_pop(pa_flist *l) {
    int idx, len, n;
    struct cell *cells;

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    n = len = pa_atomic_load(&l->length) + N_EXTRA_SCAN;
    _Y;
    idx = reduce(l, pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        _Y;

        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_USED, STATE_BUSY)) {
            void *p;
            _Y;
            pa_atomic_inc(&l->read_idx);
            _Y;
            p = cells[idx].data;
            _Y;
            pa_atomic_store(&cells[idx].state, STATE_UNUSED);
            _Y;

            pa_atomic_dec(&l->length);
            return p;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find a used cell after %i iterations.", len);
#endif

    return NULL;
}