/* pulseaudio: src/pulsecore/asyncq.c */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2006-2008 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <unistd.h>
27 #include <errno.h>
28
29 #include <pulse/xmalloc.h>
30
31 #include <pulsecore/atomic.h>
32 #include <pulsecore/log.h>
33 #include <pulsecore/thread.h>
34 #include <pulsecore/macro.h>
35 #include <pulsecore/core-util.h>
36 #include <pulsecore/llist.h>
37 #include <pulsecore/flist.h>
38 #include <pulsecore/fdsem.h>
39
40 #include "asyncq.h"
41
42 #define ASYNCQ_SIZE 256
43
44 /* For debugging purposes we can define _Y to put an extra thread
45 * yield between each operation. */
46
47 /* #define PROFILE */
48
49 #ifdef PROFILE
50 #define _Y pa_thread_yield()
51 #else
52 #define _Y do { } while(0)
53 #endif
54
/* A node of the writer-local overflow list: holds an item that could
 * not be stored in the atomic ring because it was full. Flushed back
 * into the ring by later write operations (see flush_postq()). */
struct localq {
    void *data;
    PA_LLIST_FIELDS(struct localq);
};
59
/* Asynchronous queue header. A ring of `size` atomic pointer cells is
 * laid out in memory directly after this struct (see PA_ASYNCQ_CELLS).
 * Intended for one reader and one writer; the fdsems provide blocking
 * waits and poll() integration. */
struct pa_asyncq {
    unsigned size;       /* number of cells; always a power of two */
    unsigned read_idx;   /* free-running; advanced only by the reader */
    unsigned write_idx;  /* free-running; advanced only by the writer */
    pa_fdsem *read_fdsem, *write_fdsem;

    /* Writer-local overflow list, filled by pa_asyncq_post() when the
     * ring is full. */
    PA_LLIST_HEAD(struct localq, localq);
    struct localq *last_localq;  /* next overflow entry to flush (oldest) */
    bool waiting_for_post;       /* set between write_before_poll()/write_after_poll() */
};
70
71 PA_STATIC_FLIST_DECLARE(localq, 0, pa_xfree);
72
73 #define PA_ASYNCQ_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_asyncq))))
74
75 static unsigned reduce(pa_asyncq *l, unsigned value) {
76 return value & (unsigned) (l->size - 1);
77 }
78
79 pa_asyncq *pa_asyncq_new(unsigned size) {
80 pa_asyncq *l;
81
82 if (!size)
83 size = ASYNCQ_SIZE;
84
85 pa_assert(pa_is_power_of_two(size));
86
87 l = pa_xmalloc0(PA_ALIGN(sizeof(pa_asyncq)) + (sizeof(pa_atomic_ptr_t) * size));
88
89 l->size = size;
90
91 PA_LLIST_HEAD_INIT(struct localq, l->localq);
92 l->last_localq = NULL;
93 l->waiting_for_post = false;
94
95 if (!(l->read_fdsem = pa_fdsem_new())) {
96 pa_xfree(l);
97 return NULL;
98 }
99
100 if (!(l->write_fdsem = pa_fdsem_new())) {
101 pa_fdsem_free(l->read_fdsem);
102 pa_xfree(l);
103 return NULL;
104 }
105
106 return l;
107 }
108
109 void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
110 struct localq *q;
111 pa_assert(l);
112
113 if (free_cb) {
114 void *p;
115
116 while ((p = pa_asyncq_pop(l, 0)))
117 free_cb(p);
118 }
119
120 while ((q = l->localq)) {
121 if (free_cb)
122 free_cb(q->data);
123
124 PA_LLIST_REMOVE(struct localq, l->localq, q);
125
126 if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
127 pa_xfree(q);
128 }
129
130 pa_fdsem_free(l->read_fdsem);
131 pa_fdsem_free(l->write_fdsem);
132 pa_xfree(l);
133 }
134
/* Writer side: store p into the cell at write_idx. The cmpxchg both
 * tests that the cell is empty (NULL) and fills it in one atomic step.
 * If the cell is still occupied the ring is full: fail with -1 when
 * wait_op is false, otherwise block on read_fdsem until the reader has
 * emptied that cell. Returns 0 on success. */
static int push(pa_asyncq*l, void *p, bool wait_op) {
    unsigned idx;
    pa_atomic_ptr_t *cells;

    pa_assert(l);
    pa_assert(p); /* NULL marks an empty cell, so it cannot be queued */

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->write_idx);

    if (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {

        if (!wait_op)
            return -1;

        /* pa_log("sleeping on push"); */

        /* Ring full: sleep until the reader signals progress, then
         * retry the same cell. */
        do {
            pa_fdsem_wait(l->read_fdsem);
        } while (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p));
    }

    _Y;
    l->write_idx++; /* only the writer touches write_idx */

    /* Wake a reader that may be blocked in pa_asyncq_pop(). */
    pa_fdsem_post(l->write_fdsem);

    return 0;
}
166
/* Writer side: try to move entries from the local overflow list into
 * the atomic ring, oldest first. Returns true when the list has been
 * fully drained, false if the ring filled up again (the failed entry
 * and everything newer stay queued for a later attempt). */
static bool flush_postq(pa_asyncq *l, bool wait_op) {
    struct localq *q;

    pa_assert(l);

    while ((q = l->last_localq)) {

        /* Push first; only advance the cursor once the item is safely
         * in the ring. */
        if (push(l, q->data, wait_op) < 0)
            return false;

        /* Entries are prepended, so q->prev is the next-oldest one. */
        l->last_localq = q->prev;

        PA_LLIST_REMOVE(struct localq, l->localq, q);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
            pa_xfree(q);
    }

    return true;
}
187
188 int pa_asyncq_push(pa_asyncq*l, void *p, bool wait_op) {
189 pa_assert(l);
190
191 if (!flush_postq(l, wait_op))
192 return -1;
193
194 return push(l, p, wait_op);
195 }
196
197 void pa_asyncq_post(pa_asyncq*l, void *p) {
198 struct localq *q;
199
200 pa_assert(l);
201 pa_assert(p);
202
203 if (flush_postq(l, false))
204 if (pa_asyncq_push(l, p, false) >= 0)
205 return;
206
207 /* OK, we couldn't push anything in the queue. So let's queue it
208 * locally and push it later */
209
210 if (pa_log_ratelimit(PA_LOG_WARN))
211 pa_log_warn("q overrun, queuing locally");
212
213 if (!(q = pa_flist_pop(PA_STATIC_FLIST_GET(localq))))
214 q = pa_xnew(struct localq, 1);
215
216 q->data = p;
217 PA_LLIST_PREPEND(struct localq, l->localq, q);
218
219 if (!l->last_localq)
220 l->last_localq = q;
221
222 return;
223 }
224
/* Reader side: take the next item from the cell at read_idx. Returns
 * NULL if the queue is empty and wait_op is false; with wait_op set,
 * blocks on write_fdsem until the writer fills the cell. */
void* pa_asyncq_pop(pa_asyncq*l, bool wait_op) {
    unsigned idx;
    void *ret;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {

        if (!wait_op)
            return NULL;

        /* pa_log("sleeping on pop"); */

        /* Empty: sleep until the writer signals progress, then re-read
         * the same cell. */
        do {
            pa_fdsem_wait(l->write_fdsem);
        } while (!(ret = pa_atomic_ptr_load(&cells[idx])));
    }

    pa_assert(ret);

    /* Guaranteed to succeed if we only have a single reader */
    pa_assert_se(pa_atomic_ptr_cmpxchg(&cells[idx], ret, NULL));

    _Y;
    l->read_idx++; /* only the reader touches read_idx */

    /* Wake a writer that may be blocked in push(). */
    pa_fdsem_post(l->read_fdsem);

    return ret;
}
261
262 int pa_asyncq_read_fd(pa_asyncq *q) {
263 pa_assert(q);
264
265 return pa_fdsem_get(q->write_fdsem);
266 }
267
/* Reader side: prepare for poll(). Returns -1 right away if an item is
 * already available (no need to poll at all), 0 once the write fdsem
 * has been armed. The loop re-checks the cell after each failed arming
 * attempt so a concurrently pushed item is never missed. */
int pa_asyncq_read_before_poll(pa_asyncq *l) {
    unsigned idx;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    for (;;) {
        /* Data already waiting: tell the caller to skip poll(). */
        if (pa_atomic_ptr_load(&cells[idx]))
            return -1;

        if (pa_fdsem_before_poll(l->write_fdsem) >= 0)
            return 0;
    }
}
287
288 void pa_asyncq_read_after_poll(pa_asyncq *l) {
289 pa_assert(l);
290
291 pa_fdsem_after_poll(l->write_fdsem);
292 }
293
294 int pa_asyncq_write_fd(pa_asyncq *q) {
295 pa_assert(q);
296
297 return pa_fdsem_get(q->read_fdsem);
298 }
299
/* Writer side: prepare for poll(). Tries to drain the local overflow
 * list; if the ring is full, arms read_fdsem so the writer can poll
 * for the reader making room, and records this in waiting_for_post for
 * the matching pa_asyncq_write_after_poll(). The loop re-tries the
 * flush whenever arming the fdsem fails. */
void pa_asyncq_write_before_poll(pa_asyncq *l) {
    pa_assert(l);

    for (;;) {

        /* Nothing left to flush: no need to wait on the reader. */
        if (flush_postq(l, false))
            break;

        if (pa_fdsem_before_poll(l->read_fdsem) >= 0) {
            l->waiting_for_post = true;
            break;
        }
    }
}
314
315 void pa_asyncq_write_after_poll(pa_asyncq *l) {
316 pa_assert(l);
317
318 if (l->waiting_for_post) {
319 pa_fdsem_after_poll(l->read_fdsem);
320 l->waiting_for_post = false;
321 }
322 }