]> code.delx.au - pulseaudio/blob - src/pulsecore/memblockq.c
drop chunk argument from various drop() functions, since it doesn't make any sense...
[pulseaudio] / src / pulsecore / memblockq.c
1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7
8 PulseAudio is free software; you can redistribute it and/or modify
9 it under the terms of the GNU Lesser General Public License as published
10 by the Free Software Foundation; either version 2 of the License,
11 or (at your option) any later version.
12
13 PulseAudio is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public License
19 along with PulseAudio; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 USA.
22 ***/
23
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
27
28 #include <sys/time.h>
29 #include <time.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/log.h>
37 #include <pulsecore/mcalign.h>
38 #include <pulsecore/macro.h>
39 #include <pulsecore/flist.h>
40
41 #include "memblockq.h"
42
/* One entry in a memblockq's doubly-linked list of queued chunks.
 * `index` is the absolute byte position of this chunk in the queue's
 * 64-bit read/write index space; gaps between entries are holes. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};
48
/* Lock-free free list recycling list_item allocations across all queues */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
50
/* A FIFO of reference-counted memory chunks addressed by absolute
 * 64-bit byte indexes. Ranges not covered by any chunk read back as
 * silence (see pa_memblockq_peek()). */
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;           /* chunk list, sorted by index */
    unsigned n_blocks;                                /* entries in the list */
    size_t maxlength, tlength, base, prebuf, minreq;  /* limits; kept multiples of base by _new() */
    int64_t read_index, write_index;                  /* absolute byte positions */
    int in_prebuf;                                    /* non-zero while prebuffering */
    pa_memblock *silence;                             /* optional block returned for holes */
    pa_mcalign *mcalign;                              /* lazily created by push_align() */
};
60
61 pa_memblockq* pa_memblockq_new(
62 int64_t idx,
63 size_t maxlength,
64 size_t tlength,
65 size_t base,
66 size_t prebuf,
67 size_t minreq,
68 pa_memblock *silence) {
69
70 pa_memblockq* bq;
71
72 pa_assert(base > 0);
73 pa_assert(maxlength >= base);
74
75 bq = pa_xnew(pa_memblockq, 1);
76 bq->blocks = bq->blocks_tail = NULL;
77 bq->n_blocks = 0;
78
79 bq->base = base;
80 bq->read_index = bq->write_index = idx;
81
82 pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
83 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq);
84
85 bq->maxlength = ((maxlength+base-1)/base)*base;
86 pa_assert(bq->maxlength >= base);
87
88 bq->tlength = ((tlength+base-1)/base)*base;
89 if (bq->tlength <= 0 || bq->tlength > bq->maxlength)
90 bq->tlength = bq->maxlength;
91
92 bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
93 bq->prebuf = ((bq->prebuf+base-1)/base)*base;
94 if (bq->prebuf > bq->maxlength)
95 bq->prebuf = bq->maxlength;
96
97 bq->minreq = (minreq/base)*base;
98
99 if (bq->minreq > bq->tlength - bq->prebuf)
100 bq->minreq = bq->tlength - bq->prebuf;
101
102 if (!bq->minreq)
103 bq->minreq = 1;
104
105 pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
106 (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);
107
108 bq->in_prebuf = bq->prebuf > 0;
109 bq->silence = silence ? pa_memblock_ref(silence) : NULL;
110 bq->mcalign = NULL;
111
112 return bq;
113 }
114
115 void pa_memblockq_free(pa_memblockq* bq) {
116 pa_assert(bq);
117
118 pa_memblockq_flush(bq);
119
120 if (bq->silence)
121 pa_memblock_unref(bq->silence);
122
123 if (bq->mcalign)
124 pa_mcalign_free(bq->mcalign);
125
126 pa_xfree(bq);
127 }
128
129 static void drop_block(pa_memblockq *bq, struct list_item *q) {
130 pa_assert(bq);
131 pa_assert(q);
132
133 pa_assert(bq->n_blocks >= 1);
134
135 if (q->prev)
136 q->prev->next = q->next;
137 else
138 bq->blocks = q->next;
139
140 if (q->next)
141 q->next->prev = q->prev;
142 else
143 bq->blocks_tail = q->prev;
144
145 pa_memblock_unref(q->chunk.memblock);
146
147 if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
148 pa_xfree(q);
149
150 bq->n_blocks--;
151 }
152
153 static int can_push(pa_memblockq *bq, size_t l) {
154 int64_t end;
155
156 pa_assert(bq);
157
158 if (bq->read_index > bq->write_index) {
159 size_t d = bq->read_index - bq->write_index;
160
161 if (l > d)
162 l -= d;
163 else
164 return 1;
165 }
166
167 end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;
168
169 /* Make sure that the list doesn't get too long */
170 if (bq->write_index + (int64_t)l > end)
171 if (bq->write_index + l - bq->read_index > bq->maxlength)
172 return 0;
173
174 return 1;
175 }
176
/* Insert uchunk's data at the current write index. Already-queued data
 * that the new chunk overlaps is dropped, truncated or split so the new
 * data wins. The chunk length must be a multiple of the queue's base.
 * A reference is taken on the memblock; the caller keeps its own.
 * Returns 0 on success, -1 if the chunk is misaligned or would push the
 * queue over maxlength. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {

    struct list_item *q, *n;
    pa_memchunk chunk;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    /* Only whole frames may be queued */
    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    /* Work on a local copy so the caller's chunk stays untouched */
    chunk = *uchunk;

    if (bq->read_index > bq->write_index) {

        /* We currently have a buffer underflow, we need to drop some
         * incoming data */

        size_t d = bq->read_index - bq->write_index;

        if (chunk.length > d) {
            chunk.index += d;
            chunk.length -= d;
            bq->write_index += d;
        } else {
            /* We drop the incoming data completely */
            bq->write_index += chunk.length;
            return 0;
        }
    }

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    q = bq->blocks_tail;
    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
            bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; if nothing is left of it, drop it entirely */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        /* q is the entry the new chunk goes directly after */
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks: if the new chunk continues q's data
         * in the same memblock with no hole in between, just extend q */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            return 0;
        }
    } else
        /* New chunk goes in front of the whole list */
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));


    /* Allocate a fresh node (recycled from the flist when possible) */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    /* Link n in after q (or at the head when q is NULL) */
    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;
    return 0;
}
341
342 static int memblockq_check_prebuf(pa_memblockq *bq) {
343 pa_assert(bq);
344
345 if (bq->in_prebuf) {
346
347 if (pa_memblockq_get_length(bq) < bq->prebuf)
348 return 1;
349
350 bq->in_prebuf = 0;
351 return 0;
352 } else {
353
354 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
355 bq->in_prebuf = 1;
356 return 1;
357 }
358
359 return 0;
360 }
361 }
362
/* Return (in *chunk) the data at the current read index without
 * consuming it; pa_memblockq_drop() advances the read pointer. A
 * reference is taken on any returned memblock. If the read index falls
 * into a hole, the configured silence block is returned instead (with
 * chunk->length bounded by the distance to the next real chunk), or —
 * when no silence block was given — a NULL memblock plus the hole
 * length. Returns 0 on success, -1 while prebuffering or when no
 * silence block is set and the queue is empty. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (memblockq_check_prebuf(bq))
        return -1;

    /* Do we need to spit out silence? */
    if (!bq->blocks || bq->blocks->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? 0 means "unbounded" here
         * (no real data queued at all) */
        length = bq->blocks ? bq->blocks->index - bq->read_index : 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            chunk->memblock = pa_memblock_ref(bq->silence);

            /* Cap at the silence block's size */
            if (!length || length > pa_memblock_get_length(chunk->memblock))
                length = pa_memblock_get_length(chunk->memblock);

            chunk->length = length;
        } else {

            /* No silence block configured: fail if the queue is
             * completely empty, otherwise report the hole size with a
             * NULL memblock so the caller can synthesize silence */
            if (!bq->blocks)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    pa_assert(bq->blocks->index == bq->read_index);

    *chunk = bq->blocks->chunk;
    pa_memblock_ref(chunk->memblock);

    return 0;
}
410
/* Advance the read index by length bytes, discarding queued chunks and
 * implicit holes on the way. length must be a multiple of the base
 * frame size. While (re-)entering prebuffering mode nothing further is
 * dropped. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (memblockq_check_prebuf(bq))
            break;

        if (bq->blocks) {
            size_t d;

            pa_assert(bq->blocks->index >= bq->read_index);

            /* Size of the hole in front of the first chunk, if any */
            d = (size_t) (bq->blocks->index - bq->read_index);

            if (d >= length) {
                /* The first block is too far in the future */

                bq->read_index += length;
                break;
            } else {

                /* Consume the hole, then continue into the chunk */
                length -= d;
                bq->read_index += d;
            }

            pa_assert(bq->blocks->index == bq->read_index);

            if (bq->blocks->chunk.length <= length) {
                /* We need to drop the full block */

                length -= bq->blocks->chunk.length;
                bq->read_index += bq->blocks->chunk.length;

                drop_block(bq, bq->blocks);
            } else {
                /* Only the start of this block needs to be dropped */

                bq->blocks->chunk.index += length;
                bq->blocks->chunk.length -= length;
                bq->blocks->index += length;
                bq->read_index += length;
                break;
            }

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }
}
466
467 int pa_memblockq_is_readable(pa_memblockq *bq) {
468 pa_assert(bq);
469
470 if (memblockq_check_prebuf(bq))
471 return 0;
472
473 if (pa_memblockq_get_length(bq) <= 0)
474 return 0;
475
476 return 1;
477 }
478
479 size_t pa_memblockq_get_length(pa_memblockq *bq) {
480 pa_assert(bq);
481
482 if (bq->write_index <= bq->read_index)
483 return 0;
484
485 return (size_t) (bq->write_index - bq->read_index);
486 }
487
488 size_t pa_memblockq_missing(pa_memblockq *bq) {
489 size_t l;
490 pa_assert(bq);
491
492 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
493 return 0;
494
495 l = bq->tlength - l;
496 return l >= bq->minreq ? l : 0;
497 }
498
/* Return the minimum request size the queue was sanitized to (>= 1). */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
504
505 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
506 pa_assert(bq);
507
508 switch (seek) {
509 case PA_SEEK_RELATIVE:
510 bq->write_index += offset;
511 return;
512 case PA_SEEK_ABSOLUTE:
513 bq->write_index = offset;
514 return;
515 case PA_SEEK_RELATIVE_ON_READ:
516 bq->write_index = bq->read_index + offset;
517 return;
518 case PA_SEEK_RELATIVE_END:
519 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
520 return;
521 }
522
523 pa_assert_not_reached();
524 }
525
526 void pa_memblockq_flush(pa_memblockq *bq) {
527 pa_assert(bq);
528
529 while (bq->blocks)
530 drop_block(bq, bq->blocks);
531
532 pa_assert(bq->n_blocks == 0);
533
534 bq->write_index = bq->read_index;
535
536 pa_memblockq_prebuf_force(bq);
537 }
538
/* Return the sanitized target fill length of the queue. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
544
/* Return the current absolute read index. */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);
    return bq->read_index;
}
549
/* Return the current absolute write index. */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);
    return bq->write_index;
}
554
555 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
556 pa_memchunk rchunk;
557
558 pa_assert(bq);
559 pa_assert(chunk);
560
561 if (bq->base == 1)
562 return pa_memblockq_push(bq, chunk);
563
564 if (!bq->mcalign)
565 bq->mcalign = pa_mcalign_new(bq->base);
566
567 if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
568 return -1;
569
570 pa_mcalign_push(bq->mcalign, chunk);
571
572 while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
573 int r;
574 r = pa_memblockq_push(bq, &rchunk);
575 pa_memblock_unref(rchunk.memblock);
576
577 if (r < 0)
578 return -1;
579 }
580
581 return 0;
582 }
583
584 void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
585 size_t l;
586 pa_assert(bq);
587
588 l = pa_memblockq_get_length(bq);
589
590 if (l > length)
591 pa_memblockq_drop(bq, l - length);
592 }
593
/* Leave prebuffering mode immediately, regardless of fill level. Note
 * this does not clear bq->prebuf, so memblockq_check_prebuf() may
 * re-enter prebuffering on the next underrun. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = 0;
}
599
/* Force the queue back into prebuffering mode (only meaningful when a
 * non-zero prebuf was configured). */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (!bq->in_prebuf && bq->prebuf > 0)
        bq->in_prebuf = 1;
}
606
/* Return the sanitized hard length limit of the queue. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
612
/* Return the sanitized prebuffer threshold of the queue. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}