/* src/pulsecore/memblockq.c — "glitch-free" branch */
1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7
8 PulseAudio is free software; you can redistribute it and/or modify
9 it under the terms of the GNU Lesser General Public License as published
10 by the Free Software Foundation; either version 2 of the License,
11 or (at your option) any later version.
12
13 PulseAudio is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public License
19 along with PulseAudio; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 USA.
22 ***/
23
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
27
28 #include <sys/time.h>
29 #include <time.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/log.h>
37 #include <pulsecore/mcalign.h>
38 #include <pulsecore/macro.h>
39 #include <pulsecore/flist.h>
40
41 #include "memblockq.h"
42
/* One entry in the memblockq's doubly linked, index-ordered chunk list. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;       /* absolute write index at which this chunk starts */
    pa_memchunk chunk;   /* the referenced audio data (holds a memblock ref) */
};
48
/* Lock-free free list used to recycle list_item structures across queues */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
50
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;            /* chunk list, ordered by index */
    unsigned n_blocks;                                 /* number of list entries */
    size_t maxlength, tlength, base, prebuf, minreq;   /* limits; rounded to multiples of base by the setters */
    int64_t read_index, write_index;                   /* absolute stream positions; read may run ahead on underrun */
    pa_bool_t in_prebuf;                               /* TRUE while prebuffering (peek refuses data) */
    pa_memblock *silence;                              /* optional block handed out for gaps/underruns */
    pa_mcalign *mcalign;                               /* lazily created aligner for pa_memblockq_push_align() */
    int64_t missing;                                   /* bytes we should still request from the writer */
    size_t requested;                                  /* bytes requested via pop_missing() but not yet pushed */
};
62
/* Create a new memblockq.
 *
 * idx       initial read/write index
 * maxlength hard limit on buffered data
 * tlength   target fill length
 * base      frame size; all lengths are rounded to multiples of this
 * prebuf    prebuffer level ((size_t)-1 selects tlength/2, see set_prebuf)
 * minreq    minimum request size
 * silence   optional memblock returned for gaps (referenced, may be NULL)
 *
 * The limits are sanitized through the pa_memblockq_set_*() setters; the
 * call order (maxlength, tlength, prebuf, minreq) matters because each
 * setter clamps against the values set before it. */
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        pa_memblock *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq);

    /* Start from zero so the setters below see a consistent state */
    bq->missing = bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_minreq(bq, minreq);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
                 (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);

    bq->silence = silence ? pa_memblock_ref(silence) : NULL;
    bq->mcalign = NULL;  /* created on demand in pa_memblockq_push_align() */

    return bq;
}
102
103 void pa_memblockq_free(pa_memblockq* bq) {
104 pa_assert(bq);
105
106 pa_memblockq_flush(bq);
107
108 if (bq->silence)
109 pa_memblock_unref(bq->silence);
110
111 if (bq->mcalign)
112 pa_mcalign_free(bq->mcalign);
113
114 pa_xfree(bq);
115 }
116
117 static void drop_block(pa_memblockq *bq, struct list_item *q) {
118 pa_assert(bq);
119 pa_assert(q);
120
121 pa_assert(bq->n_blocks >= 1);
122
123 if (q->prev)
124 q->prev->next = q->next;
125 else
126 bq->blocks = q->next;
127
128 if (q->next)
129 q->next->prev = q->prev;
130 else
131 bq->blocks_tail = q->prev;
132
133 pa_memblock_unref(q->chunk.memblock);
134
135 if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
136 pa_xfree(q);
137
138 bq->n_blocks--;
139 }
140
141 static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
142 int64_t end;
143
144 pa_assert(bq);
145
146 if (bq->read_index > bq->write_index) {
147 size_t d = bq->read_index - bq->write_index;
148
149 if (l > d)
150 l -= d;
151 else
152 return TRUE;
153 }
154
155 end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;
156
157 /* Make sure that the list doesn't get too long */
158 if (bq->write_index + (int64_t)l > end)
159 if (bq->write_index + l - bq->read_index > bq->maxlength)
160 return FALSE;
161
162 return TRUE;
163 }
164
/* Append a chunk at the current write index.
 *
 * Takes its own reference on uchunk->memblock; the caller keeps
 * ownership of the passed chunk. Data already queued at overlapping
 * indexes is overwritten (entries are dropped, truncated or split as
 * needed). Returns 0 on success, -1 if the length is not a multiple
 * of the base frame size or the queue is full (see can_push()). */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    if (bq->read_index > bq->write_index) {

        /* We currently have a buffer underflow, we need to drop some
         * incoming data */

        size_t d = bq->read_index - bq->write_index;

        if (chunk.length > d) {
            chunk.index += d;
            chunk.length -= d;
            bq->write_index += d;
        } else {
            /* We drop the incoming data completely */
            bq->write_index += chunk.length;
            goto finish;
        }
    }

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    q = bq->blocks_tail;
    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; drop it entirely if nothing remains */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks: if the new data continues the same
         * memblock contiguously, just extend the previous entry */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Allocate a fresh list entry (recycled if possible) and link it in
     * after q, or at the head if q is NULL */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    /* Account the advance of the write index against outstanding
     * requests: satisfied bytes reduce "requested", any surplus reduces
     * "missing". delta is >= 0 here since write_index only advanced. */
    delta = bq->write_index - old;

    if (delta >= bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}
344
345 static pa_bool_t memblockq_check_prebuf(pa_memblockq *bq) {
346 pa_assert(bq);
347
348 if (bq->in_prebuf) {
349
350 if (pa_memblockq_get_length(bq) < bq->prebuf)
351 return TRUE;
352
353 bq->in_prebuf = FALSE;
354 return FALSE;
355 } else {
356
357 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
358 bq->in_prebuf = TRUE;
359 return TRUE;
360 }
361
362 return FALSE;
363 }
364 }
365
/* Return the chunk at the current read index without consuming it.
 *
 * On success fills *chunk (taking a reference on its memblock when one
 * is set) and returns 0; the caller must unref chunk->memblock if
 * non-NULL. Returns -1 while prebuffering, or when the queue is empty
 * and no silence block is configured. If there is a gap before the
 * next queued block, a silence chunk (or a memblock-less length-only
 * chunk) covering up to the gap is returned instead of real data. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (memblockq_check_prebuf(bq))
        return -1;

    /* Do we need to spit out silence? */
    if (!bq->blocks || bq->blocks->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? (0 = unbounded/empty queue) */
        length = bq->blocks ? bq->blocks->index - bq->read_index : 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            chunk->memblock = pa_memblock_ref(bq->silence);

            /* Clamp to the size of the silence block */
            if (!length || length > pa_memblock_get_length(chunk->memblock))
                length = pa_memblock_get_length(chunk->memblock);

            chunk->length = length;
        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (!bq->blocks)
                return -1;

            /* memblock==NULL signals "this many bytes of silence" */
            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    pa_assert(bq->blocks->index == bq->read_index);

    *chunk = bq->blocks->chunk;
    pa_memblock_ref(chunk->memblock);

    return 0;
}
413
/* Advance the read index by length bytes, dropping queued data (and
 * skipping over gaps) on the way. length must be a multiple of the
 * base frame size. Stops early if prebuffering kicks in. The bytes
 * actually consumed are added to "missing" so they get re-requested. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (memblockq_check_prebuf(bq))
            break;

        if (bq->blocks) {
            size_t d;

            pa_assert(bq->blocks->index >= bq->read_index);

            /* Size of the gap (silence) before the first block */
            d = (size_t) (bq->blocks->index - bq->read_index);

            if (d >= length) {
                /* The first block is too far in the future */

                bq->read_index += length;
                break;
            } else {

                /* Consume the gap, then continue with real data */
                length -= d;
                bq->read_index += d;
            }

            pa_assert(bq->blocks->index == bq->read_index);

            if (bq->blocks->chunk.length <= length) {
                /* We need to drop the full block */

                length -= bq->blocks->chunk.length;
                bq->read_index += bq->blocks->chunk.length;

                drop_block(bq, bq->blocks);
            } else {
                /* Only the start of this block needs to be dropped */

                bq->blocks->chunk.index += length;
                bq->blocks->chunk.length -= length;
                bq->blocks->index += length;
                bq->read_index += length;
                break;
            }

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }

    /* Everything we consumed must eventually be refilled by the writer */
    delta = bq->read_index - old;
    bq->missing += delta;
}
475
476 int pa_memblockq_is_readable(pa_memblockq *bq) {
477 pa_assert(bq);
478
479 if (memblockq_check_prebuf(bq))
480 return 0;
481
482 if (pa_memblockq_get_length(bq) <= 0)
483 return 0;
484
485 return 1;
486 }
487
488 size_t pa_memblockq_get_length(pa_memblockq *bq) {
489 pa_assert(bq);
490
491 if (bq->write_index <= bq->read_index)
492 return 0;
493
494 return (size_t) (bq->write_index - bq->read_index);
495 }
496
497 size_t pa_memblockq_missing(pa_memblockq *bq) {
498 size_t l;
499 pa_assert(bq);
500
501 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
502 return 0;
503
504 l = bq->tlength - l;
505
506 return l >= bq->minreq ? l : 0;
507 }
508
509 size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
510 pa_assert(bq);
511
512 return bq->minreq;
513 }
514
515 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
516 int64_t old, delta;
517 pa_assert(bq);
518
519 old = bq->write_index;
520
521 switch (seek) {
522 case PA_SEEK_RELATIVE:
523 bq->write_index += offset;
524 break;
525 case PA_SEEK_ABSOLUTE:
526 bq->write_index = offset;
527 break;
528 case PA_SEEK_RELATIVE_ON_READ:
529 bq->write_index = bq->read_index + offset;
530 break;
531 case PA_SEEK_RELATIVE_END:
532 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
533 break;
534 default:
535 pa_assert_not_reached();
536 }
537
538 delta = bq->write_index - old;
539
540 if (delta >= bq->requested) {
541 delta -= bq->requested;
542 bq->requested = 0;
543 } else if (delta >= 0) {
544 bq->requested -= delta;
545 delta = 0;
546 }
547
548 bq->missing -= delta;
549 }
550
551 void pa_memblockq_flush(pa_memblockq *bq) {
552 int64_t old, delta;
553 pa_assert(bq);
554
555 while (bq->blocks)
556 drop_block(bq, bq->blocks);
557
558 pa_assert(bq->n_blocks == 0);
559
560 old = bq->write_index;
561 bq->write_index = bq->read_index;
562
563 pa_memblockq_prebuf_force(bq);
564
565 delta = bq->write_index - old;
566
567 if (delta > bq->requested) {
568 delta -= bq->requested;
569 bq->requested = 0;
570 } else if (delta >= 0) {
571 bq->requested -= delta;
572 delta = 0;
573 }
574
575 bq->missing -= delta;
576 }
577
578 size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
579 pa_assert(bq);
580
581 return bq->tlength;
582 }
583
584 int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
585 pa_assert(bq);
586 return bq->read_index;
587 }
588
589 int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
590 pa_assert(bq);
591 return bq->write_index;
592 }
593
/* Like pa_memblockq_push(), but re-aligns the chunk to the base frame
 * size first using a lazily created pa_mcalign. Returns 0 on success,
 * -1 if the aligned size would not fit (checked up front) or if an
 * inner push fails. NOTE(review): on a mid-loop push failure, chunks
 * already handed to the aligner remain buffered there — presumably
 * intentional best-effort behavior; verify against callers. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    /* Base of 1 means everything is already aligned */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!bq->mcalign)
        bq->mcalign = pa_mcalign_new(bq->base);

    /* Reject early if the aligned total cannot fit */
    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    /* Drain all fully aligned chunks the aligner can produce */
    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0)
            return -1;
    }

    return 0;
}
622
623 void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
624 size_t l;
625 pa_assert(bq);
626
627 l = pa_memblockq_get_length(bq);
628
629 if (l > length)
630 pa_memblockq_drop(bq, l - length);
631 }
632
633 void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
634 pa_assert(bq);
635
636 bq->in_prebuf = FALSE;
637 }
638
639 void pa_memblockq_prebuf_force(pa_memblockq *bq) {
640 pa_assert(bq);
641
642 if (!bq->in_prebuf && bq->prebuf > 0)
643 bq->in_prebuf = TRUE;
644 }
645
646 size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
647 pa_assert(bq);
648
649 return bq->maxlength;
650 }
651
652 size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
653 pa_assert(bq);
654
655 return bq->prebuf;
656 }
657
658 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
659 size_t l;
660
661 pa_assert(bq);
662
663 /* pa_log("pop: %lli", bq->missing); */
664
665 if (bq->missing <= 0)
666 return 0;
667
668 l = (size_t) bq->missing;
669 bq->missing = 0;
670 bq->requested += l;
671
672 return l;
673 }
674
675 void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
676 pa_assert(bq);
677
678 bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;
679
680 if (bq->maxlength < bq->base)
681 bq->maxlength = bq->base;
682
683 if (bq->tlength > bq->maxlength)
684 pa_memblockq_set_tlength(bq, bq->maxlength);
685
686 if (bq->prebuf > bq->maxlength)
687 pa_memblockq_set_prebuf(bq, bq->maxlength);
688 }
689
/* Set the target length: 0 selects maxlength; the value is rounded up
 * to a multiple of base and clamped to maxlength. The change in
 * tlength is reflected in "missing" so the writer's request size
 * adjusts accordingly. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    old_tlength = bq->tlength;

    if (tlength <= 0)
        tlength = bq->maxlength;

    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    /* NOTE(review): bq->tlength - bq->prebuf is a size_t subtraction;
     * if prebuf > tlength it wraps to a huge value and the clamp is
     * silently skipped — confirm prebuf <= tlength is an invariant. */
    if (bq->minreq > bq->tlength - bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->tlength - bq->prebuf);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
709
/* Set the prebuffer level: (size_t)-1 selects tlength/2; the value is
 * rounded up to a multiple of base (at least one frame if non-zero)
 * and clamped to maxlength. Prebuffering mode is left immediately if
 * disabled (prebuf 0) or already satisfied by the current fill level. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
    bq->prebuf = ((bq->prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->maxlength)
        bq->prebuf = bq->maxlength;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;

    /* NOTE(review): same size_t wrap hazard as in set_tlength() if
     * prebuf ends up greater than tlength — confirm the invariant. */
    if (bq->minreq > bq->tlength - bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->tlength - bq->prebuf);
}
728
/* Set the minimum request size: rounded DOWN to a multiple of base,
 * clamped to at most tlength - prebuf and at least one frame. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    /* NOTE(review): bq->tlength - bq->prebuf wraps if prebuf > tlength
     * (size_t arithmetic), which would skip this clamp — confirm
     * prebuf <= tlength is guaranteed by the other setters. */
    if (bq->minreq > bq->tlength - bq->prebuf)
        bq->minreq = bq->tlength - bq->prebuf;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;
}