/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"

struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);

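/* A memblockq is a sorted, doubly linked list of pa_memchunks, addressed
 * by 64 bit read and write indexes. Pushed chunks are only referenced,
 * never copied; queued ranges may be overwritten, and gaps between blocks
 * are played back as silence. Up to maxrewind bytes of already-read data
 * are kept around so the read pointer can be rewound. current_read and
 * current_write are pure optimizations: cached starting points for the
 * list walks in fix_current_read()/fix_current_write(). missing and
 * requested track how much data should be, or already has been, asked
 * from the client to keep the queue filled up to tlength. */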
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memchunk silence;
    pa_mcalign *mcalign;
    int64_t missing;
    size_t requested;
};

pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu, maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu, maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}

void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_flush(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

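/* fix_current_read()/fix_current_write() reposition the cached list
 * pointers after read_index/write_index or the block list have changed,
 * resuming the search from the previous cached position. */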
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}

static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}

static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}

static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}

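/* Check whether l bytes can be written at write_index. Only data that
 * would extend the queue past its current tail is counted against
 * maxlength; filling the gap up to read_index or overwriting existing
 * blocks never grows the queue. */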
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        size_t d = bq->read_index - bq->write_index;

        if (l > d)
            l -= d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + l - bq->read_index > bq->maxlength)
            return FALSE;

    return TRUE;
}

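/* Store a new chunk at write_index. The chunk is referenced, not copied.
 * Anything already queued in the covered range is truncated, split or
 * dropped as needed. Fails (-1) if the chunk length is not a multiple of
 * base or if the data doesn't fit; on success write_index is advanced and
 * the requested/missing accounting is updated. */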
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry after which the new entry has to be placed */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The new entry overwrites the current entry at its end, so let's drop the beginning of this entry */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }


    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}

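/* Returns TRUE while the queue is prebuffering, i.e. while reads should be
 * held back until at least prebuf bytes have accumulated. */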
pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}

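/* Advance the prebuf state machine: leave prebuffering once enough data
 * has been queued, re-enter it when the queue has run empty. Returns TRUE
 * as long as we are still prebuffering. */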
static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}

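/* Return a reference to the next chunk to read, without advancing
 * read_index. If there is a hole at the read position, the configured
 * silence memchunk is returned instead (or, if none was set, a chunk with
 * a NULL memblock describing the length of the hole). Returns -1 while
 * prebuffering or when nothing is readable. */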
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = bq->current_read->index - bq->read_index;
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += d;
    chunk->length -= d;

    return 0;
}

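/* Advance read_index by length bytes. The data is dropped piece by piece,
 * so that we stop early if prebuffering kicks in again along the way.
 * Afterwards everything older than the rewind window is released. */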
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = length;

            bq->read_index += d;
            length -= d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }

    drop_backlog(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}

void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= length;
    bq->missing -= length;
}

pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}

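/* Move write_index according to the seek mode. Skipped-over ranges count
 * as if they had been written: forward movement is charged against the
 * requested counter and reduces missing, while seeking backwards makes
 * the queue ask for that data again. */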
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    int64_t old, delta;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}

void pa_memblockq_flush(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}

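/* Like pa_memblockq_push(), but accepts chunks whose length is not a
 * multiple of base: the data is staged in the mcalign helper and pushed
 * on in properly aligned pieces. */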
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}

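/* Return the number of bytes that should be requested from the client to
 * fill the queue up to tlength again, and account for that amount as
 * outstanding in the requested counter. */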
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;
    bq->missing = 0;
    bq->requested += l;

    return l;
}

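/* The setters below sanitize their arguments: sizes are rounded to
 * multiples of base and clamped against each other, so that e.g. tlength
 * and prebuf can never exceed maxlength, nor minreq exceed tlength. */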
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);

    if (bq->prebuf > bq->maxlength)
        pa_memblockq_set_prebuf(bq, bq->maxlength);
}

void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}

void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->maxlength)
        bq->prebuf = bq->maxlength;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;

    if (bq->minreq > bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->prebuf);
}

void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq > bq->prebuf)
        bq->minreq = bq->prebuf;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;
}

void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}

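/* Move everything that can currently be read from source into bq,
 * translating holes into relative seeks. The source queue has to be
 * drained as we go; otherwise pa_memblockq_peek() would keep returning
 * the same chunk forever. */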
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, chunk.length, PA_SEEK_RELATIVE);

        pa_memblockq_drop(source, chunk.length);
    }
}

void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}

void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}

pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}

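/* Drop all queued blocks without moving the indexes; until new data is
 * pushed, reads will return silence (hence the name). */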
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}

unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
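
/*
 * Usage sketch (illustrative only, not part of the API above): a typical
 * producer/consumer pattern. fill_chunk() and play() are hypothetical
 * helpers standing in for real code, and the pushed memchunk is assumed
 * to come from a pa_mempool set up elsewhere.
 *
 *     pa_memblockq *q = pa_memblockq_new(
 *         0,          // initial read/write index
 *         1024*1024,  // maxlength: hard limit on queued data
 *         64*1024,    // tlength: target fill level
 *         4,          // base: frame size, e.g. 16 bit stereo = 4 bytes
 *         16*1024,    // prebuf: hold back reads until this much is queued
 *         4*1024,     // minreq: don't request refills smaller than this
 *         0,          // maxrewind: no rewind window
 *         NULL);      // no explicit silence memchunk
 *
 *     pa_memchunk in, out;
 *     fill_chunk(&in);                 // length must be a multiple of base
 *     if (pa_memblockq_push(q, &in) < 0)
 *         ; // didn't fit, or wasn't aligned to base
 *     pa_memblock_unref(in.memblock);  // the queue keeps its own reference
 *
 *     // Reads fail (-1) while prebuffering, i.e. until prebuf bytes are in
 *     while (pa_memblockq_peek(q, &out) >= 0) {
 *         play(&out);                  // out may describe silence/a hole
 *         if (out.memblock)
 *             pa_memblock_unref(out.memblock);
 *         pa_memblockq_drop(q, out.length);
 *     }
 *
 *     pa_memblockq_free(q);
 */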