/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"

struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);

struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memblock *silence;
    pa_mcalign *mcalign;
    int64_t missing;
    size_t requested;
};
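
/* Overview: a memblockq is a linked list of pa_memchunks addressed by an
 * abstract 64-bit byte index. read_index and write_index mark the current
 * read and write positions in that index space; the list between them may
 * contain holes, which pa_memblockq_peek() papers over with the 'silence'
 * memblock (or with a length-only chunk if no silence block is set).
 * 'missing' counts bytes we still want to request from the writer, while
 * 'requested' counts bytes already requested but not yet pushed;
 * current_read/current_write cache list positions to speed up scanning. */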

pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memblock *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    bq->silence = silence ? pa_memblock_ref(silence) : NULL;
    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
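
/* A minimal usage sketch (the parameter values are illustrative only, not
 * taken from this file; 'input' stands for a valid, base-aligned
 * pa_memchunk obtained elsewhere; prebuf is 0 here so peeking works
 * without prebuffering):
 *
 *     pa_memblockq *bq = pa_memblockq_new(0, 65536, 32768, 4, 0, 1024, 0, NULL);
 *     pa_memchunk chunk;
 *
 *     pa_memblockq_push(bq, &input);
 *
 *     if (pa_memblockq_peek(bq, &chunk) >= 0) {
 *         ... consume chunk.length bytes of audio ...
 *         if (chunk.memblock)
 *             pa_memblock_unref(chunk.memblock);
 *         pa_memblockq_drop(bq, chunk.length);
 *     }
 *
 *     pa_memblockq_free(bq);
 */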

void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_flush(bq);

    if (bq->silence)
        pa_memblock_unref(bq->silence);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at, or to the left of,
       the next block to play. It may be NULL if everything in the queue
       has already been played. */
}

static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at, or to the right of,
       the next block to write data to. It may be NULL if everything in
       the queue is still to be played. */
}
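
/* Both fix-up routines above start scanning from the cached cursor rather
 * than from the list head or tail, so for the usual sequential access
 * pattern they only advance a link or two per call. */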

static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}

static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}
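
/* Example: with read_index = 10000 and maxrewind = 4000, everything that
 * ends at or before index 6000 is released; a block straddling the
 * boundary is kept whole, so that up to maxrewind bytes behind the read
 * pointer remain available for pa_memblockq_rewind(). */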

static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        size_t d = (size_t) (bq->read_index - bq->write_index);

        if (l > d)
            l -= d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}
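
/* Note that can_push() only counts data that would actually extend the
 * queue: overwriting already-queued data is always fine. Example: with
 * maxlength = 1000, read_index = 0 and the tail ending at index 900, a
 * 200 byte write at write_index 900 is refused (the queue would grow to
 * 1100 bytes), while the same write at write_index 800 is accepted,
 * since only 100 bytes extend past the tail (1000 bytes total). */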

int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer to the right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + q->chunk.length)
            /* We found the entry after which the new entry belongs, so
             * we can stop scanning */
            break;
        else if (bq->write_index + chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate the offset of the tail we have to keep */
                d = bq->write_index + chunk.length - q->index;
                pa_assert(d > 0);

                /* Drop the overwritten head from the new entry; the
                 * chunk's data offset has to move along with its queue
                 * index so that the right bytes are replayed later */
                p->index = q->index + d;
                p->chunk.index += d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The new chunk overwrites the beginning of this entry, so
             * let's drop the overwritten part */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    delta = bq->write_index - old;

    if (delta >= bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}
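
/* The accounting block above keeps 'requested' and 'missing' in sync:
 * data arriving in response to an earlier pa_memblockq_pop_missing()
 * first pays off 'requested', and only the surplus beyond that lowers
 * 'missing'. E.g. if pop_missing() returned 1000 (so requested = 1000)
 * and the client then pushes 1500 bytes, requested drops to 0 and
 * missing shrinks by the extra 500. */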

static pa_bool_t memblockq_check_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}
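
/* Prebuffering acts as a latch: we stay in prebuf mode until the queue
 * has filled up to 'prebuf' bytes, then keep streaming even as the fill
 * level drops, and only re-enter prebuf mode once the queue runs
 * completely dry (read_index catches up with write_index). */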

int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (memblockq_check_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = bq->current_read->index - bq->read_index;
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            size_t l;

            chunk->memblock = pa_memblock_ref(bq->silence);

            l = pa_memblock_get_length(chunk->memblock);
            chunk->length = (length <= 0 || length > l) ? l : length;

        } else {

            /* If the memblockq is empty, return -1; otherwise hand the
             * caller a NULL memblock whose length says how much silence
             * there is to fill in */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += d;
    chunk->length -= d;

    return 0;
}
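
/* Typical consumer loop (sketch): peek, use the data, then drop exactly
 * what was consumed. A returned chunk with a NULL memblock means "this
 * many bytes worth of silence"; this assumes all writes and the silence
 * block, if any, are multiples of the frame size, so that the dropped
 * lengths stay base-aligned:
 *
 *     pa_memchunk chunk;
 *
 *     while (pa_memblockq_peek(bq, &chunk) >= 0) {
 *         if (chunk.memblock) {
 *             ... play chunk.length bytes from the chunk ...
 *             pa_memblock_unref(chunk.memblock);
 *         } else
 *             ... play chunk.length bytes of silence ...
 *
 *         pa_memblockq_drop(bq, chunk.length);
 *     }
 */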

void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (memblockq_check_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > length)
                d = length;

            bq->read_index += d;
            length -= d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }

    drop_backlog(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}

pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (memblockq_check_prebuf(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}
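
/* Example: with tlength = 32768, minreq = 1024 and 30000 bytes queued,
 * pa_memblockq_missing() returns 2768. With 32000 bytes queued it
 * returns 0, because the 768 byte shortfall is smaller than minreq. */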

void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    int64_t old, delta;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}
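
/* Example: with read_index = 100 and write_index = 400, a seek by 50
 * moves write_index to 450 with PA_SEEK_RELATIVE, to 50 with
 * PA_SEEK_ABSOLUTE, to 150 with PA_SEEK_RELATIVE_ON_READ, and to 50
 * past the end of the last queued block with PA_SEEK_RELATIVE_END (or
 * past read_index, if the queue is empty). */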

void pa_memblockq_flush(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}

int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}

size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;
    bq->missing = 0;
    bq->requested += l;

    return l;
}
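
/* pop_missing() implements the request side of the flow control
 * protocol: it returns how much data we should ask the writer for and
 * moves that amount from 'missing' to 'requested', so the same bytes
 * are never requested twice. pa_memblockq_push() and pa_memblockq_seek()
 * later balance 'requested' against the data that actually arrives. */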

void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);

    if (bq->prebuf > bq->maxlength)
        pa_memblockq_set_prebuf(bq, bq->maxlength);
}
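
/* The ((x + base - 1) / base) * base expression used by the maxlength,
 * tlength and prebuf setters rounds x up to the next multiple of the
 * base frame size: with base = 4, a maxlength of 10 becomes 12 (minreq
 * and maxrewind are rounded down instead). The setters also cascade so
 * that tlength and prebuf can never exceed maxlength. */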

void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    old_tlength = bq->tlength;

    if (tlength <= 0)
        tlength = bq->maxlength;

    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength - bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->tlength - bq->prebuf);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}

void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength : prebuf;
    bq->prebuf = ((bq->prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->maxlength)
        bq->prebuf = bq->maxlength;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;

    if (bq->minreq > bq->tlength - bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->tlength - bq->prebuf);
}

void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength - bq->prebuf)
        bq->minreq = bq->tlength - bq->prebuf;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;
}

void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}

void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    bq->read_index -= length;
    bq->missing -= length;
}

int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, chunk.length, PA_SEEK_RELATIVE);

        /* Advance the source; otherwise the next peek would return the
         * very same chunk again and the loop would never make progress */
        pa_memblockq_drop(source, chunk.length);
    }
}
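
/* Splicing drains 'source' into 'bq' until 'source' underruns: real
 * chunks are pushed over with alignment fix-up, holes are propagated by
 * seeking bq's write pointer forward, and prebuffering on bq is
 * disabled first so the spliced data becomes readable immediately. */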

void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}

void pa_memblockq_set_silence(pa_memblockq *bq, pa_memblock *silence) {
    pa_assert(bq);

    if (bq->silence)
        pa_memblock_unref(bq->silence);

    bq->silence = silence ? pa_memblock_ref(silence) : NULL;
}

pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}