]> code.delx.au - pulseaudio/blob - src/pulsecore/memblockq.c
Merge dead branch 'prepare-0.9.10'
[pulseaudio] / src / pulsecore / memblockq.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <sys/time.h>
27 #include <time.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31
32 #include <pulse/xmalloc.h>
33
34 #include <pulsecore/log.h>
35 #include <pulsecore/mcalign.h>
36 #include <pulsecore/macro.h>
37 #include <pulsecore/flist.h>
38
39 #include "memblockq.h"
40
/* One entry of the doubly linked chunk list that backs a pa_memblockq.
 * The entry covers the byte range [index, index + chunk.length) of the
 * queue's absolute 64 bit index space. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;     /* absolute write index this chunk starts at */
    pa_memchunk chunk; /* referenced slice of a memory block */
};

/* Lock-free free list used to recycle list_item allocations. */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
/* A buffer queue ordered by a 64 bit absolute byte index.
 *
 * Data is stored as a doubly linked list of referenced pa_memchunks;
 * gaps between chunks are handed out as silence on read. All lengths
 * and index movements are multiples of 'base' (the frame size).
 *
 * Metrics (see the pa_memblockq_set_*() functions for the invariants
 * enforced between them):
 *   maxlength - hard limit for queued data
 *   tlength   - target fill level
 *   prebuf    - bytes that must be queued before reading may start
 *   minreq    - minimum amount to request from the writer
 *   maxrewind - how much already-read data to keep around for rewinds
 *
 * 'missing' tracks how many bytes should be requested from the writer,
 * 'requested' how many have been asked for but not yet received. */
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;         /* chunk list in ascending index order */
    struct list_item *current_read, *current_write; /* cached cursors, revalidated by fix_current_*() */
    unsigned n_blocks;                              /* number of list entries */
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;                /* absolute byte positions */
    pa_bool_t in_prebuf;                            /* TRUE while prebuffering holds off reads */
    pa_memchunk silence;                            /* optional chunk handed out for holes */
    pa_mcalign *mcalign;                            /* aligner used by pa_memblockq_push_align() */
    int64_t missing;
    size_t requested;
};
61
/* Create a new memory block queue.
 *
 * idx       - initial absolute read/write index
 * maxlength - upper limit for queued data (bytes), sanitized below
 * tlength   - target fill level; 0 selects maxlength
 * base      - frame size; all metrics are rounded to multiples of it
 * prebuf    - prebuffering watermark; (size_t) -1 selects tlength
 * minreq    - minimum request size
 * maxrewind - how much played data to keep for rewinding
 * silence   - optional chunk to hand out where no data is queued; an
 *             extra reference is taken, the caller keeps its own
 *
 * The metric setters enforce the invariants between the limits, hence
 * they are applied in this specific order. */
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    /* Zero all metrics first so the setters start from a known state;
     * set_tlength() below bumps bq->missing to the new target length */
    bq->missing = bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
109
110 void pa_memblockq_free(pa_memblockq* bq) {
111 pa_assert(bq);
112
113 pa_memblockq_flush(bq);
114
115 if (bq->silence.memblock)
116 pa_memblock_unref(bq->silence.memblock);
117
118 if (bq->mcalign)
119 pa_mcalign_free(bq->mcalign);
120
121 pa_xfree(bq);
122 }
123
/* Reposition the cached read cursor so it points at the block that
 * contains bq->read_index, or at the first block right of it. Needed
 * because read_index may have moved (drop/rewind/seek) since the
 * cursor was last used. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
151
/* Reposition the cached write cursor so it points at the block that
 * contains bq->write_index, or at the last block left of it. Mirror
 * image of fix_current_read(), starting from the tail. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
179
/* Unlink q from the block list and dispose of it: drop the memblock
 * reference, move the cached cursors off q so they never dangle, and
 * recycle the list_item through the free list. */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cursors usable: the write cursor moves to the previous
     * entry, the read cursor to the next one (matching the directions
     * fix_current_write()/fix_current_read() scan from) */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    /* Recycle the item; fall back to freeing it when the flist is full */
    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
213
214 static void drop_backlog(pa_memblockq *bq) {
215 int64_t boundary;
216 pa_assert(bq);
217
218 boundary = bq->read_index - bq->maxrewind;
219
220 while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
221 drop_block(bq, bq->blocks);
222 }
223
/* Check whether l bytes can be written at the current write index
 * without growing the queue beyond maxlength. Writing into the region
 * that is already covered (overwrites, or the gap up to read_index
 * when the writer lags behind the reader) is always allowed. */
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        /* The bytes up to read_index do not count against the limit */
        size_t d = bq->read_index - bq->write_index;

        if (l > d)
            l -= d;
        else
            return TRUE;
    }

    /* End of the data currently queued (or write_index if empty) */
    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + l - bq->read_index > bq->maxlength)
            return FALSE;

    return TRUE;
}
247
/* Queue a chunk at the current write index, overwriting any data
 * already queued for that byte range (existing entries are dropped,
 * truncated or split as needed). An extra reference is taken on the
 * chunk's memblock; the caller keeps its own.
 *
 * Returns 0 on success, -1 when the length is not a multiple of the
 * frame size or the data would not fit (see can_push()). */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; drop it entirely when nothing is left */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Insert a fresh entry after q (or at the list head if q is NULL) */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    /* Accounting: the advance of the write index first fulfills data
     * we already asked the client for (bq->requested); only the excess
     * reduces bq->missing */
    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}
425
426 pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
427 pa_assert(bq);
428
429 if (bq->in_prebuf)
430 return pa_memblockq_get_length(bq) < bq->prebuf;
431 else
432 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
433 }
434
/* Advance the prebuffering state machine. Returns TRUE while reading
 * must be held off. Prebuffering ends once at least prebuf bytes are
 * queued; it is re-armed when the reader catches up with the writer
 * (underrun) while prebuf > 0. */
static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        /* Watermark reached: leave prebuffering */
        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        /* Underrun: re-enter prebuffering */
        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}
455
/* Return (a reference to) the data at the current read position
 * without advancing it. Returns -1 while prebuffering, or when the
 * queue is empty and no silence chunk is configured. If there is a
 * hole at the read position, a slice of the configured silence chunk
 * — or a memblock-less chunk of the right length — is returned
 * instead of real data. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? 0 means "unlimited",
         * i.e. we are past the end of all queued data */
        if (bq->current_read)
            length = bq->current_read->index - bq->read_index;
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            /* Memblock-less chunk: caller must treat this as "length
             * bytes of silence" */
            chunk->memblock = NULL;
            chunk->length = length;
        }

        /* NOTE(review): this also resets the index of the silence
         * chunk copied above — assumes bq->silence.index is always 0;
         * confirm against the callers that configure silence */
        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    /* Skip the part of the chunk that lies before read_index */
    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += d;
    chunk->length -= d;

    return 0;
}
514
/* Advance the read index by length bytes (a multiple of base),
 * releasing blocks that fall out of the rewind window. Draining stops
 * early if the queue (re)enters prebuffering on the way. The amount
 * actually dropped is added to bq->missing so it can be requested
 * from the writer again. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = length;

            bq->read_index += d;
            length -= d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }

    drop_backlog(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}
559
560 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
561 pa_assert(bq);
562 pa_assert(length % bq->base == 0);
563
564 /* This is kind of the inverse of pa_memblockq_drop() */
565
566 bq->read_index -= length;
567 bq->missing -= length;
568 }
569
570 pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
571 pa_assert(bq);
572
573 if (pa_memblockq_prebuf_active(bq))
574 return FALSE;
575
576 if (pa_memblockq_get_length(bq) <= 0)
577 return FALSE;
578
579 return TRUE;
580 }
581
582 size_t pa_memblockq_get_length(pa_memblockq *bq) {
583 pa_assert(bq);
584
585 if (bq->write_index <= bq->read_index)
586 return 0;
587
588 return (size_t) (bq->write_index - bq->read_index);
589 }
590
591 size_t pa_memblockq_missing(pa_memblockq *bq) {
592 size_t l;
593 pa_assert(bq);
594
595 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
596 return 0;
597
598 l = bq->tlength - l;
599
600 return l >= bq->minreq ? l : 0;
601 }
602
/* Reposition the write index. PA_SEEK_RELATIVE moves it by offset,
 * PA_SEEK_ABSOLUTE sets it, PA_SEEK_RELATIVE_ON_READ positions it
 * relative to the read index, PA_SEEK_RELATIVE_END relative to the end
 * of queued data (or the read index when the queue is empty). Data
 * that falls behind the rewind window is released, and the
 * requested/missing counters are rebalanced as in pa_memblockq_push();
 * a backwards seek (delta < 0) increases bq->missing. */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    int64_t old, delta;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);

    /* A forward move first fulfills outstanding requests; only the
     * excess reduces bq->missing. A negative delta skips both branches
     * and makes bq->missing grow below. */
    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}
640
/* Empty the queue: drop all queued blocks, pull the write index back
 * to the read index and re-arm prebuffering. The requested/missing
 * accounting mirrors pa_memblockq_seek(): the (non-positive) delta
 * makes bq->missing grow by the flushed amount. */
void pa_memblockq_flush(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= delta;
        delta = 0;
    }

    bq->missing -= delta;
}
664
665 size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
666 pa_assert(bq);
667
668 return bq->tlength;
669 }
670
671 size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
672 pa_assert(bq);
673
674 return bq->minreq;
675 }
676
677 int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
678 pa_assert(bq);
679
680 return bq->read_index;
681 }
682
683 int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
684 pa_assert(bq);
685
686 return bq->write_index;
687 }
688
/* Like pa_memblockq_push(), but accepts chunks whose length is not a
 * multiple of the frame size: the data is buffered through the mcalign
 * helper and pushed in frame-aligned pieces (a remainder stays inside
 * the aligner until more data arrives). Returns -1 if the data cannot
 * be pushed. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    /* Frame size 1: everything is aligned already */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    /* Check the limit against the aligned size up front so we don't
     * feed the aligner data we cannot push */
    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            /* Discard whatever is still buffered in the aligner */
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
716
717 void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
718 pa_assert(bq);
719
720 bq->in_prebuf = FALSE;
721 }
722
723 void pa_memblockq_prebuf_force(pa_memblockq *bq) {
724 pa_assert(bq);
725
726 if (bq->prebuf > 0)
727 bq->in_prebuf = TRUE;
728 }
729
730 size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
731 pa_assert(bq);
732
733 return bq->maxlength;
734 }
735
736 size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
737 pa_assert(bq);
738
739 return bq->prebuf;
740 }
741
742 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
743 size_t l;
744
745 pa_assert(bq);
746
747 /* pa_log("pop: %lli", bq->missing); */
748
749 if (bq->missing <= 0)
750 return 0;
751
752 l = (size_t) bq->missing;
753 bq->missing = 0;
754 bq->requested += l;
755
756 return l;
757 }
758
/* Set the maximum queue length: rounded up to a multiple of the frame
 * size and forced to at least one frame. tlength and prebuf are shrunk
 * (through their setters, so their own invariants cascade) if they now
 * exceed the new limit. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);

    if (bq->prebuf > bq->maxlength)
        pa_memblockq_set_prebuf(bq, bq->maxlength);
}
773
/* Set the target fill level. tlength == 0 selects maxlength. The value
 * is rounded up to the frame size and clamped to maxlength; prebuf and
 * minreq are shrunk if they now exceed it. bq->missing is shifted by
 * the change in tlength so the writer gets asked for the right amount
 * of data. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->prebuf > bq->tlength)
        pa_memblockq_set_prebuf(bq, bq->tlength);

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
795
/* Set the prebuffering watermark. (size_t) -1 selects tlength. The
 * value is rounded up to the frame size (a nonzero request yields at
 * least one frame) and clamped to tlength. Prebuffering is left
 * immediately when it is disabled or already satisfied; minreq is
 * shrunk if it now exceeds prebuf. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength)
        bq->prebuf = bq->tlength;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;

    if (bq->minreq > bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->prebuf);
}
816
/* Set the minimum request size: rounded down to the frame size and
 * clamped to tlength and prebuf, but never below one frame.
 * NOTE(review): because the raise-to-base clamp runs last, minreq can
 * end up above prebuf/tlength when those are smaller than one frame —
 * presumably intentional, confirm. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq > bq->prebuf)
        bq->minreq = bq->prebuf;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;
}
831
832 void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
833 pa_assert(bq);
834
835 bq->maxrewind = (maxrewind/bq->base)*bq->base;
836 }
837
838 int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {
839
840 pa_assert(bq);
841 pa_assert(source);
842
843 pa_memblockq_prebuf_disable(bq);
844
845 for (;;) {
846 pa_memchunk chunk;
847
848 if (pa_memblockq_peek(source, &chunk) < 0)
849 return 0;
850
851 pa_assert(chunk.length > 0);
852
853 if (chunk.memblock) {
854
855 if (pa_memblockq_push_align(bq, &chunk) < 0) {
856 pa_memblock_unref(chunk.memblock);
857 return -1;
858 }
859
860 pa_memblock_unref(chunk.memblock);
861 } else
862 pa_memblockq_seek(bq, chunk.length, PA_SEEK_RELATIVE);
863
864 pa_memblockq_drop(bq, chunk.length);
865 }
866 }
867
868 void pa_memblockq_willneed(pa_memblockq *bq) {
869 struct list_item *q;
870
871 pa_assert(bq);
872
873 fix_current_read(bq);
874
875 for (q = bq->current_read; q; q = q->next)
876 pa_memchunk_will_need(&q->chunk);
877 }
878
879 void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
880 pa_assert(bq);
881
882 if (bq->silence.memblock)
883 pa_memblock_unref(bq->silence.memblock);
884
885 if (silence) {
886 bq->silence = *silence;
887 pa_memblock_ref(bq->silence.memblock);
888 } else
889 pa_memchunk_reset(&bq->silence);
890 }
891
892 pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
893 pa_assert(bq);
894
895 return !bq->blocks;
896 }
897
898 void pa_memblockq_silence(pa_memblockq *bq) {
899 pa_assert(bq);
900
901 while (bq->blocks)
902 drop_block(bq, bq->blocks);
903
904 pa_assert(bq->n_blocks == 0);
905 }
906
907 unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
908 pa_assert(bq);
909
910 return bq->n_blocks;
911 }
912
913 size_t pa_memblockq_get_base(pa_memblockq *bq) {
914 pa_assert(bq);
915
916 return bq->base;
917 }