]> code.delx.au - pulseaudio/blob - src/pulsecore/memblockq.c
remap: Change remapping function argument type from void to int16_t / float as approp...
[pulseaudio] / src / pulsecore / memblockq.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include <pulse/xmalloc.h>
31
32 #include <pulsecore/log.h>
33 #include <pulsecore/mcalign.h>
34 #include <pulsecore/macro.h>
35 #include <pulsecore/flist.h>
36
37 #include "memblockq.h"
38
39 /* #define MEMBLOCKQ_DEBUG */
40
/* One entry in the doubly-linked list of queued memchunks. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;       /* Absolute byte position of this chunk in the stream */
    pa_memchunk chunk;   /* The referenced audio data */
};

/* Lock-free free list used to recycle list_item allocations. */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
/* A queue of memchunks addressed by absolute 64 bit byte indices. The
 * queue may contain holes between blocks; pa_memblockq_peek() fills them
 * from the configured silence chunk. Up to maxrewind bytes of
 * already-read data are retained for pa_memblockq_rewind(). */
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;         /* Chunk list, sorted by index */
    struct list_item *current_read, *current_write; /* Cached cursors, lazily re-aligned */
    unsigned n_blocks;                              /* Entries in the list */
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind; /* Metrics; base = frame size */
    int64_t read_index, write_index;                /* Absolute stream positions, bytes */
    bool in_prebuf;                                 /* Currently prebuffering? */
    pa_memchunk silence;                            /* Handed out for holes; may be unset */
    pa_mcalign *mcalign;                            /* Aligner for pa_memblockq_push_align() */
    int64_t missing, requested;                     /* Client request accounting */
    char *name;                                     /* For debug messages only */
    pa_sample_spec sample_spec;
};
62
/* Create a new memblockq.
 *
 * idx sets the initial read and write index. silence, if non-NULL, is
 * referenced (not copied) and used to fill holes on read. The metric
 * parameters are sanitized through the corresponding setters, which
 * round to multiples of the frame size and enforce mutual consistency. */
pa_memblockq* pa_memblockq_new(
        const char *name,
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        const pa_sample_spec *sample_spec,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(sample_spec);
    pa_assert(name);

    bq = pa_xnew0(pa_memblockq, 1);
    bq->name = pa_xstrdup(name);

    bq->sample_spec = *sample_spec;
    bq->base = pa_frame_size(sample_spec);
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) bq->base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->in_prebuf = true;

    /* Order matters: each setter clamps against previously set values
     * (e.g. tlength is clamped to maxlength, prebuf to tlength/minreq). */
    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    }

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
109
/* Free the queue: drops all queued blocks, releases the silence chunk
 * reference, the mcalign helper, the name string and the struct itself. */
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    /* Despite the name, pa_memblockq_silence() drops every queued block */
    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq->name);
    pa_xfree(bq);
}
124
/* Re-align the cached read cursor after read_index or the block list
 * changed. Scans left then right from the previous cursor position, so
 * the cost is proportional to how far the index moved, not to the list
 * length. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
152
/* Re-align the cached write cursor after write_index or the block list
 * changed. Mirror image of fix_current_read(): scans right then left. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
180
/* Unlink q from the block list, move any cached cursor off it, release
 * its memblock reference and recycle the list_item (via the static free
 * list when possible, falling back to pa_xfree). */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cursors pointing at valid entries (or NULL) */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
214
215 static void drop_backlog(pa_memblockq *bq) {
216 int64_t boundary;
217 pa_assert(bq);
218
219 boundary = bq->read_index - (int64_t) bq->maxrewind;
220
221 while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
222 drop_block(bq, bq->blocks);
223 }
224
225 static bool can_push(pa_memblockq *bq, size_t l) {
226 int64_t end;
227
228 pa_assert(bq);
229
230 if (bq->read_index > bq->write_index) {
231 int64_t d = bq->read_index - bq->write_index;
232
233 if ((int64_t) l > d)
234 l -= (size_t) d;
235 else
236 return true;
237 }
238
239 end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;
240
241 /* Make sure that the list doesn't get too long */
242 if (bq->write_index + (int64_t) l > end)
243 if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
244 return false;
245
246 return true;
247 }
248
249 static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, bool account) {
250 int64_t delta;
251
252 pa_assert(bq);
253
254 delta = bq->write_index - old_write_index;
255
256 if (account)
257 bq->requested -= delta;
258 else
259 bq->missing -= delta;
260
261 #ifdef MEMBLOCKQ_DEBUG
262 pa_log_debug("[%s] pushed/seeked %lli: requested counter at %lli, account=%i", bq->name, (long long) delta, (long long) bq->requested, account);
263 #endif
264 }
265
266 static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
267 int64_t delta;
268
269 pa_assert(bq);
270
271 delta = bq->read_index - old_read_index;
272 bq->missing += delta;
273
274 #ifdef MEMBLOCKQ_DEBUG
275 pa_log_debug("[%s] popped %lli: missing counter at %lli", bq->name, (long long) delta, (long long) bq->missing);
276 #endif
277 }
278
/* Insert a frame-aligned chunk at the current write index.
 *
 * Existing data that the new chunk overlaps is overwritten: fully covered
 * blocks are dropped, partially covered blocks are truncated or split.
 * Where possible the chunk is merged with the preceding entry instead of
 * allocating a new one. Returns 0 on success, -1 when the write would
 * exceed maxlength (see can_push()). The chunk's memblock gets an extra
 * reference; the caller keeps its own. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert_se(uchunk->length % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                /* The tail shares the same memblock, so take a reference */
                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; if nothing remains, drop the entry */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks: possible when the new chunk is
         * physically contiguous with q inside the same memblock and
         * logically contiguous in the stream */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Allocate a fresh entry and link it in after q (or at the head) */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, true);
    return 0;
}
444
445 bool pa_memblockq_prebuf_active(pa_memblockq *bq) {
446 pa_assert(bq);
447
448 if (bq->in_prebuf)
449 return pa_memblockq_get_length(bq) < bq->prebuf;
450 else
451 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
452 }
453
454 static bool update_prebuf(pa_memblockq *bq) {
455 pa_assert(bq);
456
457 if (bq->in_prebuf) {
458
459 if (pa_memblockq_get_length(bq) < bq->prebuf)
460 return true;
461
462 bq->in_prebuf = false;
463 return false;
464 } else {
465
466 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
467 bq->in_prebuf = true;
468 return true;
469 }
470
471 return false;
472 }
473 }
474
/* Return the next readable chunk without consuming it.
 *
 * Returns -1 while prebuffering. If a hole precedes the next real data
 * (or the queue only contains a hole), a slice of the configured silence
 * chunk is returned instead; without a silence chunk, chunk->memblock is
 * set to NULL and chunk->length to the hole size (or -1 is returned for
 * an empty queue). On success the caller owns one reference on
 * chunk->memblock (when non-NULL). */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? (0 means "unbounded": we are
         * past the last block and past the write index) */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller, offset so that it starts
     * exactly at the read index */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
532
/* Like pa_memblockq_peek(), but always return exactly block_size bytes,
 * padding holes and the area past the queued data with silence. Requires
 * a silence chunk to be configured. If the first peeked chunk already
 * covers block_size bytes it is returned directly (truncated); otherwise
 * a fresh memblock is allocated and filled by concatenation. The caller
 * owns one reference on chunk->memblock. */
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblockq_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    /* rchunk.index doubles as the fill level of the result block; ri is
     * the absolute stream position we are copying from */
    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
602
/* Advance the read index by length bytes (a multiple of the frame size),
 * releasing blocks that drop out of the rewind window. Advances block by
 * block and stops early if prebuffering re-engages mid-way. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}
645
646 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
647 int64_t old;
648 pa_assert(bq);
649 pa_assert(length % bq->base == 0);
650
651 old = bq->read_index;
652
653 /* This is kind of the inverse of pa_memblockq_drop() */
654
655 bq->read_index -= (int64_t) length;
656
657 read_index_changed(bq, old);
658 }
659
660 bool pa_memblockq_is_readable(pa_memblockq *bq) {
661 pa_assert(bq);
662
663 if (pa_memblockq_prebuf_active(bq))
664 return false;
665
666 if (pa_memblockq_get_length(bq) <= 0)
667 return false;
668
669 return true;
670 }
671
672 size_t pa_memblockq_get_length(pa_memblockq *bq) {
673 pa_assert(bq);
674
675 if (bq->write_index <= bq->read_index)
676 return 0;
677
678 return (size_t) (bq->write_index - bq->read_index);
679 }
680
681 size_t pa_memblockq_missing(pa_memblockq *bq) {
682 size_t l;
683 pa_assert(bq);
684
685 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
686 return 0;
687
688 l = bq->tlength - l;
689
690 return l >= bq->minreq ? l : 0;
691 }
692
/* Move the write index according to seek mode and offset, then drop any
 * backlog that fell out of the rewind window and update the
 * requested/missing accounting (see write_index_changed()). */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, bool account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            /* Relative to the end of the last queued block, or to the
             * read index if the queue is empty */
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}
719
/* Drop all queued data and pull the write index back to the read index.
 * Prebuffering is re-engaged; accounting is updated per "account" (see
 * write_index_changed()). */
void pa_memblockq_flush_write(pa_memblockq *bq, bool account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}
732
/* Drop all queued data and push the read index forward to the write
 * index. Prebuffering is re-engaged; the skipped bytes are added to the
 * "missing" accounting (see read_index_changed()). */
void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}
745
/* Return the sanitized target length (tlength) in bytes. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
751
/* Return the sanitized minimum request size (minreq) in bytes. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
757
/* Return the sanitized rewind window size (maxrewind) in bytes. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
763
/* Return the absolute read index (bytes). */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
769
/* Return the absolute write index (bytes). */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
775
/* Like pa_memblockq_push(), but accepts chunks that are not frame
 * aligned: data is staged in the mcalign helper and pushed on in
 * base-sized multiples. Leftover bytes stay in the helper for the next
 * call; on push failure the helper is flushed. Returns 0 on success,
 * -1 when the data would not fit. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    /* Fast path: with a one-byte frame everything is aligned anyway */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
803
/* Leave the prebuffering state immediately, making queued data readable. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = false;
}
809
/* Re-enter the prebuffering state (no-op when prebuf is configured 0). */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = true;
}
816
/* Return the sanitized maximum queue length (maxlength) in bytes. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
822
/* Return the sanitized prebuffer watermark (prebuf) in bytes. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
828
/* Return how many bytes should be requested from the client now, moving
 * that amount from the "missing" to the "requested" counter. Returns 0
 * when nothing is missing. */
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pop: %lli", bq->name, (long long) bq->missing);
#endif

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] sent %lli: request counter is at %lli", bq->name, (long long) l, (long long) bq->requested);
#endif

    return l;
}
852
/* Set maxlength, rounded up to a multiple of the frame size (base) and
 * at least one frame; tlength is re-clamped if it now exceeds the new
 * maximum. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}
864
865 void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
866 size_t old_tlength;
867 pa_assert(bq);
868
869 if (tlength <= 0 || tlength == (size_t) -1)
870 tlength = bq->maxlength;
871
872 old_tlength = bq->tlength;
873 bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;
874
875 if (bq->tlength > bq->maxlength)
876 bq->tlength = bq->maxlength;
877
878 if (bq->minreq > bq->tlength)
879 pa_memblockq_set_minreq(bq, bq->tlength);
880
881 if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
882 pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
883
884 bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
885 }
886
/* Set minreq, rounded DOWN to a multiple of the frame size (base) and
 * clamped to [base, tlength]; prebuf is re-clamped to keep
 * prebuf <= tlength + base - minreq. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}
901
902 void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
903 pa_assert(bq);
904
905 if (prebuf == (size_t) -1)
906 prebuf = bq->tlength+bq->base-bq->minreq;
907
908 bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;
909
910 if (prebuf > 0 && bq->prebuf < bq->base)
911 bq->prebuf = bq->base;
912
913 if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
914 bq->prebuf = bq->tlength+bq->base-bq->minreq;
915
916 if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
917 bq->in_prebuf = false;
918 }
919
/* Set how many bytes of already-read data are kept for rewinding,
 * rounded down to a multiple of the frame size (base). */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
925
/* Apply client-supplied buffer metrics. The order matters: each setter
 * clamps against the previously applied values. Note that maxrewind is
 * not part of pa_buffer_attr and is left untouched. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_minreq(bq, a->minreq);
    pa_memblockq_set_prebuf(bq, a->prebuf);
}
935
936 void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
937 pa_assert(bq);
938 pa_assert(a);
939
940 a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
941 a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
942 a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
943 a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
944 }
945
946 int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {
947
948 pa_assert(bq);
949 pa_assert(source);
950
951 pa_memblockq_prebuf_disable(bq);
952
953 for (;;) {
954 pa_memchunk chunk;
955
956 if (pa_memblockq_peek(source, &chunk) < 0)
957 return 0;
958
959 pa_assert(chunk.length > 0);
960
961 if (chunk.memblock) {
962
963 if (pa_memblockq_push_align(bq, &chunk) < 0) {
964 pa_memblock_unref(chunk.memblock);
965 return -1;
966 }
967
968 pa_memblock_unref(chunk.memblock);
969 } else
970 pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, true);
971
972 pa_memblockq_drop(bq, chunk.length);
973 }
974 }
975
976 void pa_memblockq_willneed(pa_memblockq *bq) {
977 struct list_item *q;
978
979 pa_assert(bq);
980
981 fix_current_read(bq);
982
983 for (q = bq->current_read; q; q = q->next)
984 pa_memchunk_will_need(&q->chunk);
985 }
986
/* Replace the silence chunk used to fill holes on read. Passing NULL
 * clears it. NOTE(review): the old memblock is unreferenced before the
 * new one is referenced — a caller passing a chunk that aliases
 * bq->silence with refcount 1 would use freed memory; confirm no caller
 * does that. */
void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}
999
1000 bool pa_memblockq_is_empty(pa_memblockq *bq) {
1001 pa_assert(bq);
1002
1003 return !bq->blocks;
1004 }
1005
/* Drop every queued block. Despite the name this writes nothing: after
 * this call reads are satisfied from the configured silence chunk via
 * pa_memblockq_peek(). The read/write indices are left untouched. */
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
1014
/* Return the number of entries currently in the block list. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
1020
/* Return the alignment unit (frame size) all metrics are rounded to. */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}