/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"

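/* One queue entry: a memchunk plus the absolute byte index at which it
 * sits in the stream. Entries are kept in a doubly-linked list sorted
 * by index. */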
struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);

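/* The queue proper: read_index and write_index are absolute byte
 * positions in the stream; the block list covers at most the range
 * [read_index - maxrewind, write_index], and any holes in it are
 * rendered as silence when read. current_read/current_write cache the
 * list positions matching the two indices so lookups stay cheap. */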
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memchunk silence;
    pa_mcalign *mcalign;
    int64_t missing, requested;
};

pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = 0;
    bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
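
/*
 * A minimal usage sketch (illustrative only -- the attribute values and
 * the 4-byte frame size below are assumptions, not taken from any real
 * caller):
 *
 *     pa_memblockq *q = pa_memblockq_new(
 *             0,            (idx: initial read/write index)
 *             1024 * 1024,  (maxlength: hard cap on buffered bytes)
 *             64 * 1024,    (tlength: target fill level)
 *             4,            (base: frame size; all lengths rounded to it)
 *             8 * 1024,     (prebuf: bytes needed before reads succeed)
 *             4 * 1024,     (minreq: minimum request size)
 *             0,            (maxrewind: no rewind history kept)
 *             NULL);        (no silence chunk: holes come back as
 *                            memblock-less chunks)
 *
 *     if (pa_memblockq_push(q, &in) < 0)
 *         ... queue full or chunk not frame-aligned ...
 *
 *     pa_memchunk out;
 *     if (pa_memblockq_peek(q, &out) >= 0) {
 *         ... consume out; out.memblock may be NULL for silence ...
 *         if (out.memblock)
 *             pa_memblock_unref(out.memblock);
 *         pa_memblockq_drop(q, out.length);
 *     }
 *
 *     pa_memblockq_free(q);
 */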

void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will point either at, or to the left
       of, the next block to play. It may be NULL if everything in the
       queue has already been played. */
}

static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will point either at, or to the right
       of, the next block to write data to. It may be NULL if everything
       in the queue is still to be played. */
}

static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}

static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}

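/* Check whether l additional bytes fit at the current write index
 * without letting the buffered span exceed maxlength. Bytes landing in
 * the hole before read_index, or merely overwriting already queued
 * data, don't count against the limit. */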
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}

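/* Request accounting: every byte the write index advances is debited
 * either from "requested" (the writer was answering an earlier request,
 * account=TRUE) or from "missing" (unsolicited writes and seeks).
 * read_index_changed() credits consumed bytes back to "missing", and
 * pa_memblockq_pop_missing() later converts that deficit into the next
 * request to the writer. */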
static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, pa_bool_t account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    if (account)
        bq->requested -= delta;
    else
        bq->missing -= delta;

    /* pa_log("pushed/seeked %lli: requested counter at %lli, account=%i", (long long) delta, (long long) bq->requested, account); */
}

static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;
    bq->missing += delta;

    /* pa_log("popped %lli: missing counter at %lli", (long long) delta, (long long) bq->missing); */
}

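/* Write uchunk at the current write index. Fails with -1 if the chunk
 * length is not a multiple of the base frame size or if it would not
 * fit. Existing entries the new data overlaps are dropped, truncated
 * or split as required; consecutive writes from the same memblock are
 * merged into one entry. */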
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The tail of the new chunk overlaps the head of this
             * entry, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, TRUE);
    return 0;
}

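/* Prebuffering: while in_prebuf is set, reads are refused until at
 * least "prebuf" bytes are queued. Whenever the read index catches up
 * with the write index (and prebuf > 0), prebuffering is re-entered.
 * update_prebuf() implements this state machine and returns TRUE as
 * long as reading has to wait. */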
pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}

static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}

int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}

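/* Like pa_memblockq_peek(), but always returns exactly block_size
 * bytes, copying across entry boundaries and rendering holes and the
 * tail with the configured silence memchunk (which must be set). */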
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblockq_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}

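/* Advance the read index by length bytes. This happens entry by entry
 * so that re-entering prebuffering part-way through stops the drop
 * early; afterwards anything falling out of the rewind window is
 * released. */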
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}

void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= (int64_t) length;

    read_index_changed(bq, old);
}

pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}

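/* Reposition the write index according to the seek mode. Already
 * queued data is left in place (later pushes may overwrite it), but
 * anything that falls out of the rewind window is released. */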
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}

void pa_memblockq_flush_write(pa_memblockq *bq, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}

void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}

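/* Like pa_memblockq_push(), but runs the chunk through an mcalign
 * object first, so that arbitrarily sized chunks can be pushed into a
 * queue whose base frame size is larger than one byte. */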
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}

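/* Return how many bytes should be requested from the writer right now,
 * and move that amount from the "missing" to the "requested" ledger so
 * it is not requested a second time. */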
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

    /* pa_log("sent %lli: request counter is at %lli", (long long) l, (long long) bq->requested); */

    return l;
}

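/* The attribute setters below round their arguments to multiples of
 * the base frame size and keep the attributes mutually consistent:
 * minreq and tlength are capped by maxlength, and prebuf never exceeds
 * tlength + base - minreq. */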
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}

void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}

void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}

void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;
}

void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}

void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}

void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}

int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);

        pa_memblockq_drop(bq, chunk.length);
    }
}

void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}

void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}

pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}

void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}

unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}

size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}