]> code.delx.au - pulseaudio/blob - src/pulsecore/memblockq.c
core: Make debugging a bit simpler
[pulseaudio] / src / pulsecore / memblockq.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14    Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29
30 #include <pulse/xmalloc.h>
31
32 #include <pulsecore/log.h>
33 #include <pulsecore/mcalign.h>
34 #include <pulsecore/macro.h>
35 #include <pulsecore/flist.h>
36
37 #include "memblockq.h"
38
39 /* #define MEMBLOCKQ_DEBUG */
40
/* One queue entry: a reference-counted memchunk plus its absolute
 * position in the stream. Entries form a doubly linked list kept
 * sorted by index. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;      /* absolute stream position where this chunk starts */
    pa_memchunk chunk;  /* the referenced audio data */
};

/* Process-wide free list recycling list_item allocations across queues */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
/* A play/record buffer queue addressed by 64 bit absolute indexes.
 * Blocks are kept sorted; gaps between blocks are rendered as
 * silence on read. */
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;          /* head/tail of the sorted block list */
    struct list_item *current_read, *current_write;  /* cached cursors, fixed up lazily */
    unsigned n_blocks;                               /* number of entries in the list */
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;  /* buffer metrics; base = frame size */
    int64_t read_index, write_index;                 /* absolute stream positions */
    pa_bool_t in_prebuf;                             /* TRUE while we hold back data for prebuffering */
    pa_memchunk silence;                             /* template chunk used to synthesize silence */
    pa_mcalign *mcalign;                             /* frame aligner used by pa_memblockq_push_align() */
    int64_t missing, requested;                      /* client request accounting */
    char *name;                                      /* for debug output only */
    pa_sample_spec sample_spec;
};
62
/* Create a new memblockq named @name with both indexes starting at
 * @idx. All size parameters are sanitized against each other via the
 * pa_memblockq_set_*() calls below (their order is significant). If
 * @silence is non-NULL its memblock gains a reference and is used to
 * synthesize silence on underruns. Caller owns the returned object
 * and must release it with pa_memblockq_free(). */
pa_memblockq* pa_memblockq_new(
        const char *name,
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        const pa_sample_spec *sample_spec,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(sample_spec);
    pa_assert(name);

    bq = pa_xnew(pa_memblockq, 1);
    bq->name = pa_xstrdup(name);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->sample_spec = *sample_spec;
    bq->base = pa_frame_size(sample_spec);
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) bq->base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = 0;
    bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    /* The setters clamp against each other's current values, so the
     * call order below is significant */
    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
115
116 void pa_memblockq_free(pa_memblockq* bq) {
117 pa_assert(bq);
118
119 pa_memblockq_silence(bq);
120
121 if (bq->silence.memblock)
122 pa_memblock_unref(bq->silence.memblock);
123
124 if (bq->mcalign)
125 pa_mcalign_free(bq->mcalign);
126
127 pa_xfree(bq->name);
128 pa_xfree(bq);
129 }
130
/* Re-position the cached current_read cursor after the read index or
 * the block list changed. Scans left then right so that afterwards
 * current_read points at (or left of) the block covering
 * bq->read_index, or is NULL. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    /* Stale cursor: restart from the head */
    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
158
/* Mirror of fix_current_read() for the write cursor: scans right then
 * left so that afterwards current_write points at (or right of) the
 * block covering bq->write_index, or is NULL. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    /* Stale cursor: restart from the tail */
    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
186
187 static void drop_block(pa_memblockq *bq, struct list_item *q) {
188 pa_assert(bq);
189 pa_assert(q);
190
191 pa_assert(bq->n_blocks >= 1);
192
193 if (q->prev)
194 q->prev->next = q->next;
195 else {
196 pa_assert(bq->blocks == q);
197 bq->blocks = q->next;
198 }
199
200 if (q->next)
201 q->next->prev = q->prev;
202 else {
203 pa_assert(bq->blocks_tail == q);
204 bq->blocks_tail = q->prev;
205 }
206
207 if (bq->current_write == q)
208 bq->current_write = q->prev;
209
210 if (bq->current_read == q)
211 bq->current_read = q->next;
212
213 pa_memblock_unref(q->chunk.memblock);
214
215 if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
216 pa_xfree(q);
217
218 bq->n_blocks--;
219 }
220
221 static void drop_backlog(pa_memblockq *bq) {
222 int64_t boundary;
223 pa_assert(bq);
224
225 boundary = bq->read_index - (int64_t) bq->maxrewind;
226
227 while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
228 drop_block(bq, bq->blocks);
229 }
230
231 static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
232 int64_t end;
233
234 pa_assert(bq);
235
236 if (bq->read_index > bq->write_index) {
237 int64_t d = bq->read_index - bq->write_index;
238
239 if ((int64_t) l > d)
240 l -= (size_t) d;
241 else
242 return TRUE;
243 }
244
245 end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;
246
247 /* Make sure that the list doesn't get too long */
248 if (bq->write_index + (int64_t) l > end)
249 if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
250 return FALSE;
251
252 return TRUE;
253 }
254
/* Account for a moved write index. With @account set the data counts
 * against what we previously requested from the client, otherwise it
 * reduces the "missing" counter directly. */
static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, pa_bool_t account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    if (account)
        bq->requested -= delta;
    else
        bq->missing -= delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log("[%s] pushed/seeked %lli: requested counter at %lli, account=%i", bq->name, (long long) delta, (long long) bq->requested, account);
#endif
}
271
/* Account for a moved read index: consumed (or skipped) data
 * increases the "missing" counter, i.e. how much new data we may
 * request from the client. */
static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;
    bq->missing += delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log("[%s] popped %lli: missing counter at %lli", bq->name, (long long) delta, (long long) bq->missing);
#endif
}
284
/* Insert the data of @uchunk at the current write index, overwriting
 * any queued data it overlaps (existing entries are dropped, split or
 * trimmed as needed). The chunk length must be a multiple of the
 * frame size (bq->base). On success the memblock gains a reference,
 * the write index advances by the chunk length and 0 is returned;
 * returns -1 if the chunk is unaligned or the queue is full. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    /* Only frame-aligned writes are accepted */
    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                /* Truncated to zero length: remove the entry entirely */
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* No merge possible: allocate a fresh list entry for the chunk */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    /* Link the new entry right after q (or at the head if q is NULL) */
    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, TRUE);
    return 0;
}
451
452 pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
453 pa_assert(bq);
454
455 if (bq->in_prebuf)
456 return pa_memblockq_get_length(bq) < bq->prebuf;
457 else
458 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
459 }
460
461 static pa_bool_t update_prebuf(pa_memblockq *bq) {
462 pa_assert(bq);
463
464 if (bq->in_prebuf) {
465
466 if (pa_memblockq_get_length(bq) < bq->prebuf)
467 return TRUE;
468
469 bq->in_prebuf = FALSE;
470 return FALSE;
471 } else {
472
473 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
474 bq->in_prebuf = TRUE;
475 return TRUE;
476 }
477
478 return FALSE;
479 }
480 }
481
/* Return (without consuming) the chunk at the read index in *chunk,
 * taking a reference on its memblock. If no real data is queued
 * there, a chunk of the configured silence block is returned instead;
 * without a silence block, a memblock-less chunk whose length is the
 * distance to the next data is returned. Returns -1 while
 * prebuffering or when nothing at all is available. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            /* length == 0 means "unbounded": keep the full silence chunk */
            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
539
/* Like pa_memblockq_peek(), but always yields exactly @block_size
 * bytes, filling gaps with data from the silence block (which must be
 * set). If the first queued chunk already covers block_size it is
 * handed out directly; otherwise a fresh memblock is allocated and
 * filled piecewise from queued data and silence. Returns -1 only if
 * the underlying peek fails (e.g. still prebuffering). */
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        /* First chunk is already big enough, hand it out truncated */
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblock_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    /* Keep appending (queued data or silence) until the block is full;
     * rchunk.index doubles as the fill position, ri tracks the
     * corresponding absolute stream position */
    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
609
/* Consume @length bytes (must be frame-aligned) by advancing the read
 * index. Consumption stops early if the queue (re-)enters
 * prebuffering on the way. Data falling behind the rewind window is
 * released afterwards. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}
652
653 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
654 int64_t old;
655 pa_assert(bq);
656 pa_assert(length % bq->base == 0);
657
658 old = bq->read_index;
659
660 /* This is kind of the inverse of pa_memblockq_drop() */
661
662 bq->read_index -= (int64_t) length;
663
664 read_index_changed(bq, old);
665 }
666
667 pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
668 pa_assert(bq);
669
670 if (pa_memblockq_prebuf_active(bq))
671 return FALSE;
672
673 if (pa_memblockq_get_length(bq) <= 0)
674 return FALSE;
675
676 return TRUE;
677 }
678
679 size_t pa_memblockq_get_length(pa_memblockq *bq) {
680 pa_assert(bq);
681
682 if (bq->write_index <= bq->read_index)
683 return 0;
684
685 return (size_t) (bq->write_index - bq->read_index);
686 }
687
688 size_t pa_memblockq_missing(pa_memblockq *bq) {
689 size_t l;
690 pa_assert(bq);
691
692 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
693 return 0;
694
695 l = bq->tlength - l;
696
697 return l >= bq->minreq ? l : 0;
698 }
699
700 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
701 int64_t old;
702 pa_assert(bq);
703
704 old = bq->write_index;
705
706 switch (seek) {
707 case PA_SEEK_RELATIVE:
708 bq->write_index += offset;
709 break;
710 case PA_SEEK_ABSOLUTE:
711 bq->write_index = offset;
712 break;
713 case PA_SEEK_RELATIVE_ON_READ:
714 bq->write_index = bq->read_index + offset;
715 break;
716 case PA_SEEK_RELATIVE_END:
717 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
718 break;
719 default:
720 pa_assert_not_reached();
721 }
722
723 drop_backlog(bq);
724 write_index_changed(bq, old, account);
725 }
726
727 void pa_memblockq_flush_write(pa_memblockq *bq, pa_bool_t account) {
728 int64_t old;
729 pa_assert(bq);
730
731 pa_memblockq_silence(bq);
732
733 old = bq->write_index;
734 bq->write_index = bq->read_index;
735
736 pa_memblockq_prebuf_force(bq);
737 write_index_changed(bq, old, account);
738 }
739
740 void pa_memblockq_flush_read(pa_memblockq *bq) {
741 int64_t old;
742 pa_assert(bq);
743
744 pa_memblockq_silence(bq);
745
746 old = bq->read_index;
747 bq->read_index = bq->write_index;
748
749 pa_memblockq_prebuf_force(bq);
750 read_index_changed(bq, old);
751 }
752
/* Configured target buffer length in bytes */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
758
/* Minimum request size in bytes */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
764
/* Maximum rewind distance in bytes */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
770
/* Absolute read index */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
776
/* Absolute write index */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
782
783 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
784 pa_memchunk rchunk;
785
786 pa_assert(bq);
787 pa_assert(chunk);
788
789 if (bq->base == 1)
790 return pa_memblockq_push(bq, chunk);
791
792 if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
793 return -1;
794
795 pa_mcalign_push(bq->mcalign, chunk);
796
797 while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
798 int r;
799 r = pa_memblockq_push(bq, &rchunk);
800 pa_memblock_unref(rchunk.memblock);
801
802 if (r < 0) {
803 pa_mcalign_flush(bq->mcalign);
804 return -1;
805 }
806 }
807
808 return 0;
809 }
810
/* Leave prebuffering immediately, regardless of the fill level */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}
816
/* Re-arm prebuffering (only meaningful if a prebuf size is set) */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}
823
/* Maximum queue length in bytes */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
829
/* Configured prebuffer size in bytes */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
835
836 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
837 size_t l;
838
839 pa_assert(bq);
840
841 #ifdef MEMBLOCKQ_DEBUG
842 pa_log("[%s] pop: %lli", bq->name, (long long) bq->missing);
843 #endif
844
845 if (bq->missing <= 0)
846 return 0;
847
848 l = (size_t) bq->missing;
849
850 bq->requested += bq->missing;
851 bq->missing = 0;
852
853 #ifdef MEMBLOCKQ_DEBUG
854 pa_log("[%s] sent %lli: request counter is at %lli", bq->name, (long long) l, (long long) bq->requested);
855 #endif
856
857 return l;
858 }
859
860 void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
861 pa_assert(bq);
862
863 bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;
864
865 if (bq->maxlength < bq->base)
866 bq->maxlength = bq->base;
867
868 if (bq->tlength > bq->maxlength)
869 pa_memblockq_set_tlength(bq, bq->maxlength);
870 }
871
872 void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
873 size_t old_tlength;
874 pa_assert(bq);
875
876 if (tlength <= 0 || tlength == (size_t) -1)
877 tlength = bq->maxlength;
878
879 old_tlength = bq->tlength;
880 bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;
881
882 if (bq->tlength > bq->maxlength)
883 bq->tlength = bq->maxlength;
884
885 if (bq->minreq > bq->tlength)
886 pa_memblockq_set_minreq(bq, bq->tlength);
887
888 if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
889 pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
890
891 bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
892 }
893
894 void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
895 pa_assert(bq);
896
897 bq->minreq = (minreq/bq->base)*bq->base;
898
899 if (bq->minreq > bq->tlength)
900 bq->minreq = bq->tlength;
901
902 if (bq->minreq < bq->base)
903 bq->minreq = bq->base;
904
905 if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
906 pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
907 }
908
909 void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
910 pa_assert(bq);
911
912 if (prebuf == (size_t) -1)
913 prebuf = bq->tlength+bq->base-bq->minreq;
914
915 bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;
916
917 if (prebuf > 0 && bq->prebuf < bq->base)
918 bq->prebuf = bq->base;
919
920 if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
921 bq->prebuf = bq->tlength+bq->base-bq->minreq;
922
923 if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
924 bq->in_prebuf = FALSE;
925 }
926
/* Set how far pa_memblockq_rewind() may go back, rounded down to a
 * full frame. Already-released history is not resurrected. */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
932
/* Apply a client-supplied pa_buffer_attr. The setter call order is
 * significant: each setter clamps against the values installed by the
 * ones before it. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}
942
/* Export the current (sanitized) buffer metrics into *a. Values are
 * narrowed to the protocol's 32 bit fields. */
void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}
952
/* Transfer data from @source into @bq until source's peek fails.
 * Real data is pushed (frame-aligned); silence-only stretches are
 * skipped by seeking bq's write index forward instead. Returns 0
 * when source ran dry, -1 if a push failed.
 * NOTE(review): pa_memblockq_drop() below advances *bq*'s read index
 * (not source's) by the transferred amount — presumably relying on
 * the caller's setup of both queues; verify against callers before
 * reusing this in a new context. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);

        pa_memblockq_drop(bq, chunk.length);
    }
}
982
983 void pa_memblockq_willneed(pa_memblockq *bq) {
984 struct list_item *q;
985
986 pa_assert(bq);
987
988 fix_current_read(bq);
989
990 for (q = bq->current_read; q; q = q->next)
991 pa_memchunk_will_need(&q->chunk);
992 }
993
994 void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
995 pa_assert(bq);
996
997 if (bq->silence.memblock)
998 pa_memblock_unref(bq->silence.memblock);
999
1000 if (silence) {
1001 bq->silence = *silence;
1002 pa_memblock_ref(bq->silence.memblock);
1003 } else
1004 pa_memchunk_reset(&bq->silence);
1005 }
1006
/* TRUE when no blocks are queued at all (indexes are not considered) */
pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
1012
/* Drop every queued block. Despite the name this empties the queue
 * (making reads yield silence) rather than writing silence data; the
 * read and write indexes are left untouched. */
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
1021
/* Number of blocks currently queued */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
1027
/* Frame size (in bytes) all queue metrics are aligned to */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}