/* $Id$ */

/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>

#include "memblockq.h"

struct memblock_list {
    struct memblock_list *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

struct pa_memblockq {
    struct memblock_list *blocks, *blocks_tail;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq;
    int64_t read_index, write_index;
    enum { PREBUF, RUNNING } state;
    pa_memblock_stat *memblock_stat;
    pa_memblock *silence;
    pa_mcalign *mcalign;
};

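/* Create a new memory block queue. All sizes are in bytes; maxlength,
 * tlength and prebuf are rounded up and minreq rounded down to multiples
 * of the alignment granularity 'base'. 'idx' is the initial read and
 * write index, 'tlength' the target fill level, 'prebuf' the amount of
 * data that must be queued before reading may start ((size_t) -1 selects
 * tlength/2), 'minreq' the minimum request size reported by
 * pa_memblockq_missing(), 'silence' an optional memory block handed out
 * while there is no data at the read index, and 's' an optional
 * pa_memblock_stat object passed on to the internal pa_mcalign. */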
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        pa_memblock *silence,
        pa_memblock_stat *s) {

    pa_memblockq* bq;

    assert(base > 0);
    assert(maxlength >= base);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;
    bq->memblock_stat = s;

    pa_log_debug(__FILE__": memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
                 (unsigned long)maxlength, (unsigned long)tlength, (unsigned long)base, (unsigned long)prebuf, (unsigned long)minreq);

    bq->maxlength = ((maxlength+base-1)/base)*base;
    assert(bq->maxlength >= base);

    bq->tlength = ((tlength+base-1)/base)*base;
    if (!bq->tlength || bq->tlength >= bq->maxlength)
        bq->tlength = bq->maxlength;

    bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
    bq->prebuf = ((bq->prebuf+base-1)/base)*base;
    if (bq->prebuf > bq->maxlength)
        bq->prebuf = bq->maxlength;

    bq->minreq = (minreq/base)*base;

    if (bq->minreq > bq->tlength - bq->prebuf)
        bq->minreq = bq->tlength - bq->prebuf;

    if (!bq->minreq)
        bq->minreq = 1;

    pa_log_debug(__FILE__": memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
                 (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);

    bq->state = bq->prebuf ? PREBUF : RUNNING;
    bq->silence = silence ? pa_memblock_ref(silence) : NULL;
    bq->mcalign = NULL;

    return bq;
}
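
/* Illustrative usage sketch; the parameter values below are arbitrary
 * examples (here assuming a frame size of 4 bytes), not defaults of this
 * API:
 *
 *     pa_memblockq *q = pa_memblockq_new(
 *         0,            // initial read/write index
 *         65536,        // maxlength
 *         32768,        // tlength
 *         4,            // base (frame alignment)
 *         (size_t) -1,  // prebuf: defaults to tlength/2
 *         0,            // minreq: sanitized up to at least 1
 *         NULL,         // no silence block: peek() reports holes instead
 *         NULL);        // no pa_memblock_stat accounting
 *
 *     // ... fill with pa_memblockq_push()/pa_memblockq_push_align(),
 *     // read with pa_memblockq_peek() followed by pa_memblockq_drop() ...
 *
 *     pa_memblockq_free(q);
 */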

void pa_memblockq_free(pa_memblockq* bq) {
    assert(bq);

    pa_memblockq_flush(bq);

    if (bq->silence)
        pa_memblock_unref(bq->silence);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

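/* Unlink a single entry from the block list, release its memory block
 * reference and free the list node. The read and write indexes are not
 * touched by this helper. */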
static void drop_block(pa_memblockq *bq, struct memblock_list *q) {
    assert(bq);
    assert(q);

    assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else
        bq->blocks = q->next;

    if (q->next)
        q->next->prev = q->prev;
    else
        bq->blocks_tail = q->prev;

    pa_memblock_unref(q->chunk.memblock);
    pa_xfree(q);

    bq->n_blocks--;
}

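/* Check whether l bytes may be written at the current write index
 * without the queue growing beyond maxlength. Data that merely fills the
 * gap left by an earlier underrun (write index behind the read index) or
 * that overwrites already queued data does not count against the
 * limit. */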
static int can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    assert(bq);

    if (bq->read_index > bq->write_index) {
        size_t d = bq->read_index - bq->write_index;

        if (l > d)
            l -= d;
        else
            return 1;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t)l > end)
        if (bq->write_index + l - bq->read_index > bq->maxlength)
            return 0;

    return 1;
}

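/* Append the given chunk at the current write index. The chunk length
 * must be a multiple of the base alignment. Data that overlaps entries
 * already in the queue replaces them; where possible the chunk is merged
 * with its predecessor instead of creating a new list entry. Returns 0
 * on success, or -1 if the chunk is unaligned or the queue would exceed
 * maxlength. An extra reference to the chunk's memory block is taken, so
 * the caller keeps ownership of its own reference. */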
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {

    struct memblock_list *q, *n;
    pa_memchunk chunk;

    assert(bq);
    assert(uchunk);
    assert(uchunk->memblock);
    assert(uchunk->length > 0);
    assert(uchunk->index + uchunk->length <= uchunk->memblock->length);

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    chunk = *uchunk;

    if (bq->read_index > bq->write_index) {

        /* We currently have a buffer underflow, we need to drop some
         * incoming data */

        size_t d = bq->read_index - bq->write_index;

        if (chunk.length > d) {
            chunk.index += d;
            chunk.length -= d;
            bq->write_index = bq->read_index;
        } else {
            /* We drop the incoming data completely */
            bq->write_index += chunk.length;
            return 0;
        }
    }

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    q = bq->blocks_tail;
    while (q) {

        if (bq->write_index >= q->index + (int64_t)q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t)chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct memblock_list *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct memblock_list *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                p = pa_xnew(struct memblock_list, 1);
                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct memblock_list *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            assert(bq->write_index + (int64_t)chunk.length > q->index &&
                   bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                   bq->write_index < q->index);

            /* The new chunk overwrites the beginning of this entry, so let's drop the overwritten part */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }

    }

    if (q) {
        assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            return 0;
        }
    } else
        assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));


    n = pa_xnew(struct memblock_list, 1);
    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;
    return 0;
}

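/* Return a reference to the chunk at the current read index without
 * advancing it. Returns -1 while prebuffering (or, with no silence block
 * configured, while the queue is empty). If there is a hole at the read
 * index, either the configured silence block or a chunk with a NULL
 * memblock is returned, its length giving the size of the hole (capped
 * to the silence block's size). On success the caller owns one reference
 * to chunk->memblock, if it is non-NULL.
 *
 * Illustrative single read step (sketch only):
 *
 *     pa_memchunk c;
 *     if (pa_memblockq_peek(q, &c) >= 0) {
 *         if (c.memblock) {
 *             // consume c.length bytes of c.memblock starting at offset c.index
 *             pa_memblock_unref(c.memblock);
 *         }
 *         pa_memblockq_drop(q, &c, c.length);
 *     }
 */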
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    assert(bq);
    assert(chunk);

    if (bq->state == PREBUF) {

        /* We need to pre-buffer */
        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return -1;

        bq->state = RUNNING;

    } else if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {

        /* Buffer underflow protection */
        bq->state = PREBUF;
        return -1;
    }

    /* Do we need to spit out silence? */
    if (!bq->blocks || bq->blocks->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        length = bq->blocks ? bq->blocks->index - bq->read_index : 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            chunk->memblock = pa_memblock_ref(bq->silence);

            if (!length || length > chunk->memblock->length)
                length = chunk->memblock->length;

            chunk->length = length;
        } else {

            /* If the memblockq is empty, return -1; otherwise return a
             * chunk with a NULL memblock whose length tells the caller
             * how large the hole at the read index is */
            if (!bq->blocks)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    assert(bq->blocks->index == bq->read_index);

    *chunk = bq->blocks->chunk;
    pa_memblock_ref(chunk->memblock);

    return 0;
}

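/* Advance the read index by 'length' bytes, which must be a multiple of
 * the base alignment. If 'chunk' is non-NULL it must be the chunk most
 * recently returned by pa_memblockq_peek(); if the queue head no longer
 * matches it (because new data was pushed in the meantime), the call is
 * silently ignored. Any part of 'length' that covers a hole simply moves
 * the read index forward. */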
void pa_memblockq_drop(pa_memblockq *bq, const pa_memchunk *chunk, size_t length) {
    assert(bq);
    assert(length % bq->base == 0);

    assert(!chunk || length <= chunk->length);

    if (chunk) {

        if (bq->blocks && bq->blocks->index == bq->read_index) {
            /* The first item in queue is valid */

            /* Does the chunk match with what the user supplied us? */
            if (memcmp(chunk, &bq->blocks->chunk, sizeof(pa_memchunk)) != 0)
                return;

        } else {
            size_t l;

            /* The first item in the queue is not yet relevant */

            assert(!bq->blocks || bq->blocks->index > bq->read_index);
            l = bq->blocks ? bq->blocks->index - bq->read_index : 0;

            if (bq->silence) {

                if (!l || l > bq->silence->length)
                    l = bq->silence->length;

            }

            /* Do the entries still match? */
            if (chunk->index != 0 || chunk->length != l || chunk->memblock != bq->silence)
                return;
        }
    }

    while (length > 0) {

        if (bq->blocks) {
            size_t d;

            assert(bq->blocks->index >= bq->read_index);

            d = (size_t) (bq->blocks->index - bq->read_index);

            if (d >= length) {
                /* The first block is too far in the future */

                bq->read_index += length;
                break;
            } else {

                length -= d;
                bq->read_index += d;
            }

            assert(bq->blocks->index == bq->read_index);

            if (bq->blocks->chunk.length <= length) {
                /* We need to drop the full block */

                length -= bq->blocks->chunk.length;
                bq->read_index += bq->blocks->chunk.length;

                drop_block(bq, bq->blocks);
            } else {
                /* Only the start of this block needs to be dropped */

                bq->blocks->chunk.index += length;
                bq->blocks->chunk.length -= length;
                bq->blocks->index += length;
                bq->read_index += length;
                break;
            }

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }
}

int pa_memblockq_is_readable(pa_memblockq *bq) {
    assert(bq);

    if (bq->prebuf > 0) {
        size_t l = pa_memblockq_get_length(bq);

        if (bq->state == PREBUF && l < bq->prebuf)
            return 0;

        if (l <= 0)
            return 0;
    }

    return 1;
}

int pa_memblockq_is_writable(pa_memblockq *bq, size_t length) {
    assert(bq);

    if (length % bq->base)
        return 0;

    return pa_memblockq_get_length(bq) + length <= bq->tlength;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

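/* Return how many bytes the writer should be asked to supply: the
 * distance from the current fill level to tlength, or 0 if that distance
 * is smaller than minreq. */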
size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;
    return (l >= bq->minreq) ? l : 0;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    assert(bq);

    return bq->minreq;
}

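/* Reposition the write index without moving any queued data:
 * PA_SEEK_RELATIVE moves it relative to its current position,
 * PA_SEEK_ABSOLUTE sets it to an absolute byte offset,
 * PA_SEEK_RELATIVE_ON_READ positions it relative to the read index and
 * PA_SEEK_RELATIVE_END relative to the end of the queued data (or the
 * read index if the queue is empty). Seeking backwards may cause later
 * pushes to overwrite data that is still queued. */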
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    assert(bq);

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            return;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            return;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            return;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t)bq->blocks_tail->chunk.length : bq->read_index) + offset;
            return;
    }

    assert(0);
}

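/* Drop all queued data and reset the write index back to the read index.
 * Prebuffering is re-enabled (if prebuf > 0), so reading only resumes
 * once enough data has been queued again. */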
void pa_memblockq_flush(pa_memblockq *bq) {
    assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    assert(bq->n_blocks == 0);

    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    assert(bq);

    return bq->tlength;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    assert(bq);
    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    assert(bq);
    return bq->write_index;
}

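/* Like pa_memblockq_push(), but accepts chunks of arbitrary length. Data
 * is run through an internal pa_mcalign object so that only base-aligned
 * chunks reach the queue; a trailing fragment smaller than 'base' is
 * kept back until more data arrives. */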
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    assert(bq);
    assert(chunk && bq->base);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!bq->mcalign)
        bq->mcalign = pa_mcalign_new(bq->base, bq->memblock_stat);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0)
            return -1;
    }

    return 0;
}

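/* Drop data from the head of the queue so that at most 'length' bytes
 * remain queued. */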
void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
    size_t l;
    assert(bq);

    l = pa_memblockq_get_length(bq);

    if (l > length)
        pa_memblockq_drop(bq, NULL, l - length);
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    assert(bq);

    if (bq->state == PREBUF)
        bq->state = RUNNING;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    assert(bq);

    if (bq->state == RUNNING && bq->prebuf > 0)
        bq->state = PREBUF;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    assert(bq);

    return bq->prebuf;
}