/* src/polypcore/memblockq.c */
/* $Id$ */

/***
  This file is part of polypaudio.

  polypaudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  polypaudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with polypaudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <sys/time.h>
27 #include <time.h>
28 #include <stdio.h>
29 #include <assert.h>
30 #include <stdlib.h>
31 #include <string.h>
32
33 #include <polypcore/xmalloc.h>
34 #include <polypcore/log.h>
35 #include <polypcore/mcalign.h>
36
37 #include "memblockq.h"
38
/* One entry of the queue: a doubly-linked list node that maps an
 * absolute stream position to a slice of a reference-counted memory
 * block. */
struct memblock_list {
    struct memblock_list *next, *prev;
    int64_t index;      /* absolute stream position of the first byte of this chunk */
    pa_memchunk chunk;  /* the referenced memblock slice (memblock, index, length) */
};
44
/* A play-out queue of memory chunks, addressed by absolute 64-bit
 * stream indices so that seeks and rewrites are possible. */
struct pa_memblockq {
    struct memblock_list *blocks, *blocks_tail;       /* head/tail of the chunk list, ordered by index */
    unsigned n_blocks;                                /* number of list entries */
    size_t maxlength, tlength, base, prebuf, minreq;  /* buffering metrics, all multiples of base (minreq may be 1) */
    int64_t read_index, write_index;                  /* absolute read/write positions; write may lag read after a seek */
    enum { PREBUF, RUNNING } state;                   /* PREBUF: withhold data until prebuf bytes are queued */
    pa_memblock_stat *memblock_stat;                  /* statistics accounting, passed on to allocations */
    pa_memblock *silence;                             /* optional block returned while no real data is available */
    pa_mcalign *mcalign;                              /* lazily created aligner used by pa_memblockq_push_align() */
};
55
56 pa_memblockq* pa_memblockq_new(
57 int64_t idx,
58 size_t maxlength,
59 size_t tlength,
60 size_t base,
61 size_t prebuf,
62 size_t minreq,
63 pa_memblock *silence,
64 pa_memblock_stat *s) {
65
66 pa_memblockq* bq;
67
68 assert(base > 0);
69 assert(maxlength >= base);
70
71 bq = pa_xnew(pa_memblockq, 1);
72 bq->blocks = bq->blocks_tail = NULL;
73 bq->n_blocks = 0;
74
75 bq->base = base;
76 bq->read_index = bq->write_index = idx;
77 bq->memblock_stat = s;
78
79 pa_log_debug(__FILE__": memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
80 (unsigned long)maxlength, (unsigned long)tlength, (unsigned long)base, (unsigned long)prebuf, (unsigned long)minreq);
81
82 bq->maxlength = ((maxlength+base-1)/base)*base;
83 assert(bq->maxlength >= base);
84
85 bq->tlength = ((tlength+base-1)/base)*base;
86 if (!bq->tlength || bq->tlength >= bq->maxlength)
87 bq->tlength = bq->maxlength;
88
89 bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
90 bq->prebuf = ((bq->prebuf+base-1)/base)*base;
91 if (bq->prebuf > bq->maxlength)
92 bq->prebuf = bq->maxlength;
93
94 bq->minreq = (minreq/base)*base;
95
96 if (bq->minreq > bq->tlength - bq->prebuf)
97 bq->minreq = bq->tlength - bq->prebuf;
98
99 if (!bq->minreq)
100 bq->minreq = 1;
101
102 pa_log_debug(__FILE__": memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
103 (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);
104
105 bq->state = bq->prebuf ? PREBUF : RUNNING;
106 bq->silence = silence ? pa_memblock_ref(silence) : NULL;
107 bq->mcalign = NULL;
108
109 return bq;
110 }
111
112 void pa_memblockq_free(pa_memblockq* bq) {
113 assert(bq);
114
115 pa_memblockq_flush(bq);
116
117 if (bq->silence)
118 pa_memblock_unref(bq->silence);
119
120 if (bq->mcalign)
121 pa_mcalign_free(bq->mcalign);
122
123 pa_xfree(bq);
124 }
125
126 static void drop_block(pa_memblockq *bq, struct memblock_list *q) {
127 assert(bq);
128 assert(q);
129
130 assert(bq->n_blocks >= 1);
131
132 if (q->prev)
133 q->prev->next = q->next;
134 else
135 bq->blocks = q->next;
136
137 if (q->next)
138 q->next->prev = q->prev;
139 else
140 bq->blocks_tail = q->prev;
141
142 pa_memblock_unref(q->chunk.memblock);
143 pa_xfree(q);
144
145 bq->n_blocks--;
146 }
147
148 static int can_push(pa_memblockq *bq, size_t l) {
149 int64_t end;
150
151 assert(bq);
152
153 if (bq->read_index > bq->write_index) {
154 size_t d = bq->read_index - bq->write_index;
155
156 if (l > d)
157 l -= d;
158 else
159 return 1;
160 }
161
162 end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;
163
164 /* Make sure that the list doesn't get too long */
165 if (bq->write_index + (int64_t)l > end)
166 if (bq->write_index + l - bq->read_index > bq->maxlength)
167 return 0;
168
169 return 1;
170 }
171
172 int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
173
174 struct memblock_list *q, *n;
175 pa_memchunk chunk;
176
177 assert(bq);
178 assert(uchunk);
179 assert(uchunk->memblock);
180 assert(uchunk->length > 0);
181 assert(uchunk->index + uchunk->length <= uchunk->memblock->length);
182
183 if (uchunk->length % bq->base)
184 return -1;
185
186 if (!can_push(bq, uchunk->length))
187 return -1;
188
189 chunk = *uchunk;
190
191 if (bq->read_index > bq->write_index) {
192
193 /* We currently have a buffer underflow, we need to drop some
194 * incoming data */
195
196 size_t d = bq->read_index - bq->write_index;
197
198 if (chunk.length > d) {
199 chunk.index += d;
200 chunk.length -= d;
201 bq->write_index = bq->read_index;
202 } else {
203 /* We drop the incoming data completely */
204 bq->write_index += chunk.length;
205 return 0;
206 }
207 }
208
209 /* We go from back to front to look for the right place to add
210 * this new entry. Drop data we will overwrite on the way */
211
212 q = bq->blocks_tail;
213 while (q) {
214
215 if (bq->write_index >= q->index + (int64_t)q->chunk.length)
216 /* We found the entry where we need to place the new entry immediately after */
217 break;
218 else if (bq->write_index + (int64_t)chunk.length <= q->index) {
219 /* This entry isn't touched at all, let's skip it */
220 q = q->prev;
221 } else if (bq->write_index <= q->index &&
222 bq->write_index + chunk.length >= q->index + q->chunk.length) {
223
224 /* This entry is fully replaced by the new entry, so let's drop it */
225
226 struct memblock_list *p;
227 p = q;
228 q = q->prev;
229 drop_block(bq, p);
230 } else if (bq->write_index >= q->index) {
231 /* The write index points into this memblock, so let's
232 * truncate or split it */
233
234 if (bq->write_index + chunk.length < q->index + q->chunk.length) {
235
236 /* We need to save the end of this memchunk */
237 struct memblock_list *p;
238 size_t d;
239
240 /* Create a new list entry for the end of thie memchunk */
241 p = pa_xnew(struct memblock_list, 1);
242 p->chunk = q->chunk;
243 pa_memblock_ref(p->chunk.memblock);
244
245 /* Calculate offset */
246 d = bq->write_index + chunk.length - q->index;
247 assert(d > 0);
248
249 /* Drop it from the new entry */
250 p->index = q->index + d;
251 p->chunk.length -= d;
252
253 /* Add it to the list */
254 p->prev = q;
255 if ((p->next = q->next))
256 q->next->prev = p;
257 else
258 bq->blocks_tail = p;
259 q->next = p;
260
261 bq->n_blocks++;
262 }
263
264 /* Truncate the chunk */
265 if (!(q->chunk.length = bq->write_index - q->index)) {
266 struct memblock_list *p;
267 p = q;
268 q = q->prev;
269 drop_block(bq, p);
270 }
271
272 /* We had to truncate this block, hence we're now at the right position */
273 break;
274 } else {
275 size_t d;
276
277 assert(bq->write_index + (int64_t)chunk.length > q->index &&
278 bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
279 bq->write_index < q->index);
280
281 /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */
282
283 d = bq->write_index + chunk.length - q->index;
284 q->index += d;
285 q->chunk.index += d;
286 q->chunk.length -= d;
287
288 q = q->prev;
289 }
290
291 }
292
293 if (q) {
294 assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
295 assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));
296
297 /* Try to merge memory blocks */
298
299 if (q->chunk.memblock == chunk.memblock &&
300 q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
301 bq->write_index == q->index + (int64_t)q->chunk.length) {
302
303 q->chunk.length += chunk.length;
304 bq->write_index += chunk.length;
305 return 0;
306 }
307 } else
308 assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));
309
310
311 n = pa_xnew(struct memblock_list, 1);
312 n->chunk = chunk;
313 pa_memblock_ref(n->chunk.memblock);
314 n->index = bq->write_index;
315 bq->write_index += n->chunk.length;
316
317 n->next = q ? q->next : bq->blocks;
318 n->prev = q;
319
320 if (n->next)
321 n->next->prev = n;
322 else
323 bq->blocks_tail = n;
324
325 if (n->prev)
326 n->prev->next = n;
327 else
328 bq->blocks = n;
329
330 bq->n_blocks++;
331 return 0;
332 }
333
/* Return (without consuming) the chunk at the current read index in
 * *chunk, taking a reference on its memblock for the caller.
 *
 * Returns -1 while pre-buffering (not enough data queued yet) or on
 * underflow with prebuf enabled; 0 otherwise. When no real data is
 * available at the read index, a slice of the silence block is
 * returned instead (or, if no silence block was configured, a chunk
 * with memblock == NULL — the caller must handle that case). */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    assert(bq);
    assert(chunk);

    if (bq->state == PREBUF) {

        /* We need to pre-buffer: withhold data until prebuf bytes are queued */
        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return -1;

        bq->state = RUNNING;

    } else if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {

        /* Buffer underflow protection: drop back into prebuffering */
        bq->state = PREBUF;
        return -1;
    }

    /* Do we need to spit out silence? (read index is ahead of the first block) */
    if (!bq->blocks || bq->blocks->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? 0 means "no upper bound known" */
        length = bq->blocks ? bq->blocks->index - bq->read_index : 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            chunk->memblock = pa_memblock_ref(bq->silence);

            /* Cap at the silence block size; an unbounded gap returns a full block */
            if (!length || length > chunk->memblock->length)
                length = chunk->memblock->length;

            chunk->length = length;
        } else {
            /* No silence block configured: hand back a NULL memblock
             * with the gap length (possibly 0) */
            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    assert(bq->blocks->index == bq->read_index);

    *chunk = bq->blocks->chunk;
    pa_memblock_ref(chunk->memblock);

    return 0;
}
386
/* Advance the read index by length bytes, dropping consumed data.
 *
 * If chunk is non-NULL it must be the chunk previously returned by
 * pa_memblockq_peek(); if the queue head changed in the meantime (the
 * peeked chunk no longer matches) the drop is silently skipped so
 * stale data is not discarded. length must be a multiple of base. */
void pa_memblockq_drop(pa_memblockq *bq, const pa_memchunk *chunk, size_t length) {
    assert(bq);
    assert(length % bq->base == 0);

    assert(!chunk || length <= chunk->length);

    if (chunk) {

        if (bq->blocks && bq->blocks->index == bq->read_index) {
            /* The first item in queue is valid */

            /* Does the chunk match with what the user supplied us?
             * (byte-wise compare of the whole pa_memchunk) */
            if (memcmp(chunk, &bq->blocks->chunk, sizeof(pa_memchunk)) != 0)
                return;

        } else {
            size_t l;

            /* The first item in the queue is not yet relevant: the
             * peeked chunk must have been silence */

            assert(!bq->blocks || bq->blocks->index > bq->read_index);
            l = bq->blocks ? bq->blocks->index - bq->read_index : 0;

            /* Recompute the silence length exactly as peek() did */
            if (bq->silence) {

                if (!l || l > bq->silence->length)
                    l = bq->silence->length;

            }

            /* Do the entries still match? */
            if (chunk->index != 0 || chunk->length != l || chunk->memblock != bq->silence)
                return;
        }
    }

    /* Consume length bytes: skip over gaps, drop whole blocks, and
     * trim the final partially-consumed block */
    while (length > 0) {

        if (bq->blocks) {
            size_t d;

            assert(bq->blocks->index >= bq->read_index);

            /* Gap (silence) between read index and the first block */
            d = (size_t) (bq->blocks->index - bq->read_index);

            if (d >= length) {
                /* The first block is too far in the future: only silence is dropped */

                bq->read_index += length;
                break;
            } else {

                length -= d;
                bq->read_index += d;
            }

            assert(bq->blocks->index == bq->read_index);

            if (bq->blocks->chunk.length <= length) {
                /* We need to drop the full block */

                length -= bq->blocks->chunk.length;
                bq->read_index += bq->blocks->chunk.length;

                drop_block(bq, bq->blocks);
            } else {
                /* Only the start of this block needs to be dropped */

                bq->blocks->chunk.index += length;
                bq->blocks->chunk.length -= length;
                bq->blocks->index += length;
                bq->read_index += length;
                break;
            }

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }
}
470
471 int pa_memblockq_is_readable(pa_memblockq *bq) {
472 assert(bq);
473
474 if (bq->prebuf > 0) {
475 size_t l = pa_memblockq_get_length(bq);
476
477 if (bq->state == PREBUF && l < bq->prebuf)
478 return 0;
479
480 if (l <= 0)
481 return 0;
482 }
483
484 return 1;
485 }
486
487 int pa_memblockq_is_writable(pa_memblockq *bq, size_t length) {
488 assert(bq);
489
490 if (length % bq->base)
491 return 0;
492
493 return pa_memblockq_get_length(bq) + length <= bq->tlength;
494 }
495
496 size_t pa_memblockq_get_length(pa_memblockq *bq) {
497 assert(bq);
498
499 if (bq->write_index <= bq->read_index)
500 return 0;
501
502 return (size_t) (bq->write_index - bq->read_index);
503 }
504
505 size_t pa_memblockq_missing(pa_memblockq *bq) {
506 size_t l;
507 assert(bq);
508
509 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
510 return 0;
511
512 l = bq->tlength - l;
513 return (l >= bq->minreq) ? l : 0;
514 }
515
516 size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
517 assert(bq);
518
519 return bq->minreq;
520 }
521
522 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
523 assert(bq);
524
525 switch (seek) {
526 case PA_SEEK_RELATIVE:
527 bq->write_index += offset;
528 return;
529 case PA_SEEK_ABSOLUTE:
530 bq->write_index = offset;
531 return;
532 case PA_SEEK_RELATIVE_ON_READ:
533 bq->write_index = bq->read_index + offset;
534 return;
535 case PA_SEEK_RELATIVE_END:
536 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t)bq->blocks_tail->chunk.length : bq->read_index) + offset;
537 return;
538 }
539
540 assert(0);
541 }
542
543 void pa_memblockq_flush(pa_memblockq *bq) {
544 assert(bq);
545
546 while (bq->blocks)
547 drop_block(bq, bq->blocks);
548
549 assert(bq->n_blocks == 0);
550
551 pa_memblockq_prebuf_force(bq);
552 }
553
554 size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
555 assert(bq);
556
557 return bq->tlength;
558 }
559
560 int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
561 assert(bq);
562 return bq->read_index;
563 }
564
565 int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
566 assert(bq);
567 return bq->write_index;
568 }
569
570 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
571 pa_memchunk rchunk;
572
573 assert(bq);
574 assert(chunk && bq->base);
575
576 if (bq->base == 1)
577 return pa_memblockq_push(bq, chunk);
578
579 if (!bq->mcalign)
580 bq->mcalign = pa_mcalign_new(bq->base, bq->memblock_stat);
581
582 if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
583 return -1;
584
585 pa_mcalign_push(bq->mcalign, chunk);
586
587 while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
588 int r;
589 r = pa_memblockq_push(bq, &rchunk);
590 pa_memblock_unref(rchunk.memblock);
591
592 if (r < 0)
593 return -1;
594 }
595
596 return 0;
597 }
598
599 void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
600 size_t l;
601 assert(bq);
602
603 l = pa_memblockq_get_length(bq);
604
605 if (l > length)
606 pa_memblockq_drop(bq, NULL, l - length);
607 }
608
609 void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
610 assert(bq);
611
612 if (bq->state == PREBUF)
613 bq->state = RUNNING;
614 }
615
616 void pa_memblockq_prebuf_force(pa_memblockq *bq) {
617 assert(bq);
618
619 if (bq->state == RUNNING && bq->prebuf > 0)
620 bq->state = PREBUF;
621 }