1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/mutex.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/refcnt.h>
48 #include <pulsecore/llist.h>
49 #include <pulsecore/flist.h>
50 #include <pulsecore/core-util.h>
51 #include <pulsecore/memtrap.h>
52
53 #include "memblock.h"
54
55 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
56 * note that the footprint is usually much smaller, since the data is
57 * stored in SHM and our OS does not commit the memory before we use
58 * it for the first time. */
59 #define PA_MEMPOOL_SLOTS_MAX 1024
60 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
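
/* Editorial note (not part of the original file): worked out, the two values
 * above give 1024 slots * 64 KiB per slot = 64*1024*1024 bytes = 64 MiB,
 * which is the maximum mentioned in the comment above. */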
61
62 #define PA_MEMEXPORT_SLOTS_MAX 128
63
64 #define PA_MEMIMPORT_SLOTS_MAX 160
65 #define PA_MEMIMPORT_SEGMENTS_MAX 16
66
67 struct pa_memblock {
68 PA_REFCNT_DECLARE; /* the reference counter */
69 pa_mempool *pool;
70
71 pa_memblock_type_t type;
72
73 bool read_only:1;
74 bool is_silence:1;
75
76 pa_atomic_ptr_t data;
77 size_t length;
78
79 pa_atomic_t n_acquired;
80 pa_atomic_t please_signal;
81
82 union {
83 struct {
84 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
85 pa_free_cb_t free_cb;
86 } user;
87
88 struct {
89 uint32_t id;
90 pa_memimport_segment *segment;
91 } imported;
92 } per_type;
93 };
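
/* Editorial note (not part of the original file): per_type is a discriminated
 * union keyed on 'type' above. per_type.user.free_cb is only meaningful for
 * PA_MEMBLOCK_USER blocks and per_type.imported is only meaningful for
 * PA_MEMBLOCK_IMPORTED blocks; the other block types ignore the union. */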
94
95 struct pa_memimport_segment {
96 pa_memimport *import;
97 pa_shm memory;
98 pa_memtrap *trap;
99 unsigned n_blocks;
100 };
101
102 /* A collection of multiple segments */
103 struct pa_memimport {
104 pa_mutex *mutex;
105
106 pa_mempool *pool;
107 pa_hashmap *segments;
108 pa_hashmap *blocks;
109
110 /* Called whenever an imported memory block is no longer
111 * needed. */
112 pa_memimport_release_cb_t release_cb;
113 void *userdata;
114
115 PA_LLIST_FIELDS(pa_memimport);
116 };
117
118 struct memexport_slot {
119 PA_LLIST_FIELDS(struct memexport_slot);
120 pa_memblock *block;
121 };
122
123 struct pa_memexport {
124 pa_mutex *mutex;
125 pa_mempool *pool;
126
127 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
128
129 PA_LLIST_HEAD(struct memexport_slot, free_slots);
130 PA_LLIST_HEAD(struct memexport_slot, used_slots);
131 unsigned n_init;
132
133 /* Called whenever a client from which we imported a memory block,
134 which we in turn exported to another client, dies and we need to
135 revoke that memory block accordingly */
136 pa_memexport_revoke_cb_t revoke_cb;
137 void *userdata;
138
139 PA_LLIST_FIELDS(pa_memexport);
140 };
141
142 struct pa_mempool {
143 pa_semaphore *semaphore;
144 pa_mutex *mutex;
145
146 pa_shm memory;
147 size_t block_size;
148 unsigned n_blocks;
149
150 pa_atomic_t n_init;
151
152 PA_LLIST_HEAD(pa_memimport, imports);
153 PA_LLIST_HEAD(pa_memexport, exports);
154
155 /* A list of free slots that may be reused */
156 pa_flist *free_slots;
157
158 pa_mempool_stat stat;
159 };
160
161 static void segment_detach(pa_memimport_segment *seg);
162
163 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
164
165 /* No lock necessary */
166 static void stat_add(pa_memblock*b) {
167 pa_assert(b);
168 pa_assert(b->pool);
169
170 pa_atomic_inc(&b->pool->stat.n_allocated);
171 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
172
173 pa_atomic_inc(&b->pool->stat.n_accumulated);
174 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
175
176 if (b->type == PA_MEMBLOCK_IMPORTED) {
177 pa_atomic_inc(&b->pool->stat.n_imported);
178 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
179 }
180
181 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
182 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
183 }
184
185 /* No lock necessary */
186 static void stat_remove(pa_memblock *b) {
187 pa_assert(b);
188 pa_assert(b->pool);
189
190 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
191 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
192
193 pa_atomic_dec(&b->pool->stat.n_allocated);
194 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
195
196 if (b->type == PA_MEMBLOCK_IMPORTED) {
197 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
198 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
199
200 pa_atomic_dec(&b->pool->stat.n_imported);
201 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
202 }
203
204 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
205 }
206
207 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
208
209 /* No lock necessary */
210 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
211 pa_memblock *b;
212
213 pa_assert(p);
214 pa_assert(length);
215
216 if (!(b = pa_memblock_new_pool(p, length)))
217 b = memblock_new_appended(p, length);
218
219 return b;
220 }
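
/* Illustrative usage sketch (editorial addition, not part of the original
 * file); pool, sizes and names are arbitrary:
 *
 *   pa_mempool *pool = pa_mempool_new(true, 0);      // shared pool, default size
 *   pa_memblock *b = pa_memblock_new(pool, 4096);    // pool slot, or appended fallback
 *   void *d = pa_memblock_acquire(b);                // pin the data pointer
 *   memset(d, 0, pa_memblock_get_length(b));
 *   pa_memblock_release(b);                          // undo the acquire
 *   pa_memblock_unref(b);                            // frees once the refcount hits zero
 *   pa_mempool_free(pool);
 */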
221
222 /* No lock necessary */
223 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
224 pa_memblock *b;
225
226 pa_assert(p);
227 pa_assert(length);
228
229 /* If -1 is passed as length we choose the size for the caller. */
230
231 if (length == (size_t) -1)
232 length = pa_mempool_block_size_max(p);
233
234 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
235 PA_REFCNT_INIT(b);
236 b->pool = p;
237 b->type = PA_MEMBLOCK_APPENDED;
238 b->read_only = b->is_silence = false;
239 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
240 b->length = length;
241 pa_atomic_store(&b->n_acquired, 0);
242 pa_atomic_store(&b->please_signal, 0);
243
244 stat_add(b);
245 return b;
246 }
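
/* Editorial note (not part of the original file): an APPENDED block is a single
 * heap allocation laid out as
 *
 *   [ pa_memblock header, padded to PA_ALIGN(sizeof(pa_memblock)) ][ length bytes of payload ]
 *
 * which is why 'data' is set to the address right behind the aligned header. */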
247
248 /* No lock necessary */
249 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
250 struct mempool_slot *slot;
251 pa_assert(p);
252
253 if (!(slot = pa_flist_pop(p->free_slots))) {
254 int idx;
255
256 /* The free list was empty, so we have to allocate a new entry */
257
258 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
259 pa_atomic_dec(&p->n_init);
260 else
261 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
262
263 if (!slot) {
264 if (pa_log_ratelimit(PA_LOG_DEBUG))
265 pa_log_debug("Pool full");
266 pa_atomic_inc(&p->stat.n_pool_full);
267 return NULL;
268 }
269 }
270
271 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
272 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
273 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
274 /* } */
275 /* #endif */
276
277 return slot;
278 }
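
/* Editorial note (not part of the original file): slot allocation is lock-free.
 * A previously released slot is preferred from the free_slots flist; otherwise
 * n_init is bumped atomically to carve a fresh slot out of the SHM region, and
 * the increment is rolled back if the pool is already fully initialized. */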
279
280 /* No lock necessary; trivial, since the usable data starts at the slot's own address */
281 static inline void* mempool_slot_data(struct mempool_slot *slot) {
282 return slot;
283 }
284
285 /* No lock necessary */
286 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
287 pa_assert(p);
288
289 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
290 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
291
292 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
293 }
294
295 /* No lock necessary */
296 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
297 unsigned idx;
298
299 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
300 return NULL;
301
302 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
303 }
304
305 /* No lock necessary */
306 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
307 pa_memblock *b = NULL;
308 struct mempool_slot *slot;
309 static int mempool_disable = 0;
310
311 pa_assert(p);
312 pa_assert(length);
313
314 if (mempool_disable == 0)
315 mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
316
317 if (mempool_disable > 0)
318 return NULL;
319
320 /* If -1 is passed as length we choose the size for the caller: we
321 * take the largest size that fits in one of our slots. */
322
323 if (length == (size_t) -1)
324 length = pa_mempool_block_size_max(p);
325
326 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
327
328 if (!(slot = mempool_allocate_slot(p)))
329 return NULL;
330
331 b = mempool_slot_data(slot);
332 b->type = PA_MEMBLOCK_POOL;
333 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
334
335 } else if (p->block_size >= length) {
336
337 if (!(slot = mempool_allocate_slot(p)))
338 return NULL;
339
340 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
341 b = pa_xnew(pa_memblock, 1);
342
343 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
344 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
345
346 } else {
347 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
348 pa_atomic_inc(&p->stat.n_too_large_for_pool);
349 return NULL;
350 }
351
352 PA_REFCNT_INIT(b);
353 b->pool = p;
354 b->read_only = b->is_silence = false;
355 b->length = length;
356 pa_atomic_store(&b->n_acquired, 0);
357 pa_atomic_store(&b->please_signal, 0);
358
359 stat_add(b);
360 return b;
361 }
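
/* Editorial note (not part of the original file): POOL blocks keep header and
 * payload together in one slot; POOL_EXTERNAL is used when the payload alone
 * still fits a slot but header plus payload would not, so the header lives in
 * a separately allocated pa_memblock while the slot holds only the data. */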
362
363 /* No lock necessary */
364 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
365 pa_memblock *b;
366
367 pa_assert(p);
368 pa_assert(d);
369 pa_assert(length != (size_t) -1);
370 pa_assert(length);
371
372 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
373 b = pa_xnew(pa_memblock, 1);
374
375 PA_REFCNT_INIT(b);
376 b->pool = p;
377 b->type = PA_MEMBLOCK_FIXED;
378 b->read_only = read_only;
379 b->is_silence = false;
380 pa_atomic_ptr_store(&b->data, d);
381 b->length = length;
382 pa_atomic_store(&b->n_acquired, 0);
383 pa_atomic_store(&b->please_signal, 0);
384
385 stat_add(b);
386 return b;
387 }
388
389 /* No lock necessary */
390 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, bool read_only) {
391 pa_memblock *b;
392
393 pa_assert(p);
394 pa_assert(d);
395 pa_assert(length);
396 pa_assert(length != (size_t) -1);
397 pa_assert(free_cb);
398
399 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
400 b = pa_xnew(pa_memblock, 1);
401
402 PA_REFCNT_INIT(b);
403 b->pool = p;
404 b->type = PA_MEMBLOCK_USER;
405 b->read_only = read_only;
406 b->is_silence = false;
407 pa_atomic_ptr_store(&b->data, d);
408 b->length = length;
409 pa_atomic_store(&b->n_acquired, 0);
410 pa_atomic_store(&b->please_signal, 0);
411
412 b->per_type.user.free_cb = free_cb;
413
414 stat_add(b);
415 return b;
416 }
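
/* Illustrative usage sketch (editorial addition, not part of the original
 * file): wrapping caller-allocated memory so that the supplied callback frees
 * it once the last reference goes away:
 *
 *   void *buf = pa_xmalloc(1024);
 *   pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, false);
 *   ...
 *   pa_memblock_unref(b);   // eventually invokes pa_xfree(buf) via memblock_free()
 */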
417
418 /* No lock necessary */
419 bool pa_memblock_is_read_only(pa_memblock *b) {
420 pa_assert(b);
421 pa_assert(PA_REFCNT_VALUE(b) > 0);
422
423 return b->read_only && PA_REFCNT_VALUE(b) == 1;
424 }
425
426 /* No lock necessary */
427 bool pa_memblock_is_silence(pa_memblock *b) {
428 pa_assert(b);
429 pa_assert(PA_REFCNT_VALUE(b) > 0);
430
431 return b->is_silence;
432 }
433
434 /* No lock necessary */
435 void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
436 pa_assert(b);
437 pa_assert(PA_REFCNT_VALUE(b) > 0);
438
439 b->is_silence = v;
440 }
441
442 /* No lock necessary */
443 bool pa_memblock_ref_is_one(pa_memblock *b) {
444 int r;
445 pa_assert(b);
446
447 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
448
449 return r == 1;
450 }
451
452 /* No lock necessary */
453 void* pa_memblock_acquire(pa_memblock *b) {
454 pa_assert(b);
455 pa_assert(PA_REFCNT_VALUE(b) > 0);
456
457 pa_atomic_inc(&b->n_acquired);
458
459 return pa_atomic_ptr_load(&b->data);
460 }
461
462 /* No lock necessary */
463 void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
464 pa_assert(c);
465
466 return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
467 }
468
469 /* No lock necessary; in corner cases it synchronizes on its own */
470 void pa_memblock_release(pa_memblock *b) {
471 int r;
472 pa_assert(b);
473 pa_assert(PA_REFCNT_VALUE(b) > 0);
474
475 r = pa_atomic_dec(&b->n_acquired);
476 pa_assert(r >= 1);
477
478 /* Signal a waiting thread that this memblock is no longer used */
479 if (r == 1 && pa_atomic_load(&b->please_signal))
480 pa_semaphore_post(b->pool->semaphore);
481 }
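
/* Editorial note (not part of the original file): the please_signal counter
 * ties this function to memblock_wait() below: a thread that needs exclusive
 * access bumps please_signal and sleeps on the pool semaphore, and the last
 * releaser posts that semaphore here. */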
482
483 size_t pa_memblock_get_length(pa_memblock *b) {
484 pa_assert(b);
485 pa_assert(PA_REFCNT_VALUE(b) > 0);
486
487 return b->length;
488 }
489
490 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
491 pa_assert(b);
492 pa_assert(PA_REFCNT_VALUE(b) > 0);
493
494 return b->pool;
495 }
496
497 /* No lock necessary */
498 pa_memblock* pa_memblock_ref(pa_memblock*b) {
499 pa_assert(b);
500 pa_assert(PA_REFCNT_VALUE(b) > 0);
501
502 PA_REFCNT_INC(b);
503 return b;
504 }
505
506 static void memblock_free(pa_memblock *b) {
507 pa_assert(b);
508
509 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
510
511 stat_remove(b);
512
513 switch (b->type) {
514 case PA_MEMBLOCK_USER :
515 pa_assert(b->per_type.user.free_cb);
516 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
517
518 /* Fall through */
519
520 case PA_MEMBLOCK_FIXED:
521 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
522 pa_xfree(b);
523
524 break;
525
526 case PA_MEMBLOCK_APPENDED:
527
528 /* We could attach it to unused_memblocks, but that would
529 * probably waste a considerable amount of memory */
530 pa_xfree(b);
531 break;
532
533 case PA_MEMBLOCK_IMPORTED: {
534 pa_memimport_segment *segment;
535 pa_memimport *import;
536
537 /* FIXME! This should be implemented lock-free */
538
539 pa_assert_se(segment = b->per_type.imported.segment);
540 pa_assert_se(import = segment->import);
541
542 pa_mutex_lock(import->mutex);
543
544 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
545
546 pa_assert(segment->n_blocks >= 1);
547 if (-- segment->n_blocks <= 0)
548 segment_detach(segment);
549
550 pa_mutex_unlock(import->mutex);
551
552 import->release_cb(import, b->per_type.imported.id, import->userdata);
553
554 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
555 pa_xfree(b);
556
557 break;
558 }
559
560 case PA_MEMBLOCK_POOL_EXTERNAL:
561 case PA_MEMBLOCK_POOL: {
562 struct mempool_slot *slot;
563 bool call_free;
564
565 pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
566
567 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
568
569 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
570 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
571 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
572 /* } */
573 /* #endif */
574
575 /* The free list dimensions should easily allow all slots
576 * to fit in, hence try harder if pushing this slot into
577 * the free list fails */
578 while (pa_flist_push(b->pool->free_slots, slot) < 0)
579 ;
580
581 if (call_free)
582 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
583 pa_xfree(b);
584
585 break;
586 }
587
588 case PA_MEMBLOCK_TYPE_MAX:
589 default:
590 pa_assert_not_reached();
591 }
592 }
593
594 /* No lock necessary */
595 void pa_memblock_unref(pa_memblock*b) {
596 pa_assert(b);
597 pa_assert(PA_REFCNT_VALUE(b) > 0);
598
599 if (PA_REFCNT_DEC(b) > 0)
600 return;
601
602 memblock_free(b);
603 }
604
605 /* Self-locked */
606 static void memblock_wait(pa_memblock *b) {
607 pa_assert(b);
608
609 if (pa_atomic_load(&b->n_acquired) > 0) {
610 /* We need to wait until all threads gave up access to the
611 * memory block before we can go on. Unfortunately this means
612 * that we have to lock and wait here. Sniff! */
613
614 pa_atomic_inc(&b->please_signal);
615
616 while (pa_atomic_load(&b->n_acquired) > 0)
617 pa_semaphore_wait(b->pool->semaphore);
618
619 pa_atomic_dec(&b->please_signal);
620 }
621 }
622
623 /* No lock necessary. This function is not multiple-caller safe! */
624 static void memblock_make_local(pa_memblock *b) {
625 pa_assert(b);
626
627 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
628
629 if (b->length <= b->pool->block_size) {
630 struct mempool_slot *slot;
631
632 if ((slot = mempool_allocate_slot(b->pool))) {
633 void *new_data;
634 /* We can move it into a local pool, perfect! */
635
636 new_data = mempool_slot_data(slot);
637 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
638 pa_atomic_ptr_store(&b->data, new_data);
639
640 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
641 b->read_only = false;
642
643 goto finish;
644 }
645 }
646
647 /* Hmm, not enough space in the pool, so let's allocate the memory on the heap with pa_xmemdup() */
648 b->per_type.user.free_cb = pa_xfree;
649 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
650
651 b->type = PA_MEMBLOCK_USER;
652 b->read_only = false;
653
654 finish:
655 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
656 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
657 memblock_wait(b);
658 }
659
660 /* No lock necessary. This function is not multiple-caller safe */
661 void pa_memblock_unref_fixed(pa_memblock *b) {
662 pa_assert(b);
663 pa_assert(PA_REFCNT_VALUE(b) > 0);
664 pa_assert(b->type == PA_MEMBLOCK_FIXED);
665
666 if (PA_REFCNT_VALUE(b) > 1)
667 memblock_make_local(b);
668
669 pa_memblock_unref(b);
670 }
671
672 /* No lock necessary. */
673 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
674 void *p;
675
676 pa_assert(b);
677 pa_assert(PA_REFCNT_VALUE(b) > 0);
678
679 p = pa_memblock_acquire(b);
680 pa_will_need(p, b->length);
681 pa_memblock_release(b);
682
683 return b;
684 }
685
686 /* Self-locked. This function is not multiple-caller safe */
687 static void memblock_replace_import(pa_memblock *b) {
688 pa_memimport_segment *segment;
689 pa_memimport *import;
690
691 pa_assert(b);
692 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
693
694 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
695 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
696 pa_atomic_dec(&b->pool->stat.n_imported);
697 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
698
699 pa_assert_se(segment = b->per_type.imported.segment);
700 pa_assert_se(import = segment->import);
701
702 pa_mutex_lock(import->mutex);
703
704 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
705
706 memblock_make_local(b);
707
708 pa_assert(segment->n_blocks >= 1);
709 if (-- segment->n_blocks <= 0)
710 segment_detach(segment);
711
712 pa_mutex_unlock(import->mutex);
713 }
714
715 pa_mempool* pa_mempool_new(bool shared, size_t size) {
716 pa_mempool *p;
717 char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
718
719 p = pa_xnew(pa_mempool, 1);
720
721 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
722 if (p->block_size < PA_PAGE_SIZE)
723 p->block_size = PA_PAGE_SIZE;
724
725 if (size <= 0)
726 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
727 else {
728 p->n_blocks = (unsigned) (size / p->block_size);
729
730 if (p->n_blocks < 2)
731 p->n_blocks = 2;
732 }
733
734 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
735 pa_xfree(p);
736 return NULL;
737 }
738
739 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
740 p->memory.shared ? "shared" : "private",
741 p->n_blocks,
742 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
743 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
744 (unsigned long) pa_mempool_block_size_max(p));
745
746 memset(&p->stat, 0, sizeof(p->stat));
747 pa_atomic_store(&p->n_init, 0);
748
749 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
750 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
751
752 p->mutex = pa_mutex_new(true, true);
753 p->semaphore = pa_semaphore_new(0);
754
755 p->free_slots = pa_flist_new(p->n_blocks);
756
757 return p;
758 }
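
/* Illustrative sizing sketch (editorial addition, not part of the original
 * file), assuming a typical 4 KiB page size: block_size becomes 64 KiB, so
 * pa_mempool_new(true, 1024*1024) yields 1 MiB / 64 KiB = 16 slots, while
 * pa_mempool_new(false, 0) falls back to PA_MEMPOOL_SLOTS_MAX = 1024 slots,
 * i.e. a 64 MiB private pool. */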
759
760 void pa_mempool_free(pa_mempool *p) {
761 pa_assert(p);
762
763 pa_mutex_lock(p->mutex);
764
765 while (p->imports)
766 pa_memimport_free(p->imports);
767
768 while (p->exports)
769 pa_memexport_free(p->exports);
770
771 pa_mutex_unlock(p->mutex);
772
773 pa_flist_free(p->free_slots, NULL);
774
775 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
776
777 /* Ouch, somebody is retaining a memory block reference! */
778
779 #ifdef DEBUG_REF
780 unsigned i;
781 pa_flist *list;
782
783 /* Let's try to find at least one of those leaked memory blocks */
784
785 list = pa_flist_new(p->n_blocks);
786
787 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
788 struct mempool_slot *slot;
789 pa_memblock *b, *k;
790
791 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
792 b = mempool_slot_data(slot);
793
794 while ((k = pa_flist_pop(p->free_slots))) {
795 while (pa_flist_push(list, k) < 0)
796 ;
797
798 if (b == k)
799 break;
800 }
801
802 if (!k)
803 pa_log("REF: Leaked memory block %p", b);
804
805 while ((k = pa_flist_pop(list)))
806 while (pa_flist_push(p->free_slots, k) < 0)
807 ;
808 }
809
810 pa_flist_free(list, NULL);
811
812 #endif
813
814 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
815
816 /* PA_DEBUG_TRAP; */
817 }
818
819 pa_shm_free(&p->memory);
820
821 pa_mutex_free(p->mutex);
822 pa_semaphore_free(p->semaphore);
823
824 pa_xfree(p);
825 }
826
827 /* No lock necessary */
828 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
829 pa_assert(p);
830
831 return &p->stat;
832 }
833
834 /* No lock necessary */
835 size_t pa_mempool_block_size_max(pa_mempool *p) {
836 pa_assert(p);
837
838 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
839 }
840
841 /* No lock necessary */
842 void pa_mempool_vacuum(pa_mempool *p) {
843 struct mempool_slot *slot;
844 pa_flist *list;
845
846 pa_assert(p);
847
848 list = pa_flist_new(p->n_blocks);
849
850 while ((slot = pa_flist_pop(p->free_slots)))
851 while (pa_flist_push(list, slot) < 0)
852 ;
853
854 while ((slot = pa_flist_pop(list))) {
855 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
856
857 while (pa_flist_push(p->free_slots, slot))
858 ;
859 }
860
861 pa_flist_free(list, NULL);
862 }
863
864 /* No lock necessary */
865 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
866 pa_assert(p);
867
868 if (!p->memory.shared)
869 return -1;
870
871 *id = p->memory.id;
872
873 return 0;
874 }
875
876 /* No lock necessary */
877 bool pa_mempool_is_shared(pa_mempool *p) {
878 pa_assert(p);
879
880 return !!p->memory.shared;
881 }
882
883 /* For receiving blocks from other nodes */
884 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
885 pa_memimport *i;
886
887 pa_assert(p);
888 pa_assert(cb);
889
890 i = pa_xnew(pa_memimport, 1);
891 i->mutex = pa_mutex_new(true, true);
892 i->pool = p;
893 i->segments = pa_hashmap_new(NULL, NULL);
894 i->blocks = pa_hashmap_new(NULL, NULL);
895 i->release_cb = cb;
896 i->userdata = userdata;
897
898 pa_mutex_lock(p->mutex);
899 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
900 pa_mutex_unlock(p->mutex);
901
902 return i;
903 }
904
905 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
906
907 /* Should be called locked */
908 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
909 pa_memimport_segment* seg;
910
911 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
912 return NULL;
913
914 seg = pa_xnew0(pa_memimport_segment, 1);
915
916 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
917 pa_xfree(seg);
918 return NULL;
919 }
920
921 seg->import = i;
922 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
923
924 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
925 return seg;
926 }
927
928 /* Should be called locked */
929 static void segment_detach(pa_memimport_segment *seg) {
930 pa_assert(seg);
931
932 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
933 pa_shm_free(&seg->memory);
934
935 if (seg->trap)
936 pa_memtrap_remove(seg->trap);
937
938 pa_xfree(seg);
939 }
940
941 /* Self-locked. Not multiple-caller safe */
942 void pa_memimport_free(pa_memimport *i) {
943 pa_memexport *e;
944 pa_memblock *b;
945
946 pa_assert(i);
947
948 pa_mutex_lock(i->mutex);
949
950 while ((b = pa_hashmap_first(i->blocks)))
951 memblock_replace_import(b);
952
953 pa_assert(pa_hashmap_size(i->segments) == 0);
954
955 pa_mutex_unlock(i->mutex);
956
957 pa_mutex_lock(i->pool->mutex);
958
959 /* If we've further exported any blocks imported through this import, we need to revoke those exports */
960 for (e = i->pool->exports; e; e = e->next)
961 memexport_revoke_blocks(e, i);
962
963 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
964
965 pa_mutex_unlock(i->pool->mutex);
966
967 pa_hashmap_free(i->blocks);
968 pa_hashmap_free(i->segments);
969
970 pa_mutex_free(i->mutex);
971
972 pa_xfree(i);
973 }
974
975 /* Self-locked */
976 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
977 pa_memblock *b = NULL;
978 pa_memimport_segment *seg;
979
980 pa_assert(i);
981
982 pa_mutex_lock(i->mutex);
983
984 if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
985 pa_memblock_ref(b);
986 goto finish;
987 }
988
989 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
990 goto finish;
991
992 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
993 if (!(seg = segment_attach(i, shm_id)))
994 goto finish;
995
996 if (offset+size > seg->memory.size)
997 goto finish;
998
999 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1000 b = pa_xnew(pa_memblock, 1);
1001
1002 PA_REFCNT_INIT(b);
1003 b->pool = i->pool;
1004 b->type = PA_MEMBLOCK_IMPORTED;
1005 b->read_only = true;
1006 b->is_silence = false;
1007 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1008 b->length = size;
1009 pa_atomic_store(&b->n_acquired, 0);
1010 pa_atomic_store(&b->please_signal, 0);
1011 b->per_type.imported.id = block_id;
1012 b->per_type.imported.segment = seg;
1013
1014 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1015
1016 seg->n_blocks++;
1017
1018 stat_add(b);
1019
1020 finish:
1021 pa_mutex_unlock(i->mutex);
1022
1023 return b;
1024 }
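
/* Illustrative import-side flow (editorial addition, not part of the original
 * file): the peer that called pa_memexport_put() sends block_id, shm_id,
 * offset and size over the wire; the receiving side passes them to
 * pa_memimport_get(), which attaches the SHM segment read-only if it is not
 * mapped yet and hands back a PA_MEMBLOCK_IMPORTED block. When that block is
 * eventually freed, release_cb() tells the exporting side it may reuse the
 * corresponding slot. */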
1025
1026 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1027 pa_memblock *b;
1028 int ret = 0;
1029 pa_assert(i);
1030
1031 pa_mutex_lock(i->mutex);
1032
1033 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1034 ret = -1;
1035 goto finish;
1036 }
1037
1038 memblock_replace_import(b);
1039
1040 finish:
1041 pa_mutex_unlock(i->mutex);
1042
1043 return ret;
1044 }
1045
1046 /* For sending blocks to other nodes */
1047 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1048 pa_memexport *e;
1049
1050 pa_assert(p);
1051 pa_assert(cb);
1052
1053 if (!p->memory.shared)
1054 return NULL;
1055
1056 e = pa_xnew(pa_memexport, 1);
1057 e->mutex = pa_mutex_new(true, true);
1058 e->pool = p;
1059 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1060 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1061 e->n_init = 0;
1062 e->revoke_cb = cb;
1063 e->userdata = userdata;
1064
1065 pa_mutex_lock(p->mutex);
1066 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1067 pa_mutex_unlock(p->mutex);
1068 return e;
1069 }
1070
1071 void pa_memexport_free(pa_memexport *e) {
1072 pa_assert(e);
1073
1074 pa_mutex_lock(e->mutex);
1075 while (e->used_slots)
1076 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
1077 pa_mutex_unlock(e->mutex);
1078
1079 pa_mutex_lock(e->pool->mutex);
1080 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1081 pa_mutex_unlock(e->pool->mutex);
1082
1083 pa_mutex_free(e->mutex);
1084 pa_xfree(e);
1085 }
1086
1087 /* Self-locked */
1088 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1089 pa_memblock *b;
1090
1091 pa_assert(e);
1092
1093 pa_mutex_lock(e->mutex);
1094
1095 if (id >= e->n_init)
1096 goto fail;
1097
1098 if (!e->slots[id].block)
1099 goto fail;
1100
1101 b = e->slots[id].block;
1102 e->slots[id].block = NULL;
1103
1104 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1105 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1106
1107 pa_mutex_unlock(e->mutex);
1108
1109 /* pa_log("Processing release for %u", id); */
1110
1111 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1112 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1113
1114 pa_atomic_dec(&e->pool->stat.n_exported);
1115 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1116
1117 pa_memblock_unref(b);
1118
1119 return 0;
1120
1121 fail:
1122 pa_mutex_unlock(e->mutex);
1123
1124 return -1;
1125 }
1126
1127 /* Self-locked */
1128 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1129 struct memexport_slot *slot, *next;
1130 pa_assert(e);
1131 pa_assert(i);
1132
1133 pa_mutex_lock(e->mutex);
1134
1135 for (slot = e->used_slots; slot; slot = next) {
1136 uint32_t idx;
1137 next = slot->next;
1138
1139 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1140 slot->block->per_type.imported.segment->import != i)
1141 continue;
1142
1143 idx = (uint32_t) (slot - e->slots);
1144 e->revoke_cb(e, idx, e->userdata);
1145 pa_memexport_process_release(e, idx);
1146 }
1147
1148 pa_mutex_unlock(e->mutex);
1149 }
1150
1151 /* No lock necessary */
1152 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1153 pa_memblock *n;
1154
1155 pa_assert(p);
1156 pa_assert(b);
1157
1158 if (b->type == PA_MEMBLOCK_IMPORTED ||
1159 b->type == PA_MEMBLOCK_POOL ||
1160 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1161 pa_assert(b->pool == p);
1162 return pa_memblock_ref(b);
1163 }
1164
1165 if (!(n = pa_memblock_new_pool(p, b->length)))
1166 return NULL;
1167
1168 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1169 return n;
1170 }
1171
1172 /* Self-locked */
1173 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1174 pa_shm *memory;
1175 struct memexport_slot *slot;
1176 void *data;
1177
1178 pa_assert(e);
1179 pa_assert(b);
1180 pa_assert(block_id);
1181 pa_assert(shm_id);
1182 pa_assert(offset);
1183 pa_assert(size);
1184 pa_assert(b->pool == e->pool);
1185
1186 if (!(b = memblock_shared_copy(e->pool, b)))
1187 return -1;
1188
1189 pa_mutex_lock(e->mutex);
1190
1191 if (e->free_slots) {
1192 slot = e->free_slots;
1193 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1194 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1195 slot = &e->slots[e->n_init++];
1196 else {
1197 pa_mutex_unlock(e->mutex);
1198 pa_memblock_unref(b);
1199 return -1;
1200 }
1201
1202 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1203 slot->block = b;
1204 *block_id = (uint32_t) (slot - e->slots);
1205
1206 pa_mutex_unlock(e->mutex);
1207 /* pa_log("Got block id %u", *block_id); */
1208
1209 data = pa_memblock_acquire(b);
1210
1211 if (b->type == PA_MEMBLOCK_IMPORTED) {
1212 pa_assert(b->per_type.imported.segment);
1213 memory = &b->per_type.imported.segment->memory;
1214 } else {
1215 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1216 pa_assert(b->pool);
1217 memory = &b->pool->memory;
1218 }
1219
1220 pa_assert(data >= memory->ptr);
1221 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1222
1223 *shm_id = memory->id;
1224 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1225 *size = b->length;
1226
1227 pa_memblock_release(b);
1228
1229 pa_atomic_inc(&e->pool->stat.n_exported);
1230 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1231
1232 return 0;
1233 }
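
/* Illustrative export-side flow (editorial addition, not part of the original
 * file):
 *
 *   uint32_t block_id, shm_id;
 *   size_t offset, size;
 *
 *   if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *       // transmit the four values to the peer, which feeds them to
 *       // pa_memimport_get(); once the peer is done and acknowledges,
 *       // pa_memexport_process_release(e, block_id) drops our reference.
 *   }
 */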