/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)
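
/* For illustration: with the defaults above the pool spans
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB
 * of address space, of which only the slots we actually touch ever
 * get committed. */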

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we
     * can revoke the block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
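
/* Illustrative use of the allocation/access API (a sketch, not code
 * from this file): every access to the payload has to be bracketed by
 * pa_memblock_acquire()/pa_memblock_release(), and ownership is
 * eventually dropped with pa_memblock_unref():
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */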

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
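
/* Memory layout of an APPENDED block, for illustration: header and
 * payload live in a single heap allocation, with the payload starting
 * at the first aligned offset past the header:
 *
 *     +-----------------------+--------------------------- ... -+
 *     | pa_memblock (aligned) | payload (length bytes)          |
 *     +-----------------------+--------------------------- ... -+
 *     ^ b                     ^ b + PA_ALIGN(sizeof(pa_memblock))
 */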

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
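
/* Note on the scheme above: slots are handed out lock-free. Recycled
 * slots come from the free list (a lock-free pa_flist); otherwise
 * pa_atomic_inc(&p->n_init) bumps a watermark and hands out the next
 * never-used slot of the SHM region. The decrement on overflow merely
 * repairs the watermark; racing threads are harmless here because
 * every increment reserved a distinct index. */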

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
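
/* The two pool-backed cases above differ only in where the header
 * lives: for PA_MEMBLOCK_POOL both header and payload fit inside one
 * slot (like APPENDED blocks, but in SHM), while for
 * PA_MEMBLOCK_POOL_EXTERNAL the payload fills the whole slot and the
 * header is a separate heap allocation recycled via the
 * unused_memblocks free list. */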

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
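
/* Illustrative sketch of wrapping caller-owned memory: a USER block
 * hands ownership to the memblock, which invokes free_cb once the
 * last reference is gone:
 *
 *     uint8_t *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);   (eventually calls pa_xfree(buf))
 */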

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it signals the pool semaphore on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste a considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
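
/* How the handshake above fits together, for illustration:
 * pa_memblock_acquire() bumps n_acquired; pa_memblock_release() drops
 * it and, if please_signal is set and it was the last reader, posts
 * the pool semaphore. memblock_wait() sets please_signal and then
 * sleeps on that semaphore until n_acquired reaches zero. */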

/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple-caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
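
/* Rationale, for illustration: a FIXED block points at memory the
 * caller is about to invalidate. If other references still exist,
 * memblock_make_local() first copies the payload into pool or heap
 * memory, so the remaining holders keep seeing valid data after the
 * caller reuses its buffer. */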

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
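
/* Illustrative sketch of pool creation (parameter values are just
 * examples): passing 0 as size selects the default of
 * PA_MEMPOOL_SLOTS_MAX slots:
 *
 *     pa_mempool *pool;
 *     if (!(pool = pa_mempool_new(TRUE, 0)))   (shared SHM pool)
 *         ... fall back or fail ...
 *     ...
 *     pa_mempool_free(pool);   (only after all blocks are unreffed)
 */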

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
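
/* Illustrative receive-side flow (a sketch; the four IDs would arrive
 * in a protocol packet from the exporting peer):
 *
 *     pa_memblock *b = pa_memimport_get(import, block_id, shm_id, offset, size);
 *     if (!b)
 *         ... peer sent an invalid or out-of-range reference ...
 *
 * The returned block is read-only and stays valid until it is
 * unreffed or the peer revokes it via pa_memimport_process_revoke(). */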

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
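
/* Illustrative send-side flow (a sketch; variable names are just
 * examples): pa_memexport_put() pins the block in a slot and yields
 * the four values the receiving peer needs to map it:
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(export, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         ... transmit the four IDs to the peer ...
 *
 * When the peer signals it is done with the block, a matching
 * pa_memexport_process_release(export, block_id) drops the slot's
 * reference again. */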