1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/memtrap.h>
49
50 #include "memblock.h"
51
52 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
53 * note that the footprint is usually much smaller, since the data is
54 * stored in SHM and our OS does not commit the memory before we use
55 * it for the first time. */
56 #define PA_MEMPOOL_SLOTS_MAX 1024
57 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
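/* 1024 slots of 64 KiB each account for the 64MB figure mentioned above.
 * The effective slot size is additionally rounded up to the page size in
 * pa_mempool_new(), and the largest block that fits into a single slot is
 * block_size - PA_ALIGN(sizeof(pa_memblock)), see pa_mempool_block_size_max(). */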
58
59 #define PA_MEMEXPORT_SLOTS_MAX 128
60
61 #define PA_MEMIMPORT_SLOTS_MAX 160
62 #define PA_MEMIMPORT_SEGMENTS_MAX 16
63
64 struct pa_memblock {
65 PA_REFCNT_DECLARE; /* the reference counter */
66 pa_mempool *pool;
67
68 pa_memblock_type_t type;
69
70 pa_bool_t read_only:1;
71 pa_bool_t is_silence:1;
72
73 pa_atomic_ptr_t data;
74 size_t length;
75
76 pa_atomic_t n_acquired;
77 pa_atomic_t please_signal;
78
79 union {
80 struct {
81 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
82 pa_free_cb_t free_cb;
83 } user;
84
85 struct {
86 uint32_t id;
87 pa_memimport_segment *segment;
88 } imported;
89 } per_type;
90 };
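/* How the pa_memblock_type_t values map onto this structure, as used by the
 * constructors and memblock_free() below:
 *
 *   PA_MEMBLOCK_POOL           header and data share one mempool slot
 *   PA_MEMBLOCK_POOL_EXTERNAL  header allocated separately, data in a pool slot
 *   PA_MEMBLOCK_APPENDED       header and data in one pa_xmalloc() allocation
 *   PA_MEMBLOCK_FIXED          data owned by the caller, never freed by us
 *   PA_MEMBLOCK_USER           data owned by the caller, freed via per_type.user.free_cb
 *   PA_MEMBLOCK_IMPORTED       data lives in an SHM segment attached from another
 *                              process, tracked via per_type.imported */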
91
92 struct pa_memimport_segment {
93 pa_memimport *import;
94 pa_shm memory;
95 pa_memtrap *trap;
96 unsigned n_blocks;
97 };
98
99 /* A collection of multiple segments */
100 struct pa_memimport {
101 pa_mutex *mutex;
102
103 pa_mempool *pool;
104 pa_hashmap *segments;
105 pa_hashmap *blocks;
106
107 /* Called whenever an imported memory block is no longer
108 * needed. */
109 pa_memimport_release_cb_t release_cb;
110 void *userdata;
111
112 PA_LLIST_FIELDS(pa_memimport);
113 };
114
115 struct memexport_slot {
116 PA_LLIST_FIELDS(struct memexport_slot);
117 pa_memblock *block;
118 };
119
120 struct pa_memexport {
121 pa_mutex *mutex;
122 pa_mempool *pool;
123
124 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
125
126 PA_LLIST_HEAD(struct memexport_slot, free_slots);
127 PA_LLIST_HEAD(struct memexport_slot, used_slots);
128 unsigned n_init;
129
130 /* Called whenever a client dies from which we imported a memory
131 block that we in turn exported to another client, so that we can
132 revoke the memory block accordingly */
133 pa_memexport_revoke_cb_t revoke_cb;
134 void *userdata;
135
136 PA_LLIST_FIELDS(pa_memexport);
137 };
138
139 struct pa_mempool {
140 pa_semaphore *semaphore;
141 pa_mutex *mutex;
142
143 pa_shm memory;
144 size_t block_size;
145 unsigned n_blocks;
146
147 pa_atomic_t n_init;
148
149 PA_LLIST_HEAD(pa_memimport, imports);
150 PA_LLIST_HEAD(pa_memexport, exports);
151
152 /* A list of free slots that may be reused */
153 pa_flist *free_slots;
154
155 pa_mempool_stat stat;
156 };
157
158 static void segment_detach(pa_memimport_segment *seg);
159
160 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
161
162 /* No lock necessary */
163 static void stat_add(pa_memblock*b) {
164 pa_assert(b);
165 pa_assert(b->pool);
166
167 pa_atomic_inc(&b->pool->stat.n_allocated);
168 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
169
170 pa_atomic_inc(&b->pool->stat.n_accumulated);
171 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
172
173 if (b->type == PA_MEMBLOCK_IMPORTED) {
174 pa_atomic_inc(&b->pool->stat.n_imported);
175 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
176 }
177
178 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
179 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
180 }
181
182 /* No lock necessary */
183 static void stat_remove(pa_memblock *b) {
184 pa_assert(b);
185 pa_assert(b->pool);
186
187 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
188 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
189
190 pa_atomic_dec(&b->pool->stat.n_allocated);
191 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
192
193 if (b->type == PA_MEMBLOCK_IMPORTED) {
194 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
195 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
196
197 pa_atomic_dec(&b->pool->stat.n_imported);
198 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
199 }
200
201 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
202 }
203
204 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
205
206 /* No lock necessary */
207 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
208 pa_memblock *b;
209
210 pa_assert(p);
211 pa_assert(length);
212
213 if (!(b = pa_memblock_new_pool(p, length)))
214 b = memblock_new_appended(p, length);
215
216 return b;
217 }
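/* A minimal usage sketch (not part of this file; the buffer handling is
 * hypothetical and error checking is omitted):
 *
 *   pa_memblock *b = pa_memblock_new(pool, 4096);
 *   void *d = pa_memblock_acquire(b);
 *   memset(d, 0, pa_memblock_get_length(b));  // fill the block
 *   pa_memblock_release(b);                   // pair every acquire with a release
 *   pa_memblock_unref(b);                     // drop the reference when done
 */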
218
219 /* No lock necessary */
220 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
221 pa_memblock *b;
222
223 pa_assert(p);
224 pa_assert(length);
225
226 /* If -1 is passed as length we choose the size for the caller. */
227
228 if (length == (size_t) -1)
229 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
230
231 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
232 PA_REFCNT_INIT(b);
233 b->pool = p;
234 b->type = PA_MEMBLOCK_APPENDED;
235 b->read_only = b->is_silence = FALSE;
236 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
237 b->length = length;
238 pa_atomic_store(&b->n_acquired, 0);
239 pa_atomic_store(&b->please_signal, 0);
240
241 stat_add(b);
242 return b;
243 }
244
245 /* No lock necessary */
246 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
247 struct mempool_slot *slot;
248 pa_assert(p);
249
250 if (!(slot = pa_flist_pop(p->free_slots))) {
251 int idx;
252
253 /* The free list was empty, we have to allocate a new entry */
254
255 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
256 pa_atomic_dec(&p->n_init);
257 else
258 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
259
260 if (!slot) {
261 if (pa_log_ratelimit())
262 pa_log_debug("Pool full");
263 pa_atomic_inc(&p->stat.n_pool_full);
264 return NULL;
265 }
266 }
267
268 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
269 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
270 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
271 /* } */
272 /* #endif */
273
274 return slot;
275 }
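/* Slot allocation is lock-free: a recycled slot is popped from the pool's
 * free list if possible, otherwise n_init is bumped atomically to hand out
 * the next never-used slot of the SHM region. Only when both fail do we
 * treat the pool as full. */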
276
277 /* No lock necessary, totally redundant anyway */
278 static inline void* mempool_slot_data(struct mempool_slot *slot) {
279 return slot;
280 }
281
282 /* No lock necessary */
283 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
284 pa_assert(p);
285
286 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
287 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
288
289 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
290 }
291
292 /* No lock necessary */
293 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
294 unsigned idx;
295
296 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
297 return NULL;
298
299 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
300 }
301
302 /* No lock necessary */
303 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
304 pa_memblock *b = NULL;
305 struct mempool_slot *slot;
306 static int mempool_disable = 0;
307
308 pa_assert(p);
309 pa_assert(length);
310
311 if (mempool_disable == 0)
312 mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
313
314 if (mempool_disable > 0)
315 return NULL;
316
317 /* If -1 is passed as length we choose the size for the caller: we
318 * take the largest size that fits in one of our slots. */
319
320 if (length == (size_t) -1)
321 length = pa_mempool_block_size_max(p);
322
323 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
324
325 if (!(slot = mempool_allocate_slot(p)))
326 return NULL;
327
328 b = mempool_slot_data(slot);
329 b->type = PA_MEMBLOCK_POOL;
330 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
331
332 } else if (p->block_size >= length) {
333
334 if (!(slot = mempool_allocate_slot(p)))
335 return NULL;
336
337 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
338 b = pa_xnew(pa_memblock, 1);
339
340 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
341 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
342
343 } else {
344 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
345 pa_atomic_inc(&p->stat.n_too_large_for_pool);
346 return NULL;
347 }
348
349 PA_REFCNT_INIT(b);
350 b->pool = p;
351 b->read_only = b->is_silence = FALSE;
352 b->length = length;
353 pa_atomic_store(&b->n_acquired, 0);
354 pa_atomic_store(&b->please_signal, 0);
355
356 stat_add(b);
357 return b;
358 }
359
360 /* No lock necessary */
361 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
362 pa_memblock *b;
363
364 pa_assert(p);
365 pa_assert(d);
366 pa_assert(length != (size_t) -1);
367 pa_assert(length);
368
369 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
370 b = pa_xnew(pa_memblock, 1);
371 PA_REFCNT_INIT(b);
372 b->pool = p;
373 b->type = PA_MEMBLOCK_FIXED;
374 b->read_only = read_only;
375 b->is_silence = FALSE;
376 pa_atomic_ptr_store(&b->data, d);
377 b->length = length;
378 pa_atomic_store(&b->n_acquired, 0);
379 pa_atomic_store(&b->please_signal, 0);
380
381 stat_add(b);
382 return b;
383 }
384
385 /* No lock necessary */
386 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
387 pa_memblock *b;
388
389 pa_assert(p);
390 pa_assert(d);
391 pa_assert(length);
392 pa_assert(length != (size_t) -1);
393 pa_assert(free_cb);
394
395 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
396 b = pa_xnew(pa_memblock, 1);
397 PA_REFCNT_INIT(b);
398 b->pool = p;
399 b->type = PA_MEMBLOCK_USER;
400 b->read_only = read_only;
401 b->is_silence = FALSE;
402 pa_atomic_ptr_store(&b->data, d);
403 b->length = length;
404 pa_atomic_store(&b->n_acquired, 0);
405 pa_atomic_store(&b->please_signal, 0);
406
407 b->per_type.user.free_cb = free_cb;
408
409 stat_add(b);
410 return b;
411 }
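/* Sketch of wrapping caller-owned memory (buffer name and size are made up):
 *
 *   uint8_t *buf = pa_xmalloc(1024);
 *   ... fill buf ...
 *   pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, TRUE);
 *
 * The free_cb (pa_xfree here) is invoked on the data pointer once the last
 * reference is dropped, see the PA_MEMBLOCK_USER case in memblock_free(). */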
412
413 /* No lock necessary */
414 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
415 pa_assert(b);
416 pa_assert(PA_REFCNT_VALUE(b) > 0);
417
418 return b->read_only && PA_REFCNT_VALUE(b) == 1;
419 }
420
421 /* No lock necessary */
422 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
423 pa_assert(b);
424 pa_assert(PA_REFCNT_VALUE(b) > 0);
425
426 return b->is_silence;
427 }
428
429 /* No lock necessary */
430 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
431 pa_assert(b);
432 pa_assert(PA_REFCNT_VALUE(b) > 0);
433
434 b->is_silence = v;
435 }
436
437 /* No lock necessary */
438 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
439 int r;
440 pa_assert(b);
441
442 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
443
444 return r == 1;
445 }
446
447 /* No lock necessary */
448 void* pa_memblock_acquire(pa_memblock *b) {
449 pa_assert(b);
450 pa_assert(PA_REFCNT_VALUE(b) > 0);
451
452 pa_atomic_inc(&b->n_acquired);
453
454 return pa_atomic_ptr_load(&b->data);
455 }
456
457 /* No lock necessary, in corner cases it takes locks on its own */
458 void pa_memblock_release(pa_memblock *b) {
459 int r;
460 pa_assert(b);
461 pa_assert(PA_REFCNT_VALUE(b) > 0);
462
463 r = pa_atomic_dec(&b->n_acquired);
464 pa_assert(r >= 1);
465
466 /* Signal a waiting thread that this memblock is no longer used */
467 if (r == 1 && pa_atomic_load(&b->please_signal))
468 pa_semaphore_post(b->pool->semaphore);
469 }
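/* acquire()/release() do not take any lock, they merely count how many
 * threads currently hold the data pointer. memblock_wait() further down uses
 * this counter together with please_signal and the pool semaphore to block
 * until all outstanding acquires have been released; memblock_make_local()
 * relies on that when a block's data is being migrated away from memory that
 * is about to go away. */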
470
471 size_t pa_memblock_get_length(pa_memblock *b) {
472 pa_assert(b);
473 pa_assert(PA_REFCNT_VALUE(b) > 0);
474
475 return b->length;
476 }
477
478 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
479 pa_assert(b);
480 pa_assert(PA_REFCNT_VALUE(b) > 0);
481
482 return b->pool;
483 }
484
485 /* No lock necessary */
486 pa_memblock* pa_memblock_ref(pa_memblock*b) {
487 pa_assert(b);
488 pa_assert(PA_REFCNT_VALUE(b) > 0);
489
490 PA_REFCNT_INC(b);
491 return b;
492 }
493
494 static void memblock_free(pa_memblock *b) {
495 pa_assert(b);
496
497 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
498
499 stat_remove(b);
500
501 switch (b->type) {
502 case PA_MEMBLOCK_USER :
503 pa_assert(b->per_type.user.free_cb);
504 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
505
506 /* Fall through */
507
508 case PA_MEMBLOCK_FIXED:
509 case PA_MEMBLOCK_APPENDED :
510 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
511 pa_xfree(b);
512
513 break;
514
515 case PA_MEMBLOCK_IMPORTED : {
516 pa_memimport_segment *segment;
517 pa_memimport *import;
518
519 /* FIXME! This should be implemented lock-free */
520
521 pa_assert_se(segment = b->per_type.imported.segment);
522 pa_assert_se(import = segment->import);
523
524 pa_mutex_lock(import->mutex);
525
526 pa_assert_se(pa_hashmap_remove(
527 import->blocks,
528 PA_UINT32_TO_PTR(b->per_type.imported.id)));
529
530 pa_assert(segment->n_blocks >= 1);
531 if (-- segment->n_blocks <= 0)
532 segment_detach(segment);
533
534 pa_mutex_unlock(import->mutex);
535
536 import->release_cb(import, b->per_type.imported.id, import->userdata);
537
538 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
539 pa_xfree(b);
540
541 break;
542 }
543
544 case PA_MEMBLOCK_POOL_EXTERNAL:
545 case PA_MEMBLOCK_POOL: {
546 struct mempool_slot *slot;
547 pa_bool_t call_free;
548
549 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
550 pa_assert(slot);
551
552 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
553
554 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
555 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
556 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
557 /* } */
558 /* #endif */
559
560 /* The free list dimensions should easily allow all slots
561 * to fit in, hence try harder if pushing this slot into
562 * the free list fails */
563 while (pa_flist_push(b->pool->free_slots, slot) < 0)
564 ;
565
566 if (call_free)
567 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
568 pa_xfree(b);
569
570 break;
571 }
572
573 case PA_MEMBLOCK_TYPE_MAX:
574 default:
575 pa_assert_not_reached();
576 }
577 }
578
579 /* No lock necessary */
580 void pa_memblock_unref(pa_memblock*b) {
581 pa_assert(b);
582 pa_assert(PA_REFCNT_VALUE(b) > 0);
583
584 if (PA_REFCNT_DEC(b) > 0)
585 return;
586
587 memblock_free(b);
588 }
589
590 /* Self locked */
591 static void memblock_wait(pa_memblock *b) {
592 pa_assert(b);
593
594 if (pa_atomic_load(&b->n_acquired) > 0) {
595 /* We need to wait until all threads gave up access to the
596 * memory block before we can go on. Unfortunately this means
597 * that we have to lock and wait here. Sniff! */
598
599 pa_atomic_inc(&b->please_signal);
600
601 while (pa_atomic_load(&b->n_acquired) > 0)
602 pa_semaphore_wait(b->pool->semaphore);
603
604 pa_atomic_dec(&b->please_signal);
605 }
606 }
607
608 /* No lock necessary. This function is not multiple caller safe! */
609 static void memblock_make_local(pa_memblock *b) {
610 pa_assert(b);
611
612 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
613
614 if (b->length <= b->pool->block_size) {
615 struct mempool_slot *slot;
616
617 if ((slot = mempool_allocate_slot(b->pool))) {
618 void *new_data;
619 /* We can move it into a local pool, perfect! */
620
621 new_data = mempool_slot_data(slot);
622 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
623 pa_atomic_ptr_store(&b->data, new_data);
624
625 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
626 b->read_only = FALSE;
627
628 goto finish;
629 }
630 }
631
632 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
633 b->per_type.user.free_cb = pa_xfree;
634 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
635
636 b->type = PA_MEMBLOCK_USER;
637 b->read_only = FALSE;
638
639 finish:
640 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
641 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
642 memblock_wait(b);
643 }
644
645 /* No lock necessary. This function is not multiple-caller safe */
646 void pa_memblock_unref_fixed(pa_memblock *b) {
647 pa_assert(b);
648 pa_assert(PA_REFCNT_VALUE(b) > 0);
649 pa_assert(b->type == PA_MEMBLOCK_FIXED);
650
651 if (PA_REFCNT_VALUE(b) > 1)
652 memblock_make_local(b);
653
654 pa_memblock_unref(b);
655 }
656
657 /* No lock necessary. */
658 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
659 void *p;
660
661 pa_assert(b);
662 pa_assert(PA_REFCNT_VALUE(b) > 0);
663
664 p = pa_memblock_acquire(b);
665 pa_will_need(p, b->length);
666 pa_memblock_release(b);
667
668 return b;
669 }
670
671 /* Self-locked. This function is not multiple-caller safe */
672 static void memblock_replace_import(pa_memblock *b) {
673 pa_memimport_segment *segment;
674 pa_memimport *import;
675
676 pa_assert(b);
677 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
678
679 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
680 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
681 pa_atomic_dec(&b->pool->stat.n_imported);
682 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
683
684 pa_assert_se(segment = b->per_type.imported.segment);
685 pa_assert_se(import = segment->import);
686
687 pa_mutex_lock(import->mutex);
688
689 pa_assert_se(pa_hashmap_remove(
690 import->blocks,
691 PA_UINT32_TO_PTR(b->per_type.imported.id)));
692
693 memblock_make_local(b);
694
695 pa_assert(segment->n_blocks >= 1);
696 if (-- segment->n_blocks <= 0)
697 segment_detach(segment);
698
699 pa_mutex_unlock(import->mutex);
700 }
701
702 pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
703 pa_mempool *p;
704 char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
705
706 p = pa_xnew(pa_mempool, 1);
707
708 p->mutex = pa_mutex_new(TRUE, TRUE);
709 p->semaphore = pa_semaphore_new(0);
710
711 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
712 if (p->block_size < PA_PAGE_SIZE)
713 p->block_size = PA_PAGE_SIZE;
714
715 if (size <= 0)
716 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
717 else {
718 p->n_blocks = (unsigned) (size / p->block_size);
719
720 if (p->n_blocks < 2)
721 p->n_blocks = 2;
722 }
723
724 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
725 pa_xfree(p);
726 return NULL;
727 }
728
729 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
730 p->memory.shared ? "shared" : "private",
731 p->n_blocks,
732 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
733 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
734 (unsigned long) pa_mempool_block_size_max(p));
735
736 memset(&p->stat, 0, sizeof(p->stat));
737 pa_atomic_store(&p->n_init, 0);
738
739 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
740 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
741
742 p->free_slots = pa_flist_new(p->n_blocks);
743
744 return p;
745 }
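/* Example of setting up a shared pool (sketch only, error handling omitted;
 * passing 0 as size selects the default of PA_MEMPOOL_SLOTS_MAX slots):
 *
 *   pa_mempool *pool = pa_mempool_new(TRUE, 0);
 *   uint32_t shm_id;
 *   if (pa_mempool_get_shm_id(pool, &shm_id) == 0)
 *       ... announce shm_id to the peer so that it can attach the segment ...
 *   ...
 *   pa_mempool_free(pool);
 */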
746
747 void pa_mempool_free(pa_mempool *p) {
748 pa_assert(p);
749
750 pa_mutex_lock(p->mutex);
751
752 while (p->imports)
753 pa_memimport_free(p->imports);
754
755 while (p->exports)
756 pa_memexport_free(p->exports);
757
758 pa_mutex_unlock(p->mutex);
759
760 pa_flist_free(p->free_slots, NULL);
761
762 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
763
764 /* Ouch, somebody is retaining a memory block reference! */
765
766 #ifdef DEBUG_REF
767 unsigned i;
768 pa_flist *list;
769
770 /* Let's try to find at least one of those leaked memory blocks */
771
772 list = pa_flist_new(p->n_blocks);
773
774 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
775 struct mempool_slot *slot;
776 pa_memblock *b, *k;
777
778 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
779 b = mempool_slot_data(slot);
780
781 while ((k = pa_flist_pop(p->free_slots))) {
782 while (pa_flist_push(list, k) < 0)
783 ;
784
785 if (b == k)
786 break;
787 }
788
789 if (!k)
790 pa_log("REF: Leaked memory block %p", b);
791
792 while ((k = pa_flist_pop(list)))
793 while (pa_flist_push(p->free_slots, k) < 0)
794 ;
795 }
796
797 pa_flist_free(list, NULL);
798
799 #endif
800
801 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
802
803 /* PA_DEBUG_TRAP; */
804 }
805
806 pa_shm_free(&p->memory);
807
808 pa_mutex_free(p->mutex);
809 pa_semaphore_free(p->semaphore);
810
811 pa_xfree(p);
812 }
813
814 /* No lock necessary */
815 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
816 pa_assert(p);
817
818 return &p->stat;
819 }
820
821 /* No lock necessary */
822 size_t pa_mempool_block_size_max(pa_mempool *p) {
823 pa_assert(p);
824
825 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
826 }
827
828 /* No lock necessary */
829 void pa_mempool_vacuum(pa_mempool *p) {
830 struct mempool_slot *slot;
831 pa_flist *list;
832
833 pa_assert(p);
834
835 list = pa_flist_new(p->n_blocks);
836
837 while ((slot = pa_flist_pop(p->free_slots)))
838 while (pa_flist_push(list, slot) < 0)
839 ;
840
841 while ((slot = pa_flist_pop(list))) {
842 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
843
844 while (pa_flist_push(p->free_slots, slot) < 0)
845 ;
846 }
847
848 pa_flist_free(list, NULL);
849 }
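/* Vacuuming temporarily drains the free list so that pa_shm_punch() can give
 * the pages backing every unused slot back to the OS; the slots themselves
 * remain usable and are pushed back onto the free list afterwards. */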
850
851 /* No lock necessary */
852 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
853 pa_assert(p);
854
855 if (!p->memory.shared)
856 return -1;
857
858 *id = p->memory.id;
859
860 return 0;
861 }
862
863 /* No lock necessary */
864 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
865 pa_assert(p);
866
867 return !!p->memory.shared;
868 }
869
870 /* For receiving blocks from other nodes */
871 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
872 pa_memimport *i;
873
874 pa_assert(p);
875 pa_assert(cb);
876
877 i = pa_xnew(pa_memimport, 1);
878 i->mutex = pa_mutex_new(TRUE, TRUE);
879 i->pool = p;
880 i->segments = pa_hashmap_new(NULL, NULL);
881 i->blocks = pa_hashmap_new(NULL, NULL);
882 i->release_cb = cb;
883 i->userdata = userdata;
884
885 pa_mutex_lock(p->mutex);
886 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
887 pa_mutex_unlock(p->mutex);
888
889 return i;
890 }
891
892 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
893
894 /* Should be called locked */
895 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
896 pa_memimport_segment* seg;
897
898 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
899 return NULL;
900
901 seg = pa_xnew0(pa_memimport_segment, 1);
902
903 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
904 pa_xfree(seg);
905 return NULL;
906 }
907
908 seg->import = i;
909 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
910
911 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
912 return seg;
913 }
914
915 /* Should be called locked */
916 static void segment_detach(pa_memimport_segment *seg) {
917 pa_assert(seg);
918
919 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
920 pa_shm_free(&seg->memory);
921
922 if (seg->trap)
923 pa_memtrap_remove(seg->trap);
924
925 pa_xfree(seg);
926 }
927
928 /* Self-locked. Not multiple-caller safe */
929 void pa_memimport_free(pa_memimport *i) {
930 pa_memexport *e;
931 pa_memblock *b;
932
933 pa_assert(i);
934
935 pa_mutex_lock(i->mutex);
936
937 while ((b = pa_hashmap_first(i->blocks)))
938 memblock_replace_import(b);
939
940 pa_assert(pa_hashmap_size(i->segments) == 0);
941
942 pa_mutex_unlock(i->mutex);
943
944 pa_mutex_lock(i->pool->mutex);
945
946 /* If we've further exported any of this import's blocks, we need to revoke those exports */
947 for (e = i->pool->exports; e; e = e->next)
948 memexport_revoke_blocks(e, i);
949
950 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
951
952 pa_mutex_unlock(i->pool->mutex);
953
954 pa_hashmap_free(i->blocks, NULL, NULL);
955 pa_hashmap_free(i->segments, NULL, NULL);
956
957 pa_mutex_free(i->mutex);
958
959 pa_xfree(i);
960 }
961
962 /* Self-locked */
963 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
964 pa_memblock *b = NULL;
965 pa_memimport_segment *seg;
966
967 pa_assert(i);
968
969 pa_mutex_lock(i->mutex);
970
971 if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
972 pa_memblock_ref(b);
973 goto finish;
974 }
975
976 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
977 goto finish;
978
979 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
980 if (!(seg = segment_attach(i, shm_id)))
981 goto finish;
982
983 if (offset+size > seg->memory.size)
984 goto finish;
985
986 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
987 b = pa_xnew(pa_memblock, 1);
988
989 PA_REFCNT_INIT(b);
990 b->pool = i->pool;
991 b->type = PA_MEMBLOCK_IMPORTED;
992 b->read_only = TRUE;
993 b->is_silence = FALSE;
994 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
995 b->length = size;
996 pa_atomic_store(&b->n_acquired, 0);
997 pa_atomic_store(&b->please_signal, 0);
998 b->per_type.imported.id = block_id;
999 b->per_type.imported.segment = seg;
1000
1001 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1002
1003 seg->n_blocks++;
1004
1005 stat_add(b);
1006
1007 finish:
1008 pa_mutex_unlock(i->mutex);
1009
1010 return b;
1011 }
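/* On the receiving side the wire protocol hands us (block_id, shm_id, offset,
 * size); turning that into a local block looks roughly like this (sketch only,
 * the surrounding message handling is hypothetical):
 *
 *   pa_memblock *b = pa_memimport_get(import, block_id, shm_id, offset, size);
 *   if (b) {
 *       ... use the block read-only ...
 *       pa_memblock_unref(b);
 *   }
 *
 * The import keeps the block in its hashmap until the final unref, at which
 * point release_cb() notifies the exporting side (see memblock_free()). */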
1012
1013 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1014 pa_memblock *b;
1015 int ret = 0;
1016 pa_assert(i);
1017
1018 pa_mutex_lock(i->mutex);
1019
1020 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1021 ret = -1;
1022 goto finish;
1023 }
1024
1025 memblock_replace_import(b);
1026
1027 finish:
1028 pa_mutex_unlock(i->mutex);
1029
1030 return ret;
1031 }
1032
1033 /* For sending blocks to other nodes */
1034 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1035 pa_memexport *e;
1036
1037 pa_assert(p);
1038 pa_assert(cb);
1039
1040 if (!p->memory.shared)
1041 return NULL;
1042
1043 e = pa_xnew(pa_memexport, 1);
1044 e->mutex = pa_mutex_new(TRUE, TRUE);
1045 e->pool = p;
1046 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1047 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1048 e->n_init = 0;
1049 e->revoke_cb = cb;
1050 e->userdata = userdata;
1051
1052 pa_mutex_lock(p->mutex);
1053 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1054 pa_mutex_unlock(p->mutex);
1055 return e;
1056 }
1057
1058 void pa_memexport_free(pa_memexport *e) {
1059 pa_assert(e);
1060
1061 pa_mutex_lock(e->mutex);
1062 while (e->used_slots)
1063 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
1064 pa_mutex_unlock(e->mutex);
1065
1066 pa_mutex_lock(e->pool->mutex);
1067 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1068 pa_mutex_unlock(e->pool->mutex);
1069
1070 pa_mutex_free(e->mutex);
1071 pa_xfree(e);
1072 }
1073
1074 /* Self-locked */
1075 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1076 pa_memblock *b;
1077
1078 pa_assert(e);
1079
1080 pa_mutex_lock(e->mutex);
1081
1082 if (id >= e->n_init)
1083 goto fail;
1084
1085 if (!e->slots[id].block)
1086 goto fail;
1087
1088 b = e->slots[id].block;
1089 e->slots[id].block = NULL;
1090
1091 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1092 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1093
1094 pa_mutex_unlock(e->mutex);
1095
1096 /* pa_log("Processing release for %u", id); */
1097
1098 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1099 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1100
1101 pa_atomic_dec(&e->pool->stat.n_exported);
1102 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1103
1104 pa_memblock_unref(b);
1105
1106 return 0;
1107
1108 fail:
1109 pa_mutex_unlock(e->mutex);
1110
1111 return -1;
1112 }
1113
1114 /* Self-locked */
1115 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1116 struct memexport_slot *slot, *next;
1117 pa_assert(e);
1118 pa_assert(i);
1119
1120 pa_mutex_lock(e->mutex);
1121
1122 for (slot = e->used_slots; slot; slot = next) {
1123 uint32_t idx;
1124 next = slot->next;
1125
1126 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1127 slot->block->per_type.imported.segment->import != i)
1128 continue;
1129
1130 idx = (uint32_t) (slot - e->slots);
1131 e->revoke_cb(e, idx, e->userdata);
1132 pa_memexport_process_release(e, idx);
1133 }
1134
1135 pa_mutex_unlock(e->mutex);
1136 }
1137
1138 /* No lock necessary */
1139 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1140 pa_memblock *n;
1141
1142 pa_assert(p);
1143 pa_assert(b);
1144
1145 if (b->type == PA_MEMBLOCK_IMPORTED ||
1146 b->type == PA_MEMBLOCK_POOL ||
1147 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1148 pa_assert(b->pool == p);
1149 return pa_memblock_ref(b);
1150 }
1151
1152 if (!(n = pa_memblock_new_pool(p, b->length)))
1153 return NULL;
1154
1155 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1156 return n;
1157 }
1158
1159 /* Self-locked */
1160 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
1161 pa_shm *memory;
1162 struct memexport_slot *slot;
1163 void *data;
1164
1165 pa_assert(e);
1166 pa_assert(b);
1167 pa_assert(block_id);
1168 pa_assert(shm_id);
1169 pa_assert(offset);
1170 pa_assert(size);
1171 pa_assert(b->pool == e->pool);
1172
1173 if (!(b = memblock_shared_copy(e->pool, b)))
1174 return -1;
1175
1176 pa_mutex_lock(e->mutex);
1177
1178 if (e->free_slots) {
1179 slot = e->free_slots;
1180 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1181 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1182 slot = &e->slots[e->n_init++];
1183 else {
1184 pa_mutex_unlock(e->mutex);
1185 pa_memblock_unref(b);
1186 return -1;
1187 }
1188
1189 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1190 slot->block = b;
1191 *block_id = (uint32_t) (slot - e->slots);
1192
1193 pa_mutex_unlock(e->mutex);
1194 /* pa_log("Got block id %u", *block_id); */
1195
1196 data = pa_memblock_acquire(b);
1197
1198 if (b->type == PA_MEMBLOCK_IMPORTED) {
1199 pa_assert(b->per_type.imported.segment);
1200 memory = &b->per_type.imported.segment->memory;
1201 } else {
1202 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1203 pa_assert(b->pool);
1204 memory = &b->pool->memory;
1205 }
1206
1207 pa_assert(data >= memory->ptr);
1208 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1209
1210 *shm_id = memory->id;
1211 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1212 *size = b->length;
1213
1214 pa_memblock_release(b);
1215
1216 pa_atomic_inc(&e->pool->stat.n_exported);
1217 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1218
1219 return 0;
1220 }
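/* Sending-side sketch (how the four values travel to the peer is up to the
 * caller, e.g. the pstream code elsewhere in PulseAudio):
 *
 *   uint32_t block_id, shm_id;
 *   size_t offset, size;
 *   if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) == 0)
 *       ... transmit block_id/shm_id/offset/size to the peer ...
 *
 * Once the peer reports that it no longer needs the block, the caller is
 * expected to call pa_memexport_process_release(exp, block_id) so that the
 * reference held by the export slot is dropped. */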