/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

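/* A pa_memblock is a reference-counted chunk of memory. Depending on
 * its type it lives inside a pool slot, is appended to the struct
 * itself, wraps caller-supplied memory, or points into an SHM segment
 * imported from another process (see pa_memblock_type_t). */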
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

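/* Slot allocation strategy: first try to reuse a slot from the pool's
 * lock-free free list; if that is empty, carve a fresh slot out of the
 * SHM area by atomically bumping n_init. Slots are never returned to
 * the uninitialized region, only to the free list. */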
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit())
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

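/* Access to the data of a memblock is bracketed by
 * pa_memblock_acquire()/pa_memblock_release(). The n_acquired counter
 * lets memblock_wait() detect when no thread is using the data any
 * more, so the block can be safely relocated or freed. */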
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks by its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

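/* Tears a memblock down once its reference count has dropped to zero:
 * user-supplied memory is handed to its free_cb, imported blocks are
 * unregistered from their import (and the exporting side is notified
 * via release_cb), and pool slots are pushed back onto the free list. */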
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(
                                 import->blocks,
                                 PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

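/* For fixed blocks the caller owns the underlying memory and is about
 * to invalidate it; if other references are still around, the data is
 * first copied out of the fixed area via memblock_make_local(). */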
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

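/* Turns an imported block into a local one: the data is copied into
 * the pool (or malloc'ed memory), the block is dropped from the
 * import's hashmap and its segment is detached if it was the last
 * user. Called when an import goes away or the peer revokes a block. */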
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(
                         import->blocks,
                         PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[64], t2[64];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

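/* "Vacuuming" walks all currently free slots and calls pa_shm_punch()
 * on them so that the OS may reclaim the backing pages, while the
 * slots themselves stay available for future allocations. */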
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

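/* An import tracks SHM segments of another process that we have
 * attached read-only, plus the individual blocks handed to us from
 * those segments. Segments are attached lazily in pa_memimport_get()
 * and detached again once their last block is gone. */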
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

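/* Makes sure a block lives in memory the peer can map: blocks that are
 * already in the pool or imported are just referenced, everything else
 * is copied into a fresh pool block. */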
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

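/* Pins a reference to the block in a free export slot and fills in the
 * (block_id, shm_id, offset, size) tuple that the receiving side feeds
 * into pa_memimport_get() to map the very same memory. */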
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}