/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <string.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
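/* Life cycle of a block: PA_REFCNT_DECLARE counts logical references,
 * while n_acquired counts threads that currently hold the data pointer
 * via pa_memblock_acquire(). A block is destroyed only when the
 * reference counter hits zero, and please_signal lets a destroying
 * thread wait (see memblock_wait()) until every acquirer has called
 * pa_memblock_release(). */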
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};
struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
     * which we in turn exported to another client dies and we need to
     * revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
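/* Note that free_slots is a lock-free free list (pa_flist), so slot
 * allocation and release never have to take the pool mutex; the mutex
 * only serializes access to the imports/exports lists. */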
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
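/* Illustrative usage sketch (not part of the original file): allocate a
 * block, map its data, then drop both the mapping and the reference.
 * All functions used below are defined in this file.
 *
 *   pa_mempool *pool = pa_mempool_new(TRUE, 0);   // shared pool, default size
 *   pa_memblock *b = pa_memblock_new(pool, 1024);
 *   void *d = pa_memblock_acquire(b);
 *   memset(d, 0, pa_memblock_get_length(b));      // fill the block
 *   pa_memblock_release(b);                       // done touching the data
 *   pa_memblock_unref(b);                         // drop the reference
 *   pa_mempool_free(pool);
 */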
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
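/* A FIXED block wraps memory owned by the caller; this file never
 * frees the supplied pointer d. If the caller wants to reclaim d while
 * other references may still be alive, it must release the block with
 * pa_memblock_unref_fixed() (see below), which copies the data out
 * first. */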
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
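/* Illustrative sketch (not part of the original file): wrapping a heap
 * buffer as a USER block. memblock_free() will invoke the supplied
 * free_cb once the last reference is dropped.
 *
 *   void *buf = pa_xmalloc(4096);
 *   pa_memblock *b = pa_memblock_new_user(pool, buf, 4096, pa_xfree, FALSE);
 *   ...
 *   pa_memblock_unref(b);  // eventually calls pa_xfree(buf)
 */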
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_hashmap_remove(import->blocks,
                              PA_UINT32_TO_PTR(b->per_type.imported.id));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
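/* memblock_make_local() below detaches a block from memory we do not
 * control: the data is copied into a pool slot if one is available,
 * otherwise onto the heap, and the block type is rewritten
 * accordingly. It is used by pa_memblock_unref_fixed() and by
 * memblock_replace_import() when an imported block is revoked. */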
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;

            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_hashmap_remove(import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[64], t2[64];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    b = NULL;
            }

            if (b)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);
#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
855 pa_bool_t
pa_mempool_is_shared(pa_mempool
*p
) {
858 return !!p
->memory
.shared
;
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
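/* Import flow: pa_memimport_get() attaches the sender's SHM segment
 * (at most once per shm_id) and wraps a region of it as a
 * PA_MEMBLOCK_IMPORTED block; pa_memimport_process_revoke() is called
 * when the sender takes a block back, at which point
 * memblock_replace_import() copies the data out. */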
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
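/* Export flow: pa_memexport_put() pins a block into a slot and yields
 * the (block_id, shm_id, offset, size) tuple that is sent to the peer;
 * pa_memexport_process_release() frees the slot once the peer is done,
 * and memexport_revoke_blocks() revokes any exported blocks that were
 * imported from a dying connection. */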
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
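/* pa_memexport_put() below relies on memblock_shared_copy(): blocks
 * that do not already live in the shared pool (USER, FIXED, APPENDED)
 * are copied into it first, so exported data is always backed by a
 * shareable segment. */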
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}