/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64 MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
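
/* Worked example (editorial note, not from the original source): with
 * the defaults above a pool spans at most
 *
 *     PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64*1024 = 64 MiB
 *
 * of virtual address space; since the backing SHM pages are committed
 * lazily, the resident footprint only grows as slots are first touched. */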
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};
/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);
PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
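
/* Sketch of the PA_MEMBLOCK_APPENDED layout produced above (editorial
 * illustration): header and payload live in one pa_xmalloc() region,
 * so a single pa_xfree() releases both.
 *
 *     b                                  pa_atomic_ptr_load(&b->data)
 *     |                                  |
 *     +----------------------------------+------------------------+
 *     | pa_memblock header (PA_ALIGN'ed) | payload: length bytes  |
 *     +----------------------------------+------------------------+
 */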
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
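
/* Worked example (editorial, assumes the default 64 KiB block size): a
 * pointer at p->memory.ptr + 3*65536 + 100 lies in slot 3, because
 * (3*65536 + 100) / 65536 == 3 under integer division. */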
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
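
/* Summary of the three branches above (editorial note):
 *
 *   block_size >= header + length  ->  PA_MEMBLOCK_POOL: header and data
 *                                      share one slot
 *   block_size >= length           ->  PA_MEMBLOCK_POOL_EXTERNAL: data in
 *                                      a slot, header allocated separately
 *   otherwise                      ->  NULL; pa_memblock_new() then falls
 *                                      back to memblock_new_appended()
 */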
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary; in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
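
/* Typical acquire/release pattern (editorial sketch; "b" and "buf" are
 * hypothetical):
 *
 *     void *ptr = pa_memblock_acquire(b);
 *     memcpy(buf, ptr, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *
 * Each acquire must be paired with a release; an unbalanced acquire
 * would make memblock_wait() below block indefinitely. */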
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(
                                 import->blocks,
                                 PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
/* Self-locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
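
/* Post-condition sketch for memblock_make_local() (editorial note): on
 * return the block no longer references foreign memory, i.e.
 *
 *     pa_assert(b->type == PA_MEMBLOCK_POOL_EXTERNAL ||
 *               b->type == PA_MEMBLOCK_USER);
 *
 * would hold, with PA_MEMBLOCK_USER blocks now owning a pa_xfree()-able
 * heap copy of the data. */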
/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(
                         import->blocks,
                         PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[64], t2[64];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
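
/* Construction/teardown sketch (editorial illustration):
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);  // shared; 0 selects
 *                                                  // PA_MEMPOOL_SLOTS_MAX slots
 *     pa_assert(pool);
 *     ...allocate and unref memblocks...
 *     pa_mempool_free(pool);
 */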
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    b = NULL;
            }

            if (b)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);
#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
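
/* Import-side usage sketch (editorial; the callback body and the wire
 * protocol carrying the IDs are hypothetical):
 *
 *     static void release_cb(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *         // notify the exporting peer that block_id is no longer in use
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */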
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
922 void pa_memimport_free(pa_memimport
*i
) {
928 pa_mutex_lock(i
->mutex
);
930 while ((b
= pa_hashmap_first(i
->blocks
)))
931 memblock_replace_import(b
);
933 pa_assert(pa_hashmap_size(i
->segments
) == 0);
935 pa_mutex_unlock(i
->mutex
);
937 pa_mutex_lock(i
->pool
->mutex
);
939 /* If we've exported this block further we need to revoke that export */
940 for (e
= i
->pool
->exports
; e
; e
= e
->next
)
941 memexport_revoke_blocks(e
, i
);
943 PA_LLIST_REMOVE(pa_memimport
, i
->pool
->imports
, i
);
945 pa_mutex_unlock(i
->pool
->mutex
);
947 pa_hashmap_free(i
->blocks
, NULL
, NULL
);
948 pa_hashmap_free(i
->segments
, NULL
, NULL
);
950 pa_mutex_free(i
->mutex
);
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
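
/* Export-side counterpart to the import sketch above (editorial; the
 * callback is hypothetical):
 *
 *     static void revoke_cb(pa_memexport *exp, uint32_t block_id, void *userdata) {
 *         // tell the peer that block_id must no longer be accessed
 *     }
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, NULL);
 */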
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}