/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/refcnt.h>
#include <pulsecore/llist.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

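/* Lifecycle sketch, inferred from the accessors below: a block starts
 * with PA_REFCNT_VALUE == 1; pa_memblock_ref()/pa_memblock_unref()
 * manage its lifetime, while pa_memblock_acquire()/pa_memblock_release()
 * bracket each access to the payload so memblock_wait() can block until
 * no thread still holds the data pointer. */
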
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

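/* Layout note: p->memory is a single contiguous SHM region carved into
 * p->n_blocks slots of p->block_size bytes each; slot i starts at
 * (uint8_t*) p->memory.ptr + i * p->block_size, which is what
 * mempool_slot_idx() and mempool_slot_by_ptr() below rely on. */
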
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

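/* Usage sketch with a hypothetical caller: allocate from the pool and
 * fall back transparently to a malloc()ed "appended" block when the
 * pool is full or the request is too large:
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     ... fill d with at most 4096 bytes ...
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */
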
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit())
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

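/* Design note: slot allocation is lock-free. p->n_init only ever grows
 * towards p->n_blocks, so each successful pa_atomic_inc() hands out a
 * distinct, never-before-used slot index; recycled slots come back
 * through the lock-free free list p->free_slots instead. */
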
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

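/* Design note: PA_MEMBLOCK_POOL keeps the pa_memblock header inside the
 * slot itself, directly in front of the payload; PA_MEMBLOCK_POOL_EXTERNAL
 * is used when the payload alone still fits a slot but header plus
 * payload would not, so the header is allocated separately. */
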
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

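/* Usage sketch with a hypothetical caller: wrap memory we already own
 * and have the pool call our free callback once the last reference is
 * dropped:
 *
 *     void *d = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, d, 1024, pa_xfree, FALSE);
 */
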
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks by its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

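/* Concurrency note: n_acquired counts the threads currently holding the
 * data pointer. pa_atomic_dec() returns the previous value, so r == 1
 * above means we were the last holder; if memblock_wait() set
 * please_signal in the meantime, we wake it through the pool-wide
 * semaphore. */
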
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

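/* Design note: the PA_MEMBLOCK_USER case above deliberately falls
 * through to the FIXED/APPENDED case: once the user callback has freed
 * the payload, the pa_memblock header itself is recycled through the
 * static unused_memblocks free list, or freed if that list is full. */
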
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

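/* Design note: memblock_make_local() is what keeps data alive when its
 * original owner goes away: the payload is copied into a pool slot if
 * it fits, otherwise into a private malloc()ed buffer owned by the
 * block itself (PA_MEMBLOCK_USER with pa_xfree as free_cb). */
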
/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[64], t2[64];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}

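/* Worked example: with 4 KiB pages, PA_PAGE_ALIGN(64*1024) stays at
 * 64 KiB, so the default pool is 1024 slots * 64 KiB = 64 MiB, and
 * pa_mempool_block_size_max() reports 64 KiB minus the aligned
 * pa_memblock header. */
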
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        unsigned i;
        pa_flist *list;

        /* Ouch, somebody is retaining a memory block reference! */

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    b = NULL;
            }

            if (b)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

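/* Design note: vacuuming hands the pages of every currently free slot
 * back to the kernel via pa_shm_punch(), so an idle pool shrinks its
 * resident set without giving up any address space; the slots stay
 * valid and are simply recommitted on next use. */
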
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);
    pa_assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}

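/* Flow sketch: a peer announces (block_id, shm_id, offset, size) for a
 * block it exported. pa_memimport_get() attaches the peer's SHM segment
 * read-only on first use and then hands out a PA_MEMBLOCK_IMPORTED
 * block pointing straight into that mapping, so no audio data is
 * copied. */
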
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}

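/* Flow sketch: pa_memexport_put() pins the block (first copying it into
 * the shared pool if it lives in private memory), parks it in a slot so
 * it stays referenced until the peer sends a release message, and
 * returns the (block_id, shm_id, offset, size) tuple the peer needs for
 * pa_memimport_get() on its side. */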