/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
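
/*
 * Illustratively, the defaults above give a pool of 1024 slots of 64 KiB
 * each, i.e. the 64 MB of SHM mentioned in the comment; the largest
 * allocation a single slot can back is 64 KiB minus the aligned
 * pa_memblock header (see pa_mempool_block_size_max() below).
 */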

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;
        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit())
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
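
/*
 * Note on the path above: slot allocation is lock-free. pa_atomic_inc()
 * on p->n_init hands each racing thread a distinct slot index; a thread
 * that draws an index beyond p->n_blocks undoes its increment and takes
 * the "Pool full" branch instead of touching memory it does not own.
 */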

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
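
/*
 * Illustrative sketch: wrapping a heap buffer so the pool machinery
 * frees it through the supplied callback once the last reference is
 * dropped (buffer and pool names hypothetical):
 *
 *     uint8_t *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);   (* memblock_free() ends up calling pa_xfree(buf)
 *                                via per_type.user.free_cb *)
 */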

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(
                                 import->blocks,
                                 PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
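
/*
 * Sketch of the handshake memblock_wait() relies on: a concurrent reader
 * brackets its access with pa_memblock_acquire()/pa_memblock_release().
 * Once please_signal is set, the release that drops n_acquired to zero
 * posts b->pool->semaphore (see pa_memblock_release() above), waking the
 * waiter here.
 */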

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
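
/*
 * Illustrative sketch of why the copy above matters, assuming a caller
 * that wraps a transient buffer (names hypothetical):
 *
 *     pa_memblock *b = pa_memblock_new_fixed(pool, scratch, n, TRUE);
 *     ... hand b to code that may take additional references ...
 *     pa_memblock_unref_fixed(b);  (* copies the data out if others still hold b *)
 *
 * After this returns, "scratch" may be reused or freed even if other
 * references to b are still alive.
 */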

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(
                         import->blocks,
                         PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
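
/*
 * Illustrative sketch: a shared pool with the default geometry (passing
 * 0 as size selects PA_MEMPOOL_SLOTS_MAX slots):
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);
 *     ...
 *     pa_mempool_free(pool);
 */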

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
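
/*
 * The intent of pa_shm_punch() above is to give the backing pages of
 * currently unused slots back to the kernel, so a vacuumed pool keeps
 * its mapping and address range but can shrink its resident size until
 * the slots are written to again.
 */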

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset + size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    seg->n_blocks++;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
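
/*
 * Note: the block handed out above is marked read-only and points
 * directly into the attached SHM segment; no data is copied. The
 * segment stays mapped until its n_blocks counter drops to zero in
 * memblock_free() and segment_detach() runs.
 */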

/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;
    void *data;
    pa_shm *memory;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}