/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdlib.h>
#include <string.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/mutex.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
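
/* For illustration: with the values above the pool can hold at most
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB.
 * (pa_mempool_new() below rounds the slot size up to the page size, so
 * on systems with pages larger than 64 KiB the pool would be bigger.) */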
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
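
/* An illustrative sketch (not used anywhere in this file) of the typical
 * life cycle of a pool-allocated block, using only the public calls
 * defined here; error handling is omitted for brevity:
 *
 *   pa_mempool *pool = pa_mempool_new(FALSE);      // private, non-shared pool
 *   pa_memblock *block = pa_memblock_new(pool, 4096);
 *
 *   void *data = pa_memblock_acquire(block);       // pin the data pointer
 *   memset(data, 0, pa_memblock_get_length(block));
 *   pa_memblock_release(block);                    // undo the acquire
 *
 *   pa_memblock_unref(block);                      // last unref frees the block
 *   pa_mempool_free(pool);
 */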
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

#ifdef HAVE_VALGRIND_MEMCHECK_H
    VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0);
#endif

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
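
/* An illustrative sketch of a PA_MEMBLOCK_USER block: the caller keeps
 * ownership of the buffer and hands the pool a callback for freeing it.
 * The buffer name is hypothetical; the pa_* calls are the ones defined
 * in this file:
 *
 *   uint8_t *buf = pa_xmalloc(1024);
 *   pa_memblock *block = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *   ...
 *   pa_memblock_unref(block);   // the last unref invokes pa_xfree(buf)
 */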
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
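
/* The acquire/release pair above is a usage count, not a lock: every
 * access to the data pointer is bracketed, and destruction paths
 * (memblock_wait() below) block until the count drops to zero. A minimal
 * sketch of the expected discipline, where do_something() is a
 * hypothetical stand-in for the caller's code:
 *
 *   void *data = pa_memblock_acquire(block);
 *   do_something(data, pa_memblock_get_length(block));
 *   pa_memblock_release(block);   // never keep `data` past this point
 */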
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size);
#endif

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
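
/* Illustrative only: a pool created with shared = TRUE is backed by a
 * shared memory segment (via pa_shm_create_rw() above) and can therefore
 * back pa_memexport/pa_memimport, while a non-shared pool stays purely
 * process-local:
 *
 *   pa_mempool *shared_pool = pa_mempool_new(TRUE);
 *   pa_assert(pa_mempool_is_shared(shared_pool));
 *   ...
 *   pa_mempool_free(shared_pool);
 */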
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset + size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
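
/* An illustrative receiving-side sketch: release_cb, block_id, shm_id,
 * offset and size are hypothetical names; the callback arity matches how
 * release_cb is invoked elsewhere in this file. The IDs normally arrive
 * over the wire from the exporting process:
 *
 *   static void release_cb(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *       // tell the exporting side it may now release block_id
 *   }
 *
 *   pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *   pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */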
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    slot->block = b;
    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);

    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
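
/* To close the loop, an illustrative sending-side counterpart to the
 * import sketch further above. Everything except the pa_* calls is
 * hypothetical; the four values filled in by pa_memexport_put() are what
 * a real transport would ship to the receiving process:
 *
 *   uint32_t block_id, shm_id;
 *   size_t offset, size;
 *
 *   pa_memexport *exp = pa_memexport_new(shared_pool, revoke_cb, NULL);
 *   if (pa_memexport_put(exp, block, &block_id, &shm_id, &offset, &size) >= 0)
 *       send_to_peer(block_id, shm_id, offset, size);   // hypothetical transport
 */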
);