/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <string.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"
#define PA_MEMPOOL_SLOTS_MAX 512
#define PA_MEMPOOL_SLOT_SIZE (32*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
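/* Added for exposition (not in the original source): with the defaults
 * above, a pool maps n_blocks * block_size = 512 * 32 KiB = 16 MiB of
 * (optionally shared) memory, with block_size rounded up to at least
 * one page by pa_mempool_new(). */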
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only:1, is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
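/* Layout sketch (added for exposition, not part of the original source):
 * a PA_MEMBLOCK_POOL allocation packs all bookkeeping into one pool slot,
 *
 *   [struct mempool_slot][pa_memblock][payload ...]
 *    ^ PA_ALIGN'ed boundaries between all three parts
 *
 * which is why the largest pool-backed allocation is
 * block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock)),
 * exactly what pa_mempool_block_size_max() returns below. */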
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
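/* Illustrative sketch (added for exposition, not part of the original
 * source): the typical lifecycle of a memory block. The function name is
 * hypothetical; the calls are the public API defined in this file. */
#if 0
static void example_block_lifecycle(pa_mempool *pool) {
    pa_memblock *b;
    void *d;

    b = pa_memblock_new(pool, 4096);          /* pool slot if it fits, appended malloc otherwise */
    d = pa_memblock_acquire(b);               /* pin the data pointer and bump n_acquired */
    memset(d, 0, pa_memblock_get_length(b));  /* ... use the memory ... */
    pa_memblock_release(b);                   /* unpin; may signal a waiting thread */
    pa_memblock_unref(b);                     /* drop the reference; frees at zero */
}
#endif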
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    pa_assert(slot);

    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
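/* Illustrative sketch (added for exposition, not part of the original
 * source): handing a caller-allocated buffer to the memblock layer with
 * pa_xfree as the free callback, so the block takes ownership. */
#if 0
static pa_memblock *example_wrap_buffer(pa_mempool *pool, size_t n) {
    void *buf = pa_xmalloc(n);
    /* ... fill buf ... */
    return pa_memblock_new_user(pool, buf, n, pa_xfree, FALSE);
}
#endif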
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, in corner cases locks by its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
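/* Illustrative sketch (added for exposition, not part of the original
 * source): creating a pool backed by shared memory where possible,
 * falling back to a private mapping otherwise. */
#if 0
static pa_mempool *example_make_pool(void) {
    pa_mempool *pool;

    if (!(pool = pa_mempool_new(TRUE)))  /* shared mapping for zero-copy transfer */
        pool = pa_mempool_new(FALSE);    /* private mapping as fallback */

    return pool;
}
#endif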
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
/*         raise(SIGTRAP);  */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
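/* Illustrative sketch (added for exposition, not part of the original
 * source): the receiving side of a shared-memory transfer. The callback
 * name and its exact signature are assumptions; the callback is invoked
 * by memblock_free() once the imported block is no longer needed
 * locally. */
#if 0
static void example_on_release(pa_memimport *imp, uint32_t block_id, void *userdata) {
    /* tell the exporting peer that block_id can be recycled */
}

static void example_import(pa_mempool *pool, uint32_t block_id, uint32_t shm_id,
                           size_t offset, size_t size) {
    pa_memimport *imp;
    pa_memblock *b;

    imp = pa_memimport_new(pool, example_on_release, NULL);
    if ((b = pa_memimport_get(imp, block_id, shm_id, offset, size))) {
        /* ... read from the block ... */
        pa_memblock_unref(b);
    }
}
#endif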
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);

    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;
    void *data;
    pa_shm *memory;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
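/* Illustrative sketch (added for exposition, not part of the original
 * source): the sending side. pa_memexport_put() pins the block in a slot
 * and yields the (block_id, shm_id, offset, size) tuple that the peer
 * feeds into pa_memimport_get(); how the tuple travels between the two
 * processes is up to the transport. */
#if 0
static int example_export(pa_memexport *exp, pa_memblock *b) {
    uint32_t block_id, shm_id;
    size_t offset, size;

    if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) < 0)
        return -1;

    /* ... send the four values over the wire ... */
    return 0;
}
#endif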