/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <string.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/mutex.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)
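
/* To make the arithmetic above concrete (an illustrative note, not part
 * of the original sources): the pool can span at most
 *
 *     PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB
 *
 * of address space, while the resident footprint only grows with the
 * slots that are actually touched. */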
#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
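
/* Layout sketch (illustrative, derived from memblock_new_appended()
 * below): a PA_MEMBLOCK_APPENDED block keeps the header and the payload
 * in a single allocation, with `data` pointing just past the aligned
 * header:
 *
 *     +-----------------------+--------------------------+
 *     | pa_memblock (aligned) | payload (`length` bytes) |
 *     +-----------------------+--------------------------+
 *     ^ b                     ^ (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock))
 */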
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};
struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;
    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
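
/* Usage sketch (illustrative, not part of the original file): callers
 * either request an explicit size, or pass (size_t) -1 to let the pool
 * pick the largest size that still fits a slot; both calls fall back to
 * a malloc()-backed PA_MEMBLOCK_APPENDED block if no slot can be used:
 *
 *     pa_memblock *b1 = pa_memblock_new(pool, 4096);
 *     pa_memblock *b2 = pa_memblock_new(pool, (size_t) -1);
 */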
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
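
/* Usage sketch (illustrative): hand over a heap buffer together with
 * the routine that should eventually free it; memblock_free() below
 * invokes the callback on the data pointer once the last reference is
 * gone:
 *
 *     void *buf = pa_xmalloc(4096);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 4096, pa_xfree, FALSE);
 */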
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
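
/* Access discipline (illustrative): every pa_memblock_acquire() must be
 * paired with a pa_memblock_release(), because memblock_wait() below
 * blocks until n_acquired drops back to zero:
 *
 *     void *d = pa_memblock_acquire(b);
 *     ... read or write the data at d ...
 *     pa_memblock_release(b);
 */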
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
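
/* Usage sketch (illustrative): create a pool backed by shared memory,
 * query the largest block it can hand out, and tear it down again:
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE);
 *     if (pool) {
 *         size_t max_block = pa_mempool_block_size_max(pool);
 *         ... allocate and free blocks ...
 *         pa_mempool_free(pool);
 *     }
 */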
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);
    pa_assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
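
/* Usage sketch (illustrative; my_release_cb is a hypothetical callback,
 * not defined in this file): the importer is notified through the
 * release callback once an imported block is no longer referenced
 * locally, so the exporting node can recycle it:
 *
 *     static void my_release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
 *         ... tell the exporting node that block_id may be reused ...
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, my_release_cb, NULL);
 */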
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
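
/* Usage sketch (illustrative; my_revoke_cb is hypothetical): the pool
 * must be shared, and the revoke callback fires when an exported block
 * has to be recalled, e.g. because the client we imported it from died:
 *
 *     static void my_revoke_cb(pa_memexport *e, uint32_t block_id, void *userdata) {
 *         ... tell the peer to stop using block_id ...
 *     }
 *
 *     pa_memexport *exp = pa_memexport_new(pool, my_revoke_cb, NULL);
 */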
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;
    void *data;
    pa_shm *memory;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
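
/* Round-trip sketch (illustrative): the exporting side publishes a block
 * and transmits the four identifiers out of band; the importing side
 * turns them back into a local pa_memblock:
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         ... send (block_id, shm_id, offset, size) to the peer ...
 *     }
 *
 *     ... and on the peer:
 *
 *     pa_memblock *remote = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */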