/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
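
/* Sizing sketch (an assumption, for orientation only; with 4 KiB pages
 * PA_PAGE_ALIGN() leaves the 16 KiB slot size unchanged): 128 slots at
 * 16 KiB per slot comes to roughly 2 MiB of shared memory per pool. */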
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    int read_only; /* boolean */

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            void (*free_cb)(void *p);
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
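
/* Illustrative sketch (not part of the original file; the function name is
 * ours): the typical life cycle of a memblock, built only from the public
 * calls defined in this file. A caller allocates a block, acquires the data
 * pointer while touching the memory, releases it again, and finally drops
 * its reference. */
#if 0
static void memblock_lifecycle_example(pa_mempool *pool) {
    pa_memblock *b = pa_memblock_new(pool, 4096); /* pool slot, or appended fallback */
    void *data = pa_memblock_acquire(b);          /* pin the data pointer */
    memset(data, 0, pa_memblock_get_length(b));
    pa_memblock_release(b);                       /* unpin; may wake a waiter */
    pa_memblock_unref(b);                         /* drop the reference */
}
#endif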
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    b->type = PA_MEMBLOCK_APPENDED;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    return b;
}
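
/* Illustrative sketch (an assumption, not part of the original file; the
 * function name is ours): wrapping a buffer we allocated ourselves, letting
 * the last unref hand it back to pa_xfree() via the free_cb set above. */
#if 0
static pa_memblock* wrap_own_buffer(pa_mempool *pool, size_t length) {
    void *buf = pa_xmalloc(length);
    return pa_memblock_new_user(pool, buf, length, pa_xfree, 0);
}
#endif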
/* No lock necessary */
int pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
int pa_memblock_ref_is_one(pa_memblock *b) {
    int r;

    r = PA_REFCNT_VALUE(b);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary; in corner cases it signals via the pool semaphore */
void pa_memblock_release(pa_memblock *b) {
    int r;

    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
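
/* Note that n_acquired is a pin count for the data pointer, independent of
 * the PA_REFCNT reference count: acquire/release bracket every access to the
 * payload, while ref/unref track ownership of the block itself. */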
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            import = segment->import;

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
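
/* This pairs with pa_memblock_release() above: once please_signal is set,
 * every release that drops n_acquired to zero posts the pool semaphore, so
 * the loop above wakes up at the latest when the final concurrent acquirer
 * is done. */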
/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;

            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary. This function is not multiple-caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_mutex_free(p->mutex);
        pa_semaphore_free(p->semaphore);
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
void pa_mempool_free(pa_mempool *p) {
    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    return p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));
}
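
/* Worked example (an assumption, using the default 16 KiB slot size and
 * 4 KiB pages): the largest block a pool slot can carry is 16384 bytes
 * minus the aligned struct mempool_slot header minus the aligned
 * pa_memblock header, i.e. just short of 16 KiB. */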
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
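
/* The round trip through the temporary list means each free slot is punched
 * exactly once; pa_shm_punch() is expected to return the slot's payload
 * pages to the kernel while keeping the mapping itself intact, so the slot
 * remains reusable without further bookkeeping. */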
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
int pa_mempool_is_shared(pa_mempool *p) {
    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);

    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    b->type = PA_MEMBLOCK_IMPORTED;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
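
/* Illustrative sketch (an assumption, not part of the original file; the
 * function name is ours): the receiving side of the SHM transport. The four
 * IDs come verbatim from pa_memexport_put() in the sending process,
 * typically carried in a protocol message. */
#if 0
static pa_memblock* import_from_peer(pa_memimport *i, uint32_t block_id,
                                     uint32_t shm_id, size_t offset, size_t size) {
    /* attaches the peer's SHM segment on first use, then returns a
     * PA_MEMBLOCK_IMPORTED block pointing into it */
    return pa_memimport_get(i, block_id, shm_id, offset, size);
}
#endif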
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);

    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(block_id);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
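
/* Export path summary: pa_memexport_put() pins the block in a slot and
 * hands the peer everything pa_memimport_get() needs on the other side:
 * the slot index (block_id), the SHM segment (shm_id) and the byte range
 * within it (offset, size). Later, a release message from the importer is
 * fed into pa_memexport_process_release(), and a revoke message from the
 * exporter into pa_memimport_process_revoke(), to undo the mapping. */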
);