/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <string.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"
#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
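
/* With the defaults above a pool spans 128 slots of 16 KiB, i.e. 2 MiB
 * of memory (pa_mempool_new() additionally rounds the slot size up to
 * the page size). Imports are limited to PA_MEMIMPORT_SLOTS_MAX blocks
 * spread over at most PA_MEMIMPORT_SEGMENTS_MAX attached segments. */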
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */

    pa_memblock_type_t type;
    int read_only; /* boolean */

    size_t length;
    pa_atomic_ptr_t data;
    pa_mempool *pool;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            void (*free_cb)(void *p);
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};
struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
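
/* Memory layout: the pool is a single (optionally shared) memory segment
 * of n_blocks * block_size bytes, carved into equally sized slots. Each
 * slot starts with a struct mempool_slot header, followed by the payload;
 * mempool_slot_data(), mempool_slot_idx() and mempool_slot_by_ptr() below
 * convert between slot headers, payload pointers and slot indexes. */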
static void segment_detach(pa_memimport_segment *seg);
PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
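
/* Usage sketch: the typical allocate/fill/release cycle (error handling
 * omitted, memset() standing in for real payload data):
 *
 *   pa_mempool *pool = pa_mempool_new(0);
 *   pa_memblock *blk = pa_memblock_new(pool, 1024);
 *   void *d = pa_memblock_acquire(blk);
 *   memset(d, 0, pa_memblock_get_length(blk));
 *   pa_memblock_release(blk);
 *   pa_memblock_unref(blk);
 *   pa_mempool_free(pool);
 */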
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
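
/* Slot allocation is lock-free: we first try to pop a recycled slot off
 * the free list, and only if that fails carve a fresh slot out of the
 * segment by atomically bumping n_init. If the pool is already fully
 * initialized the increment is undone and NULL is returned. */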
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
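
/* Pool blocks come in two flavours: if the pa_memblock header plus the
 * payload fit into a single slot, the header is placed at the start of
 * the slot's data area (PA_MEMBLOCK_POOL); if only the payload fits, the
 * header is allocated separately and just points into the slot
 * (PA_MEMBLOCK_POOL_EXTERNAL). */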
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = 0;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
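
/* Example (sketch): wrapping caller-allocated memory in a memblock, with
 * pa_xfree() called once the block is finally freed:
 *
 *   void *buf = pa_xmalloc(n);
 *   fill_buffer(buf, n);                  // hypothetical helper
 *   pa_memblock *blk = pa_memblock_new_user(pool, buf, n, pa_xfree, 0);
 */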
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
int pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
int pa_memblock_ref_is_one(pa_memblock *b) {
    int r;

    pa_assert(b);

    r = PA_REFCNT_VALUE(b);
    pa_assert(r > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, though in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
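
/* The waiting side of the acquire/release protocol: a thread that must
 * wait for all accessors to let go of a block bumps please_signal and
 * blocks on the pool semaphore, which pa_memblock_release() posts each
 * time it drops n_acquired to zero while please_signal is set. */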
static void memblock_wait(pa_memblock *b) {
    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;

            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = 0;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
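
/* For PA_MEMBLOCK_FIXED blocks the caller retains ownership of the
 * underlying memory, so before invalidating that memory it must call
 * pa_memblock_unref_fixed(), which copies the data out (via
 * memblock_make_local()) if somebody else still holds a reference. */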
/* No lock necessary. This function is not multiple-caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
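
/* When 'shared' is non-zero the pool's backing segment is created
 * shared, so that other processes can attach it by ID (see
 * pa_mempool_get_shm_id()); a non-shared pool works locally but cannot
 * be used with pa_memexport. */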
pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(1);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
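
/* Vacuuming punches the payload area of every currently unused slot out
 * of the segment, hinting to the kernel that those pages may be
 * reclaimed, and then puts the slots back on the free list. */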
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);
    pa_assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
int pa_mempool_is_shared(pa_mempool *p) {
    return !!p->memory.shared;
}
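
/* Transfer sketch, assuming two processes that exchange the four values
 * written by pa_memexport_put() over some IPC channel:
 *
 *   sender:
 *     pa_memexport *ex = pa_memexport_new(pool, revoke_cb, NULL);
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     pa_memexport_put(ex, b, &block_id, &shm_id, &offset, &size);
 *     ... send block_id, shm_id, offset, size to the peer ...
 *
 *   receiver:
 *     pa_memimport *im = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *b = pa_memimport_get(im, block_id, shm_id, offset, size);
 *     ... use b, then pa_memblock_unref(b) ...
 *
 * revoke_cb and release_cb are the caller-supplied callbacks. */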
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(1);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
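
/* Revocation flow: when an import we depend on dies, pa_memimport_free()
 * walks all exports and fires revoke_cb for every affected slot; the
 * peer on the receiving end is expected to answer by calling
 * pa_memimport_process_revoke(), which copies the block to local memory
 * before the backing segment disappears. */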
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
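
/* Blocks that already live in shared pool memory (or were themselves
 * imported) can be exported as-is and are simply ref'ed; anything else
 * is first copied into a fresh pool block so the receiver can map it. */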
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;
    void *data;
    pa_shm *memory;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    slot->block = b;
    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);

    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}