/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <string.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/mutex.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB
 * (PA_MEMPOOL_SLOTS_MAX slots of PA_MEMPOOL_SLOT_SIZE bytes each).
 * Please note that the footprint is usually much smaller, since the
 * data is stored in SHM and our OS does not commit the memory before
 * we use it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
     * which we in turn exported to another client dies and we need to
     * revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

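/* A minimal usage sketch of the allocation entry point above (pool
 * creation shown for context; names and sizes are illustrative only):
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE);
 *     pa_memblock *blk = pa_memblock_new(pool, 1024);
 *     ...
 *     pa_memblock_unref(blk);
 *
 * Requests that fit a pool slot are served from SHM via
 * pa_memblock_new_pool(); larger ones fall back to
 * memblock_new_appended(), a plain heap allocation where the data
 * directly follows the header. */
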
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    /* Header and data are allocated in one chunk; the data directly
     * follows the aligned header. */
    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        /* Both the memblock header and the data fit in one slot */
        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        /* Only the data fits in a slot; the header lives outside the pool */
        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

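/* Sketch of the user-block variant: wrap a caller-owned buffer and let
 * the library free it with the supplied callback once the last
 * reference is gone (buffer and size are illustrative only):
 *
 *     uint8_t *buf = pa_xmalloc(4096);
 *     pa_memblock *blk = pa_memblock_new_user(p, buf, 4096, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(blk);    <- pa_xfree(buf) runs here
 */
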
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, though in corner cases it takes locks of its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

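/* The intended access pattern, as a sketch: readers bracket every use
 * of the data pointer with acquire/release, so that memblock_wait()
 * further below can detect and wait out concurrent users
 * ('dest' is illustrative):
 *
 *     void *d = pa_memblock_acquire(blk);
 *     memcpy(dest, d, pa_memblock_get_length(blk));
 *     pa_memblock_release(blk);
 */
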
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}

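/* Pool lifecycle sketch: pass TRUE to back the pool with a shareable
 * SHM segment (required for export/import), FALSE for private memory:
 *
 *     pa_mempool *p = pa_mempool_new(TRUE);
 *     ... allocate and unref memblocks ...
 *     pa_mempool_vacuum(p);    <- return unused slots to the OS
 *     pa_mempool_free(p);
 */
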
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

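/* Worked example: with the default 64KB slot (PA_MEMPOOL_SLOT_SIZE)
 * and a hypothetical 64-byte aligned header, the largest pool-backed
 * block is 65536 - 64 = 65472 bytes. Requests above this limit make
 * pa_memblock_new_pool() bump stat.n_too_large_for_pool and return
 * NULL, and pa_memblock_new() then falls back to an appended block. */
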
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);
    pa_assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}

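/* Import-side sketch: a receiver resolves the quadruple announced by a
 * peer into a local memblock (variables illustrative):
 *
 *     pa_memblock *blk = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     if (!blk)
 *         ... slot limit reached, segment not attachable, or bad range ...
 */
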
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
