2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
51 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
52 * note that the footprint is usually much smaller, since the data is
53 * stored in SHM and our OS does not commit the memory before we use
54 * it for the first time. */
/* Default number of slots in a pool when no explicit size is given. */
55 #define PA_MEMPOOL_SLOTS_MAX 1024
/* Size of each pool slot in bytes (64 KiB). The pool rounds this up to
 * the page size at creation time if it is smaller than one page. */
56 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
/* Maximum number of blocks a single pa_memexport may have exported at
 * once (bounds the e->slots[] array). */
58 #define PA_MEMEXPORT_SLOTS_MAX 128
/* Maximum number of blocks a single pa_memimport may hold at once,
 * and the maximum number of SHM segments it may have attached. */
60 #define PA_MEMIMPORT_SLOTS_MAX 160
61 #define PA_MEMIMPORT_SEGMENTS_MAX 16
64 PA_REFCNT_DECLARE
; /* the reference counter */
67 pa_memblock_type_t type
;
69 pa_bool_t read_only
:1;
70 pa_bool_t is_silence
:1;
75 pa_atomic_t n_acquired
;
76 pa_atomic_t please_signal
;
80 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
86 pa_memimport_segment
*segment
;
91 struct pa_memimport_segment
{
101 pa_hashmap
*segments
;
104 /* Called whenever an imported memory block is no longer
106 pa_memimport_release_cb_t release_cb
;
109 PA_LLIST_FIELDS(pa_memimport
);
112 struct memexport_slot
{
113 PA_LLIST_FIELDS(struct memexport_slot
);
117 struct pa_memexport
{
121 struct memexport_slot slots
[PA_MEMEXPORT_SLOTS_MAX
];
123 PA_LLIST_HEAD(struct memexport_slot
, free_slots
);
124 PA_LLIST_HEAD(struct memexport_slot
, used_slots
);
127 /* Called whenever a client from which we imported a memory block
128 which we in turn exported to another client dies and we need to
129 revoke the memory block accordingly */
130 pa_memexport_revoke_cb_t revoke_cb
;
133 PA_LLIST_FIELDS(pa_memexport
);
137 pa_semaphore
*semaphore
;
146 PA_LLIST_HEAD(pa_memimport
, imports
);
147 PA_LLIST_HEAD(pa_memexport
, exports
);
149 /* A list of free slots that may be reused */
150 pa_flist
*free_slots
;
152 pa_mempool_stat stat
;
155 static void segment_detach(pa_memimport_segment
*seg
);
157 PA_STATIC_FLIST_DECLARE(unused_memblocks
, 0, pa_xfree
);
159 /* No lock necessary */
/* Account a newly created memblock in its pool's statistics: bumps the
 * currently-live counters, the monotonic accumulated counters, the
 * per-type breakdown, and — for blocks received over SHM — the imported
 * counters. All updates are lock-free atomics. */
160 static void stat_add(pa_memblock
*b
) {
/* Currently-live blocks / bytes (decremented again in stat_remove). */
164 pa_atomic_inc(&b
->pool
->stat
.n_allocated
);
165 pa_atomic_add(&b
->pool
->stat
.allocated_size
, (int) b
->length
);
/* Lifetime totals; stat_remove never decrements these. */
167 pa_atomic_inc(&b
->pool
->stat
.n_accumulated
);
168 pa_atomic_add(&b
->pool
->stat
.accumulated_size
, (int) b
->length
);
/* Imported blocks are additionally tracked separately. */
170 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
171 pa_atomic_inc(&b
->pool
->stat
.n_imported
);
172 pa_atomic_add(&b
->pool
->stat
.imported_size
, (int) b
->length
);
/* Per-type breakdown, indexed by pa_memblock_type_t. */
175 pa_atomic_inc(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
176 pa_atomic_inc(&b
->pool
->stat
.n_accumulated_by_type
[b
->type
]);
179 /* No lock necessary */
/* Inverse of stat_add(): removes a memblock from its pool's live
 * statistics. The accumulated (lifetime) counters are deliberately left
 * untouched. The asserts guard against double removal / underflow. */
180 static void stat_remove(pa_memblock
*b
) {
184 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_allocated
) > 0);
185 pa_assert(pa_atomic_load(&b
->pool
->stat
.allocated_size
) >= (int) b
->length
);
187 pa_atomic_dec(&b
->pool
->stat
.n_allocated
);
188 pa_atomic_sub(&b
->pool
->stat
.allocated_size
, (int) b
->length
);
/* Imported blocks were also counted in the imported stats; undo that. */
190 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
191 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_imported
) > 0);
192 pa_assert(pa_atomic_load(&b
->pool
->stat
.imported_size
) >= (int) b
->length
);
194 pa_atomic_dec(&b
->pool
->stat
.n_imported
);
195 pa_atomic_sub(&b
->pool
->stat
.imported_size
, (int) b
->length
);
/* Per-type live counter; pairs with the increment in stat_add. */
198 pa_atomic_dec(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
201 static pa_memblock
*memblock_new_appended(pa_mempool
*p
, size_t length
);
203 /* No lock necessary */
/* Allocate a new memblock of the given length (or (size_t)-1 to let the
 * pool pick a size). Tries a pool slot first; if the pool is full or the
 * request is too large, falls back to a malloc'ed "appended" block. */
204 pa_memblock
*pa_memblock_new(pa_mempool
*p
, size_t length
) {
210 if (!(b
= pa_memblock_new_pool(p
, length
)))
/* Fallback: header and payload in one heap allocation. */
211 b
= memblock_new_appended(p
, length
);
216 /* No lock necessary */
/* Create a PA_MEMBLOCK_APPENDED block: a single heap allocation holding
 * the pa_memblock header followed (aligned) by the payload. Used as the
 * fallback when pool allocation fails. */
217 static pa_memblock
*memblock_new_appended(pa_mempool
*p
, size_t length
) {
223 /* If -1 is passed as length we choose the size for the caller. */
225 if (length
== (size_t) -1)
226 length
= p
->block_size
- PA_ALIGN(sizeof(pa_memblock
));
/* One allocation: header + payload. */
228 b
= pa_xmalloc(PA_ALIGN(sizeof(pa_memblock
)) + length
);
231 b
->type
= PA_MEMBLOCK_APPENDED
;
232 b
->read_only
= b
->is_silence
= FALSE
;
/* Data pointer points just past the (aligned) header. */
233 pa_atomic_ptr_store(&b
->data
, (uint8_t*) b
+ PA_ALIGN(sizeof(pa_memblock
)));
235 pa_atomic_store(&b
->n_acquired
, 0);
236 pa_atomic_store(&b
->please_signal
, 0);
242 /* No lock necessary */
/* Grab a free slot from the pool: first try the lock-free free list of
 * recycled slots; otherwise carve a fresh, never-used slot out of the
 * SHM region by atomically bumping n_init. Returns NULL (and counts a
 * "pool full" event) when all slots are in use. */
243 static struct mempool_slot
* mempool_allocate_slot(pa_mempool
*p
) {
244 struct mempool_slot
*slot
;
247 if (!(slot
= pa_flist_pop(p
->free_slots
))) {
250 /* The free list was empty, we have to allocate a new entry */
/* n_init counts how many slots have ever been handed out; if we ran
 * past n_blocks the pool is exhausted and the increment is undone. */
252 if ((unsigned) (idx
= pa_atomic_inc(&p
->n_init
)) >= p
->n_blocks
)
253 pa_atomic_dec(&p
->n_init
);
/* Slot address is computed from its index within the SHM region. */
255 slot
= (struct mempool_slot
*) ((uint8_t*) p
->memory
.ptr
+ (p
->block_size
* (size_t) idx
));
258 pa_log_info("Pool full");
259 pa_atomic_inc(&p
->stat
.n_pool_full
);
264 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
265 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
266 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
273 /* No lock necessary, totally redundant anyway */
274 static inline void* mempool_slot_data(struct mempool_slot
*slot
) {
278 /* No lock necessary */
/* Map a pointer into the pool's SHM region back to its slot index.
 * The asserts verify the pointer actually lies inside the region. */
279 static unsigned mempool_slot_idx(pa_mempool
*p
, void *ptr
) {
282 pa_assert((uint8_t*) ptr
>= (uint8_t*) p
->memory
.ptr
);
283 pa_assert((uint8_t*) ptr
< (uint8_t*) p
->memory
.ptr
+ p
->memory
.size
);
/* Byte offset divided by the fixed slot size gives the index. */
285 return (unsigned) ((size_t) ((uint8_t*) ptr
- (uint8_t*) p
->memory
.ptr
) / p
->block_size
);
288 /* No lock necessary */
/* Return the slot header for a pointer anywhere inside that slot, or
 * NULL when the index lookup fails. Rounds the pointer down to the
 * slot boundary via index * block_size. */
289 static struct mempool_slot
* mempool_slot_by_ptr(pa_mempool
*p
, void *ptr
) {
292 if ((idx
= mempool_slot_idx(p
, ptr
)) == (unsigned) -1)
295 return (struct mempool_slot
*) ((uint8_t*) p
->memory
.ptr
+ (idx
* p
->block_size
));
298 /* No lock necessary */
299 pa_memblock
*pa_memblock_new_pool(pa_mempool
*p
, size_t length
) {
300 pa_memblock
*b
= NULL
;
301 struct mempool_slot
*slot
;
306 /* If -1 is passed as length we choose the size for the caller: we
307 * take the largest size that fits in one of our slots. */
309 if (length
== (size_t) -1)
310 length
= pa_mempool_block_size_max(p
);
312 if (p
->block_size
>= PA_ALIGN(sizeof(pa_memblock
)) + length
) {
314 if (!(slot
= mempool_allocate_slot(p
)))
317 b
= mempool_slot_data(slot
);
318 b
->type
= PA_MEMBLOCK_POOL
;
319 pa_atomic_ptr_store(&b
->data
, (uint8_t*) b
+ PA_ALIGN(sizeof(pa_memblock
)));
321 } else if (p
->block_size
>= length
) {
323 if (!(slot
= mempool_allocate_slot(p
)))
326 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
327 b
= pa_xnew(pa_memblock
, 1);
329 b
->type
= PA_MEMBLOCK_POOL_EXTERNAL
;
330 pa_atomic_ptr_store(&b
->data
, mempool_slot_data(slot
));
333 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length
, (unsigned long) p
->block_size
);
334 pa_atomic_inc(&p
->stat
.n_too_large_for_pool
);
340 b
->read_only
= b
->is_silence
= FALSE
;
342 pa_atomic_store(&b
->n_acquired
, 0);
343 pa_atomic_store(&b
->please_signal
, 0);
349 /* No lock necessary */
350 pa_memblock
*pa_memblock_new_fixed(pa_mempool
*p
, void *d
, size_t length
, pa_bool_t read_only
) {
355 pa_assert(length
!= (size_t) -1);
358 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
359 b
= pa_xnew(pa_memblock
, 1);
362 b
->type
= PA_MEMBLOCK_FIXED
;
363 b
->read_only
= read_only
;
364 b
->is_silence
= FALSE
;
365 pa_atomic_ptr_store(&b
->data
, d
);
367 pa_atomic_store(&b
->n_acquired
, 0);
368 pa_atomic_store(&b
->please_signal
, 0);
374 /* No lock necessary */
375 pa_memblock
*pa_memblock_new_user(pa_mempool
*p
, void *d
, size_t length
, pa_free_cb_t free_cb
, pa_bool_t read_only
) {
381 pa_assert(length
!= (size_t) -1);
384 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
385 b
= pa_xnew(pa_memblock
, 1);
388 b
->type
= PA_MEMBLOCK_USER
;
389 b
->read_only
= read_only
;
390 b
->is_silence
= FALSE
;
391 pa_atomic_ptr_store(&b
->data
, d
);
393 pa_atomic_store(&b
->n_acquired
, 0);
394 pa_atomic_store(&b
->please_signal
, 0);
396 b
->per_type
.user
.free_cb
= free_cb
;
402 /* No lock necessary */
403 pa_bool_t
pa_memblock_is_read_only(pa_memblock
*b
) {
405 pa_assert(PA_REFCNT_VALUE(b
) > 0);
407 return b
->read_only
&& PA_REFCNT_VALUE(b
) == 1;
410 /* No lock necessary */
/* Returns whether this block has been flagged as containing silence
 * (set via pa_memblock_set_is_silence). Purely a cached flag; the
 * payload is not inspected. */
411 pa_bool_t
pa_memblock_is_silence(pa_memblock
*b
) {
413 pa_assert(PA_REFCNT_VALUE(b
) > 0);
415 return b
->is_silence
;
418 /* No lock necessary */
419 void pa_memblock_set_is_silence(pa_memblock
*b
, pa_bool_t v
) {
421 pa_assert(PA_REFCNT_VALUE(b
) > 0);
426 /* No lock necessary */
427 pa_bool_t
pa_memblock_ref_is_one(pa_memblock
*b
) {
431 pa_assert_se((r
= PA_REFCNT_VALUE(b
)) > 0);
436 /* No lock necessary */
/* Pin the block's payload for access and return the data pointer.
 * Must be paired with pa_memblock_release(). The n_acquired counter
 * lets memblock_wait() block until every user has released. */
437 void* pa_memblock_acquire(pa_memblock
*b
) {
439 pa_assert(PA_REFCNT_VALUE(b
) > 0);
/* Record the acquisition before handing out the pointer. */
441 pa_atomic_inc(&b
->n_acquired
);
443 return pa_atomic_ptr_load(&b
->data
);
446 /* No lock necessary, in corner cases locks by its own */
/* Undo a pa_memblock_acquire(). When this was the last outstanding
 * acquisition (pa_atomic_dec returned the previous value 1) and someone
 * registered interest via please_signal, wake them through the pool's
 * semaphore (see memblock_wait). */
447 void pa_memblock_release(pa_memblock
*b
) {
450 pa_assert(PA_REFCNT_VALUE(b
) > 0);
452 r
= pa_atomic_dec(&b
->n_acquired
);
455 /* Signal a waiting thread that this memblock is no longer used */
456 if (r
== 1 && pa_atomic_load(&b
->please_signal
))
457 pa_semaphore_post(b
->pool
->semaphore
);
460 size_t pa_memblock_get_length(pa_memblock
*b
) {
462 pa_assert(PA_REFCNT_VALUE(b
) > 0);
467 pa_mempool
* pa_memblock_get_pool(pa_memblock
*b
) {
469 pa_assert(PA_REFCNT_VALUE(b
) > 0);
474 /* No lock necessary */
475 pa_memblock
* pa_memblock_ref(pa_memblock
*b
) {
477 pa_assert(PA_REFCNT_VALUE(b
) > 0);
483 static void memblock_free(pa_memblock
*b
) {
486 pa_assert(pa_atomic_load(&b
->n_acquired
) == 0);
491 case PA_MEMBLOCK_USER
:
492 pa_assert(b
->per_type
.user
.free_cb
);
493 b
->per_type
.user
.free_cb(pa_atomic_ptr_load(&b
->data
));
497 case PA_MEMBLOCK_FIXED
:
498 case PA_MEMBLOCK_APPENDED
:
499 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
504 case PA_MEMBLOCK_IMPORTED
: {
505 pa_memimport_segment
*segment
;
506 pa_memimport
*import
;
508 /* FIXME! This should be implemented lock-free */
510 segment
= b
->per_type
.imported
.segment
;
512 import
= segment
->import
;
515 pa_mutex_lock(import
->mutex
);
516 pa_hashmap_remove(import
->blocks
, PA_UINT32_TO_PTR(b
->per_type
.imported
.id
));
517 if (-- segment
->n_blocks
<= 0)
518 segment_detach(segment
);
520 pa_mutex_unlock(import
->mutex
);
522 import
->release_cb(import
, b
->per_type
.imported
.id
, import
->userdata
);
524 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
529 case PA_MEMBLOCK_POOL_EXTERNAL
:
530 case PA_MEMBLOCK_POOL
: {
531 struct mempool_slot
*slot
;
534 slot
= mempool_slot_by_ptr(b
->pool
, pa_atomic_ptr_load(&b
->data
));
537 call_free
= b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
;
539 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
540 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
541 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
545 /* The free list dimensions should easily allow all slots
546 * to fit in, hence try harder if pushing this slot into
547 * the free list fails */
548 while (pa_flist_push(b
->pool
->free_slots
, slot
) < 0)
552 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
558 case PA_MEMBLOCK_TYPE_MAX
:
560 pa_assert_not_reached();
564 /* No lock necessary */
565 void pa_memblock_unref(pa_memblock
*b
) {
567 pa_assert(PA_REFCNT_VALUE(b
) > 0);
569 if (PA_REFCNT_DEC(b
) > 0)
/* Block until no thread holds an outstanding pa_memblock_acquire() on
 * this block. Registers interest via please_signal so that the final
 * pa_memblock_release() posts the pool semaphore; re-checks n_acquired
 * in a loop since the semaphore is shared pool-wide. */
576 static void memblock_wait(pa_memblock
*b
) {
579 if (pa_atomic_load(&b
->n_acquired
) > 0) {
580 /* We need to wait until all threads gave up access to the
581 * memory block before we can go on. Unfortunately this means
582 * that we have to lock and wait here. Sniff! */
584 pa_atomic_inc(&b
->please_signal
);
586 while (pa_atomic_load(&b
->n_acquired
) > 0)
587 pa_semaphore_wait(b
->pool
->semaphore
);
589 pa_atomic_dec(&b
->please_signal
);
593 /* No lock necessary. This function is not multiple caller safe! */
594 static void memblock_make_local(pa_memblock
*b
) {
597 pa_atomic_dec(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
599 if (b
->length
<= b
->pool
->block_size
) {
600 struct mempool_slot
*slot
;
602 if ((slot
= mempool_allocate_slot(b
->pool
))) {
604 /* We can move it into a local pool, perfect! */
606 new_data
= mempool_slot_data(slot
);
607 memcpy(new_data
, pa_atomic_ptr_load(&b
->data
), b
->length
);
608 pa_atomic_ptr_store(&b
->data
, new_data
);
610 b
->type
= PA_MEMBLOCK_POOL_EXTERNAL
;
611 b
->read_only
= FALSE
;
617 /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
618 b
->per_type
.user
.free_cb
= pa_xfree
;
619 pa_atomic_ptr_store(&b
->data
, pa_xmemdup(pa_atomic_ptr_load(&b
->data
), b
->length
));
621 b
->type
= PA_MEMBLOCK_USER
;
622 b
->read_only
= FALSE
;
625 pa_atomic_inc(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
626 pa_atomic_inc(&b
->pool
->stat
.n_accumulated_by_type
[b
->type
]);
630 /* No lock necessary. This function is not multiple caller safe*/
/* Drop our reference to a PA_MEMBLOCK_FIXED block whose underlying
 * memory is about to become invalid. If other references remain, the
 * payload is first copied out of the fixed memory (memblock_make_local)
 * so those holders stay valid after the caller reuses the buffer. */
631 void pa_memblock_unref_fixed(pa_memblock
*b
) {
633 pa_assert(PA_REFCNT_VALUE(b
) > 0);
634 pa_assert(b
->type
== PA_MEMBLOCK_FIXED
);
636 if (PA_REFCNT_VALUE(b
) > 1)
637 memblock_make_local(b
);
639 pa_memblock_unref(b
);
642 /* No lock necessary. */
/* Hint to the OS that the block's payload will be needed soon (page it
 * in), via pa_will_need(). Acquires/releases the block around the hint
 * so the data pointer stays valid. NOTE(review): appears to return the
 * block itself for call chaining — return statement elided here. */
643 pa_memblock
*pa_memblock_will_need(pa_memblock
*b
) {
647 pa_assert(PA_REFCNT_VALUE(b
) > 0);
649 p
= pa_memblock_acquire(b
);
650 pa_will_need(p
, b
->length
);
651 pa_memblock_release(b
);
656 /* Self-locked. This function is not multiple-caller safe */
657 static void memblock_replace_import(pa_memblock
*b
) {
658 pa_memimport_segment
*seg
;
661 pa_assert(b
->type
== PA_MEMBLOCK_IMPORTED
);
663 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_imported
) > 0);
664 pa_assert(pa_atomic_load(&b
->pool
->stat
.imported_size
) >= (int) b
->length
);
665 pa_atomic_dec(&b
->pool
->stat
.n_imported
);
666 pa_atomic_sub(&b
->pool
->stat
.imported_size
, (int) b
->length
);
668 seg
= b
->per_type
.imported
.segment
;
670 pa_assert(seg
->import
);
672 pa_mutex_lock(seg
->import
->mutex
);
676 PA_UINT32_TO_PTR(b
->per_type
.imported
.id
));
678 memblock_make_local(b
);
680 if (-- seg
->n_blocks
<= 0) {
681 pa_mutex_unlock(seg
->import
->mutex
);
684 pa_mutex_unlock(seg
->import
->mutex
);
687 pa_mempool
* pa_mempool_new(pa_bool_t shared
, size_t size
) {
691 p
= pa_xnew(pa_mempool
, 1);
693 p
->mutex
= pa_mutex_new(TRUE
, TRUE
);
694 p
->semaphore
= pa_semaphore_new(0);
696 p
->block_size
= PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE
);
697 if (p
->block_size
< PA_PAGE_SIZE
)
698 p
->block_size
= PA_PAGE_SIZE
;
701 p
->n_blocks
= PA_MEMPOOL_SLOTS_MAX
;
703 p
->n_blocks
= (unsigned) (size
/ p
->block_size
);
709 if (pa_shm_create_rw(&p
->memory
, p
->n_blocks
* p
->block_size
, shared
, 0700) < 0) {
714 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
715 p
->memory
.shared
? "shared" : "private",
717 pa_bytes_snprint(t1
, sizeof(t1
), (unsigned) p
->block_size
),
718 pa_bytes_snprint(t2
, sizeof(t2
), (unsigned) (p
->n_blocks
* p
->block_size
)),
719 (unsigned long) pa_mempool_block_size_max(p
));
721 memset(&p
->stat
, 0, sizeof(p
->stat
));
722 pa_atomic_store(&p
->n_init
, 0);
724 PA_LLIST_HEAD_INIT(pa_memimport
, p
->imports
);
725 PA_LLIST_HEAD_INIT(pa_memexport
, p
->exports
);
727 p
->free_slots
= pa_flist_new(p
->n_blocks
);
732 void pa_mempool_free(pa_mempool
*p
) {
735 pa_mutex_lock(p
->mutex
);
738 pa_memimport_free(p
->imports
);
741 pa_memexport_free(p
->exports
);
743 pa_mutex_unlock(p
->mutex
);
745 pa_flist_free(p
->free_slots
, NULL
);
747 if (pa_atomic_load(&p
->stat
.n_allocated
) > 0) {
748 /* raise(SIGTRAP); */
749 pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p
->stat
.n_allocated
));
752 pa_shm_free(&p
->memory
);
754 pa_mutex_free(p
->mutex
);
755 pa_semaphore_free(p
->semaphore
);
760 /* No lock necessary */
761 const pa_mempool_stat
* pa_mempool_get_stat(pa_mempool
*p
) {
767 /* No lock necessary */
/* Largest payload that still fits in a single pool slot alongside the
 * (aligned) pa_memblock header; this is the size picked when callers
 * pass (size_t)-1 as the length. */
768 size_t pa_mempool_block_size_max(pa_mempool
*p
) {
771 return p
->block_size
- PA_ALIGN(sizeof(pa_memblock
));
774 /* No lock necessary */
775 void pa_mempool_vacuum(pa_mempool
*p
) {
776 struct mempool_slot
*slot
;
781 list
= pa_flist_new(p
->n_blocks
);
783 while ((slot
= pa_flist_pop(p
->free_slots
)))
784 while (pa_flist_push(list
, slot
) < 0)
787 while ((slot
= pa_flist_pop(list
))) {
788 pa_shm_punch(&p
->memory
, (size_t) ((uint8_t*) slot
- (uint8_t*) p
->memory
.ptr
), p
->block_size
);
790 while (pa_flist_push(p
->free_slots
, slot
))
794 pa_flist_free(list
, NULL
);
797 /* No lock necessary */
798 int pa_mempool_get_shm_id(pa_mempool
*p
, uint32_t *id
) {
801 if (!p
->memory
.shared
)
809 /* No lock necessary */
/* Whether the pool's backing memory is a shared SHM segment (and thus
 * usable with pa_memexport/pa_memimport) rather than private memory. */
810 pa_bool_t
pa_mempool_is_shared(pa_mempool
*p
) {
813 return !!p
->memory
.shared
;
816 /* For receiving blocks from other nodes */
817 pa_memimport
* pa_memimport_new(pa_mempool
*p
, pa_memimport_release_cb_t cb
, void *userdata
) {
823 i
= pa_xnew(pa_memimport
, 1);
824 i
->mutex
= pa_mutex_new(TRUE
, TRUE
);
826 i
->segments
= pa_hashmap_new(NULL
, NULL
);
827 i
->blocks
= pa_hashmap_new(NULL
, NULL
);
829 i
->userdata
= userdata
;
831 pa_mutex_lock(p
->mutex
);
832 PA_LLIST_PREPEND(pa_memimport
, p
->imports
, i
);
833 pa_mutex_unlock(p
->mutex
);
838 static void memexport_revoke_blocks(pa_memexport
*e
, pa_memimport
*i
);
840 /* Should be called locked */
841 static pa_memimport_segment
* segment_attach(pa_memimport
*i
, uint32_t shm_id
) {
842 pa_memimport_segment
* seg
;
844 if (pa_hashmap_size(i
->segments
) >= PA_MEMIMPORT_SEGMENTS_MAX
)
847 seg
= pa_xnew(pa_memimport_segment
, 1);
849 if (pa_shm_attach_ro(&seg
->memory
, shm_id
) < 0) {
857 pa_hashmap_put(i
->segments
, PA_UINT32_TO_PTR(shm_id
), seg
);
861 /* Should be called locked */
/* Drop an SHM segment from its import's segment table and unmap it.
 * Called (with the import's mutex held) once the segment's last block
 * is gone (n_blocks reached zero). */
862 static void segment_detach(pa_memimport_segment
*seg
) {
865 pa_hashmap_remove(seg
->import
->segments
, PA_UINT32_TO_PTR(seg
->memory
.id
));
866 pa_shm_free(&seg
->memory
);
870 /* Self-locked. Not multiple-caller safe */
871 void pa_memimport_free(pa_memimport
*i
) {
877 pa_mutex_lock(i
->mutex
);
879 while ((b
= pa_hashmap_first(i
->blocks
)))
880 memblock_replace_import(b
);
882 pa_assert(pa_hashmap_size(i
->segments
) == 0);
884 pa_mutex_unlock(i
->mutex
);
886 pa_mutex_lock(i
->pool
->mutex
);
888 /* If we've exported this block further we need to revoke that export */
889 for (e
= i
->pool
->exports
; e
; e
= e
->next
)
890 memexport_revoke_blocks(e
, i
);
892 PA_LLIST_REMOVE(pa_memimport
, i
->pool
->imports
, i
);
894 pa_mutex_unlock(i
->pool
->mutex
);
896 pa_hashmap_free(i
->blocks
, NULL
, NULL
);
897 pa_hashmap_free(i
->segments
, NULL
, NULL
);
899 pa_mutex_free(i
->mutex
);
905 pa_memblock
* pa_memimport_get(pa_memimport
*i
, uint32_t block_id
, uint32_t shm_id
, size_t offset
, size_t size
) {
906 pa_memblock
*b
= NULL
;
907 pa_memimport_segment
*seg
;
911 pa_mutex_lock(i
->mutex
);
913 if (pa_hashmap_size(i
->blocks
) >= PA_MEMIMPORT_SLOTS_MAX
)
916 if (!(seg
= pa_hashmap_get(i
->segments
, PA_UINT32_TO_PTR(shm_id
))))
917 if (!(seg
= segment_attach(i
, shm_id
)))
920 if (offset
+size
> seg
->memory
.size
)
923 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
924 b
= pa_xnew(pa_memblock
, 1);
928 b
->type
= PA_MEMBLOCK_IMPORTED
;
930 b
->is_silence
= FALSE
;
931 pa_atomic_ptr_store(&b
->data
, (uint8_t*) seg
->memory
.ptr
+ offset
);
933 pa_atomic_store(&b
->n_acquired
, 0);
934 pa_atomic_store(&b
->please_signal
, 0);
935 b
->per_type
.imported
.id
= block_id
;
936 b
->per_type
.imported
.segment
= seg
;
938 pa_hashmap_put(i
->blocks
, PA_UINT32_TO_PTR(block_id
), b
);
943 pa_mutex_unlock(i
->mutex
);
951 int pa_memimport_process_revoke(pa_memimport
*i
, uint32_t id
) {
956 pa_mutex_lock(i
->mutex
);
958 if (!(b
= pa_hashmap_get(i
->blocks
, PA_UINT32_TO_PTR(id
)))) {
963 memblock_replace_import(b
);
966 pa_mutex_unlock(i
->mutex
);
971 /* For sending blocks to other nodes */
972 pa_memexport
* pa_memexport_new(pa_mempool
*p
, pa_memexport_revoke_cb_t cb
, void *userdata
) {
978 if (!p
->memory
.shared
)
981 e
= pa_xnew(pa_memexport
, 1);
982 e
->mutex
= pa_mutex_new(TRUE
, TRUE
);
984 PA_LLIST_HEAD_INIT(struct memexport_slot
, e
->free_slots
);
985 PA_LLIST_HEAD_INIT(struct memexport_slot
, e
->used_slots
);
988 e
->userdata
= userdata
;
990 pa_mutex_lock(p
->mutex
);
991 PA_LLIST_PREPEND(pa_memexport
, p
->exports
, e
);
992 pa_mutex_unlock(p
->mutex
);
996 void pa_memexport_free(pa_memexport
*e
) {
999 pa_mutex_lock(e
->mutex
);
1000 while (e
->used_slots
)
1001 pa_memexport_process_release(e
, (uint32_t) (e
->used_slots
- e
->slots
));
1002 pa_mutex_unlock(e
->mutex
);
1004 pa_mutex_lock(e
->pool
->mutex
);
1005 PA_LLIST_REMOVE(pa_memexport
, e
->pool
->exports
, e
);
1006 pa_mutex_unlock(e
->pool
->mutex
);
1008 pa_mutex_free(e
->mutex
);
1013 int pa_memexport_process_release(pa_memexport
*e
, uint32_t id
) {
1018 pa_mutex_lock(e
->mutex
);
1020 if (id
>= e
->n_init
)
1023 if (!e
->slots
[id
].block
)
1026 b
= e
->slots
[id
].block
;
1027 e
->slots
[id
].block
= NULL
;
1029 PA_LLIST_REMOVE(struct memexport_slot
, e
->used_slots
, &e
->slots
[id
]);
1030 PA_LLIST_PREPEND(struct memexport_slot
, e
->free_slots
, &e
->slots
[id
]);
1032 pa_mutex_unlock(e
->mutex
);
1034 /* pa_log("Processing release for %u", id); */
1036 pa_assert(pa_atomic_load(&e
->pool
->stat
.n_exported
) > 0);
1037 pa_assert(pa_atomic_load(&e
->pool
->stat
.exported_size
) >= (int) b
->length
);
1039 pa_atomic_dec(&e
->pool
->stat
.n_exported
);
1040 pa_atomic_sub(&e
->pool
->stat
.exported_size
, (int) b
->length
);
1042 pa_memblock_unref(b
);
1047 pa_mutex_unlock(e
->mutex
);
1053 static void memexport_revoke_blocks(pa_memexport
*e
, pa_memimport
*i
) {
1054 struct memexport_slot
*slot
, *next
;
1058 pa_mutex_lock(e
->mutex
);
1060 for (slot
= e
->used_slots
; slot
; slot
= next
) {
1064 if (slot
->block
->type
!= PA_MEMBLOCK_IMPORTED
||
1065 slot
->block
->per_type
.imported
.segment
->import
!= i
)
1068 idx
= (uint32_t) (slot
- e
->slots
);
1069 e
->revoke_cb(e
, idx
, e
->userdata
);
1070 pa_memexport_process_release(e
, idx
);
1073 pa_mutex_unlock(e
->mutex
);
1076 /* No lock necessary */
1077 static pa_memblock
*memblock_shared_copy(pa_mempool
*p
, pa_memblock
*b
) {
1083 if (b
->type
== PA_MEMBLOCK_IMPORTED
||
1084 b
->type
== PA_MEMBLOCK_POOL
||
1085 b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
) {
1086 pa_assert(b
->pool
== p
);
1087 return pa_memblock_ref(b
);
1090 if (!(n
= pa_memblock_new_pool(p
, b
->length
)))
1093 memcpy(pa_atomic_ptr_load(&n
->data
), pa_atomic_ptr_load(&b
->data
), b
->length
);
1098 int pa_memexport_put(pa_memexport
*e
, pa_memblock
*b
, uint32_t *block_id
, uint32_t *shm_id
, size_t *offset
, size_t * size
) {
1100 struct memexport_slot
*slot
;
1105 pa_assert(block_id
);
1109 pa_assert(b
->pool
== e
->pool
);
1111 if (!(b
= memblock_shared_copy(e
->pool
, b
)))
1114 pa_mutex_lock(e
->mutex
);
1116 if (e
->free_slots
) {
1117 slot
= e
->free_slots
;
1118 PA_LLIST_REMOVE(struct memexport_slot
, e
->free_slots
, slot
);
1119 } else if (e
->n_init
< PA_MEMEXPORT_SLOTS_MAX
)
1120 slot
= &e
->slots
[e
->n_init
++];
1122 pa_mutex_unlock(e
->mutex
);
1123 pa_memblock_unref(b
);
1127 PA_LLIST_PREPEND(struct memexport_slot
, e
->used_slots
, slot
);
1129 *block_id
= (uint32_t) (slot
- e
->slots
);
1131 pa_mutex_unlock(e
->mutex
);
1132 /* pa_log("Got block id %u", *block_id); */
1134 data
= pa_memblock_acquire(b
);
1136 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
1137 pa_assert(b
->per_type
.imported
.segment
);
1138 memory
= &b
->per_type
.imported
.segment
->memory
;
1140 pa_assert(b
->type
== PA_MEMBLOCK_POOL
|| b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
);
1142 memory
= &b
->pool
->memory
;
1145 pa_assert(data
>= memory
->ptr
);
1146 pa_assert((uint8_t*) data
+ b
->length
<= (uint8_t*) memory
->ptr
+ memory
->size
);
1148 *shm_id
= memory
->id
;
1149 *offset
= (size_t) ((uint8_t*) data
- (uint8_t*) memory
->ptr
);
1152 pa_memblock_release(b
);
1154 pa_atomic_inc(&e
->pool
->stat
.n_exported
);
1155 pa_atomic_add(&e
->pool
->stat
.exported_size
, (int) b
->length
);