4 This file is part of PulseAudio.
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as
11 published by the Free Software Foundation; either version 2.1 of the
12 License, or (at your option) any later version.
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details
19 You should have received a copy of the GNU Lesser General Public
20 License along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
36 #include <pulse/xmalloc.h>
37 #include <pulse/def.h>
39 #include <pulsecore/shm.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/hashmap.h>
42 #include <pulsecore/semaphore.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/flist.h>
48 #define PA_MEMPOOL_SLOTS_MAX 128
49 #define PA_MEMPOOL_SLOT_SIZE (16*1024)
51 #define PA_MEMEXPORT_SLOTS_MAX 128
53 #define PA_MEMIMPORT_SLOTS_MAX 128
54 #define PA_MEMIMPORT_SEGMENTS_MAX 16
57 PA_REFCNT_DECLARE
; /* the reference counter */
60 pa_memblock_type_t type
;
61 int read_only
; /* boolean */
66 pa_atomic_t n_acquired
;
67 pa_atomic_t please_signal
;
71 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
77 pa_memimport_segment
*segment
;
82 struct pa_memimport_segment
{
95 /* Called whenever an imported memory block is no longer
97 pa_memimport_release_cb_t release_cb
;
100 PA_LLIST_FIELDS(pa_memimport
);
103 struct memexport_slot
{
104 PA_LLIST_FIELDS(struct memexport_slot
);
108 struct pa_memexport
{
112 struct memexport_slot slots
[PA_MEMEXPORT_SLOTS_MAX
];
114 PA_LLIST_HEAD(struct memexport_slot
, free_slots
);
115 PA_LLIST_HEAD(struct memexport_slot
, used_slots
);
118 /* Called whenever a client from which we imported a memory block
119 which we in turn exported to another client dies and we need to
120 revoke the memory block accordingly */
121 pa_memexport_revoke_cb_t revoke_cb
;
124 PA_LLIST_FIELDS(pa_memexport
);
127 struct mempool_slot
{
128 PA_LLIST_FIELDS(struct mempool_slot
);
129 /* the actual data follows immediately hereafter */
133 pa_semaphore
*semaphore
;
142 PA_LLIST_HEAD(pa_memimport
, imports
);
143 PA_LLIST_HEAD(pa_memexport
, exports
);
145 /* A list of free slots that may be reused */
146 pa_flist
*free_slots
;
148 pa_mempool_stat stat
;
151 static void segment_detach(pa_memimport_segment
*seg
);
153 PA_STATIC_FLIST_DECLARE(unused_memblocks
, 0, pa_xfree
);
155 /* No lock necessary */
156 static void stat_add(pa_memblock
*b
) {
160 pa_atomic_inc(&b
->pool
->stat
.n_allocated
);
161 pa_atomic_add(&b
->pool
->stat
.allocated_size
, b
->length
);
163 pa_atomic_inc(&b
->pool
->stat
.n_accumulated
);
164 pa_atomic_add(&b
->pool
->stat
.accumulated_size
, b
->length
);
166 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
167 pa_atomic_inc(&b
->pool
->stat
.n_imported
);
168 pa_atomic_add(&b
->pool
->stat
.imported_size
, b
->length
);
171 pa_atomic_inc(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
172 pa_atomic_inc(&b
->pool
->stat
.n_accumulated_by_type
[b
->type
]);
175 /* No lock necessary */
176 static void stat_remove(pa_memblock
*b
) {
180 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_allocated
) > 0);
181 pa_assert(pa_atomic_load(&b
->pool
->stat
.allocated_size
) >= (int) b
->length
);
183 pa_atomic_dec(&b
->pool
->stat
.n_allocated
);
184 pa_atomic_sub(&b
->pool
->stat
.allocated_size
, b
->length
);
186 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
187 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_imported
) > 0);
188 pa_assert(pa_atomic_load(&b
->pool
->stat
.imported_size
) >= (int) b
->length
);
190 pa_atomic_dec(&b
->pool
->stat
.n_imported
);
191 pa_atomic_sub(&b
->pool
->stat
.imported_size
, b
->length
);
194 pa_atomic_dec(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
197 static pa_memblock
*memblock_new_appended(pa_mempool
*p
, size_t length
);
199 /* No lock necessary */
200 pa_memblock
*pa_memblock_new(pa_mempool
*p
, size_t length
) {
204 pa_assert(length
> 0);
206 if (!(b
= pa_memblock_new_pool(p
, length
)))
207 b
= memblock_new_appended(p
, length
);
212 /* No lock necessary */
213 static pa_memblock
*memblock_new_appended(pa_mempool
*p
, size_t length
) {
217 pa_assert(length
> 0);
219 b
= pa_xmalloc(PA_ALIGN(sizeof(pa_memblock
)) + length
);
222 b
->type
= PA_MEMBLOCK_APPENDED
;
224 pa_atomic_ptr_store(&b
->data
, (uint8_t*) b
+ PA_ALIGN(sizeof(pa_memblock
)));
226 pa_atomic_store(&b
->n_acquired
, 0);
227 pa_atomic_store(&b
->please_signal
, 0);
233 /* No lock necessary */
234 static struct mempool_slot
* mempool_allocate_slot(pa_mempool
*p
) {
235 struct mempool_slot
*slot
;
238 if (!(slot
= pa_flist_pop(p
->free_slots
))) {
241 /* The free list was empty, we have to allocate a new entry */
243 if ((unsigned) (idx
= pa_atomic_inc(&p
->n_init
)) >= p
->n_blocks
)
244 pa_atomic_dec(&p
->n_init
);
246 slot
= (struct mempool_slot
*) ((uint8_t*) p
->memory
.ptr
+ (p
->block_size
* idx
));
249 pa_log_debug("Pool full");
250 pa_atomic_inc(&p
->stat
.n_pool_full
);
258 /* No lock necessary */
259 static void* mempool_slot_data(struct mempool_slot
*slot
) {
262 return (uint8_t*) slot
+ sizeof(struct mempool_slot
);
265 /* No lock necessary */
266 static unsigned mempool_slot_idx(pa_mempool
*p
, void *ptr
) {
269 pa_assert((uint8_t*) ptr
>= (uint8_t*) p
->memory
.ptr
);
270 pa_assert((uint8_t*) ptr
< (uint8_t*) p
->memory
.ptr
+ p
->memory
.size
);
272 return ((uint8_t*) ptr
- (uint8_t*) p
->memory
.ptr
) / p
->block_size
;
275 /* No lock necessary */
276 static struct mempool_slot
* mempool_slot_by_ptr(pa_mempool
*p
, void *ptr
) {
279 if ((idx
= mempool_slot_idx(p
, ptr
)) == (unsigned) -1)
282 return (struct mempool_slot
*) ((uint8_t*) p
->memory
.ptr
+ (idx
* p
->block_size
));
285 /* No lock necessary */
286 pa_memblock
*pa_memblock_new_pool(pa_mempool
*p
, size_t length
) {
287 pa_memblock
*b
= NULL
;
288 struct mempool_slot
*slot
;
291 pa_assert(length
> 0);
293 if (p
->block_size
- sizeof(struct mempool_slot
) >= sizeof(pa_memblock
) + length
) {
295 if (!(slot
= mempool_allocate_slot(p
)))
298 b
= mempool_slot_data(slot
);
299 b
->type
= PA_MEMBLOCK_POOL
;
300 pa_atomic_ptr_store(&b
->data
, (uint8_t*) b
+ sizeof(pa_memblock
));
302 } else if (p
->block_size
- sizeof(struct mempool_slot
) >= length
) {
304 if (!(slot
= mempool_allocate_slot(p
)))
307 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
308 b
= pa_xnew(pa_memblock
, 1);
310 b
->type
= PA_MEMBLOCK_POOL_EXTERNAL
;
311 pa_atomic_ptr_store(&b
->data
, mempool_slot_data(slot
));
314 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length
, (unsigned long) (p
->block_size
- sizeof(struct mempool_slot
)));
315 pa_atomic_inc(&p
->stat
.n_too_large_for_pool
);
323 pa_atomic_store(&b
->n_acquired
, 0);
324 pa_atomic_store(&b
->please_signal
, 0);
330 /* No lock necessary */
331 pa_memblock
*pa_memblock_new_fixed(pa_mempool
*p
, void *d
, size_t length
, int read_only
) {
336 pa_assert(length
> 0);
338 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
339 b
= pa_xnew(pa_memblock
, 1);
342 b
->type
= PA_MEMBLOCK_FIXED
;
343 b
->read_only
= read_only
;
344 pa_atomic_ptr_store(&b
->data
, d
);
346 pa_atomic_store(&b
->n_acquired
, 0);
347 pa_atomic_store(&b
->please_signal
, 0);
353 /* No lock necessary */
354 pa_memblock
*pa_memblock_new_user(pa_mempool
*p
, void *d
, size_t length
, void (*free_cb
)(void *p
), int read_only
) {
359 pa_assert(length
> 0);
362 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
363 b
= pa_xnew(pa_memblock
, 1);
366 b
->type
= PA_MEMBLOCK_USER
;
367 b
->read_only
= read_only
;
368 pa_atomic_ptr_store(&b
->data
, d
);
370 pa_atomic_store(&b
->n_acquired
, 0);
371 pa_atomic_store(&b
->please_signal
, 0);
373 b
->per_type
.user
.free_cb
= free_cb
;
379 /* No lock necessary */
380 int pa_memblock_is_read_only(pa_memblock
*b
) {
382 pa_assert(PA_REFCNT_VALUE(b
) > 0);
384 return b
->read_only
&& PA_REFCNT_VALUE(b
) == 1;
387 /* No lock necessary */
388 void* pa_memblock_acquire(pa_memblock
*b
) {
390 pa_assert(PA_REFCNT_VALUE(b
) > 0);
392 pa_atomic_inc(&b
->n_acquired
);
394 return pa_atomic_ptr_load(&b
->data
);
397 /* No lock necessary, in corner cases locks by its own */
398 void pa_memblock_release(pa_memblock
*b
) {
401 pa_assert(PA_REFCNT_VALUE(b
) > 0);
403 r
= pa_atomic_dec(&b
->n_acquired
);
406 /* Signal a waiting thread that this memblock is no longer used */
407 if (r
== 1 && pa_atomic_load(&b
->please_signal
))
408 pa_semaphore_post(b
->pool
->semaphore
);
411 size_t pa_memblock_get_length(pa_memblock
*b
) {
413 pa_assert(PA_REFCNT_VALUE(b
) > 0);
418 pa_mempool
* pa_memblock_get_pool(pa_memblock
*b
) {
420 pa_assert(PA_REFCNT_VALUE(b
) > 0);
425 /* No lock necessary */
426 pa_memblock
* pa_memblock_ref(pa_memblock
*b
) {
428 pa_assert(PA_REFCNT_VALUE(b
) > 0);
434 static void memblock_free(pa_memblock
*b
) {
437 pa_assert(pa_atomic_load(&b
->n_acquired
) == 0);
442 case PA_MEMBLOCK_USER
:
443 pa_assert(b
->per_type
.user
.free_cb
);
444 b
->per_type
.user
.free_cb(pa_atomic_ptr_load(&b
->data
));
448 case PA_MEMBLOCK_FIXED
:
449 case PA_MEMBLOCK_APPENDED
:
450 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
455 case PA_MEMBLOCK_IMPORTED
: {
456 pa_memimport_segment
*segment
;
457 pa_memimport
*import
;
459 /* FIXME! This should be implemented lock-free */
461 segment
= b
->per_type
.imported
.segment
;
463 import
= segment
->import
;
466 pa_mutex_lock(import
->mutex
);
467 pa_hashmap_remove(import
->blocks
, PA_UINT32_TO_PTR(b
->per_type
.imported
.id
));
468 if (-- segment
->n_blocks
<= 0)
469 segment_detach(segment
);
471 pa_mutex_unlock(import
->mutex
);
473 import
->release_cb(import
, b
->per_type
.imported
.id
, import
->userdata
);
475 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
480 case PA_MEMBLOCK_POOL_EXTERNAL
:
481 case PA_MEMBLOCK_POOL
: {
482 struct mempool_slot
*slot
;
485 slot
= mempool_slot_by_ptr(b
->pool
, pa_atomic_ptr_load(&b
->data
));
488 call_free
= b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
;
490 /* The free list dimensions should easily allow all slots
491 * to fit in, hence try harder if pushing this slot into
492 * the free list fails */
493 while (pa_flist_push(b
->pool
->free_slots
, slot
) < 0)
497 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks
), b
) < 0)
503 case PA_MEMBLOCK_TYPE_MAX
:
505 pa_assert_not_reached();
509 /* No lock necessary */
510 void pa_memblock_unref(pa_memblock
*b
) {
512 pa_assert(PA_REFCNT_VALUE(b
) > 0);
514 if (PA_REFCNT_DEC(b
) > 0)
521 static void memblock_wait(pa_memblock
*b
) {
524 if (pa_atomic_load(&b
->n_acquired
) > 0) {
525 /* We need to wait until all threads gave up access to the
526 * memory block before we can go on. Unfortunately this means
527 * that we have to lock and wait here. Sniff! */
529 pa_atomic_inc(&b
->please_signal
);
531 while (pa_atomic_load(&b
->n_acquired
) > 0)
532 pa_semaphore_wait(b
->pool
->semaphore
);
534 pa_atomic_dec(&b
->please_signal
);
538 /* No lock necessary. This function is not multiple caller safe! */
539 static void memblock_make_local(pa_memblock
*b
) {
542 pa_atomic_dec(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
544 if (b
->length
<= b
->pool
->block_size
- sizeof(struct mempool_slot
)) {
545 struct mempool_slot
*slot
;
547 if ((slot
= mempool_allocate_slot(b
->pool
))) {
549 /* We can move it into a local pool, perfect! */
551 new_data
= mempool_slot_data(slot
);
552 memcpy(new_data
, pa_atomic_ptr_load(&b
->data
), b
->length
);
553 pa_atomic_ptr_store(&b
->data
, new_data
);
555 b
->type
= PA_MEMBLOCK_POOL_EXTERNAL
;
562 /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
563 b
->per_type
.user
.free_cb
= pa_xfree
;
564 pa_atomic_ptr_store(&b
->data
, pa_xmemdup(pa_atomic_ptr_load(&b
->data
), b
->length
));
566 b
->type
= PA_MEMBLOCK_USER
;
570 pa_atomic_inc(&b
->pool
->stat
.n_allocated_by_type
[b
->type
]);
571 pa_atomic_inc(&b
->pool
->stat
.n_accumulated_by_type
[b
->type
]);
575 /* No lock necessary. This function is not multiple caller safe*/
576 void pa_memblock_unref_fixed(pa_memblock
*b
) {
578 pa_assert(PA_REFCNT_VALUE(b
) > 0);
579 pa_assert(b
->type
== PA_MEMBLOCK_FIXED
);
581 if (PA_REFCNT_VALUE(b
) > 1)
582 memblock_make_local(b
);
584 pa_memblock_unref(b
);
587 /* Self-locked. This function is not multiple-caller safe */
588 static void memblock_replace_import(pa_memblock
*b
) {
589 pa_memimport_segment
*seg
;
592 pa_assert(b
->type
== PA_MEMBLOCK_IMPORTED
);
594 pa_assert(pa_atomic_load(&b
->pool
->stat
.n_imported
) > 0);
595 pa_assert(pa_atomic_load(&b
->pool
->stat
.imported_size
) >= (int) b
->length
);
596 pa_atomic_dec(&b
->pool
->stat
.n_imported
);
597 pa_atomic_sub(&b
->pool
->stat
.imported_size
, b
->length
);
599 seg
= b
->per_type
.imported
.segment
;
601 pa_assert(seg
->import
);
603 pa_mutex_lock(seg
->import
->mutex
);
607 PA_UINT32_TO_PTR(b
->per_type
.imported
.id
));
609 memblock_make_local(b
);
611 if (-- seg
->n_blocks
<= 0) {
612 pa_mutex_unlock(seg
->import
->mutex
);
615 pa_mutex_unlock(seg
->import
->mutex
);
618 pa_mempool
* pa_mempool_new(int shared
) {
622 p
= pa_xnew(pa_mempool
, 1);
624 p
->mutex
= pa_mutex_new(1);
625 p
->semaphore
= pa_semaphore_new(0);
628 ps
= (size_t) sysconf(_SC_PAGESIZE
);
629 #elif defined(PAGE_SIZE)
630 ps
= (size_t) PAGE_SIZE
;
632 ps
= 4096; /* Let's hope it's like x86. */
635 p
->block_size
= (PA_MEMPOOL_SLOT_SIZE
/ps
)*ps
;
637 if (p
->block_size
< ps
)
640 p
->n_blocks
= PA_MEMPOOL_SLOTS_MAX
;
642 pa_assert(p
->block_size
> sizeof(struct mempool_slot
));
644 if (pa_shm_create_rw(&p
->memory
, p
->n_blocks
* p
->block_size
, shared
, 0700) < 0) {
649 memset(&p
->stat
, 0, sizeof(p
->stat
));
650 pa_atomic_store(&p
->n_init
, 0);
652 PA_LLIST_HEAD_INIT(pa_memimport
, p
->imports
);
653 PA_LLIST_HEAD_INIT(pa_memexport
, p
->exports
);
655 p
->free_slots
= pa_flist_new(p
->n_blocks
*2);
660 void pa_mempool_free(pa_mempool
*p
) {
663 pa_mutex_lock(p
->mutex
);
666 pa_memimport_free(p
->imports
);
669 pa_memexport_free(p
->exports
);
671 pa_mutex_unlock(p
->mutex
);
673 pa_flist_free(p
->free_slots
, NULL
);
675 if (pa_atomic_load(&p
->stat
.n_allocated
) > 0) {
676 /* raise(SIGTRAP); */
677 pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p
->stat
.n_allocated
));
680 pa_shm_free(&p
->memory
);
682 pa_mutex_free(p
->mutex
);
683 pa_semaphore_free(p
->semaphore
);
688 /* No lock necessary */
689 const pa_mempool_stat
* pa_mempool_get_stat(pa_mempool
*p
) {
695 /* No lock necessary */
696 void pa_mempool_vacuum(pa_mempool
*p
) {
697 struct mempool_slot
*slot
;
702 list
= pa_flist_new(p
->n_blocks
*2);
704 while ((slot
= pa_flist_pop(p
->free_slots
)))
705 while (pa_flist_push(list
, slot
) < 0)
708 while ((slot
= pa_flist_pop(list
))) {
709 pa_shm_punch(&p
->memory
,
710 (uint8_t*) slot
- (uint8_t*) p
->memory
.ptr
+ sizeof(struct mempool_slot
),
711 p
->block_size
- sizeof(struct mempool_slot
));
713 while (pa_flist_push(p
->free_slots
, slot
))
717 pa_flist_free(list
, NULL
);
720 /* No lock necessary */
721 int pa_mempool_get_shm_id(pa_mempool
*p
, uint32_t *id
) {
724 if (!p
->memory
.shared
)
732 /* No lock necessary */
733 int pa_mempool_is_shared(pa_mempool
*p
) {
736 return !!p
->memory
.shared
;
739 /* For recieving blocks from other nodes */
740 pa_memimport
* pa_memimport_new(pa_mempool
*p
, pa_memimport_release_cb_t cb
, void *userdata
) {
746 i
= pa_xnew(pa_memimport
, 1);
747 i
->mutex
= pa_mutex_new(1);
749 i
->segments
= pa_hashmap_new(NULL
, NULL
);
750 i
->blocks
= pa_hashmap_new(NULL
, NULL
);
752 i
->userdata
= userdata
;
754 pa_mutex_lock(p
->mutex
);
755 PA_LLIST_PREPEND(pa_memimport
, p
->imports
, i
);
756 pa_mutex_unlock(p
->mutex
);
761 static void memexport_revoke_blocks(pa_memexport
*e
, pa_memimport
*i
);
763 /* Should be called locked */
764 static pa_memimport_segment
* segment_attach(pa_memimport
*i
, uint32_t shm_id
) {
765 pa_memimport_segment
* seg
;
767 if (pa_hashmap_size(i
->segments
) >= PA_MEMIMPORT_SEGMENTS_MAX
)
770 seg
= pa_xnew(pa_memimport_segment
, 1);
772 if (pa_shm_attach_ro(&seg
->memory
, shm_id
) < 0) {
780 pa_hashmap_put(i
->segments
, PA_UINT32_TO_PTR(shm_id
), seg
);
784 /* Should be called locked */
785 static void segment_detach(pa_memimport_segment
*seg
) {
788 pa_hashmap_remove(seg
->import
->segments
, PA_UINT32_TO_PTR(seg
->memory
.id
));
789 pa_shm_free(&seg
->memory
);
793 /* Self-locked. Not multiple-caller safe */
794 void pa_memimport_free(pa_memimport
*i
) {
800 pa_mutex_lock(i
->mutex
);
802 while ((b
= pa_hashmap_get_first(i
->blocks
)))
803 memblock_replace_import(b
);
805 pa_assert(pa_hashmap_size(i
->segments
) == 0);
807 pa_mutex_unlock(i
->mutex
);
809 pa_mutex_lock(i
->pool
->mutex
);
811 /* If we've exported this block further we need to revoke that export */
812 for (e
= i
->pool
->exports
; e
; e
= e
->next
)
813 memexport_revoke_blocks(e
, i
);
815 PA_LLIST_REMOVE(pa_memimport
, i
->pool
->imports
, i
);
817 pa_mutex_unlock(i
->pool
->mutex
);
819 pa_hashmap_free(i
->blocks
, NULL
, NULL
);
820 pa_hashmap_free(i
->segments
, NULL
, NULL
);
822 pa_mutex_free(i
->mutex
);
828 pa_memblock
* pa_memimport_get(pa_memimport
*i
, uint32_t block_id
, uint32_t shm_id
, size_t offset
, size_t size
) {
829 pa_memblock
*b
= NULL
;
830 pa_memimport_segment
*seg
;
834 pa_mutex_lock(i
->mutex
);
836 if (pa_hashmap_size(i
->blocks
) >= PA_MEMIMPORT_SLOTS_MAX
)
839 if (!(seg
= pa_hashmap_get(i
->segments
, PA_UINT32_TO_PTR(shm_id
))))
840 if (!(seg
= segment_attach(i
, shm_id
)))
843 if (offset
+size
> seg
->memory
.size
)
846 if (!(b
= pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks
))))
847 b
= pa_xnew(pa_memblock
, 1);
851 b
->type
= PA_MEMBLOCK_IMPORTED
;
853 pa_atomic_ptr_store(&b
->data
, (uint8_t*) seg
->memory
.ptr
+ offset
);
855 pa_atomic_store(&b
->n_acquired
, 0);
856 pa_atomic_store(&b
->please_signal
, 0);
857 b
->per_type
.imported
.id
= block_id
;
858 b
->per_type
.imported
.segment
= seg
;
860 pa_hashmap_put(i
->blocks
, PA_UINT32_TO_PTR(block_id
), b
);
865 pa_mutex_unlock(i
->mutex
);
873 int pa_memimport_process_revoke(pa_memimport
*i
, uint32_t id
) {
877 pa_mutex_lock(i
->mutex
);
879 if (!(b
= pa_hashmap_get(i
->blocks
, PA_UINT32_TO_PTR(id
))))
882 memblock_replace_import(b
);
884 pa_mutex_unlock(i
->mutex
);
889 /* For sending blocks to other nodes */
890 pa_memexport
* pa_memexport_new(pa_mempool
*p
, pa_memexport_revoke_cb_t cb
, void *userdata
) {
896 if (!p
->memory
.shared
)
899 e
= pa_xnew(pa_memexport
, 1);
900 e
->mutex
= pa_mutex_new(1);
902 PA_LLIST_HEAD_INIT(struct memexport_slot
, e
->free_slots
);
903 PA_LLIST_HEAD_INIT(struct memexport_slot
, e
->used_slots
);
906 e
->userdata
= userdata
;
908 pa_mutex_lock(p
->mutex
);
909 PA_LLIST_PREPEND(pa_memexport
, p
->exports
, e
);
910 pa_mutex_unlock(p
->mutex
);
914 void pa_memexport_free(pa_memexport
*e
) {
917 pa_mutex_lock(e
->mutex
);
918 while (e
->used_slots
)
919 pa_memexport_process_release(e
, e
->used_slots
- e
->slots
);
920 pa_mutex_unlock(e
->mutex
);
922 pa_mutex_lock(e
->pool
->mutex
);
923 PA_LLIST_REMOVE(pa_memexport
, e
->pool
->exports
, e
);
924 pa_mutex_unlock(e
->pool
->mutex
);
926 pa_mutex_free(e
->mutex
);
931 int pa_memexport_process_release(pa_memexport
*e
, uint32_t id
) {
936 pa_mutex_lock(e
->mutex
);
941 if (!e
->slots
[id
].block
)
944 b
= e
->slots
[id
].block
;
945 e
->slots
[id
].block
= NULL
;
947 PA_LLIST_REMOVE(struct memexport_slot
, e
->used_slots
, &e
->slots
[id
]);
948 PA_LLIST_PREPEND(struct memexport_slot
, e
->free_slots
, &e
->slots
[id
]);
950 pa_mutex_unlock(e
->mutex
);
952 /* pa_log("Processing release for %u", id); */
954 pa_assert(pa_atomic_load(&e
->pool
->stat
.n_exported
) > 0);
955 pa_assert(pa_atomic_load(&e
->pool
->stat
.exported_size
) >= (int) b
->length
);
957 pa_atomic_dec(&e
->pool
->stat
.n_exported
);
958 pa_atomic_sub(&e
->pool
->stat
.exported_size
, b
->length
);
960 pa_memblock_unref(b
);
965 pa_mutex_unlock(e
->mutex
);
971 static void memexport_revoke_blocks(pa_memexport
*e
, pa_memimport
*i
) {
972 struct memexport_slot
*slot
, *next
;
976 pa_mutex_lock(e
->mutex
);
978 for (slot
= e
->used_slots
; slot
; slot
= next
) {
982 if (slot
->block
->type
!= PA_MEMBLOCK_IMPORTED
||
983 slot
->block
->per_type
.imported
.segment
->import
!= i
)
986 idx
= slot
- e
->slots
;
987 e
->revoke_cb(e
, idx
, e
->userdata
);
988 pa_memexport_process_release(e
, idx
);
991 pa_mutex_unlock(e
->mutex
);
994 /* No lock necessary */
995 static pa_memblock
*memblock_shared_copy(pa_mempool
*p
, pa_memblock
*b
) {
1001 if (b
->type
== PA_MEMBLOCK_IMPORTED
||
1002 b
->type
== PA_MEMBLOCK_POOL
||
1003 b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
) {
1004 pa_assert(b
->pool
== p
);
1005 return pa_memblock_ref(b
);
1008 if (!(n
= pa_memblock_new_pool(p
, b
->length
)))
1011 memcpy(pa_atomic_ptr_load(&n
->data
), pa_atomic_ptr_load(&b
->data
), b
->length
);
1016 int pa_memexport_put(pa_memexport
*e
, pa_memblock
*b
, uint32_t *block_id
, uint32_t *shm_id
, size_t *offset
, size_t * size
) {
1018 struct memexport_slot
*slot
;
1023 pa_assert(block_id
);
1027 pa_assert(b
->pool
== e
->pool
);
1029 if (!(b
= memblock_shared_copy(e
->pool
, b
)))
1032 pa_mutex_lock(e
->mutex
);
1034 if (e
->free_slots
) {
1035 slot
= e
->free_slots
;
1036 PA_LLIST_REMOVE(struct memexport_slot
, e
->free_slots
, slot
);
1037 } else if (e
->n_init
< PA_MEMEXPORT_SLOTS_MAX
)
1038 slot
= &e
->slots
[e
->n_init
++];
1040 pa_mutex_unlock(e
->mutex
);
1041 pa_memblock_unref(b
);
1045 PA_LLIST_PREPEND(struct memexport_slot
, e
->used_slots
, slot
);
1047 *block_id
= slot
- e
->slots
;
1049 pa_mutex_unlock(e
->mutex
);
1050 /* pa_log("Got block id %u", *block_id); */
1052 data
= pa_memblock_acquire(b
);
1054 if (b
->type
== PA_MEMBLOCK_IMPORTED
) {
1055 pa_assert(b
->per_type
.imported
.segment
);
1056 memory
= &b
->per_type
.imported
.segment
->memory
;
1058 pa_assert(b
->type
== PA_MEMBLOCK_POOL
|| b
->type
== PA_MEMBLOCK_POOL_EXTERNAL
);
1060 memory
= &b
->pool
->memory
;
1063 pa_assert(data
>= memory
->ptr
);
1064 pa_assert((uint8_t*) data
+ b
->length
<= (uint8_t*) memory
->ptr
+ memory
->size
);
1066 *shm_id
= memory
->id
;
1067 *offset
= (uint8_t*) data
- (uint8_t*) memory
->ptr
;
1070 pa_memblock_release(b
);
1072 pa_atomic_inc(&e
->pool
->stat
.n_exported
);
1073 pa_atomic_add(&e
->pool
->stat
.exported_size
, b
->length
);