/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)
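/* 1024 slots of 64 KiB each add up to the 64 MiB ceiling mentioned above. */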

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
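
/* Layout summary: the pool owns a single SHM region carved into n_blocks
 * slots of block_size bytes. n_init counts how many slots have been handed
 * out so far; slots that are given back are parked on the lock-free
 * free_slots list for reuse (see mempool_allocate_slot() below). */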

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
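
/* Illustrative usage (a sketch, not part of the original file): the typical
 * life cycle of a block pairs every acquire with a release and ends with an
 * unref:
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */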

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
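
/* Note on the allocation strategy above: fresh slots are taken from the SHM
 * region with a simple atomic bump of n_init; if that overshoots n_blocks the
 * counter is decremented again and the pool reports "full". Freed slots never
 * go back through n_init, they are recycled via the free_slots flist instead,
 * which is why no lock is needed on this path. */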

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
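
/* The two pool-backed flavours above differ only in where the housekeeping
 * structure lives: for PA_MEMBLOCK_POOL the pa_memblock header and the data
 * share a single slot, while PA_MEMBLOCK_POOL_EXTERNAL keeps the header on
 * the heap (or on the static unused_memblocks flist) and uses the whole slot
 * for data. */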

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
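
/* Acquire/release protocol: pa_memblock_acquire() bumps n_acquired and hands
 * out the data pointer, pa_memblock_release() drops it again. A thread that
 * wants to free or relocate the block sets please_signal and sleeps on the
 * pool semaphore in memblock_wait() until the last release posts it. */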

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
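
/* Illustrative usage (a sketch, not part of the original file): a process
 * typically creates one pool, backed by shared memory if it intends to talk
 * to other processes, and carves all its blocks out of it:
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE);
 *     if (pool) {
 *         pa_memblock *b = pa_memblock_new(pool, (size_t) -1);
 *         ...
 *         pa_memblock_unref(b);
 *         pa_mempool_free(pool);
 *     }
 *
 * Passing (size_t) -1 lets pa_memblock_new() pick the largest size that still
 * fits into a single pool slot. */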

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
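
/* pa_mempool_vacuum() drains the free list, punches a hole into the SHM
 * segment for every currently unused slot (pa_shm_punch()) so the kernel can
 * reclaim the backing pages, and then puts the slots back on the free list
 * for later reuse. */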

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
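
/* Segments are cached in i->segments keyed by SHM id: the first block that
 * refers to an unknown id attaches the foreign segment read-only, later
 * blocks reuse that mapping, and segment_detach() below drops it again once
 * its last imported block is gone. */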

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
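
/* pa_memimport_get() is the entry point for blocks announced by a peer: the
 * (shm_id, offset, size) triple is resolved against a cached (or freshly
 * attached) read-only segment and wrapped as a PA_MEMBLOCK_IMPORTED block
 * filed under block_id, so that a later release or revoke can find it again.
 * It returns NULL if the slot or segment limits are exceeded or the range
 * does not fit inside the segment. */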

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
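
/* memblock_shared_copy() is what makes exporting cheap: blocks that already
 * live in shared memory (imported or pool-backed) are simply referenced,
 * while appended, fixed and user blocks have to be copied into the pool
 * first so the receiving side can actually map them. */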

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
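
/* End-to-end flow (informal sketch; the actual wire protocol lives outside
 * this file): the exporting side calls pa_memexport_put() and sends the
 * resulting (block_id, shm_id, offset, size) tuple to its peer, which passes
 * the same values to pa_memimport_get(). When the importer drops the block,
 * its release_cb is expected to notify the exporter, which then calls
 * pa_memexport_process_release(); conversely, when an export has to be
 * withdrawn, revoke_cb is expected to make the peer call
 * pa_memimport_process_revoke(). */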