[pulseaudio] src/pulsecore/memblock.c (commit: "Make the shared memory segment size configurable")
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
48
49 #include "memblock.h"
50
51 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
52 * note that the footprint is usually much smaller, since the data is
53 * stored in SHM and our OS does not commit the memory before we use
54 * it for the first time. */
55 #define PA_MEMPOOL_SLOTS_MAX 1024
56 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
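/* Sizing note: with the defaults above a pool can hold
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB of
 * slot data. pa_mempool_new() below rounds the per-slot size up to the
 * system page size, so on architectures with pages larger than 64 KiB the
 * effective block size, and hence the total, grows accordingly. */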
57
58 #define PA_MEMEXPORT_SLOTS_MAX 128
59
60 #define PA_MEMIMPORT_SLOTS_MAX 128
61 #define PA_MEMIMPORT_SEGMENTS_MAX 16
62
63 struct pa_memblock {
64 PA_REFCNT_DECLARE; /* the reference counter */
65 pa_mempool *pool;
66
67 pa_memblock_type_t type;
68
69 pa_bool_t read_only:1;
70 pa_bool_t is_silence:1;
71
72 pa_atomic_ptr_t data;
73 size_t length;
74
75 pa_atomic_t n_acquired;
76 pa_atomic_t please_signal;
77
78 union {
79 struct {
80 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
81 pa_free_cb_t free_cb;
82 } user;
83
84 struct {
85 uint32_t id;
86 pa_memimport_segment *segment;
87 } imported;
88 } per_type;
89 };
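/* Rough usage sketch of the memblock lifecycle, with hypothetical variable
 * names; the payload must only be touched between acquire() and release(),
 * and the block is finally dropped with unref():
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */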
90
91 struct pa_memimport_segment {
92 pa_memimport *import;
93 pa_shm memory;
94 unsigned n_blocks;
95 };
96
97 struct pa_memimport {
98 pa_mutex *mutex;
99
100 pa_mempool *pool;
101 pa_hashmap *segments;
102 pa_hashmap *blocks;
103
104 /* Called whenever an imported memory block is no longer
105 * needed. */
106 pa_memimport_release_cb_t release_cb;
107 void *userdata;
108
109 PA_LLIST_FIELDS(pa_memimport);
110 };
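/* Judging from the call in memblock_free() below, release_cb is invoked as
 * release_cb(import, block_id, userdata). A minimal handler, sketched here
 * with hypothetical names, would typically tell the sending client that it
 * may recycle the block id:
 *
 *     static void my_release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
 *         pa_log_debug("imported block %u released", block_id);
 *     }
 */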
111
112 struct memexport_slot {
113 PA_LLIST_FIELDS(struct memexport_slot);
114 pa_memblock *block;
115 };
116
117 struct pa_memexport {
118 pa_mutex *mutex;
119 pa_mempool *pool;
120
121 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
122
123 PA_LLIST_HEAD(struct memexport_slot, free_slots);
124 PA_LLIST_HEAD(struct memexport_slot, used_slots);
125 unsigned n_init;
126
127 /* Called whenever a client from which we imported a memory block
128 that we in turn exported to another client dies, so that we can
129 revoke the memory block accordingly */
130 pa_memexport_revoke_cb_t revoke_cb;
131 void *userdata;
132
133 PA_LLIST_FIELDS(pa_memexport);
134 };
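/* The revoke callback is called as revoke_cb(export, block_id, userdata),
 * see memexport_revoke_blocks() below; a typical handler would forward a
 * revoke message for that block id to whichever client the block was
 * exported to. This describes the contract only, not a prescribed
 * implementation. */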
135
136 struct pa_mempool {
137 pa_semaphore *semaphore;
138 pa_mutex *mutex;
139
140 pa_shm memory;
141 size_t block_size;
142 unsigned n_blocks;
143
144 pa_atomic_t n_init;
145
146 PA_LLIST_HEAD(pa_memimport, imports);
147 PA_LLIST_HEAD(pa_memexport, exports);
148
149 /* A list of free slots that may be reused */
150 pa_flist *free_slots;
151
152 pa_mempool_stat stat;
153 };
154
155 static void segment_detach(pa_memimport_segment *seg);
156
157 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
158
159 /* No lock necessary */
160 static void stat_add(pa_memblock*b) {
161 pa_assert(b);
162 pa_assert(b->pool);
163
164 pa_atomic_inc(&b->pool->stat.n_allocated);
165 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
166
167 pa_atomic_inc(&b->pool->stat.n_accumulated);
168 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
169
170 if (b->type == PA_MEMBLOCK_IMPORTED) {
171 pa_atomic_inc(&b->pool->stat.n_imported);
172 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
173 }
174
175 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
176 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
177 }
178
179 /* No lock necessary */
180 static void stat_remove(pa_memblock *b) {
181 pa_assert(b);
182 pa_assert(b->pool);
183
184 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
185 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
186
187 pa_atomic_dec(&b->pool->stat.n_allocated);
188 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
189
190 if (b->type == PA_MEMBLOCK_IMPORTED) {
191 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
192 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
193
194 pa_atomic_dec(&b->pool->stat.n_imported);
195 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
196 }
197
198 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
199 }
200
201 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
202
203 /* No lock necessary */
204 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
205 pa_memblock *b;
206
207 pa_assert(p);
208 pa_assert(length);
209
210 if (!(b = pa_memblock_new_pool(p, length)))
211 b = memblock_new_appended(p, length);
212
213 return b;
214 }
215
216 /* No lock necessary */
217 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
218 pa_memblock *b;
219
220 pa_assert(p);
221 pa_assert(length);
222
223 /* If -1 is passed as length we choose the size for the caller. */
224
225 if (length == (size_t) -1)
226 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
227
228 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
229 PA_REFCNT_INIT(b);
230 b->pool = p;
231 b->type = PA_MEMBLOCK_APPENDED;
232 b->read_only = b->is_silence = FALSE;
233 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
234 b->length = length;
235 pa_atomic_store(&b->n_acquired, 0);
236 pa_atomic_store(&b->please_signal, 0);
237
238 stat_add(b);
239 return b;
240 }
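/* Memory layout of an APPENDED block: a single xmalloc() allocation holding
 * an aligned pa_memblock header followed immediately by the payload, i.e.
 *
 *     [ PA_ALIGN(sizeof(pa_memblock)) header | length payload bytes ]
 *
 * which is why data is set to point just past the aligned header above. */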
241
242 /* No lock necessary */
243 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
244 struct mempool_slot *slot;
245 pa_assert(p);
246
247 if (!(slot = pa_flist_pop(p->free_slots))) {
248 int idx;
249
250 /* The free list was empty, we have to allocate a new entry */
251
252 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
253 pa_atomic_dec(&p->n_init);
254 else
255 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
256
257 if (!slot) {
258 pa_log_info("Pool full");
259 pa_atomic_inc(&p->stat.n_pool_full);
260 return NULL;
261 }
262 }
263
264 #ifdef HAVE_VALGRIND_MEMCHECK_H
265 VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0);
266 #endif
267
268 return slot;
269 }
270
271 /* No lock necessary, totally redundant anyway */
272 static inline void* mempool_slot_data(struct mempool_slot *slot) {
273 return slot;
274 }
275
276 /* No lock necessary */
277 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
278 pa_assert(p);
279
280 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
281 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
282
283 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
284 }
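/* Worked example with hypothetical numbers: for block_size = 64 KiB, a
 * pointer 196608 bytes past memory.ptr maps to slot 196608 / 65536 = 3; the
 * asserts above guarantee the pointer lies inside the pool arena. */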
285
286 /* No lock necessary */
287 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
288 unsigned idx;
289
290 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
291 return NULL;
292
293 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
294 }
295
296 /* No lock necessary */
297 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
298 pa_memblock *b = NULL;
299 struct mempool_slot *slot;
300
301 pa_assert(p);
302 pa_assert(length);
303
304 /* If -1 is passed as length we choose the size for the caller: we
305 * take the largest size that fits in one of our slots. */
306
307 if (length == (size_t) -1)
308 length = pa_mempool_block_size_max(p);
309
310 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
311
312 if (!(slot = mempool_allocate_slot(p)))
313 return NULL;
314
315 b = mempool_slot_data(slot);
316 b->type = PA_MEMBLOCK_POOL;
317 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
318
319 } else if (p->block_size >= length) {
320
321 if (!(slot = mempool_allocate_slot(p)))
322 return NULL;
323
324 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
325 b = pa_xnew(pa_memblock, 1);
326
327 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
328 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
329
330 } else {
331 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
332 pa_atomic_inc(&p->stat.n_too_large_for_pool);
333 return NULL;
334 }
335
336 PA_REFCNT_INIT(b);
337 b->pool = p;
338 b->read_only = b->is_silence = FALSE;
339 b->length = length;
340 pa_atomic_store(&b->n_acquired, 0);
341 pa_atomic_store(&b->please_signal, 0);
342
343 stat_add(b);
344 return b;
345 }
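/* Usage sketch (illustrative): passing (size_t) -1 requests the largest
 * length that still fits into a single slot together with the header:
 *
 *     pa_memblock *b = pa_memblock_new_pool(pool, (size_t) -1);
 *     // on success, b->length == pa_mempool_block_size_max(pool)
 */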
346
347 /* No lock necessary */
348 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
349 pa_memblock *b;
350
351 pa_assert(p);
352 pa_assert(d);
353 pa_assert(length != (size_t) -1);
354 pa_assert(length);
355
356 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
357 b = pa_xnew(pa_memblock, 1);
358 PA_REFCNT_INIT(b);
359 b->pool = p;
360 b->type = PA_MEMBLOCK_FIXED;
361 b->read_only = read_only;
362 b->is_silence = FALSE;
363 pa_atomic_ptr_store(&b->data, d);
364 b->length = length;
365 pa_atomic_store(&b->n_acquired, 0);
366 pa_atomic_store(&b->please_signal, 0);
367
368 stat_add(b);
369 return b;
370 }
371
372 /* No lock necessary */
373 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
374 pa_memblock *b;
375
376 pa_assert(p);
377 pa_assert(d);
378 pa_assert(length);
379 pa_assert(length != (size_t) -1);
380 pa_assert(free_cb);
381
382 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
383 b = pa_xnew(pa_memblock, 1);
384 PA_REFCNT_INIT(b);
385 b->pool = p;
386 b->type = PA_MEMBLOCK_USER;
387 b->read_only = read_only;
388 b->is_silence = FALSE;
389 pa_atomic_ptr_store(&b->data, d);
390 b->length = length;
391 pa_atomic_store(&b->n_acquired, 0);
392 pa_atomic_store(&b->please_signal, 0);
393
394 b->per_type.user.free_cb = free_cb;
395
396 stat_add(b);
397 return b;
398 }
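/* Illustrative USER block, with hypothetical buffer and size: the caller
 * supplies the matching free function, which memblock_free() invokes once
 * the last reference is gone:
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);   // eventually ends up calling pa_xfree(buf)
 */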
399
400 /* No lock necessary */
401 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
402 pa_assert(b);
403 pa_assert(PA_REFCNT_VALUE(b) > 0);
404
405 return b->read_only && PA_REFCNT_VALUE(b) == 1;
406 }
407
408 /* No lock necessary */
409 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
410 pa_assert(b);
411 pa_assert(PA_REFCNT_VALUE(b) > 0);
412
413 return b->is_silence;
414 }
415
416 /* No lock necessary */
417 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
418 pa_assert(b);
419 pa_assert(PA_REFCNT_VALUE(b) > 0);
420
421 b->is_silence = v;
422 }
423
424 /* No lock necessary */
425 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
426 int r;
427 pa_assert(b);
428
429 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
430
431 return r == 1;
432 }
433
434 /* No lock necessary */
435 void* pa_memblock_acquire(pa_memblock *b) {
436 pa_assert(b);
437 pa_assert(PA_REFCNT_VALUE(b) > 0);
438
439 pa_atomic_inc(&b->n_acquired);
440
441 return pa_atomic_ptr_load(&b->data);
442 }
443
444 /* No lock necessary, in corner cases locks by its own */
445 void pa_memblock_release(pa_memblock *b) {
446 int r;
447 pa_assert(b);
448 pa_assert(PA_REFCNT_VALUE(b) > 0);
449
450 r = pa_atomic_dec(&b->n_acquired);
451 pa_assert(r >= 1);
452
453 /* Signal a waiting thread that this memblock is no longer used */
454 if (r == 1 && pa_atomic_load(&b->please_signal))
455 pa_semaphore_post(b->pool->semaphore);
456 }
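/* Every pa_memblock_acquire() must be paired with exactly one
 * pa_memblock_release(); memblock_wait() below relies on n_acquired
 * dropping back to zero and on the please_signal flag to be woken up
 * through the pool semaphore. */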
457
458 size_t pa_memblock_get_length(pa_memblock *b) {
459 pa_assert(b);
460 pa_assert(PA_REFCNT_VALUE(b) > 0);
461
462 return b->length;
463 }
464
465 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
466 pa_assert(b);
467 pa_assert(PA_REFCNT_VALUE(b) > 0);
468
469 return b->pool;
470 }
471
472 /* No lock necessary */
473 pa_memblock* pa_memblock_ref(pa_memblock*b) {
474 pa_assert(b);
475 pa_assert(PA_REFCNT_VALUE(b) > 0);
476
477 PA_REFCNT_INC(b);
478 return b;
479 }
480
481 static void memblock_free(pa_memblock *b) {
482 pa_assert(b);
483
484 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
485
486 stat_remove(b);
487
488 switch (b->type) {
489 case PA_MEMBLOCK_USER :
490 pa_assert(b->per_type.user.free_cb);
491 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
492
493 /* Fall through */
494
495 case PA_MEMBLOCK_FIXED:
496 case PA_MEMBLOCK_APPENDED :
497 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
498 pa_xfree(b);
499
500 break;
501
502 case PA_MEMBLOCK_IMPORTED : {
503 pa_memimport_segment *segment;
504 pa_memimport *import;
505
506 /* FIXME! This should be implemented lock-free */
507
508 segment = b->per_type.imported.segment;
509 pa_assert(segment);
510 import = segment->import;
511 pa_assert(import);
512
513 pa_mutex_lock(import->mutex);
514 pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
515 if (-- segment->n_blocks <= 0)
516 segment_detach(segment);
517
518 pa_mutex_unlock(import->mutex);
519
520 import->release_cb(import, b->per_type.imported.id, import->userdata);
521
522 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
523 pa_xfree(b);
524 break;
525 }
526
527 case PA_MEMBLOCK_POOL_EXTERNAL:
528 case PA_MEMBLOCK_POOL: {
529 struct mempool_slot *slot;
530 pa_bool_t call_free;
531
532 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
533 pa_assert(slot);
534
535 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
536
537 /* The free list dimensions should easily allow all slots
538 * to fit in, hence try harder if pushing this slot into
539 * the free list fails */
540 while (pa_flist_push(b->pool->free_slots, slot) < 0)
541 ;
542
543 #ifdef HAVE_VALGRIND_MEMCHECK_H
544 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size);
545 #endif
546
547 if (call_free)
548 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
549 pa_xfree(b);
550
551 break;
552 }
553
554 case PA_MEMBLOCK_TYPE_MAX:
555 default:
556 pa_assert_not_reached();
557 }
558 }
559
560 /* No lock necessary */
561 void pa_memblock_unref(pa_memblock*b) {
562 pa_assert(b);
563 pa_assert(PA_REFCNT_VALUE(b) > 0);
564
565 if (PA_REFCNT_DEC(b) > 0)
566 return;
567
568 memblock_free(b);
569 }
570
571 /* Self locked */
572 static void memblock_wait(pa_memblock *b) {
573 pa_assert(b);
574
575 if (pa_atomic_load(&b->n_acquired) > 0) {
576 /* We need to wait until all threads gave up access to the
577 * memory block before we can go on. Unfortunately this means
578 * that we have to lock and wait here. Sniff! */
579
580 pa_atomic_inc(&b->please_signal);
581
582 while (pa_atomic_load(&b->n_acquired) > 0)
583 pa_semaphore_wait(b->pool->semaphore);
584
585 pa_atomic_dec(&b->please_signal);
586 }
587 }
588
589 /* No lock necessary. This function is not multiple-caller safe! */
590 static void memblock_make_local(pa_memblock *b) {
591 pa_assert(b);
592
593 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
594
595 if (b->length <= b->pool->block_size) {
596 struct mempool_slot *slot;
597
598 if ((slot = mempool_allocate_slot(b->pool))) {
599 void *new_data;
600 /* We can move it into a local pool, perfect! */
601
602 new_data = mempool_slot_data(slot);
603 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
604 pa_atomic_ptr_store(&b->data, new_data);
605
606 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
607 b->read_only = FALSE;
608
609 goto finish;
610 }
611 }
612
613 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
614 b->per_type.user.free_cb = pa_xfree;
615 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
616
617 b->type = PA_MEMBLOCK_USER;
618 b->read_only = FALSE;
619
620 finish:
621 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
622 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
623 memblock_wait(b);
624 }
625
626 /* No lock necessary. This function is not multiple-caller safe */
627 void pa_memblock_unref_fixed(pa_memblock *b) {
628 pa_assert(b);
629 pa_assert(PA_REFCNT_VALUE(b) > 0);
630 pa_assert(b->type == PA_MEMBLOCK_FIXED);
631
632 if (PA_REFCNT_VALUE(b) > 1)
633 memblock_make_local(b);
634
635 pa_memblock_unref(b);
636 }
637
638 /* No lock necessary. */
639 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
640 void *p;
641
642 pa_assert(b);
643 pa_assert(PA_REFCNT_VALUE(b) > 0);
644
645 p = pa_memblock_acquire(b);
646 pa_will_need(p, b->length);
647 pa_memblock_release(b);
648
649 return b;
650 }
651
652 /* Self-locked. This function is not multiple-caller safe */
653 static void memblock_replace_import(pa_memblock *b) {
654 pa_memimport_segment *seg;
655
656 pa_assert(b);
657 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
658
659 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
660 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
661 pa_atomic_dec(&b->pool->stat.n_imported);
662 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
663
664 seg = b->per_type.imported.segment;
665 pa_assert(seg);
666 pa_assert(seg->import);
667
668 pa_mutex_lock(seg->import->mutex);
669
670 pa_hashmap_remove(
671 seg->import->blocks,
672 PA_UINT32_TO_PTR(b->per_type.imported.id));
673
674 memblock_make_local(b);
675
676 if (-- seg->n_blocks <= 0) {
677 pa_mutex_unlock(seg->import->mutex);
678 segment_detach(seg);
679 } else
680 pa_mutex_unlock(seg->import->mutex);
681 }
682
683 pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
684 pa_mempool *p;
685 char t1[64], t2[64];
686
687 p = pa_xnew(pa_mempool, 1);
688
689 p->mutex = pa_mutex_new(TRUE, TRUE);
690 p->semaphore = pa_semaphore_new(0);
691
692 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
693 if (p->block_size < PA_PAGE_SIZE)
694 p->block_size = PA_PAGE_SIZE;
695
696 if (size <= 0)
697 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
698 else {
699 p->n_blocks = (unsigned) (size / p->block_size);
700
701 if (p->n_blocks < 2)
702 p->n_blocks = 2;
703 }
704
705 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
706 pa_xfree(p);
707 return NULL;
708 }
709
710 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s",
711 p->memory.shared ? "shared" : "private",
712 p->n_blocks,
713 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
714 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)));
715
716 memset(&p->stat, 0, sizeof(p->stat));
717 pa_atomic_store(&p->n_init, 0);
718
719 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
720 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
721
722 p->free_slots = pa_flist_new(p->n_blocks);
723
724 return p;
725 }
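/* Illustrative creation calls (sizes hypothetical): a size of 0 selects the
 * default of PA_MEMPOOL_SLOTS_MAX slots; any other value is rounded down to
 * whole slots, with a minimum of two:
 *
 *     pa_mempool *def_pool   = pa_mempool_new(TRUE, 0);
 *     pa_mempool *small_pool = pa_mempool_new(TRUE, 2*1024*1024);  // ~2 MiB
 */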
726
727 void pa_mempool_free(pa_mempool *p) {
728 pa_assert(p);
729
730 pa_mutex_lock(p->mutex);
731
732 while (p->imports)
733 pa_memimport_free(p->imports);
734
735 while (p->exports)
736 pa_memexport_free(p->exports);
737
738 pa_mutex_unlock(p->mutex);
739
740 pa_flist_free(p->free_slots, NULL);
741
742 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
743 /* raise(SIGTRAP); */
744 pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
745 }
746
747 pa_shm_free(&p->memory);
748
749 pa_mutex_free(p->mutex);
750 pa_semaphore_free(p->semaphore);
751
752 pa_xfree(p);
753 }
754
755 /* No lock necessary */
756 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
757 pa_assert(p);
758
759 return &p->stat;
760 }
761
762 /* No lock necessary */
763 size_t pa_mempool_block_size_max(pa_mempool *p) {
764 pa_assert(p);
765
766 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
767 }
768
769 /* No lock necessary */
770 void pa_mempool_vacuum(pa_mempool *p) {
771 struct mempool_slot *slot;
772 pa_flist *list;
773
774 pa_assert(p);
775
776 list = pa_flist_new(p->n_blocks);
777
778 while ((slot = pa_flist_pop(p->free_slots)))
779 while (pa_flist_push(list, slot) < 0)
780 ;
781
782 while ((slot = pa_flist_pop(list))) {
783 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
784
785 while (pa_flist_push(p->free_slots, slot))
786 ;
787 }
788
789 pa_flist_free(list, NULL);
790 }
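/* Vacuuming cycles every currently free slot through pa_shm_punch(), which
 * hints to the kernel that the backing pages may be reclaimed; the slots
 * stay on the free list and remain usable afterwards, with the pages being
 * faulted in again on demand. */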
791
792 /* No lock necessary */
793 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
794 pa_assert(p);
795
796 if (!p->memory.shared)
797 return -1;
798
799 *id = p->memory.id;
800
801 return 0;
802 }
803
804 /* No lock necessary */
805 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
806 pa_assert(p);
807
808 return !!p->memory.shared;
809 }
810
811 /* For receiving blocks from other nodes */
812 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
813 pa_memimport *i;
814
815 pa_assert(p);
816 pa_assert(cb);
817
818 i = pa_xnew(pa_memimport, 1);
819 i->mutex = pa_mutex_new(TRUE, TRUE);
820 i->pool = p;
821 i->segments = pa_hashmap_new(NULL, NULL);
822 i->blocks = pa_hashmap_new(NULL, NULL);
823 i->release_cb = cb;
824 i->userdata = userdata;
825
826 pa_mutex_lock(p->mutex);
827 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
828 pa_mutex_unlock(p->mutex);
829
830 return i;
831 }
832
833 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
834
835 /* Should be called locked */
836 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
837 pa_memimport_segment* seg;
838
839 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
840 return NULL;
841
842 seg = pa_xnew(pa_memimport_segment, 1);
843
844 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
845 pa_xfree(seg);
846 return NULL;
847 }
848
849 seg->import = i;
850 seg->n_blocks = 0;
851
852 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
853 return seg;
854 }
855
856 /* Should be called locked */
857 static void segment_detach(pa_memimport_segment *seg) {
858 pa_assert(seg);
859
860 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
861 pa_shm_free(&seg->memory);
862 pa_xfree(seg);
863 }
864
865 /* Self-locked. Not multiple-caller safe */
866 void pa_memimport_free(pa_memimport *i) {
867 pa_memexport *e;
868 pa_memblock *b;
869
870 pa_assert(i);
871
872 pa_mutex_lock(i->mutex);
873
874 while ((b = pa_hashmap_first(i->blocks)))
875 memblock_replace_import(b);
876
877 pa_assert(pa_hashmap_size(i->segments) == 0);
878
879 pa_mutex_unlock(i->mutex);
880
881 pa_mutex_lock(i->pool->mutex);
882
883 /* If we've exported any of these blocks further we need to revoke those exports */
884 for (e = i->pool->exports; e; e = e->next)
885 memexport_revoke_blocks(e, i);
886
887 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
888
889 pa_mutex_unlock(i->pool->mutex);
890
891 pa_hashmap_free(i->blocks, NULL, NULL);
892 pa_hashmap_free(i->segments, NULL, NULL);
893
894 pa_mutex_free(i->mutex);
895
896 pa_xfree(i);
897 }
898
899 /* Self-locked */
900 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
901 pa_memblock *b = NULL;
902 pa_memimport_segment *seg;
903
904 pa_assert(i);
905
906 pa_mutex_lock(i->mutex);
907
908 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
909 goto finish;
910
911 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
912 if (!(seg = segment_attach(i, shm_id)))
913 goto finish;
914
915 if (offset+size > seg->memory.size)
916 goto finish;
917
918 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
919 b = pa_xnew(pa_memblock, 1);
920
921 PA_REFCNT_INIT(b);
922 b->pool = i->pool;
923 b->type = PA_MEMBLOCK_IMPORTED;
924 b->read_only = TRUE;
925 b->is_silence = FALSE;
926 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
927 b->length = size;
928 pa_atomic_store(&b->n_acquired, 0);
929 pa_atomic_store(&b->please_signal, 0);
930 b->per_type.imported.id = block_id;
931 b->per_type.imported.segment = seg;
932
933 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
934
935 seg->n_blocks++;
936
937 finish:
938 pa_mutex_unlock(i->mutex);
939
940 if (b)
941 stat_add(b);
942
943 return b;
944 }
945
946 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
947 pa_memblock *b;
948 int ret = 0;
949 pa_assert(i);
950
951 pa_mutex_lock(i->mutex);
952
953 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
954 ret = -1;
955 goto finish;
956 }
957
958 memblock_replace_import(b);
959
960 finish:
961 pa_mutex_unlock(i->mutex);
962
963 return ret;
964 }
965
966 /* For sending blocks to other nodes */
967 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
968 pa_memexport *e;
969
970 pa_assert(p);
971 pa_assert(cb);
972
973 if (!p->memory.shared)
974 return NULL;
975
976 e = pa_xnew(pa_memexport, 1);
977 e->mutex = pa_mutex_new(TRUE, TRUE);
978 e->pool = p;
979 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
980 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
981 e->n_init = 0;
982 e->revoke_cb = cb;
983 e->userdata = userdata;
984
985 pa_mutex_lock(p->mutex);
986 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
987 pa_mutex_unlock(p->mutex);
988 return e;
989 }
990
991 void pa_memexport_free(pa_memexport *e) {
992 pa_assert(e);
993
994 pa_mutex_lock(e->mutex);
995 while (e->used_slots)
996 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
997 pa_mutex_unlock(e->mutex);
998
999 pa_mutex_lock(e->pool->mutex);
1000 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1001 pa_mutex_unlock(e->pool->mutex);
1002
1003 pa_mutex_free(e->mutex);
1004 pa_xfree(e);
1005 }
1006
1007 /* Self-locked */
1008 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1009 pa_memblock *b;
1010
1011 pa_assert(e);
1012
1013 pa_mutex_lock(e->mutex);
1014
1015 if (id >= e->n_init)
1016 goto fail;
1017
1018 if (!e->slots[id].block)
1019 goto fail;
1020
1021 b = e->slots[id].block;
1022 e->slots[id].block = NULL;
1023
1024 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1025 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1026
1027 pa_mutex_unlock(e->mutex);
1028
1029 /* pa_log("Processing release for %u", id); */
1030
1031 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1032 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1033
1034 pa_atomic_dec(&e->pool->stat.n_exported);
1035 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1036
1037 pa_memblock_unref(b);
1038
1039 return 0;
1040
1041 fail:
1042 pa_mutex_unlock(e->mutex);
1043
1044 return -1;
1045 }
1046
1047 /* Self-locked */
1048 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1049 struct memexport_slot *slot, *next;
1050 pa_assert(e);
1051 pa_assert(i);
1052
1053 pa_mutex_lock(e->mutex);
1054
1055 for (slot = e->used_slots; slot; slot = next) {
1056 uint32_t idx;
1057 next = slot->next;
1058
1059 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1060 slot->block->per_type.imported.segment->import != i)
1061 continue;
1062
1063 idx = (uint32_t) (slot - e->slots);
1064 e->revoke_cb(e, idx, e->userdata);
1065 pa_memexport_process_release(e, idx);
1066 }
1067
1068 pa_mutex_unlock(e->mutex);
1069 }
1070
1071 /* No lock necessary */
1072 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1073 pa_memblock *n;
1074
1075 pa_assert(p);
1076 pa_assert(b);
1077
1078 if (b->type == PA_MEMBLOCK_IMPORTED ||
1079 b->type == PA_MEMBLOCK_POOL ||
1080 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1081 pa_assert(b->pool == p);
1082 return pa_memblock_ref(b);
1083 }
1084
1085 if (!(n = pa_memblock_new_pool(p, b->length)))
1086 return NULL;
1087
1088 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1089 return n;
1090 }
1091
1092 /* Self-locked */
1093 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1094 pa_shm *memory;
1095 struct memexport_slot *slot;
1096 void *data;
1097
1098 pa_assert(e);
1099 pa_assert(b);
1100 pa_assert(block_id);
1101 pa_assert(shm_id);
1102 pa_assert(offset);
1103 pa_assert(size);
1104 pa_assert(b->pool == e->pool);
1105
1106 if (!(b = memblock_shared_copy(e->pool, b)))
1107 return -1;
1108
1109 pa_mutex_lock(e->mutex);
1110
1111 if (e->free_slots) {
1112 slot = e->free_slots;
1113 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1114 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1115 slot = &e->slots[e->n_init++];
1116 else {
1117 pa_mutex_unlock(e->mutex);
1118 pa_memblock_unref(b);
1119 return -1;
1120 }
1121
1122 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1123 slot->block = b;
1124 *block_id = (uint32_t) (slot - e->slots);
1125
1126 pa_mutex_unlock(e->mutex);
1127 /* pa_log("Got block id %u", *block_id); */
1128
1129 data = pa_memblock_acquire(b);
1130
1131 if (b->type == PA_MEMBLOCK_IMPORTED) {
1132 pa_assert(b->per_type.imported.segment);
1133 memory = &b->per_type.imported.segment->memory;
1134 } else {
1135 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1136 pa_assert(b->pool);
1137 memory = &b->pool->memory;
1138 }
1139
1140 pa_assert(data >= memory->ptr);
1141 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1142
1143 *shm_id = memory->id;
1144 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1145 *size = b->length;
1146
1147 pa_memblock_release(b);
1148
1149 pa_atomic_inc(&e->pool->stat.n_exported);
1150 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1151
1152 return 0;
1153 }
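/* End-to-end sketch with hypothetical names: the exporting side fills in
 * the four identifiers and ships them over its transport, and the importing
 * side turns them back into a local memblock backed by the same SHM segment:
 *
 *     uint32_t block_id, shm_id; size_t offset, size;
 *     if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         send_to_peer(block_id, shm_id, offset, size);   // transport not shown
 *
 *     // ... on the receiving side ...
 *     pa_memblock *r = pa_memimport_get(i, block_id, shm_id, offset, size);
 */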