1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
48
49 #include "memblock.h"
50
51 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
52 * note that the footprint is usually much smaller, since the data is
53 * stored in SHM and our OS does not commit the memory before we use
54 * it for the first time. */
55 #define PA_MEMPOOL_SLOTS_MAX 1024
56 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
57
58 #define PA_MEMEXPORT_SLOTS_MAX 128
59
60 #define PA_MEMIMPORT_SLOTS_MAX 160
61 #define PA_MEMIMPORT_SEGMENTS_MAX 16
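
/* Worked example (illustrative sketch; example_print_pool_geometry() is a
 * hypothetical helper, not PulseAudio API): with the defaults above a pool
 * covers 1024 * 64 KiB = 64 MiB of (lazily committed) SHM, while the largest
 * single pool block is one slot minus the aligned pa_memblock header, as
 * reported by pa_mempool_block_size_max() further down. Compiled out: */
#if 0
static void example_print_pool_geometry(pa_mempool *pool) {
    pa_log_debug("total pool size: %lu bytes, largest pool block: %lu bytes",
                 (unsigned long) (PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE),
                 (unsigned long) pa_mempool_block_size_max(pool));
}
#endif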
62
63 struct pa_memblock {
64 PA_REFCNT_DECLARE; /* the reference counter */
65 pa_mempool *pool;
66
67 pa_memblock_type_t type;
68
69 pa_bool_t read_only:1;
70 pa_bool_t is_silence:1;
71
72 pa_atomic_ptr_t data;
73 size_t length;
74
75 pa_atomic_t n_acquired;
76 pa_atomic_t please_signal;
77
78 union {
79 struct {
80 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
81 pa_free_cb_t free_cb;
82 } user;
83
84 struct {
85 uint32_t id;
86 pa_memimport_segment *segment;
87 } imported;
88 } per_type;
89 };
90
91 struct pa_memimport_segment {
92 pa_memimport *import;
93 pa_shm memory;
94 unsigned n_blocks;
95 };
96
97 struct pa_memimport {
98 pa_mutex *mutex;
99
100 pa_mempool *pool;
101 pa_hashmap *segments;
102 pa_hashmap *blocks;
103
104 /* Called whenever an imported memory block is no longer
105 * needed. */
106 pa_memimport_release_cb_t release_cb;
107 void *userdata;
108
109 PA_LLIST_FIELDS(pa_memimport);
110 };
111
112 struct memexport_slot {
113 PA_LLIST_FIELDS(struct memexport_slot);
114 pa_memblock *block;
115 };
116
117 struct pa_memexport {
118 pa_mutex *mutex;
119 pa_mempool *pool;
120
121 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
122
123 PA_LLIST_HEAD(struct memexport_slot, free_slots);
124 PA_LLIST_HEAD(struct memexport_slot, used_slots);
125 unsigned n_init;
126
127 /* Called whenever a client from which we imported a memory block
128 that we in turn exported to another client dies, so that we can
129 revoke the memory block accordingly */
130 pa_memexport_revoke_cb_t revoke_cb;
131 void *userdata;
132
133 PA_LLIST_FIELDS(pa_memexport);
134 };
135
136 struct pa_mempool {
137 pa_semaphore *semaphore;
138 pa_mutex *mutex;
139
140 pa_shm memory;
141 size_t block_size;
142 unsigned n_blocks;
143
144 pa_atomic_t n_init;
145
146 PA_LLIST_HEAD(pa_memimport, imports);
147 PA_LLIST_HEAD(pa_memexport, exports);
148
149 /* A list of free slots that may be reused */
150 pa_flist *free_slots;
151
152 pa_mempool_stat stat;
153 };
154
155 static void segment_detach(pa_memimport_segment *seg);
156
157 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
158
159 /* No lock necessary */
160 static void stat_add(pa_memblock*b) {
161 pa_assert(b);
162 pa_assert(b->pool);
163
164 pa_atomic_inc(&b->pool->stat.n_allocated);
165 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
166
167 pa_atomic_inc(&b->pool->stat.n_accumulated);
168 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
169
170 if (b->type == PA_MEMBLOCK_IMPORTED) {
171 pa_atomic_inc(&b->pool->stat.n_imported);
172 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
173 }
174
175 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
176 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
177 }
178
179 /* No lock necessary */
180 static void stat_remove(pa_memblock *b) {
181 pa_assert(b);
182 pa_assert(b->pool);
183
184 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
185 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
186
187 pa_atomic_dec(&b->pool->stat.n_allocated);
188 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
189
190 if (b->type == PA_MEMBLOCK_IMPORTED) {
191 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
192 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
193
194 pa_atomic_dec(&b->pool->stat.n_imported);
195 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
196 }
197
198 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
199 }
200
201 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
202
203 /* No lock necessary */
204 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
205 pa_memblock *b;
206
207 pa_assert(p);
208 pa_assert(length);
209
210 if (!(b = pa_memblock_new_pool(p, length)))
211 b = memblock_new_appended(p, length);
212
213 return b;
214 }
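
/* Illustrative sketch (example_block_lifecycle() is a hypothetical caller,
 * not PulseAudio API): the usual life cycle of a block obtained from
 * pa_memblock_new() -- acquire the data pointer, use it, release it again,
 * then drop the reference. Compiled out: */
#if 0
static void example_block_lifecycle(pa_mempool *pool) {
    pa_memblock *b;
    void *p;

    /* Tries the pool first; falls back to an "appended" heap block otherwise */
    b = pa_memblock_new(pool, 4096);

    p = pa_memblock_acquire(b);               /* bumps n_acquired */
    memset(p, 0, pa_memblock_get_length(b));
    pa_memblock_release(b);                   /* drops n_acquired again */

    pa_memblock_unref(b);                     /* freed once the refcount reaches 0 */
}
#endif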
215
216 /* No lock necessary */
217 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
218 pa_memblock *b;
219
220 pa_assert(p);
221 pa_assert(length);
222
223 /* If -1 is passed as length we choose the size for the caller. */
224
225 if (length == (size_t) -1)
226 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
227
228 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
229 PA_REFCNT_INIT(b);
230 b->pool = p;
231 b->type = PA_MEMBLOCK_APPENDED;
232 b->read_only = b->is_silence = FALSE;
233 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
234 b->length = length;
235 pa_atomic_store(&b->n_acquired, 0);
236 pa_atomic_store(&b->please_signal, 0);
237
238 stat_add(b);
239 return b;
240 }
241
242 /* No lock necessary */
243 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
244 struct mempool_slot *slot;
245 pa_assert(p);
246
247 if (!(slot = pa_flist_pop(p->free_slots))) {
248 int idx;
249
250 /* The free list was empty, so we have to allocate a new entry */
251
252 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
253 pa_atomic_dec(&p->n_init);
254 else
255 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
256
257 if (!slot) {
258 pa_log_info("Pool full");
259 pa_atomic_inc(&p->stat.n_pool_full);
260 return NULL;
261 }
262 }
263
264 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
265 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
266 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
267 /* } */
268 /* #endif */
269
270 return slot;
271 }
272
273 /* No lock necessary, totally redundant anyway */
274 static inline void* mempool_slot_data(struct mempool_slot *slot) {
275 return slot;
276 }
277
278 /* No lock necessary */
279 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
280 pa_assert(p);
281
282 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
283 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
284
285 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
286 }
287
288 /* No lock necessary */
289 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
290 unsigned idx;
291
292 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
293 return NULL;
294
295 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
296 }
297
298 /* No lock necessary */
299 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
300 pa_memblock *b = NULL;
301 struct mempool_slot *slot;
302
303 pa_assert(p);
304 pa_assert(length);
305
306 /* If -1 is passed as length we choose the size for the caller: we
307 * take the largest size that fits in one of our slots. */
308
309 if (length == (size_t) -1)
310 length = pa_mempool_block_size_max(p);
311
312 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
313
314 if (!(slot = mempool_allocate_slot(p)))
315 return NULL;
316
317 b = mempool_slot_data(slot);
318 b->type = PA_MEMBLOCK_POOL;
319 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
320
321 } else if (p->block_size >= length) {
322
323 if (!(slot = mempool_allocate_slot(p)))
324 return NULL;
325
326 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
327 b = pa_xnew(pa_memblock, 1);
328
329 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
330 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
331
332 } else {
333 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
334 pa_atomic_inc(&p->stat.n_too_large_for_pool);
335 return NULL;
336 }
337
338 PA_REFCNT_INIT(b);
339 b->pool = p;
340 b->read_only = b->is_silence = FALSE;
341 b->length = length;
342 pa_atomic_store(&b->n_acquired, 0);
343 pa_atomic_store(&b->please_signal, 0);
344
345 stat_add(b);
346 return b;
347 }
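
/* Illustrative sketch (hypothetical helper): passing (size_t) -1 requests
 * the largest block that still fits into a single slot, i.e.
 * pa_mempool_block_size_max(). Compiled out: */
#if 0
static pa_memblock* example_largest_pool_block(pa_mempool *pool) {
    pa_memblock *b;

    if ((b = pa_memblock_new_pool(pool, (size_t) -1)))
        pa_assert(pa_memblock_get_length(b) == pa_mempool_block_size_max(pool));

    return b; /* NULL if the pool is full */
}
#endif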
348
349 /* No lock necessary */
350 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
351 pa_memblock *b;
352
353 pa_assert(p);
354 pa_assert(d);
355 pa_assert(length != (size_t) -1);
356 pa_assert(length);
357
358 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
359 b = pa_xnew(pa_memblock, 1);
360 PA_REFCNT_INIT(b);
361 b->pool = p;
362 b->type = PA_MEMBLOCK_FIXED;
363 b->read_only = read_only;
364 b->is_silence = FALSE;
365 pa_atomic_ptr_store(&b->data, d);
366 b->length = length;
367 pa_atomic_store(&b->n_acquired, 0);
368 pa_atomic_store(&b->please_signal, 0);
369
370 stat_add(b);
371 return b;
372 }
373
374 /* No lock necessary */
375 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
376 pa_memblock *b;
377
378 pa_assert(p);
379 pa_assert(d);
380 pa_assert(length);
381 pa_assert(length != (size_t) -1);
382 pa_assert(free_cb);
383
384 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
385 b = pa_xnew(pa_memblock, 1);
386 PA_REFCNT_INIT(b);
387 b->pool = p;
388 b->type = PA_MEMBLOCK_USER;
389 b->read_only = read_only;
390 b->is_silence = FALSE;
391 pa_atomic_ptr_store(&b->data, d);
392 b->length = length;
393 pa_atomic_store(&b->n_acquired, 0);
394 pa_atomic_store(&b->please_signal, 0);
395
396 b->per_type.user.free_cb = free_cb;
397
398 stat_add(b);
399 return b;
400 }
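
/* Illustrative sketch (hypothetical helper): wrapping memory we allocated
 * ourselves in a PA_MEMBLOCK_USER block; the free_cb runs when the last
 * reference to the block is dropped. Compiled out: */
#if 0
static pa_memblock* example_wrap_user_memory(pa_mempool *pool, size_t length) {
    void *d = pa_xmalloc(length);

    /* pa_xfree() matches the pa_xmalloc() above and acts as the free_cb */
    return pa_memblock_new_user(pool, d, length, pa_xfree, FALSE);
}
#endif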
401
402 /* No lock necessary */
403 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
404 pa_assert(b);
405 pa_assert(PA_REFCNT_VALUE(b) > 0);
406
407 return b->read_only && PA_REFCNT_VALUE(b) == 1;
408 }
409
410 /* No lock necessary */
411 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
412 pa_assert(b);
413 pa_assert(PA_REFCNT_VALUE(b) > 0);
414
415 return b->is_silence;
416 }
417
418 /* No lock necessary */
419 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
420 pa_assert(b);
421 pa_assert(PA_REFCNT_VALUE(b) > 0);
422
423 b->is_silence = v;
424 }
425
426 /* No lock necessary */
427 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
428 int r;
429 pa_assert(b);
430
431 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
432
433 return r == 1;
434 }
435
436 /* No lock necessary */
437 void* pa_memblock_acquire(pa_memblock *b) {
438 pa_assert(b);
439 pa_assert(PA_REFCNT_VALUE(b) > 0);
440
441 pa_atomic_inc(&b->n_acquired);
442
443 return pa_atomic_ptr_load(&b->data);
444 }
445
446 /* No lock necessary, in corner cases locks on its own */
447 void pa_memblock_release(pa_memblock *b) {
448 int r;
449 pa_assert(b);
450 pa_assert(PA_REFCNT_VALUE(b) > 0);
451
452 r = pa_atomic_dec(&b->n_acquired);
453 pa_assert(r >= 1);
454
455 /* Signal a waiting thread that this memblock is no longer used */
456 if (r == 1 && pa_atomic_load(&b->please_signal))
457 pa_semaphore_post(b->pool->semaphore);
458 }
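
/* Illustrative sketch (hypothetical helper): every pa_memblock_acquire()
 * must be paired with a pa_memblock_release(), otherwise memblock_wait()
 * below could block forever when the block has to be made local.
 * Compiled out: */
#if 0
static void example_copy_out(pa_memblock *b, void *dst) {
    void *src = pa_memblock_acquire(b);
    memcpy(dst, src, pa_memblock_get_length(b));
    pa_memblock_release(b);
}
#endif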
459
460 size_t pa_memblock_get_length(pa_memblock *b) {
461 pa_assert(b);
462 pa_assert(PA_REFCNT_VALUE(b) > 0);
463
464 return b->length;
465 }
466
467 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
468 pa_assert(b);
469 pa_assert(PA_REFCNT_VALUE(b) > 0);
470
471 return b->pool;
472 }
473
474 /* No lock necessary */
475 pa_memblock* pa_memblock_ref(pa_memblock*b) {
476 pa_assert(b);
477 pa_assert(PA_REFCNT_VALUE(b) > 0);
478
479 PA_REFCNT_INC(b);
480 return b;
481 }
482
483 static void memblock_free(pa_memblock *b) {
484 pa_assert(b);
485
486 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
487
488 stat_remove(b);
489
490 switch (b->type) {
491 case PA_MEMBLOCK_USER :
492 pa_assert(b->per_type.user.free_cb);
493 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
494
495 /* Fall through */
496
497 case PA_MEMBLOCK_FIXED:
498 case PA_MEMBLOCK_APPENDED :
499 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
500 pa_xfree(b);
501
502 break;
503
504 case PA_MEMBLOCK_IMPORTED : {
505 pa_memimport_segment *segment;
506 pa_memimport *import;
507
508 /* FIXME! This should be implemented lock-free */
509
510 segment = b->per_type.imported.segment;
511 pa_assert(segment);
512 import = segment->import;
513 pa_assert(import);
514
515 pa_mutex_lock(import->mutex);
516 pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
517 if (-- segment->n_blocks <= 0)
518 segment_detach(segment);
519
520 pa_mutex_unlock(import->mutex);
521
522 import->release_cb(import, b->per_type.imported.id, import->userdata);
523
524 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
525 pa_xfree(b);
526 break;
527 }
528
529 case PA_MEMBLOCK_POOL_EXTERNAL:
530 case PA_MEMBLOCK_POOL: {
531 struct mempool_slot *slot;
532 pa_bool_t call_free;
533
534 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
535 pa_assert(slot);
536
537 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
538
539 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
540 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
541 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
542 /* } */
543 /* #endif */
544
545 /* The free list dimensions should easily allow all slots
546 * to fit in, hence try harder if pushing this slot into
547 * the free list fails */
548 while (pa_flist_push(b->pool->free_slots, slot) < 0)
549 ;
550
551 if (call_free)
552 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
553 pa_xfree(b);
554
555 break;
556 }
557
558 case PA_MEMBLOCK_TYPE_MAX:
559 default:
560 pa_assert_not_reached();
561 }
562 }
563
564 /* No lock necessary */
565 void pa_memblock_unref(pa_memblock*b) {
566 pa_assert(b);
567 pa_assert(PA_REFCNT_VALUE(b) > 0);
568
569 if (PA_REFCNT_DEC(b) > 0)
570 return;
571
572 memblock_free(b);
573 }
574
575 /* Self locked */
576 static void memblock_wait(pa_memblock *b) {
577 pa_assert(b);
578
579 if (pa_atomic_load(&b->n_acquired) > 0) {
580 /* We need to wait until all threads gave up access to the
581 * memory block before we can go on. Unfortunately this means
582 * that we have to lock and wait here. Sniff! */
583
584 pa_atomic_inc(&b->please_signal);
585
586 while (pa_atomic_load(&b->n_acquired) > 0)
587 pa_semaphore_wait(b->pool->semaphore);
588
589 pa_atomic_dec(&b->please_signal);
590 }
591 }
592
593 /* No lock necessary. This function is not multiple-caller safe! */
594 static void memblock_make_local(pa_memblock *b) {
595 pa_assert(b);
596
597 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
598
599 if (b->length <= b->pool->block_size) {
600 struct mempool_slot *slot;
601
602 if ((slot = mempool_allocate_slot(b->pool))) {
603 void *new_data;
604 /* We can move it into a local pool, perfect! */
605
606 new_data = mempool_slot_data(slot);
607 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
608 pa_atomic_ptr_store(&b->data, new_data);
609
610 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
611 b->read_only = FALSE;
612
613 goto finish;
614 }
615 }
616
617 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
618 b->per_type.user.free_cb = pa_xfree;
619 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
620
621 b->type = PA_MEMBLOCK_USER;
622 b->read_only = FALSE;
623
624 finish:
625 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
626 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
627 memblock_wait(b);
628 }
629
630 /* No lock necessary. This function is not multiple-caller safe */
631 void pa_memblock_unref_fixed(pa_memblock *b) {
632 pa_assert(b);
633 pa_assert(PA_REFCNT_VALUE(b) > 0);
634 pa_assert(b->type == PA_MEMBLOCK_FIXED);
635
636 if (PA_REFCNT_VALUE(b) > 1)
637 memblock_make_local(b);
638
639 pa_memblock_unref(b);
640 }
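
/* Illustrative sketch (hypothetical caller): a FIXED block points at
 * caller-owned memory, so before that memory goes away the caller drops its
 * reference with pa_memblock_unref_fixed(), which copies the data into pool
 * or heap memory if other references remain. Compiled out: */
#if 0
static void example_fixed_block(pa_mempool *pool, void *buf, size_t length) {
    pa_memblock *b = pa_memblock_new_fixed(pool, buf, length, TRUE);

    /* ... hand b to other components, which may keep their own references ... */

    pa_memblock_unref_fixed(b); /* buf may be reused or freed afterwards */
}
#endif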
641
642 /* No lock necessary. */
643 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
644 void *p;
645
646 pa_assert(b);
647 pa_assert(PA_REFCNT_VALUE(b) > 0);
648
649 p = pa_memblock_acquire(b);
650 pa_will_need(p, b->length);
651 pa_memblock_release(b);
652
653 return b;
654 }
655
656 /* Self-locked. This function is not multiple-caller safe */
657 static void memblock_replace_import(pa_memblock *b) {
658 pa_memimport_segment *seg;
659
660 pa_assert(b);
661 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
662
663 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
664 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
665 pa_atomic_dec(&b->pool->stat.n_imported);
666 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
667
668 seg = b->per_type.imported.segment;
669 pa_assert(seg);
670 pa_assert(seg->import);
671
672 pa_mutex_lock(seg->import->mutex);
673
674 pa_hashmap_remove(
675 seg->import->blocks,
676 PA_UINT32_TO_PTR(b->per_type.imported.id));
677
678 memblock_make_local(b);
679
680 if (-- seg->n_blocks <= 0) {
681 pa_mutex_unlock(seg->import->mutex);
682 segment_detach(seg);
683 } else
684 pa_mutex_unlock(seg->import->mutex);
685 }
686
687 pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
688 pa_mempool *p;
689 char t1[64], t2[64];
690
691 p = pa_xnew(pa_mempool, 1);
692
693 p->mutex = pa_mutex_new(TRUE, TRUE);
694 p->semaphore = pa_semaphore_new(0);
695
696 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
697 if (p->block_size < PA_PAGE_SIZE)
698 p->block_size = PA_PAGE_SIZE;
699
700 if (size <= 0)
701 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
702 else {
703 p->n_blocks = (unsigned) (size / p->block_size);
704
705 if (p->n_blocks < 2)
706 p->n_blocks = 2;
707 }
708
709 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
710 pa_xfree(p);
711 return NULL;
712 }
713
714 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
715 p->memory.shared ? "shared" : "private",
716 p->n_blocks,
717 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
718 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
719 (unsigned long) pa_mempool_block_size_max(p));
720
721 memset(&p->stat, 0, sizeof(p->stat));
722 pa_atomic_store(&p->n_init, 0);
723
724 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
725 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
726
727 p->free_slots = pa_flist_new(p->n_blocks);
728
729 return p;
730 }
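
/* Illustrative sketch (hypothetical caller): creating a pool. Passing 0 as
 * size selects the PA_MEMPOOL_SLOTS_MAX default; otherwise the size is
 * rounded down to whole slots (at least two). Compiled out: */
#if 0
static pa_mempool* example_create_pool(void) {
    /* shared=TRUE backs the pool with POSIX SHM so that blocks can be
     * exported to other processes; 8 MiB yields 128 slots of 64 KiB */
    return pa_mempool_new(TRUE, 8*1024*1024);
}
#endif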
731
732 void pa_mempool_free(pa_mempool *p) {
733 pa_assert(p);
734
735 pa_mutex_lock(p->mutex);
736
737 while (p->imports)
738 pa_memimport_free(p->imports);
739
740 while (p->exports)
741 pa_memexport_free(p->exports);
742
743 pa_mutex_unlock(p->mutex);
744
745 pa_flist_free(p->free_slots, NULL);
746
747 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
748 /* raise(SIGTRAP); */
749 pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
750 }
751
752 pa_shm_free(&p->memory);
753
754 pa_mutex_free(p->mutex);
755 pa_semaphore_free(p->semaphore);
756
757 pa_xfree(p);
758 }
759
760 /* No lock necessary */
761 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
762 pa_assert(p);
763
764 return &p->stat;
765 }
766
767 /* No lock necessary */
768 size_t pa_mempool_block_size_max(pa_mempool *p) {
769 pa_assert(p);
770
771 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
772 }
773
774 /* No lock necessary */
775 void pa_mempool_vacuum(pa_mempool *p) {
776 struct mempool_slot *slot;
777 pa_flist *list;
778
779 pa_assert(p);
780
781 list = pa_flist_new(p->n_blocks);
782
783 while ((slot = pa_flist_pop(p->free_slots)))
784 while (pa_flist_push(list, slot) < 0)
785 ;
786
787 while ((slot = pa_flist_pop(list))) {
788 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
789
790 while (pa_flist_push(p->free_slots, slot))
791 ;
792 }
793
794 pa_flist_free(list, NULL);
795 }
796
797 /* No lock necessary */
798 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
799 pa_assert(p);
800
801 if (!p->memory.shared)
802 return -1;
803
804 *id = p->memory.id;
805
806 return 0;
807 }
808
809 /* No lock necessary */
810 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
811 pa_assert(p);
812
813 return !!p->memory.shared;
814 }
815
816 /* For receiving blocks from other nodes */
817 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
818 pa_memimport *i;
819
820 pa_assert(p);
821 pa_assert(cb);
822
823 i = pa_xnew(pa_memimport, 1);
824 i->mutex = pa_mutex_new(TRUE, TRUE);
825 i->pool = p;
826 i->segments = pa_hashmap_new(NULL, NULL);
827 i->blocks = pa_hashmap_new(NULL, NULL);
828 i->release_cb = cb;
829 i->userdata = userdata;
830
831 pa_mutex_lock(p->mutex);
832 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
833 pa_mutex_unlock(p->mutex);
834
835 return i;
836 }
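
/* Illustrative sketch (the example_* helpers are hypothetical): the
 * importing side installs a release callback with the signature used by the
 * call site in memblock_free() above; it fires once an imported block is no
 * longer referenced locally, so the exporting peer can be told to reuse its
 * slot. Compiled out: */
#if 0
static void example_release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
    /* e.g. send a "release block_id" message back to the exporting client */
    pa_log_debug("imported block %u no longer needed", block_id);
}

static pa_memimport* example_create_import(pa_mempool *pool) {
    return pa_memimport_new(pool, example_release_cb, NULL);
}
#endif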
837
838 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
839
840 /* Should be called locked */
841 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
842 pa_memimport_segment* seg;
843
844 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
845 return NULL;
846
847 seg = pa_xnew(pa_memimport_segment, 1);
848
849 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
850 pa_xfree(seg);
851 return NULL;
852 }
853
854 seg->import = i;
855 seg->n_blocks = 0;
856
857 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
858 return seg;
859 }
860
861 /* Should be called locked */
862 static void segment_detach(pa_memimport_segment *seg) {
863 pa_assert(seg);
864
865 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
866 pa_shm_free(&seg->memory);
867 pa_xfree(seg);
868 }
869
870 /* Self-locked. Not multiple-caller safe */
871 void pa_memimport_free(pa_memimport *i) {
872 pa_memexport *e;
873 pa_memblock *b;
874
875 pa_assert(i);
876
877 pa_mutex_lock(i->mutex);
878
879 while ((b = pa_hashmap_first(i->blocks)))
880 memblock_replace_import(b);
881
882 pa_assert(pa_hashmap_size(i->segments) == 0);
883
884 pa_mutex_unlock(i->mutex);
885
886 pa_mutex_lock(i->pool->mutex);
887
888 /* If we've further exported any blocks imported from here, we need to revoke those exports */
889 for (e = i->pool->exports; e; e = e->next)
890 memexport_revoke_blocks(e, i);
891
892 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
893
894 pa_mutex_unlock(i->pool->mutex);
895
896 pa_hashmap_free(i->blocks, NULL, NULL);
897 pa_hashmap_free(i->segments, NULL, NULL);
898
899 pa_mutex_free(i->mutex);
900
901 pa_xfree(i);
902 }
903
904 /* Self-locked */
905 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
906 pa_memblock *b = NULL;
907 pa_memimport_segment *seg;
908
909 pa_assert(i);
910
911 pa_mutex_lock(i->mutex);
912
913 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
914 goto finish;
915
916 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
917 if (!(seg = segment_attach(i, shm_id)))
918 goto finish;
919
920 if (offset+size > seg->memory.size)
921 goto finish;
922
923 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
924 b = pa_xnew(pa_memblock, 1);
925
926 PA_REFCNT_INIT(b);
927 b->pool = i->pool;
928 b->type = PA_MEMBLOCK_IMPORTED;
929 b->read_only = TRUE;
930 b->is_silence = FALSE;
931 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
932 b->length = size;
933 pa_atomic_store(&b->n_acquired, 0);
934 pa_atomic_store(&b->please_signal, 0);
935 b->per_type.imported.id = block_id;
936 b->per_type.imported.segment = seg;
937
938 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
939
940 seg->n_blocks++;
941
942 finish:
943 pa_mutex_unlock(i->mutex);
944
945 if (b)
946 stat_add(b);
947
948 return b;
949 }
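
/* Illustrative sketch (hypothetical helper): turning the four values
 * received from a peer (block id, SHM id, offset, size) into a local
 * read-only PA_MEMBLOCK_IMPORTED block. Compiled out: */
#if 0
static pa_memblock* example_receive_block(pa_memimport *i, uint32_t block_id,
                                          uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b;

    if (!(b = pa_memimport_get(i, block_id, shm_id, offset, size)))
        pa_log_warn("import failed (slot limit reached or bad SHM reference)");

    return b;
}
#endif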
950
951 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
952 pa_memblock *b;
953 int ret = 0;
954 pa_assert(i);
955
956 pa_mutex_lock(i->mutex);
957
958 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
959 ret = -1;
960 goto finish;
961 }
962
963 memblock_replace_import(b);
964
965 finish:
966 pa_mutex_unlock(i->mutex);
967
968 return ret;
969 }
970
971 /* For sending blocks to other nodes */
972 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
973 pa_memexport *e;
974
975 pa_assert(p);
976 pa_assert(cb);
977
978 if (!p->memory.shared)
979 return NULL;
980
981 e = pa_xnew(pa_memexport, 1);
982 e->mutex = pa_mutex_new(TRUE, TRUE);
983 e->pool = p;
984 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
985 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
986 e->n_init = 0;
987 e->revoke_cb = cb;
988 e->userdata = userdata;
989
990 pa_mutex_lock(p->mutex);
991 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
992 pa_mutex_unlock(p->mutex);
993 return e;
994 }
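
/* Illustrative sketch (the example_* helpers are hypothetical): the
 * exporting side installs a revoke callback with the signature used by
 * memexport_revoke_blocks() below; it must tell the receiving peer to stop
 * using the given block id. Compiled out: */
#if 0
static void example_revoke_cb(pa_memexport *e, uint32_t block_id, void *userdata) {
    /* e.g. tell the importing client to stop using block_id immediately */
    pa_log_debug("exported block %u revoked", block_id);
}

static pa_memexport* example_create_export(pa_mempool *pool) {
    /* returns NULL if the pool is not backed by shared memory */
    return pa_memexport_new(pool, example_revoke_cb, NULL);
}
#endif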
995
996 void pa_memexport_free(pa_memexport *e) {
997 pa_assert(e);
998
999 pa_mutex_lock(e->mutex);
1000 while (e->used_slots)
1001 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
1002 pa_mutex_unlock(e->mutex);
1003
1004 pa_mutex_lock(e->pool->mutex);
1005 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1006 pa_mutex_unlock(e->pool->mutex);
1007
1008 pa_mutex_free(e->mutex);
1009 pa_xfree(e);
1010 }
1011
1012 /* Self-locked */
1013 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1014 pa_memblock *b;
1015
1016 pa_assert(e);
1017
1018 pa_mutex_lock(e->mutex);
1019
1020 if (id >= e->n_init)
1021 goto fail;
1022
1023 if (!e->slots[id].block)
1024 goto fail;
1025
1026 b = e->slots[id].block;
1027 e->slots[id].block = NULL;
1028
1029 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1030 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1031
1032 pa_mutex_unlock(e->mutex);
1033
1034 /* pa_log("Processing release for %u", id); */
1035
1036 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1037 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1038
1039 pa_atomic_dec(&e->pool->stat.n_exported);
1040 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1041
1042 pa_memblock_unref(b);
1043
1044 return 0;
1045
1046 fail:
1047 pa_mutex_unlock(e->mutex);
1048
1049 return -1;
1050 }
1051
1052 /* Self-locked */
1053 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1054 struct memexport_slot *slot, *next;
1055 pa_assert(e);
1056 pa_assert(i);
1057
1058 pa_mutex_lock(e->mutex);
1059
1060 for (slot = e->used_slots; slot; slot = next) {
1061 uint32_t idx;
1062 next = slot->next;
1063
1064 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1065 slot->block->per_type.imported.segment->import != i)
1066 continue;
1067
1068 idx = (uint32_t) (slot - e->slots);
1069 e->revoke_cb(e, idx, e->userdata);
1070 pa_memexport_process_release(e, idx);
1071 }
1072
1073 pa_mutex_unlock(e->mutex);
1074 }
1075
1076 /* No lock necessary */
1077 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1078 pa_memblock *n;
1079
1080 pa_assert(p);
1081 pa_assert(b);
1082
1083 if (b->type == PA_MEMBLOCK_IMPORTED ||
1084 b->type == PA_MEMBLOCK_POOL ||
1085 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1086 pa_assert(b->pool == p);
1087 return pa_memblock_ref(b);
1088 }
1089
1090 if (!(n = pa_memblock_new_pool(p, b->length)))
1091 return NULL;
1092
1093 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1094 return n;
1095 }
1096
1097 /* Self-locked */
1098 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1099 pa_shm *memory;
1100 struct memexport_slot *slot;
1101 void *data;
1102
1103 pa_assert(e);
1104 pa_assert(b);
1105 pa_assert(block_id);
1106 pa_assert(shm_id);
1107 pa_assert(offset);
1108 pa_assert(size);
1109 pa_assert(b->pool == e->pool);
1110
1111 if (!(b = memblock_shared_copy(e->pool, b)))
1112 return -1;
1113
1114 pa_mutex_lock(e->mutex);
1115
1116 if (e->free_slots) {
1117 slot = e->free_slots;
1118 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1119 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1120 slot = &e->slots[e->n_init++];
1121 else {
1122 pa_mutex_unlock(e->mutex);
1123 pa_memblock_unref(b);
1124 return -1;
1125 }
1126
1127 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1128 slot->block = b;
1129 *block_id = (uint32_t) (slot - e->slots);
1130
1131 pa_mutex_unlock(e->mutex);
1132 /* pa_log("Got block id %u", *block_id); */
1133
1134 data = pa_memblock_acquire(b);
1135
1136 if (b->type == PA_MEMBLOCK_IMPORTED) {
1137 pa_assert(b->per_type.imported.segment);
1138 memory = &b->per_type.imported.segment->memory;
1139 } else {
1140 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1141 pa_assert(b->pool);
1142 memory = &b->pool->memory;
1143 }
1144
1145 pa_assert(data >= memory->ptr);
1146 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1147
1148 *shm_id = memory->id;
1149 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1150 *size = b->length;
1151
1152 pa_memblock_release(b);
1153
1154 pa_atomic_inc(&e->pool->stat.n_exported);
1155 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1156
1157 return 0;
1158 }
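
/* Illustrative sketch (hypothetical helper): exporting a block and shipping
 * the resulting identifiers to a peer, which would pass them to
 * pa_memimport_get() on its side. Compiled out: */
#if 0
static int example_send_block(pa_memexport *e, pa_memblock *b) {
    uint32_t block_id, shm_id;
    size_t offset, size;

    /* Copies the block into the shared pool first if it isn't there yet */
    if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) < 0)
        return -1;

    /* ... transmit block_id, shm_id, offset and size to the peer; once the
     * peer reports the block released, call
     * pa_memexport_process_release(e, block_id) ... */
    return 0;
}
#endif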