/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
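
/* Illustrative sketch, not part of the original file: the typical lifecycle
 * of a block allocated from a pool, using only functions defined in this
 * file. "pool" is assumed to have been created elsewhere with
 * pa_mempool_new().
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);  // pool slot or appended block
 *     void *d = pa_memblock_acquire(b);              // pin the data pointer
 *     memset(d, 0, pa_memblock_get_length(b));       // ... use the memory ...
 *     pa_memblock_release(b);                        // done accessing the data
 *     pa_memblock_unref(b);                          // last unref frees the block
 */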

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
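
/* Illustrative sketch, not part of the original file: wrapping caller-owned
 * memory in a user memblock ("pool" assumed as above). The data stays outside
 * the pool and the free callback runs once the last reference is dropped;
 * pa_xfree() is a valid callback, just as memblock_make_local() below uses it.
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);   // the final unref calls pa_xfree(buf)
 */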

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(
                                 import->blocks,
                                 PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(
                         import->blocks,
                         PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[64], t2[64];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
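
/* Illustrative sketch, not part of the original file: creating and destroying
 * a pool. Passing 0 as the size selects the PA_MEMPOOL_SLOTS_MAX default; a
 * shared pool is backed by SHM so that its blocks can be exported to other
 * processes.
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);   // shared pool, default size
 *     if (!pool)
 *         return NULL;                              // SHM segment creation failed
 *     ...
 *     pa_mempool_free(pool);                        // all blocks should be unreffed first
 */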

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
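
/* Illustrative sketch, not part of the original file: the zero-copy transfer
 * flow built from the primitives above. "exp" and "imp" are assumed to come
 * from pa_memexport_new() and pa_memimport_new() on the two sides; how the
 * four numbers travel between processes (normally the native protocol) is
 * outside this file, so send_to_peer() is a hypothetical placeholder.
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) == 0)
 *         send_to_peer(block_id, shm_id, offset, size);   // hypothetical transport
 *
 *     // The receiving process then maps the same SHM data as a read-only
 *     // PA_MEMBLOCK_IMPORTED block:
 *     pa_memblock *ib = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */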