src/pulsecore/memblock.c
/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/refcnt.h>
#include <pulsecore/llist.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

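/* A pa_memblock is a reference-counted chunk of audio data. The header
 * below is shared by several storage variants (see pa_memblock_type_t,
 * declared in memblock.h), which this file handles as follows:
 *
 *   PA_MEMBLOCK_POOL           header and data share a single pool slot
 *   PA_MEMBLOCK_POOL_EXTERNAL  header is malloc()ed, data lives in a pool slot
 *   PA_MEMBLOCK_APPENDED       header and data in one malloc()ed chunk
 *   PA_MEMBLOCK_USER           data supplied by the user, freed via free_cb
 *   PA_MEMBLOCK_FIXED          data owned by the caller, never freed by us
 *   PA_MEMBLOCK_IMPORTED       data lives in an SHM segment of another process
 */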
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

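/* One shared memory segment mapped in (read-only) from another process */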
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we
     * can revoke the export accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
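
/* Typical life cycle of a memblock as seen by a caller (illustrative
 * sketch only, not used anywhere in this file; assumes a pa_mempool *pool
 * obtained from pa_mempool_new()):
 *
 *     pa_memblock *b = pa_memblock_new(pool, 1024);
 *     void *d = pa_memblock_acquire(b);          // pin the data while touching it
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);                    // undo the acquire
 *     pa_memblock_unref(b);                      // drop the reference
 */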

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

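/* Grab a slot from the pool: first try the lock-free free list of
 * previously released slots; if that is empty, carve out a fresh slot by
 * atomically bumping n_init, as long as the pool is not yet exhausted. */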
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, so we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

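/* Allocate a memblock backed by a pool slot. If header and data both fit
 * into one slot they share it (PA_MEMBLOCK_POOL); if only the data fits,
 * the header is allocated separately (PA_MEMBLOCK_POOL_EXTERNAL); if the
 * data is too large for a slot we return NULL and pa_memblock_new() falls
 * back to an appended block. */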
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

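/* Wrap a caller-owned buffer in a memblock. The data is not copied and is
 * never freed by us; see pa_memblock_new_user() below when a free
 * callback is needed. */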
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

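/* Data access is bracketed by pa_memblock_acquire()/pa_memblock_release():
 * n_acquired counts the users currently touching the data, and
 * memblock_wait() below uses please_signal plus the pool semaphore to
 * block until that count drops back to zero. */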
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

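/* Really get rid of a memblock after the last reference is gone. How the
 * header and the data are released depends on the block type, see the
 * switch below. */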
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste a considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

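/* Turn a block whose data we do not own (imported or fixed) into one
 * backed by our own pool, or by a private heap copy if the pool is full,
 * so that it stays valid after the original memory goes away. */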
/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple-caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

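/* Create a new memory pool: one large (optionally shared) SHM area split
 * into n_blocks slots of block_size bytes, with block_size rounded up to
 * at least one page. */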
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

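/* Return the memory of all currently unused slots to the kernel by
 * punching holes into the SHM area, without giving the slots up. */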
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

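/* Look up or create the local pa_memblock for a block another process
 * exported to us: reuse the block if we already imported it, attach the
 * SHM segment on first use, and wrap the (read-only) data in a new
 * PA_MEMBLOCK_IMPORTED block. */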
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}

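/* Self-locked. Called when the other side revokes a block we imported
 * earlier; we replace our imported copy with a local one. */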
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

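/* Hand a block out to another process: make sure the data lives in shared
 * memory (copying it into the pool if necessary), claim an export slot and
 * report back the block id plus the SHM id/offset/size needed to map it on
 * the other side. */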
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}