/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
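
/* Illustrative sketch (not part of the upstream file): the typical lifecycle
 * of a block as seen by a caller of this API -- create a pool, allocate a
 * block from it, drop the reference, free the pool. The helper name and the
 * chosen length are made up for the example; only the pa_mempool_*() and
 * pa_memblock_*() calls come from this file / memblock.h. */
static void example_memblock_lifecycle(void) {
    pa_mempool *pool;
    pa_memblock *b;

    /* A non-shared (private) pool; pass TRUE to back it with shared memory */
    pa_assert_se(pool = pa_mempool_new(FALSE));

    /* Falls back to an "appended" block if the pool cannot serve the request */
    pa_assert_se(b = pa_memblock_new(pool, 4096));

    pa_memblock_unref(b);
    pa_mempool_free(pool);
}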

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

#ifdef HAVE_VALGRIND_MEMCHECK_H
    VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0);
#endif

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
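
/* Illustrative sketch (not part of the upstream file): wrapping a heap buffer
 * in a PA_MEMBLOCK_USER block. Ownership of the buffer passes to the memblock,
 * which calls the supplied free callback once the last reference is dropped --
 * pa_xfree is used here, just as memblock_make_local() below does. The helper
 * name and buffer size are made up for the example. */
static pa_memblock* example_wrap_heap_buffer(pa_mempool *pool) {
    void *buf;

    pa_assert(pool);

    /* Some caller-owned data we want to hand over without copying */
    buf = pa_xmalloc(1024);
    memset(buf, 0, 1024);

    /* read_only=FALSE: the block may still be written through acquire() */
    return pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
}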

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
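
/* Illustrative sketch (not part of the upstream file): the intended
 * acquire/release discipline around a block's payload. The pointer returned
 * by pa_memblock_acquire() is only meant to be used until the matching
 * pa_memblock_release(), so all reads and writes happen between the two
 * calls. The helper name is made up for the example. */
static void example_fill_with_silence(pa_memblock *b) {
    void *p;

    pa_assert(b);

    p = pa_memblock_acquire(b);
    memset(p, 0, pa_memblock_get_length(b));
    pa_memblock_release(b);

    pa_memblock_set_is_silence(b, TRUE);
}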

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size);
#endif

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
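
/* Illustrative sketch (not part of the upstream file): the import side of SHM
 * block exchange. A pa_memimport is created with a release callback that a
 * transport layer would use to tell the exporting peer that a block id is no
 * longer referenced here; the callback parameters below are inferred from the
 * release_cb() call site in memblock_free() above. The block/shm ids, offset
 * and size would normally arrive over the wire; here they are just passed in
 * as placeholders, and the helper names are made up for the example. */
static void example_release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
    /* A real implementation would send a "release block_id" message back to
     * the peer that exported the block. */
    (void) i;
    (void) block_id;
    (void) userdata;
}

static void example_import_block(pa_mempool *pool, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memimport *imp;
    pa_memblock *b;

    pa_assert_se(imp = pa_memimport_new(pool, example_release_cb, NULL));

    /* Maps the peer's SHM segment (if not mapped yet) and wraps the requested
     * range in a read-only PA_MEMBLOCK_IMPORTED block */
    if ((b = pa_memimport_get(imp, block_id, shm_id, offset, size)))
        pa_memblock_unref(b);

    pa_memimport_free(imp);
}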

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
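
/* Illustrative sketch (not part of the upstream file): the export side of SHM
 * block exchange. A shared pool is required (pa_memexport_new() refuses
 * non-shared pools); pa_memexport_put() pins the block in a slot and returns
 * the (block id, shm id, offset, size) tuple a transport layer would send to
 * the peer, which resolves it with pa_memimport_get(). The revoke callback
 * parameters below are inferred from the revoke_cb() call site in
 * memexport_revoke_blocks() above; the helper names are made up for the
 * example. */
static void example_revoke_cb(pa_memexport *e, uint32_t block_id, void *userdata) {
    /* A real implementation would tell the peer that block_id must no longer
     * be accessed. */
    (void) e;
    (void) block_id;
    (void) userdata;
}

static int example_export_block(pa_mempool *shared_pool, pa_memblock *b) {
    pa_memexport *exp;
    uint32_t block_id, shm_id;
    size_t offset, size;
    int r = -1;

    if (!(exp = pa_memexport_new(shared_pool, example_revoke_cb, NULL)))
        return -1;

    if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0) {
        /* block_id/shm_id/offset/size would now be sent to the peer; the peer
         * later answers with a release message, which we feed back via
         * pa_memexport_process_release(). */
        r = pa_memexport_process_release(exp, block_id);
    }

    pa_memexport_free(exp);
    return r;
}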