src/pulsecore/memblock.c
1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
8
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as
11 published by the Free Software Foundation; either version 2.1 of the
12 License, or (at your option) any later version.
13
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public
20 License along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 USA.
23 ***/
24
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <signal.h>
34 #include <errno.h>
35
36 #include <pulse/xmalloc.h>
37 #include <pulse/def.h>
38
39 #include <pulsecore/shm.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/hashmap.h>
42 #include <pulsecore/semaphore.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/flist.h>
45 #include <pulsecore/core-util.h>
46
47 #include "memblock.h"
48
49 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
50 * note that the footprint is usually much smaller, since the data is
51 * stored in SHM and our OS does not commit the memory before we use
52 * it for the first time. */
53 #define PA_MEMPOOL_SLOTS_MAX 1024
54 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
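/* i.e. 1024 slots of 64 KiB each, which is where the 64 MB figure above comes from */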
55
56 #define PA_MEMEXPORT_SLOTS_MAX 128
57
58 #define PA_MEMIMPORT_SLOTS_MAX 128
59 #define PA_MEMIMPORT_SEGMENTS_MAX 16
60
61 struct pa_memblock {
62 PA_REFCNT_DECLARE; /* the reference counter */
63 pa_mempool *pool;
64
65 pa_memblock_type_t type;
66
67 pa_bool_t read_only:1;
68 pa_bool_t is_silence:1;
69
70 pa_atomic_ptr_t data;
71 size_t length;
72
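/* How many threads currently have the data acquired, and how many waiters want the pool semaphore posted once that count drops back to zero (see memblock_wait()) */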
73 pa_atomic_t n_acquired;
74 pa_atomic_t please_signal;
75
76 union {
77 struct {
78 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
79 pa_free_cb_t free_cb;
80 } user;
81
82 struct {
83 uint32_t id;
84 pa_memimport_segment *segment;
85 } imported;
86 } per_type;
87 };
88
89 struct pa_memimport_segment {
90 pa_memimport *import;
91 pa_shm memory;
92 unsigned n_blocks;
93 };
94
95 struct pa_memimport {
96 pa_mutex *mutex;
97
98 pa_mempool *pool;
99 pa_hashmap *segments;
100 pa_hashmap *blocks;
101
102 /* Called whenever an imported memory block is no longer
103 * needed. */
104 pa_memimport_release_cb_t release_cb;
105 void *userdata;
106
107 PA_LLIST_FIELDS(pa_memimport);
108 };
109
110 struct memexport_slot {
111 PA_LLIST_FIELDS(struct memexport_slot);
112 pa_memblock *block;
113 };
114
115 struct pa_memexport {
116 pa_mutex *mutex;
117 pa_mempool *pool;
118
119 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
120
121 PA_LLIST_HEAD(struct memexport_slot, free_slots);
122 PA_LLIST_HEAD(struct memexport_slot, used_slots);
123 unsigned n_init;
124
125 /* Called whenever a client dies from which we imported a memory
126 block that we in turn exported to another client, so that we can
127 revoke that memory block accordingly. */
128 pa_memexport_revoke_cb_t revoke_cb;
129 void *userdata;
130
131 PA_LLIST_FIELDS(pa_memexport);
132 };
133
134 struct pa_mempool {
135 pa_semaphore *semaphore;
136 pa_mutex *mutex;
137
138 pa_shm memory;
139 size_t block_size;
140 unsigned n_blocks;
141
142 pa_atomic_t n_init;
143
144 PA_LLIST_HEAD(pa_memimport, imports);
145 PA_LLIST_HEAD(pa_memexport, exports);
146
147 /* A list of free slots that may be reused */
148 pa_flist *free_slots;
149
150 pa_mempool_stat stat;
151 };
152
153 static void segment_detach(pa_memimport_segment *seg);
154
155 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
156
157 /* No lock necessary */
158 static void stat_add(pa_memblock*b) {
159 pa_assert(b);
160 pa_assert(b->pool);
161
162 pa_atomic_inc(&b->pool->stat.n_allocated);
163 pa_atomic_add(&b->pool->stat.allocated_size, b->length);
164
165 pa_atomic_inc(&b->pool->stat.n_accumulated);
166 pa_atomic_add(&b->pool->stat.accumulated_size, b->length);
167
168 if (b->type == PA_MEMBLOCK_IMPORTED) {
169 pa_atomic_inc(&b->pool->stat.n_imported);
170 pa_atomic_add(&b->pool->stat.imported_size, b->length);
171 }
172
173 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
174 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
175 }
176
177 /* No lock necessary */
178 static void stat_remove(pa_memblock *b) {
179 pa_assert(b);
180 pa_assert(b->pool);
181
182 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
183 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
184
185 pa_atomic_dec(&b->pool->stat.n_allocated);
186 pa_atomic_sub(&b->pool->stat.allocated_size, b->length);
187
188 if (b->type == PA_MEMBLOCK_IMPORTED) {
189 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
190 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
191
192 pa_atomic_dec(&b->pool->stat.n_imported);
193 pa_atomic_sub(&b->pool->stat.imported_size, b->length);
194 }
195
196 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
197 }
198
199 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
200
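/* Allocates a new memory block: try a slot from the SHM pool first, and fall back to a malloc()ed "appended" block if the pool is exhausted or the requested length does not fit into a slot */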
201 /* No lock necessary */
202 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
203 pa_memblock *b;
204
205 pa_assert(p);
206 pa_assert(length);
207
208 if (!(b = pa_memblock_new_pool(p, length)))
209 b = memblock_new_appended(p, length);
210
211 return b;
212 }
213
214 /* No lock necessary */
215 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
216 pa_memblock *b;
217
218 pa_assert(p);
219 pa_assert(length);
220
221 /* If -1 is passed as length we choose the size for the caller. */
222
223 if (length == (size_t) -1)
224 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
225
226 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
227 PA_REFCNT_INIT(b);
228 b->pool = p;
229 b->type = PA_MEMBLOCK_APPENDED;
230 b->read_only = b->is_silence = FALSE;
231 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
232 b->length = length;
233 pa_atomic_store(&b->n_acquired, 0);
234 pa_atomic_store(&b->please_signal, 0);
235
236 stat_add(b);
237 return b;
238 }
239
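/* Note that struct mempool_slot is never defined anywhere: slots are simply fixed-size (block_size) chunks of the pool's SHM area, addressed by index, and the struct is only used as an opaque pointer type */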
240 /* No lock necessary */
241 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
242 struct mempool_slot *slot;
243 pa_assert(p);
244
245 if (!(slot = pa_flist_pop(p->free_slots))) {
246 int idx;
247
248 /* The free list was empty, we have to allocate a new entry */
249
250 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
251 pa_atomic_dec(&p->n_init);
252 else
253 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));
254
255 if (!slot) {
256 pa_log_info("Pool full");
257 pa_atomic_inc(&p->stat.n_pool_full);
258 return NULL;
259 }
260 }
261
262 return slot;
263 }
264
265 /* No lock necessary, totally redundant anyway */
266 static inline void* mempool_slot_data(struct mempool_slot *slot) {
267 return slot;
268 }
269
270 /* No lock necessary */
271 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
272 pa_assert(p);
273
274 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
275 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
276
277 return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
278 }
279
280 /* No lock necessary */
281 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
282 unsigned idx;
283
284 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
285 return NULL;
286
287 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
288 }
289
290 /* No lock necessary */
291 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
292 pa_memblock *b = NULL;
293 struct mempool_slot *slot;
294
295 pa_assert(p);
296 pa_assert(length);
297
298 /* If -1 is passed as length we choose the size for the caller: we
299 * take the largest size that fits in one of our slots. */
300
301 if (length == (size_t) -1)
302 length = pa_mempool_block_size_max(p);
303
304 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
305
306 if (!(slot = mempool_allocate_slot(p)))
307 return NULL;
308
309 b = mempool_slot_data(slot);
310 b->type = PA_MEMBLOCK_POOL;
311 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
312
313 } else if (p->block_size >= length) {
314
315 if (!(slot = mempool_allocate_slot(p)))
316 return NULL;
317
318 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
319 b = pa_xnew(pa_memblock, 1);
320
321 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
322 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
323
324 } else {
325 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
326 pa_atomic_inc(&p->stat.n_too_large_for_pool);
327 return NULL;
328 }
329
330 PA_REFCNT_INIT(b);
331 b->pool = p;
332 b->read_only = b->is_silence = FALSE;
333 b->length = length;
334 pa_atomic_store(&b->n_acquired, 0);
335 pa_atomic_store(&b->please_signal, 0);
336
337 stat_add(b);
338 return b;
339 }
340
341 /* No lock necessary */
342 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
343 pa_memblock *b;
344
345 pa_assert(p);
346 pa_assert(d);
347 pa_assert(length != (size_t) -1);
348 pa_assert(length);
349
350 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
351 b = pa_xnew(pa_memblock, 1);
352 PA_REFCNT_INIT(b);
353 b->pool = p;
354 b->type = PA_MEMBLOCK_FIXED;
355 b->read_only = read_only;
356 b->is_silence = FALSE;
357 pa_atomic_ptr_store(&b->data, d);
358 b->length = length;
359 pa_atomic_store(&b->n_acquired, 0);
360 pa_atomic_store(&b->please_signal, 0);
361
362 stat_add(b);
363 return b;
364 }
365
366 /* No lock necessary */
367 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
368 pa_memblock *b;
369
370 pa_assert(p);
371 pa_assert(d);
372 pa_assert(length);
373 pa_assert(length != (size_t) -1);
374 pa_assert(free_cb);
375
376 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
377 b = pa_xnew(pa_memblock, 1);
378 PA_REFCNT_INIT(b);
379 b->pool = p;
380 b->type = PA_MEMBLOCK_USER;
381 b->read_only = read_only;
382 b->is_silence = FALSE;
383 pa_atomic_ptr_store(&b->data, d);
384 b->length = length;
385 pa_atomic_store(&b->n_acquired, 0);
386 pa_atomic_store(&b->please_signal, 0);
387
388 b->per_type.user.free_cb = free_cb;
389
390 stat_add(b);
391 return b;
392 }
393
394 /* No lock necessary */
395 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
396 pa_assert(b);
397 pa_assert(PA_REFCNT_VALUE(b) > 0);
398
399 return b->read_only && PA_REFCNT_VALUE(b) == 1;
400 }
401
402 /* No lock necessary */
403 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
404 pa_assert(b);
405 pa_assert(PA_REFCNT_VALUE(b) > 0);
406
407 return b->is_silence;
408 }
409
410 /* No lock necessary */
411 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
412 pa_assert(b);
413 pa_assert(PA_REFCNT_VALUE(b) > 0);
414
415 b->is_silence = v;
416 }
417
418 /* No lock necessary */
419 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
420 int r;
421 pa_assert(b);
422
423 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
424
425 return r == 1;
426 }
427
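/* Every pa_memblock_acquire() must be paired with a pa_memblock_release(); the returned pointer is only guaranteed to stay valid until the matching release */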
428 /* No lock necessary */
429 void* pa_memblock_acquire(pa_memblock *b) {
430 pa_assert(b);
431 pa_assert(PA_REFCNT_VALUE(b) > 0);
432
433 pa_atomic_inc(&b->n_acquired);
434
435 return pa_atomic_ptr_load(&b->data);
436 }
437
438 /* No lock necessary, in corner cases locks by its own */
439 void pa_memblock_release(pa_memblock *b) {
440 int r;
441 pa_assert(b);
442 pa_assert(PA_REFCNT_VALUE(b) > 0);
443
444 r = pa_atomic_dec(&b->n_acquired);
445 pa_assert(r >= 1);
446
447 /* Signal a waiting thread that this memblock is no longer used */
448 if (r == 1 && pa_atomic_load(&b->please_signal))
449 pa_semaphore_post(b->pool->semaphore);
450 }
451
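/* No lock necessary */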
452 size_t pa_memblock_get_length(pa_memblock *b) {
453 pa_assert(b);
454 pa_assert(PA_REFCNT_VALUE(b) > 0);
455
456 return b->length;
457 }
458
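/* No lock necessary */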
459 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
460 pa_assert(b);
461 pa_assert(PA_REFCNT_VALUE(b) > 0);
462
463 return b->pool;
464 }
465
466 /* No lock necessary */
467 pa_memblock* pa_memblock_ref(pa_memblock*b) {
468 pa_assert(b);
469 pa_assert(PA_REFCNT_VALUE(b) > 0);
470
471 PA_REFCNT_INC(b);
472 return b;
473 }
474
475 static void memblock_free(pa_memblock *b) {
476 pa_assert(b);
477
478 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
479
480 stat_remove(b);
481
482 switch (b->type) {
483 case PA_MEMBLOCK_USER :
484 pa_assert(b->per_type.user.free_cb);
485 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
486
487 /* Fall through */
488
489 case PA_MEMBLOCK_FIXED:
490 case PA_MEMBLOCK_APPENDED :
491 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
492 pa_xfree(b);
493
494 break;
495
496 case PA_MEMBLOCK_IMPORTED : {
497 pa_memimport_segment *segment;
498 pa_memimport *import;
499
500 /* FIXME! This should be implemented lock-free */
501
502 segment = b->per_type.imported.segment;
503 pa_assert(segment);
504 import = segment->import;
505 pa_assert(import);
506
507 pa_mutex_lock(import->mutex);
508 pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
509 if (-- segment->n_blocks <= 0)
510 segment_detach(segment);
511
512 pa_mutex_unlock(import->mutex);
513
514 import->release_cb(import, b->per_type.imported.id, import->userdata);
515
516 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
517 pa_xfree(b);
518 break;
519 }
520
521 case PA_MEMBLOCK_POOL_EXTERNAL:
522 case PA_MEMBLOCK_POOL: {
523 struct mempool_slot *slot;
524 int call_free;
525
526 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
527 pa_assert(slot);
528
529 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
530
531 /* The free list dimensions should easily allow all slots
532 * to fit in, hence try harder if pushing this slot into
533 * the free list fails */
534 while (pa_flist_push(b->pool->free_slots, slot) < 0)
535 ;
536
537 if (call_free)
538 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
539 pa_xfree(b);
540
541 break;
542 }
543
544 case PA_MEMBLOCK_TYPE_MAX:
545 default:
546 pa_assert_not_reached();
547 }
548 }
549
550 /* No lock necessary */
551 void pa_memblock_unref(pa_memblock*b) {
552 pa_assert(b);
553 pa_assert(PA_REFCNT_VALUE(b) > 0);
554
555 if (PA_REFCNT_DEC(b) > 0)
556 return;
557
558 memblock_free(b);
559 }
560
561 /* Self locked */
562 static void memblock_wait(pa_memblock *b) {
563 pa_assert(b);
564
565 if (pa_atomic_load(&b->n_acquired) > 0) {
566 /* We need to wait until all threads gave up access to the
567 * memory block before we can go on. Unfortunately this means
568 * that we have to lock and wait here. Sniff! */
569
570 pa_atomic_inc(&b->please_signal);
571
572 while (pa_atomic_load(&b->n_acquired) > 0)
573 pa_semaphore_wait(b->pool->semaphore);
574
575 pa_atomic_dec(&b->please_signal);
576 }
577 }
578
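/* Replaces the block's data with a copy we own (a pool slot if the data fits, otherwise a malloc()ed copy), so that the block no longer references foreign memory */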
579 /* No lock necessary. This function is not multiple-caller safe! */
580 static void memblock_make_local(pa_memblock *b) {
581 pa_assert(b);
582
583 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
584
585 if (b->length <= b->pool->block_size) {
586 struct mempool_slot *slot;
587
588 if ((slot = mempool_allocate_slot(b->pool))) {
589 void *new_data;
590 /* We can move it into a local pool, perfect! */
591
592 new_data = mempool_slot_data(slot);
593 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
594 pa_atomic_ptr_store(&b->data, new_data);
595
596 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
597 b->read_only = FALSE;
598
599 goto finish;
600 }
601 }
602
603 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
604 b->per_type.user.free_cb = pa_xfree;
605 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
606
607 b->type = PA_MEMBLOCK_USER;
608 b->read_only = FALSE;
609
610 finish:
611 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
612 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
613 memblock_wait(b);
614 }
615
616 /* No lock necessary. This function is not multiple-caller safe. */
617 void pa_memblock_unref_fixed(pa_memblock *b) {
618 pa_assert(b);
619 pa_assert(PA_REFCNT_VALUE(b) > 0);
620 pa_assert(b->type == PA_MEMBLOCK_FIXED);
621
622 if (PA_REFCNT_VALUE(b) > 1)
623 memblock_make_local(b);
624
625 pa_memblock_unref(b);
626 }
627
628 /* No lock necessary. */
629 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
630 void *p;
631
632 pa_assert(b);
633 pa_assert(PA_REFCNT_VALUE(b) > 0);
634
635 p = pa_memblock_acquire(b);
636 pa_will_need(p, b->length);
637 pa_memblock_release(b);
638
639 return b;
640 }
641
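/* Converts an imported block into a local one and drops it from the import's bookkeeping, detaching the segment if this was its last block */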
642 /* Self-locked. This function is not multiple-caller safe */
643 static void memblock_replace_import(pa_memblock *b) {
644 pa_memimport_segment *seg;
645
646 pa_assert(b);
647 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
648
649 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
650 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
651 pa_atomic_dec(&b->pool->stat.n_imported);
652 pa_atomic_sub(&b->pool->stat.imported_size, b->length);
653
654 seg = b->per_type.imported.segment;
655 pa_assert(seg);
656 pa_assert(seg->import);
657
658 pa_mutex_lock(seg->import->mutex);
659
660 pa_hashmap_remove(
661 seg->import->blocks,
662 PA_UINT32_TO_PTR(b->per_type.imported.id));
663
664 memblock_make_local(b);
665
666 if (-- seg->n_blocks <= 0) {
667 pa_mutex_unlock(seg->import->mutex);
668 segment_detach(seg);
669 } else
670 pa_mutex_unlock(seg->import->mutex);
671 }
672
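/* Creates a pool backed by a single memory segment of PA_MEMPOOL_SLOTS_MAX slots, each PA_MEMPOOL_SLOT_SIZE bytes (rounded up to the page size); the segment is shared via SHM if 'shared' is set, and private otherwise */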
673 pa_mempool* pa_mempool_new(pa_bool_t shared) {
674 pa_mempool *p;
675
676 p = pa_xnew(pa_mempool, 1);
677
678 p->mutex = pa_mutex_new(TRUE, TRUE);
679 p->semaphore = pa_semaphore_new(0);
680
681 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
682 if (p->block_size < PA_PAGE_SIZE)
683 p->block_size = PA_PAGE_SIZE;
684
685 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
686
687 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
688 pa_xfree(p);
689 return NULL;
690 }
691
692 memset(&p->stat, 0, sizeof(p->stat));
693 pa_atomic_store(&p->n_init, 0);
694
695 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
696 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
697
698 p->free_slots = pa_flist_new(p->n_blocks*2);
699
700 return p;
701 }
702
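/* Tears down the pool: frees all remaining imports and exports and releases the backing segment; complains if memory blocks are still allocated at this point */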
703 void pa_mempool_free(pa_mempool *p) {
704 pa_assert(p);
705
706 pa_mutex_lock(p->mutex);
707
708 while (p->imports)
709 pa_memimport_free(p->imports);
710
711 while (p->exports)
712 pa_memexport_free(p->exports);
713
714 pa_mutex_unlock(p->mutex);
715
716 pa_flist_free(p->free_slots, NULL);
717
718 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
719 /* raise(SIGTRAP); */
720 pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
721 }
722
723 pa_shm_free(&p->memory);
724
725 pa_mutex_free(p->mutex);
726 pa_semaphore_free(p->semaphore);
727
728 pa_xfree(p);
729 }
730
731 /* No lock necessary */
732 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
733 pa_assert(p);
734
735 return &p->stat;
736 }
737
738 /* No lock necessary */
739 size_t pa_mempool_block_size_max(pa_mempool *p) {
740 pa_assert(p);
741
742 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
743 }
744
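/* Hands the pages backing all currently unused slots back to the OS via pa_shm_punch(), shrinking the pool's memory footprint while keeping the mapping itself intact */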
745 /* No lock necessary */
746 void pa_mempool_vacuum(pa_mempool *p) {
747 struct mempool_slot *slot;
748 pa_flist *list;
749
750 pa_assert(p);
751
752 list = pa_flist_new(p->n_blocks*2);
753
754 while ((slot = pa_flist_pop(p->free_slots)))
755 while (pa_flist_push(list, slot) < 0)
756 ;
757
758 while ((slot = pa_flist_pop(list))) {
759 pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);
760
761 while (pa_flist_push(p->free_slots, slot) < 0)
762 ;
763 }
764
765 pa_flist_free(list, NULL);
766 }
767
768 /* No lock necessary */
769 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
770 pa_assert(p);
771
772 if (!p->memory.shared)
773 return -1;
774
775 *id = p->memory.id;
776
777 return 0;
778 }
779
780 /* No lock necessary */
781 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
782 pa_assert(p);
783
784 return !!p->memory.shared;
785 }
786
787 /* For receiving blocks from other nodes */
788 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
789 pa_memimport *i;
790
791 pa_assert(p);
792 pa_assert(cb);
793
794 i = pa_xnew(pa_memimport, 1);
795 i->mutex = pa_mutex_new(TRUE, TRUE);
796 i->pool = p;
797 i->segments = pa_hashmap_new(NULL, NULL);
798 i->blocks = pa_hashmap_new(NULL, NULL);
799 i->release_cb = cb;
800 i->userdata = userdata;
801
802 pa_mutex_lock(p->mutex);
803 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
804 pa_mutex_unlock(p->mutex);
805
806 return i;
807 }
808
809 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
810
811 /* Should be called locked */
812 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
813 pa_memimport_segment* seg;
814
815 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
816 return NULL;
817
818 seg = pa_xnew(pa_memimport_segment, 1);
819
820 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
821 pa_xfree(seg);
822 return NULL;
823 }
824
825 seg->import = i;
826 seg->n_blocks = 0;
827
828 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
829 return seg;
830 }
831
832 /* Should be called locked */
833 static void segment_detach(pa_memimport_segment *seg) {
834 pa_assert(seg);
835
836 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
837 pa_shm_free(&seg->memory);
838 pa_xfree(seg);
839 }
840
841 /* Self-locked. Not multiple-caller safe */
842 void pa_memimport_free(pa_memimport *i) {
843 pa_memexport *e;
844 pa_memblock *b;
845
846 pa_assert(i);
847
848 pa_mutex_lock(i->mutex);
849
850 while ((b = pa_hashmap_get_first(i->blocks)))
851 memblock_replace_import(b);
852
853 pa_assert(pa_hashmap_size(i->segments) == 0);
854
855 pa_mutex_unlock(i->mutex);
856
857 pa_mutex_lock(i->pool->mutex);
858
859 /* If we exported any of this import's blocks onward, we need to revoke those exports */
860 for (e = i->pool->exports; e; e = e->next)
861 memexport_revoke_blocks(e, i);
862
863 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
864
865 pa_mutex_unlock(i->pool->mutex);
866
867 pa_hashmap_free(i->blocks, NULL, NULL);
868 pa_hashmap_free(i->segments, NULL, NULL);
869
870 pa_mutex_free(i->mutex);
871
872 pa_xfree(i);
873 }
874
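/* Creates a local memblock of type PA_MEMBLOCK_IMPORTED that points into the sender's SHM segment, attaching the segment on first use; fails if the slot/segment limits are hit or the described region lies outside the segment */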
875 /* Self-locked */
876 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
877 pa_memblock *b = NULL;
878 pa_memimport_segment *seg;
879
880 pa_assert(i);
881
882 pa_mutex_lock(i->mutex);
883
884 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
885 goto finish;
886
887 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
888 if (!(seg = segment_attach(i, shm_id)))
889 goto finish;
890
891 if (offset+size > seg->memory.size)
892 goto finish;
893
894 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
895 b = pa_xnew(pa_memblock, 1);
896
897 PA_REFCNT_INIT(b);
898 b->pool = i->pool;
899 b->type = PA_MEMBLOCK_IMPORTED;
900 b->read_only = TRUE;
901 b->is_silence = FALSE;
902 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
903 b->length = size;
904 pa_atomic_store(&b->n_acquired, 0);
905 pa_atomic_store(&b->please_signal, 0);
906 b->per_type.imported.id = block_id;
907 b->per_type.imported.segment = seg;
908
909 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
910
911 seg->n_blocks++;
912
913 finish:
914 pa_mutex_unlock(i->mutex);
915
916 if (b)
917 stat_add(b);
918
919 return b;
920 }
921
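/* Self-locked. Called when the other side revokes a block it exported to us; the block's data is copied into local memory before the reference to the foreign segment is dropped */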
922 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
923 pa_memblock *b;
924 int ret = 0;
925 pa_assert(i);
926
927 pa_mutex_lock(i->mutex);
928
929 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
930 ret = -1;
931 goto finish;
932 }
933
934 memblock_replace_import(b);
935
936 finish:
937 pa_mutex_unlock(i->mutex);
938
939 return ret;
940 }
941
942 /* For sending blocks to other nodes */
943 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
944 pa_memexport *e;
945
946 pa_assert(p);
947 pa_assert(cb);
948
949 if (!p->memory.shared)
950 return NULL;
951
952 e = pa_xnew(pa_memexport, 1);
953 e->mutex = pa_mutex_new(TRUE, TRUE);
954 e->pool = p;
955 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
956 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
957 e->n_init = 0;
958 e->revoke_cb = cb;
959 e->userdata = userdata;
960
961 pa_mutex_lock(p->mutex);
962 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
963 pa_mutex_unlock(p->mutex);
964 return e;
965 }
966
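/* Self-locked. Releases all still-exported blocks before removing the export from its pool */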
967 void pa_memexport_free(pa_memexport *e) {
968 pa_assert(e);
969
970 pa_mutex_lock(e->mutex);
971 while (e->used_slots)
972 pa_memexport_process_release(e, e->used_slots - e->slots);
973 pa_mutex_unlock(e->mutex);
974
975 pa_mutex_lock(e->pool->mutex);
976 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
977 pa_mutex_unlock(e->pool->mutex);
978
979 pa_mutex_free(e->mutex);
980 pa_xfree(e);
981 }
982
983 /* Self-locked */
984 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
985 pa_memblock *b;
986
987 pa_assert(e);
988
989 pa_mutex_lock(e->mutex);
990
991 if (id >= e->n_init)
992 goto fail;
993
994 if (!e->slots[id].block)
995 goto fail;
996
997 b = e->slots[id].block;
998 e->slots[id].block = NULL;
999
1000 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1001 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1002
1003 pa_mutex_unlock(e->mutex);
1004
1005 /* pa_log("Processing release for %u", id); */
1006
1007 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1008 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1009
1010 pa_atomic_dec(&e->pool->stat.n_exported);
1011 pa_atomic_sub(&e->pool->stat.exported_size, b->length);
1012
1013 pa_memblock_unref(b);
1014
1015 return 0;
1016
1017 fail:
1018 pa_mutex_unlock(e->mutex);
1019
1020 return -1;
1021 }
1022
1023 /* Self-locked */
1024 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1025 struct memexport_slot *slot, *next;
1026 pa_assert(e);
1027 pa_assert(i);
1028
1029 pa_mutex_lock(e->mutex);
1030
1031 for (slot = e->used_slots; slot; slot = next) {
1032 uint32_t idx;
1033 next = slot->next;
1034
1035 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1036 slot->block->per_type.imported.segment->import != i)
1037 continue;
1038
1039 idx = slot - e->slots;
1040 e->revoke_cb(e, idx, e->userdata);
1041 pa_memexport_process_release(e, idx);
1042 }
1043
1044 pa_mutex_unlock(e->mutex);
1045 }
1046
1047 /* No lock necessary */
1048 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1049 pa_memblock *n;
1050
1051 pa_assert(p);
1052 pa_assert(b);
1053
1054 if (b->type == PA_MEMBLOCK_IMPORTED ||
1055 b->type == PA_MEMBLOCK_POOL ||
1056 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1057 pa_assert(b->pool == p);
1058 return pa_memblock_ref(b);
1059 }
1060
1061 if (!(n = pa_memblock_new_pool(p, b->length)))
1062 return NULL;
1063
1064 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1065 return n;
1066 }
1067
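/* Pins the block in an export slot and returns the (block id, SHM id, offset, length) tuple that identifies its data to the remote side; blocks that are not already backed by shared memory are copied into the pool first */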
1068 /* Self-locked */
1069 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1070 pa_shm *memory;
1071 struct memexport_slot *slot;
1072 void *data;
1073
1074 pa_assert(e);
1075 pa_assert(b);
1076 pa_assert(block_id);
1077 pa_assert(shm_id);
1078 pa_assert(offset);
1079 pa_assert(size);
1080 pa_assert(b->pool == e->pool);
1081
1082 if (!(b = memblock_shared_copy(e->pool, b)))
1083 return -1;
1084
1085 pa_mutex_lock(e->mutex);
1086
1087 if (e->free_slots) {
1088 slot = e->free_slots;
1089 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1090 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1091 slot = &e->slots[e->n_init++];
1092 else {
1093 pa_mutex_unlock(e->mutex);
1094 pa_memblock_unref(b);
1095 return -1;
1096 }
1097
1098 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1099 slot->block = b;
1100 *block_id = slot - e->slots;
1101
1102 pa_mutex_unlock(e->mutex);
1103 /* pa_log("Got block id %u", *block_id); */
1104
1105 data = pa_memblock_acquire(b);
1106
1107 if (b->type == PA_MEMBLOCK_IMPORTED) {
1108 pa_assert(b->per_type.imported.segment);
1109 memory = &b->per_type.imported.segment->memory;
1110 } else {
1111 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1112 pa_assert(b->pool);
1113 memory = &b->pool->memory;
1114 }
1115
1116 pa_assert(data >= memory->ptr);
1117 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1118
1119 *shm_id = memory->id;
1120 *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
1121 *size = b->length;
1122
1123 pa_memblock_release(b);
1124
1125 pa_atomic_inc(&e->pool->stat.n_exported);
1126 pa_atomic_add(&e->pool->stat.exported_size, b->length);
1127
1128 return 0;
1129 }