/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 512
#define PA_MEMPOOL_SLOT_SIZE (32*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
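
/* With these defaults a pool provides 512 slots of 32 KiB each (the
 * slot size is rounded up to the page size in pa_mempool_new()), i.e.
 * roughly 16 MiB of shared memory per pool, plus fixed limits on how
 * many blocks may be in flight per export/import object. */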

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only:1, is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
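
/* Layout note: for PA_MEMBLOCK_APPENDED and PA_MEMBLOCK_POOL blocks
 * this header and the payload live in one allocation, with "data"
 * pointing just past the aligned header; for all other types the
 * payload lives elsewhere and is only referenced from here. */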

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we
     * can revoke that block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
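
/* Slot bookkeeping: n_init counts how many slots have been handed out
 * so far; fresh slots are carved out of the shared memory area strictly
 * in order, and returned slots are recycled through the lock-free
 * free_slots list instead of being unmapped. */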

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
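
/* pa_memblock_new() prefers a slot from the shared pool and only falls
 * back to a private, malloc()-backed "appended" block if the pool is
 * full or the requested length does not fit into a single slot. */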

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
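
/* Slot allocation needs no lock: we first try to reuse a recycled slot
 * from the free list; failing that we atomically bump n_init to claim a
 * fresh slot, undoing the increment again if the pool turns out to be
 * exhausted. */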

/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    pa_assert(slot);

    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
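
/* Two pool-backed layouts are possible above: if header and payload fit
 * into one slot together we use PA_MEMBLOCK_POOL (header at the start
 * of the slot); if only the payload fits, the header is allocated
 * separately and the block becomes PA_MEMBLOCK_POOL_EXTERNAL. */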

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;

    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
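
/* pa_memblock_acquire()/pa_memblock_release() bracket every access to
 * the block's data. The please_signal flag is only set by
 * memblock_wait() below; when the last accessor releases the block we
 * post the pool semaphore so that the waiter may proceed. */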

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
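
/* The semaphore is shared by the whole pool, so a post may wake us up
 * for an unrelated block; that is why the loop above re-checks
 * n_acquired after every wait. */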

/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
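
/* memblock_make_local() is used when the memory backing a FIXED or
 * IMPORTED block is about to go away while references to the block
 * still exist: the data is copied either into a pool slot or, if the
 * pool is full, into a freshly malloc()ed buffer owned by the block. */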

/* No lock necessary. This function is not multiple-caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
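
/* pa_mempool_vacuum() hands the pages of all currently unused slots to
 * pa_shm_punch(), the idea being that an idle pool should not keep its
 * full size in physical memory. The slots stay on the free list and
 * remain usable afterwards. */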

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
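
/* A peer announces a block as the tuple (block_id, shm_id, offset,
 * size); pa_memimport_get() attaches the referenced SHM segment
 * read-only (at most PA_MEMIMPORT_SEGMENTS_MAX segments per import) and
 * wraps the region in a PA_MEMBLOCK_IMPORTED block without copying any
 * data. */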

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
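
/* Typical zero-copy round trip between two processes sharing a pool
 * (the wire protocol that carries the four values is up to the caller):
 *
 *   sender:    pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size);
 *              ...transmit block_id, shm_id, offset, size...
 *   receiver:  b = pa_memimport_get(i, block_id, shm_id, offset, size);
 *              ...use b, then pa_memblock_unref(b), which fires the
 *              import's release_cb...
 *   sender:    pa_memexport_process_release(e, block_id);
 */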