/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we can
     * revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);

/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
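
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical lifecycle of a block obtained from pa_memblock_new(). "pool" is
 * assumed to be a pa_mempool created elsewhere; the payload may only be
 * touched between acquire and release.
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     ... hand the block to a pa_memchunk, a sink input, etc. ...
 *     pa_memblock_unref(b);
 */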

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, so we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit())
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
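
/*
 * Usage sketch (illustrative only, not part of the original file): wrapping
 * caller-allocated memory in a memblock with pa_memblock_new_user().
 * Ownership of "buf" passes to the block; free_cb (here pa_xfree) is called
 * with the data pointer once the block is freed. fill_buffer() is a
 * hypothetical helper.
 *
 *     void *buf = pa_xmalloc(1024);
 *     fill_buffer(buf, 1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);    eventually calls pa_xfree(buf)
 */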

/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it does its own locking */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste a considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(
                                 import->blocks,
                                 PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

/* No lock necessary. This function is not multiple-caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}

/* No lock necessary. This function is not multiple-caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}

/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(
                         import->blocks,
                         PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}

pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
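
/*
 * Usage sketch (illustrative only, not part of the original file): creating
 * a pool. Passing 0 as size selects the default of PA_MEMPOOL_SLOTS_MAX
 * slots of PA_MEMPOOL_SLOT_SIZE bytes each (1024 * 64 KiB = 64 MiB of
 * address space); shared == TRUE backs the pool with a shared memory
 * segment instead of private memory.
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);
 *     if (!pool)
 *         return -1;
 *     pa_log_debug("largest pool block: %lu",
 *                  (unsigned long) pa_mempool_block_size_max(pool));
 *     ...
 *     pa_mempool_free(pool);
 */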

void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

        /* PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported any of these blocks further, we need to revoke those exports */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
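
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * receiving side of SHM transport. The four integers are assumed to have
 * arrived from the peer over the native protocol; release_cb is invoked
 * when the local side is done with an imported block, so the exporter can
 * be told to release its slot. The callback body is hypothetical.
 *
 *     static void release_cb(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *         ... queue a "release" message for the peer ...
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     if (!b)
 *         ... the peer sent an invalid or oversized reference ...
 */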

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
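
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * sending side. pa_memexport_put() pins a reference to the block (copying
 * it into the pool first if it does not already live in shareable memory)
 * and yields the four integers that are sent to the peer, which turns them
 * back into a block with pa_memimport_get(). revoke_cb is assumed to notify
 * the peer that an id has become invalid.
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, NULL);
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         ... transmit block_id, shm_id, offset and size to the peer ...
 *     ...
 *     pa_memexport_process_release(exp, block_id);    when the peer is done
 */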