1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/memtrap.h>
49
50 #include "memblock.h"
51
52 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
53 * note that the footprint is usually much smaller, since the data is
54 * stored in SHM and our OS does not commit the memory before we use
55 * it for the first time. */
56 #define PA_MEMPOOL_SLOTS_MAX 1024
57 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
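/* Worked out, the bound mentioned in the comment above is simply
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64*1024 bytes = 64 MiB
 * (illustrative arithmetic only, nothing here is compiled). */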
58
59 #define PA_MEMEXPORT_SLOTS_MAX 128
60
61 #define PA_MEMIMPORT_SLOTS_MAX 160
62 #define PA_MEMIMPORT_SEGMENTS_MAX 16
63
64 struct pa_memblock {
65 PA_REFCNT_DECLARE; /* the reference counter */
66 pa_mempool *pool;
67
68 pa_memblock_type_t type;
69
70 pa_bool_t read_only:1;
71 pa_bool_t is_silence:1;
72
73 pa_atomic_ptr_t data;
74 size_t length;
75
76 pa_atomic_t n_acquired;
77 pa_atomic_t please_signal;
78
79 union {
80 struct {
81 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
82 pa_free_cb_t free_cb;
83 } user;
84
85 struct {
86 uint32_t id;
87 pa_memimport_segment *segment;
88 } imported;
89 } per_type;
90 };
91
92 struct pa_memimport_segment {
93 pa_memimport *import;
94 pa_shm memory;
95 pa_memtrap *trap;
96 unsigned n_blocks;
97 };
98
99 struct pa_memimport {
100 pa_mutex *mutex;
101
102 pa_mempool *pool;
103 pa_hashmap *segments;
104 pa_hashmap *blocks;
105
106 /* Called whenever an imported memory block is no longer
107 * needed. */
108 pa_memimport_release_cb_t release_cb;
109 void *userdata;
110
111 PA_LLIST_FIELDS(pa_memimport);
112 };
113
114 struct memexport_slot {
115 PA_LLIST_FIELDS(struct memexport_slot);
116 pa_memblock *block;
117 };
118
119 struct pa_memexport {
120 pa_mutex *mutex;
121 pa_mempool *pool;
122
123 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
124
125 PA_LLIST_HEAD(struct memexport_slot, free_slots);
126 PA_LLIST_HEAD(struct memexport_slot, used_slots);
127 unsigned n_init;
128
129 /* Called whenever a client from which we imported a memory block that
130 we in turn exported to another client dies, and we therefore need to
131 revoke the memory block accordingly */
132 pa_memexport_revoke_cb_t revoke_cb;
133 void *userdata;
134
135 PA_LLIST_FIELDS(pa_memexport);
136 };
137
138 struct pa_mempool {
139 pa_semaphore *semaphore;
140 pa_mutex *mutex;
141
142 pa_shm memory;
143 size_t block_size;
144 unsigned n_blocks;
145
146 pa_atomic_t n_init;
147
148 PA_LLIST_HEAD(pa_memimport, imports);
149 PA_LLIST_HEAD(pa_memexport, exports);
150
151 /* A list of free slots that may be reused */
152 pa_flist *free_slots;
153
154 pa_mempool_stat stat;
155 };
156
157 static void segment_detach(pa_memimport_segment *seg);
158
159 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
160
161 /* No lock necessary */
162 static void stat_add(pa_memblock*b) {
163 pa_assert(b);
164 pa_assert(b->pool);
165
166 pa_atomic_inc(&b->pool->stat.n_allocated);
167 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
168
169 pa_atomic_inc(&b->pool->stat.n_accumulated);
170 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
171
172 if (b->type == PA_MEMBLOCK_IMPORTED) {
173 pa_atomic_inc(&b->pool->stat.n_imported);
174 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
175 }
176
177 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
178 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
179 }
180
181 /* No lock necessary */
182 static void stat_remove(pa_memblock *b) {
183 pa_assert(b);
184 pa_assert(b->pool);
185
186 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
187 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
188
189 pa_atomic_dec(&b->pool->stat.n_allocated);
190 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
191
192 if (b->type == PA_MEMBLOCK_IMPORTED) {
193 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
194 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
195
196 pa_atomic_dec(&b->pool->stat.n_imported);
197 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
198 }
199
200 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
201 }
202
203 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
204
205 /* No lock necessary */
206 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
207 pa_memblock *b;
208
209 pa_assert(p);
210 pa_assert(length);
211
212 if (!(b = pa_memblock_new_pool(p, length)))
213 b = memblock_new_appended(p, length);
214
215 return b;
216 }
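/* A minimal usage sketch (not compiled here; it only uses functions declared
 * in memblock.h and assumes a private pool of the default size):
 *
 *     pa_mempool *pool = pa_mempool_new(FALSE, 0);      // default-sized private pool
 *     pa_memblock *blk = pa_memblock_new(pool, 4096);   // pool slot, or appended fallback
 *
 *     ... use the block (see pa_memblock_acquire()/release() below) ...
 *
 *     pa_memblock_unref(blk);
 *     pa_mempool_free(pool);
 */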
217
218 /* No lock necessary */
219 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
220 pa_memblock *b;
221
222 pa_assert(p);
223 pa_assert(length);
224
225 /* If -1 is passed as length we choose the size for the caller. */
226
227 if (length == (size_t) -1)
228 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
229
230 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
231 PA_REFCNT_INIT(b);
232 b->pool = p;
233 b->type = PA_MEMBLOCK_APPENDED;
234 b->read_only = b->is_silence = FALSE;
235 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
236 b->length = length;
237 pa_atomic_store(&b->n_acquired, 0);
238 pa_atomic_store(&b->please_signal, 0);
239
240 stat_add(b);
241 return b;
242 }
243
244 /* No lock necessary */
245 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
246 struct mempool_slot *slot;
247 pa_assert(p);
248
249 if (!(slot = pa_flist_pop(p->free_slots))) {
250 int idx;
251
252 /* The free list was empty, we have to allocate a new entry */
253
254 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
255 pa_atomic_dec(&p->n_init);
256 else
257 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
258
259 if (!slot) {
260 pa_log_debug("Pool full");
261 pa_atomic_inc(&p->stat.n_pool_full);
262 return NULL;
263 }
264 }
265
266 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
267 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
268 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
269 /* } */
270 /* #endif */
271
272 return slot;
273 }
274
275 /* No lock necessary, totally redundant anyway */
276 static inline void* mempool_slot_data(struct mempool_slot *slot) {
277 return slot;
278 }
279
280 /* No lock necessary */
281 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
282 pa_assert(p);
283
284 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
285 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
286
287 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
288 }
289
290 /* No lock necessary */
291 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
292 unsigned idx;
293
294 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
295 return NULL;
296
297 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
298 }
299
300 /* No lock necessary */
301 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
302 pa_memblock *b = NULL;
303 struct mempool_slot *slot;
304
305 pa_assert(p);
306 pa_assert(length);
307
308 /* If -1 is passed as length we choose the size for the caller: we
309 * take the largest size that fits in one of our slots. */
310
311 if (length == (size_t) -1)
312 length = pa_mempool_block_size_max(p);
313
314 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
315
316 if (!(slot = mempool_allocate_slot(p)))
317 return NULL;
318
319 b = mempool_slot_data(slot);
320 b->type = PA_MEMBLOCK_POOL;
321 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
322
323 } else if (p->block_size >= length) {
324
325 if (!(slot = mempool_allocate_slot(p)))
326 return NULL;
327
328 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
329 b = pa_xnew(pa_memblock, 1);
330
331 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
332 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
333
334 } else {
335 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
336 pa_atomic_inc(&p->stat.n_too_large_for_pool);
337 return NULL;
338 }
339
340 PA_REFCNT_INIT(b);
341 b->pool = p;
342 b->read_only = b->is_silence = FALSE;
343 b->length = length;
344 pa_atomic_store(&b->n_acquired, 0);
345 pa_atomic_store(&b->please_signal, 0);
346
347 stat_add(b);
348 return b;
349 }
350
351 /* No lock necessary */
352 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
353 pa_memblock *b;
354
355 pa_assert(p);
356 pa_assert(d);
357 pa_assert(length != (size_t) -1);
358 pa_assert(length);
359
360 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
361 b = pa_xnew(pa_memblock, 1);
362 PA_REFCNT_INIT(b);
363 b->pool = p;
364 b->type = PA_MEMBLOCK_FIXED;
365 b->read_only = read_only;
366 b->is_silence = FALSE;
367 pa_atomic_ptr_store(&b->data, d);
368 b->length = length;
369 pa_atomic_store(&b->n_acquired, 0);
370 pa_atomic_store(&b->please_signal, 0);
371
372 stat_add(b);
373 return b;
374 }
375
376 /* No lock necessary */
377 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
378 pa_memblock *b;
379
380 pa_assert(p);
381 pa_assert(d);
382 pa_assert(length);
383 pa_assert(length != (size_t) -1);
384 pa_assert(free_cb);
385
386 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
387 b = pa_xnew(pa_memblock, 1);
388 PA_REFCNT_INIT(b);
389 b->pool = p;
390 b->type = PA_MEMBLOCK_USER;
391 b->read_only = read_only;
392 b->is_silence = FALSE;
393 pa_atomic_ptr_store(&b->data, d);
394 b->length = length;
395 pa_atomic_store(&b->n_acquired, 0);
396 pa_atomic_store(&b->please_signal, 0);
397
398 b->per_type.user.free_cb = free_cb;
399
400 stat_add(b);
401 return b;
402 }
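/* Sketch of wrapping caller-owned memory in a block (the buffer is a
 * hypothetical example; pa_xfree matches pa_free_cb_t and is what this file
 * itself installs as a free callback elsewhere):
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *blk = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(blk);   // on the last unref, memblock_free() calls pa_xfree(buf)
 */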
403
404 /* No lock necessary */
405 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
406 pa_assert(b);
407 pa_assert(PA_REFCNT_VALUE(b) > 0);
408
409 return b->read_only && PA_REFCNT_VALUE(b) == 1;
410 }
411
412 /* No lock necessary */
413 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
414 pa_assert(b);
415 pa_assert(PA_REFCNT_VALUE(b) > 0);
416
417 return b->is_silence;
418 }
419
420 /* No lock necessary */
421 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
422 pa_assert(b);
423 pa_assert(PA_REFCNT_VALUE(b) > 0);
424
425 b->is_silence = v;
426 }
427
428 /* No lock necessary */
429 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
430 int r;
431 pa_assert(b);
432
433 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
434
435 return r == 1;
436 }
437
438 /* No lock necessary */
439 void* pa_memblock_acquire(pa_memblock *b) {
440 pa_assert(b);
441 pa_assert(PA_REFCNT_VALUE(b) > 0);
442
443 pa_atomic_inc(&b->n_acquired);
444
445 return pa_atomic_ptr_load(&b->data);
446 }
447
448 /* No lock necessary, in corner cases locks by its own */
449 void pa_memblock_release(pa_memblock *b) {
450 int r;
451 pa_assert(b);
452 pa_assert(PA_REFCNT_VALUE(b) > 0);
453
454 r = pa_atomic_dec(&b->n_acquired);
455 pa_assert(r >= 1);
456
457 /* Signal a waiting thread that this memblock is no longer used */
458 if (r == 1 && pa_atomic_load(&b->please_signal))
459 pa_semaphore_post(b->pool->semaphore);
460 }
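/* Intended access pattern around this pair of calls (illustrative):
 *
 *     void *d = pa_memblock_acquire(blk);
 *     ... read or write up to pa_memblock_get_length(blk) bytes at d ...
 *     pa_memblock_release(blk);
 *
 * The n_acquired counter bumped and dropped here is what memblock_wait()
 * further below blocks on until all concurrent accessors are done, e.g.
 * before a block is made local. */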
461
462 size_t pa_memblock_get_length(pa_memblock *b) {
463 pa_assert(b);
464 pa_assert(PA_REFCNT_VALUE(b) > 0);
465
466 return b->length;
467 }
468
469 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
470 pa_assert(b);
471 pa_assert(PA_REFCNT_VALUE(b) > 0);
472
473 return b->pool;
474 }
475
476 /* No lock necessary */
477 pa_memblock* pa_memblock_ref(pa_memblock*b) {
478 pa_assert(b);
479 pa_assert(PA_REFCNT_VALUE(b) > 0);
480
481 PA_REFCNT_INC(b);
482 return b;
483 }
484
485 static void memblock_free(pa_memblock *b) {
486 pa_assert(b);
487
488 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
489
490 stat_remove(b);
491
492 switch (b->type) {
493 case PA_MEMBLOCK_USER :
494 pa_assert(b->per_type.user.free_cb);
495 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
496
497 /* Fall through */
498
499 case PA_MEMBLOCK_FIXED:
500 case PA_MEMBLOCK_APPENDED :
501 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
502 pa_xfree(b);
503
504 break;
505
506 case PA_MEMBLOCK_IMPORTED : {
507 pa_memimport_segment *segment;
508 pa_memimport *import;
509
510 /* FIXME! This should be implemented lock-free */
511
512 pa_assert_se(segment = b->per_type.imported.segment);
513 pa_assert_se(import = segment->import);
514
515 pa_mutex_lock(import->mutex);
516
517 pa_hashmap_remove(
518 import->blocks,
519 PA_UINT32_TO_PTR(b->per_type.imported.id));
520
521 pa_assert(segment->n_blocks >= 1);
522 if (-- segment->n_blocks <= 0)
523 segment_detach(segment);
524
525 pa_mutex_unlock(import->mutex);
526
527 import->release_cb(import, b->per_type.imported.id, import->userdata);
528
529 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
530 pa_xfree(b);
531
532 break;
533 }
534
535 case PA_MEMBLOCK_POOL_EXTERNAL:
536 case PA_MEMBLOCK_POOL: {
537 struct mempool_slot *slot;
538 pa_bool_t call_free;
539
540 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
541 pa_assert(slot);
542
543 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
544
545 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
546 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
547 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
548 /* } */
549 /* #endif */
550
551 /* The free list dimensions should easily allow all slots
552 * to fit in, hence try harder if pushing this slot into
553 * the free list fails */
554 while (pa_flist_push(b->pool->free_slots, slot) < 0)
555 ;
556
557 if (call_free)
558 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
559 pa_xfree(b);
560
561 break;
562 }
563
564 case PA_MEMBLOCK_TYPE_MAX:
565 default:
566 pa_assert_not_reached();
567 }
568 }
569
570 /* No lock necessary */
571 void pa_memblock_unref(pa_memblock*b) {
572 pa_assert(b);
573 pa_assert(PA_REFCNT_VALUE(b) > 0);
574
575 if (PA_REFCNT_DEC(b) > 0)
576 return;
577
578 memblock_free(b);
579 }
580
581 /* Self locked */
582 static void memblock_wait(pa_memblock *b) {
583 pa_assert(b);
584
585 if (pa_atomic_load(&b->n_acquired) > 0) {
586 /* We need to wait until all threads gave up access to the
587 * memory block before we can go on. Unfortunately this means
588 * that we have to lock and wait here. Sniff! */
589
590 pa_atomic_inc(&b->please_signal);
591
592 while (pa_atomic_load(&b->n_acquired) > 0)
593 pa_semaphore_wait(b->pool->semaphore);
594
595 pa_atomic_dec(&b->please_signal);
596 }
597 }
598
599 /* No lock necessary. This function is not multiple caller safe! */
600 static void memblock_make_local(pa_memblock *b) {
601 pa_assert(b);
602
603 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
604
605 if (b->length <= b->pool->block_size) {
606 struct mempool_slot *slot;
607
608 if ((slot = mempool_allocate_slot(b->pool))) {
609 void *new_data;
610 /* We can move it into a local pool, perfect! */
611
612 new_data = mempool_slot_data(slot);
613 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
614 pa_atomic_ptr_store(&b->data, new_data);
615
616 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
617 b->read_only = FALSE;
618
619 goto finish;
620 }
621 }
622
623 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
624 b->per_type.user.free_cb = pa_xfree;
625 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
626
627 b->type = PA_MEMBLOCK_USER;
628 b->read_only = FALSE;
629
630 finish:
631 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
632 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
633 memblock_wait(b);
634 }
635
636 /* No lock necessary. This function is not multiple-caller safe */
637 void pa_memblock_unref_fixed(pa_memblock *b) {
638 pa_assert(b);
639 pa_assert(PA_REFCNT_VALUE(b) > 0);
640 pa_assert(b->type == PA_MEMBLOCK_FIXED);
641
642 if (PA_REFCNT_VALUE(b) > 1)
643 memblock_make_local(b);
644
645 pa_memblock_unref(b);
646 }
647
648 /* No lock necessary. */
649 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
650 void *p;
651
652 pa_assert(b);
653 pa_assert(PA_REFCNT_VALUE(b) > 0);
654
655 p = pa_memblock_acquire(b);
656 pa_will_need(p, b->length);
657 pa_memblock_release(b);
658
659 return b;
660 }
661
662 /* Self-locked. This function is not multiple-caller safe */
663 static void memblock_replace_import(pa_memblock *b) {
664 pa_memimport_segment *segment;
665 pa_memimport *import;
666
667 pa_assert(b);
668 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
669
670 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
671 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
672 pa_atomic_dec(&b->pool->stat.n_imported);
673 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
674
675 pa_assert_se(segment = b->per_type.imported.segment);
676 pa_assert_se(import = segment->import);
677
678 pa_mutex_lock(import->mutex);
679
680 pa_hashmap_remove(
681 import->blocks,
682 PA_UINT32_TO_PTR(b->per_type.imported.id));
683
684 memblock_make_local(b);
685
686 pa_assert(segment->n_blocks >= 1);
687 if (-- segment->n_blocks <= 0)
688 segment_detach(segment);
689
690 pa_mutex_unlock(import->mutex);
691 }
692
693 pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
694 pa_mempool *p;
695 char t1[64], t2[64];
696
697 p = pa_xnew(pa_mempool, 1);
698
699 p->mutex = pa_mutex_new(TRUE, TRUE);
700 p->semaphore = pa_semaphore_new(0);
701
702 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
703 if (p->block_size < PA_PAGE_SIZE)
704 p->block_size = PA_PAGE_SIZE;
705
706 if (size <= 0)
707 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
708 else {
709 p->n_blocks = (unsigned) (size / p->block_size);
710
711 if (p->n_blocks < 2)
712 p->n_blocks = 2;
713 }
714
715 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
716 pa_xfree(p);
717 return NULL;
718 }
719
720 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
721 p->memory.shared ? "shared" : "private",
722 p->n_blocks,
723 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
724 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
725 (unsigned long) pa_mempool_block_size_max(p));
726
727 memset(&p->stat, 0, sizeof(p->stat));
728 pa_atomic_store(&p->n_init, 0);
729
730 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
731 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
732
733 p->free_slots = pa_flist_new(p->n_blocks);
734
735 return p;
736 }
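/* Sizing example (assuming 4 KiB pages, so PA_PAGE_ALIGN(64*1024) == 64 KiB):
 * pa_mempool_new(TRUE, 1024*1024) yields block_size == 64 KiB and
 * n_blocks == 16, i.e. a 1 MiB shared pool, while pa_mempool_new(FALSE, 0)
 * falls back to the PA_MEMPOOL_SLOTS_MAX default of 1024 slots (64 MiB). */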
737
738 void pa_mempool_free(pa_mempool *p) {
739 pa_assert(p);
740
741 pa_mutex_lock(p->mutex);
742
743 while (p->imports)
744 pa_memimport_free(p->imports);
745
746 while (p->exports)
747 pa_memexport_free(p->exports);
748
749 pa_mutex_unlock(p->mutex);
750
751 pa_flist_free(p->free_slots, NULL);
752
753 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
754
755 /* Ouch, somebody is retaining a memory block reference! */
756
757 #ifdef DEBUG_REF
758 unsigned i;
759 pa_flist *list;
760
761 /* Let's try to find at least one of those leaked memory blocks */
762
763 list = pa_flist_new(p->n_blocks);
764
765 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
766 struct mempool_slot *slot;
767 pa_memblock *b, *k;
768
769 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
770 b = mempool_slot_data(slot);
771
772 while ((k = pa_flist_pop(p->free_slots))) {
773 while (pa_flist_push(list, k) < 0)
774 ;
775
776 if (b == k)
777 break;
778 }
779
780 if (!k)
781 pa_log("REF: Leaked memory block %p", b);
782
783 while ((k = pa_flist_pop(list)))
784 while (pa_flist_push(p->free_slots, k) < 0)
785 ;
786 }
787
788 pa_flist_free(list, NULL);
789
790 #endif
791
792 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
793
794 /* PA_DEBUG_TRAP; */
795 }
796
797 pa_shm_free(&p->memory);
798
799 pa_mutex_free(p->mutex);
800 pa_semaphore_free(p->semaphore);
801
802 pa_xfree(p);
803 }
804
805 /* No lock necessary */
806 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
807 pa_assert(p);
808
809 return &p->stat;
810 }
811
812 /* No lock necessary */
813 size_t pa_mempool_block_size_max(pa_mempool *p) {
814 pa_assert(p);
815
816 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
817 }
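/* Illustration: requesting (size_t) -1 asks for exactly this maximum, i.e.
 * one slot minus the aligned pa_memblock header (assuming the allocation
 * succeeds at all):
 *
 *     pa_memblock *blk = pa_memblock_new(p, (size_t) -1);
 *     pa_assert(pa_memblock_get_length(blk) == pa_mempool_block_size_max(p));
 */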
818
819 /* No lock necessary */
820 void pa_mempool_vacuum(pa_mempool *p) {
821 struct mempool_slot *slot;
822 pa_flist *list;
823
824 pa_assert(p);
825
826 list = pa_flist_new(p->n_blocks);
827
828 while ((slot = pa_flist_pop(p->free_slots)))
829 while (pa_flist_push(list, slot) < 0)
830 ;
831
832 while ((slot = pa_flist_pop(list))) {
833 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
834
835 while (pa_flist_push(p->free_slots, slot))
836 ;
837 }
838
839 pa_flist_free(list, NULL);
840 }
841
842 /* No lock necessary */
843 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
844 pa_assert(p);
845
846 if (!p->memory.shared)
847 return -1;
848
849 *id = p->memory.id;
850
851 return 0;
852 }
853
854 /* No lock necessary */
855 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
856 pa_assert(p);
857
858 return !!p->memory.shared;
859 }
860
861 /* For receiving blocks from other nodes */
862 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
863 pa_memimport *i;
864
865 pa_assert(p);
866 pa_assert(cb);
867
868 i = pa_xnew(pa_memimport, 1);
869 i->mutex = pa_mutex_new(TRUE, TRUE);
870 i->pool = p;
871 i->segments = pa_hashmap_new(NULL, NULL);
872 i->blocks = pa_hashmap_new(NULL, NULL);
873 i->release_cb = cb;
874 i->userdata = userdata;
875
876 pa_mutex_lock(p->mutex);
877 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
878 pa_mutex_unlock(p->mutex);
879
880 return i;
881 }
882
883 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
884
885 /* Should be called locked */
886 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
887 pa_memimport_segment* seg;
888
889 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
890 return NULL;
891
892 seg = pa_xnew(pa_memimport_segment, 1);
893
894 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
895 pa_xfree(seg);
896 return NULL;
897 }
898
899 seg->import = i;
900 seg->n_blocks = 0;
901 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
902
903 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
904 return seg;
905 }
906
907 /* Should be called locked */
908 static void segment_detach(pa_memimport_segment *seg) {
909 pa_assert(seg);
910
911 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
912 pa_shm_free(&seg->memory);
913
914 if (seg->trap)
915 pa_memtrap_remove(seg->trap);
916
917 pa_xfree(seg);
918 }
919
920 /* Self-locked. Not multiple-caller safe */
921 void pa_memimport_free(pa_memimport *i) {
922 pa_memexport *e;
923 pa_memblock *b;
924
925 pa_assert(i);
926
927 pa_mutex_lock(i->mutex);
928
929 while ((b = pa_hashmap_first(i->blocks)))
930 memblock_replace_import(b);
931
932 pa_assert(pa_hashmap_size(i->segments) == 0);
933
934 pa_mutex_unlock(i->mutex);
935
936 pa_mutex_lock(i->pool->mutex);
937
938 /* If we've exported this block further we need to revoke that export */
939 for (e = i->pool->exports; e; e = e->next)
940 memexport_revoke_blocks(e, i);
941
942 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
943
944 pa_mutex_unlock(i->pool->mutex);
945
946 pa_hashmap_free(i->blocks, NULL, NULL);
947 pa_hashmap_free(i->segments, NULL, NULL);
948
949 pa_mutex_free(i->mutex);
950
951 pa_xfree(i);
952 }
953
954 /* Self-locked */
955 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
956 pa_memblock *b = NULL;
957 pa_memimport_segment *seg;
958
959 pa_assert(i);
960
961 pa_mutex_lock(i->mutex);
962
963 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
964 goto finish;
965
966 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
967 if (!(seg = segment_attach(i, shm_id)))
968 goto finish;
969
970 if (offset+size > seg->memory.size)
971 goto finish;
972
973 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
974 b = pa_xnew(pa_memblock, 1);
975
976 PA_REFCNT_INIT(b);
977 b->pool = i->pool;
978 b->type = PA_MEMBLOCK_IMPORTED;
979 b->read_only = TRUE;
980 b->is_silence = FALSE;
981 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
982 b->length = size;
983 pa_atomic_store(&b->n_acquired, 0);
984 pa_atomic_store(&b->please_signal, 0);
985 b->per_type.imported.id = block_id;
986 b->per_type.imported.segment = seg;
987
988 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
989
990 seg->n_blocks++;
991
992 finish:
993 pa_mutex_unlock(i->mutex);
994
995 if (b)
996 stat_add(b);
997
998 return b;
999 }
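/* Sketch of the receiving side of SHM transport (the IDs, offset and size
 * would normally arrive via the native protocol; here they are placeholders):
 *
 *     static void release_cb(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *         // notify the exporting client that block_id is no longer referenced
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *blk  = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     ...
 *     pa_memblock_unref(blk);   // eventually invokes release_cb via memblock_free()
 */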
1000
1001 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1002 pa_memblock *b;
1003 int ret = 0;
1004 pa_assert(i);
1005
1006 pa_mutex_lock(i->mutex);
1007
1008 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1009 ret = -1;
1010 goto finish;
1011 }
1012
1013 memblock_replace_import(b);
1014
1015 finish:
1016 pa_mutex_unlock(i->mutex);
1017
1018 return ret;
1019 }
1020
1021 /* For sending blocks to other nodes */
1022 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1023 pa_memexport *e;
1024
1025 pa_assert(p);
1026 pa_assert(cb);
1027
1028 if (!p->memory.shared)
1029 return NULL;
1030
1031 e = pa_xnew(pa_memexport, 1);
1032 e->mutex = pa_mutex_new(TRUE, TRUE);
1033 e->pool = p;
1034 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1035 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1036 e->n_init = 0;
1037 e->revoke_cb = cb;
1038 e->userdata = userdata;
1039
1040 pa_mutex_lock(p->mutex);
1041 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1042 pa_mutex_unlock(p->mutex);
1043 return e;
1044 }
1045
1046 void pa_memexport_free(pa_memexport *e) {
1047 pa_assert(e);
1048
1049 pa_mutex_lock(e->mutex);
1050 while (e->used_slots)
1051 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
1052 pa_mutex_unlock(e->mutex);
1053
1054 pa_mutex_lock(e->pool->mutex);
1055 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1056 pa_mutex_unlock(e->pool->mutex);
1057
1058 pa_mutex_free(e->mutex);
1059 pa_xfree(e);
1060 }
1061
1062 /* Self-locked */
1063 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1064 pa_memblock *b;
1065
1066 pa_assert(e);
1067
1068 pa_mutex_lock(e->mutex);
1069
1070 if (id >= e->n_init)
1071 goto fail;
1072
1073 if (!e->slots[id].block)
1074 goto fail;
1075
1076 b = e->slots[id].block;
1077 e->slots[id].block = NULL;
1078
1079 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1080 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1081
1082 pa_mutex_unlock(e->mutex);
1083
1084 /* pa_log("Processing release for %u", id); */
1085
1086 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1087 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1088
1089 pa_atomic_dec(&e->pool->stat.n_exported);
1090 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1091
1092 pa_memblock_unref(b);
1093
1094 return 0;
1095
1096 fail:
1097 pa_mutex_unlock(e->mutex);
1098
1099 return -1;
1100 }
1101
1102 /* Self-locked */
1103 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1104 struct memexport_slot *slot, *next;
1105 pa_assert(e);
1106 pa_assert(i);
1107
1108 pa_mutex_lock(e->mutex);
1109
1110 for (slot = e->used_slots; slot; slot = next) {
1111 uint32_t idx;
1112 next = slot->next;
1113
1114 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1115 slot->block->per_type.imported.segment->import != i)
1116 continue;
1117
1118 idx = (uint32_t) (slot - e->slots);
1119 e->revoke_cb(e, idx, e->userdata);
1120 pa_memexport_process_release(e, idx);
1121 }
1122
1123 pa_mutex_unlock(e->mutex);
1124 }
1125
1126 /* No lock necessary */
1127 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1128 pa_memblock *n;
1129
1130 pa_assert(p);
1131 pa_assert(b);
1132
1133 if (b->type == PA_MEMBLOCK_IMPORTED ||
1134 b->type == PA_MEMBLOCK_POOL ||
1135 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1136 pa_assert(b->pool == p);
1137 return pa_memblock_ref(b);
1138 }
1139
1140 if (!(n = pa_memblock_new_pool(p, b->length)))
1141 return NULL;
1142
1143 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1144 return n;
1145 }
1146
1147 /* Self-locked */
1148 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1149 pa_shm *memory;
1150 struct memexport_slot *slot;
1151 void *data;
1152
1153 pa_assert(e);
1154 pa_assert(b);
1155 pa_assert(block_id);
1156 pa_assert(shm_id);
1157 pa_assert(offset);
1158 pa_assert(size);
1159 pa_assert(b->pool == e->pool);
1160
1161 if (!(b = memblock_shared_copy(e->pool, b)))
1162 return -1;
1163
1164 pa_mutex_lock(e->mutex);
1165
1166 if (e->free_slots) {
1167 slot = e->free_slots;
1168 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1169 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1170 slot = &e->slots[e->n_init++];
1171 else {
1172 pa_mutex_unlock(e->mutex);
1173 pa_memblock_unref(b);
1174 return -1;
1175 }
1176
1177 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1178 slot->block = b;
1179 *block_id = (uint32_t) (slot - e->slots);
1180
1181 pa_mutex_unlock(e->mutex);
1182 /* pa_log("Got block id %u", *block_id); */
1183
1184 data = pa_memblock_acquire(b);
1185
1186 if (b->type == PA_MEMBLOCK_IMPORTED) {
1187 pa_assert(b->per_type.imported.segment);
1188 memory = &b->per_type.imported.segment->memory;
1189 } else {
1190 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1191 pa_assert(b->pool);
1192 memory = &b->pool->memory;
1193 }
1194
1195 pa_assert(data >= memory->ptr);
1196 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1197
1198 *shm_id = memory->id;
1199 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1200 *size = b->length;
1201
1202 pa_memblock_release(b);
1203
1204 pa_atomic_inc(&e->pool->stat.n_exported);
1205 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1206
1207 return 0;
1208 }
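/* Sketch of the sending side (the transport of the four output values to the
 * peer, and how the revoke callback reaches that peer, are up to the caller):
 *
 *     static void revoke_cb(pa_memexport *exp, uint32_t block_id, void *userdata) {
 *         // tell the peer that block_id must no longer be accessed
 *     }
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, NULL);   // pool must be shared
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(exp, blk, &block_id, &shm_id, &offset, &size) == 0) {
 *         // send block_id/shm_id/offset/size to the peer; once the peer is done,
 *         // call pa_memexport_process_release(exp, block_id)
 *     }
 */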