/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <pulse/xmalloc.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we
     * can revoke the block from the latter accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_shm memory;
    size_t block_size;
    unsigned n_blocks, n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    PA_LLIST_HEAD(struct mempool_slot, free_slots);

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

static void stat_add(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

static void stat_remove(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
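
/* Usage sketch (illustrative, not part of the original file): allocate a
 * block and drop the reference again. If the pool is exhausted or the
 * request is too large, pa_memblock_new() transparently falls back to an
 * appended heap allocation.
 *
 *     pa_mempool *pool = pa_mempool_new(0);
 *     pa_memblock *b = pa_memblock_new(pool, 1024);
 *     memset(b->data, 0, b->length);
 *     pa_memblock_unref(b);
 *     pa_mempool_free(pool);
 */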

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    b = pa_xmalloc(sizeof(pa_memblock) + length);
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = (uint8_t*) b + sizeof(pa_memblock);
    b->pool = p;

    stat_add(b);
    return b;
}

static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    assert(p);

    if (p->free_slots) {
        slot = p->free_slots;
        PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
    } else if (p->n_init < p->n_blocks)
        slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));
    else {
        pa_log_debug("Pool full");
        pa_atomic_inc(&p->stat.n_pool_full);
        return NULL;
    }

    return slot;
}

static void* mempool_slot_data(struct mempool_slot *slot) {
    assert(slot);

    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert(p);
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
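
/* Worked example of the pointer/slot arithmetic above (illustrative,
 * assuming block_size == 16384): for a pointer 40000 bytes into the pool,
 *
 *     idx  = 40000 / 16384 = 2
 *     slot = memory.ptr + 2 * 16384 = memory.ptr + 32768
 *
 * so any pointer into a slot's data area maps back to the mempool_slot
 * header at the start of its block. Note that with the asserts above,
 * mempool_slot_idx() never actually returns (unsigned) -1; the NULL
 * branch in mempool_slot_by_ptr() is effectively defensive. */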

pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    assert(p);
    assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        b->data = (uint8_t*) b + sizeof(pa_memblock);

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        b->data = mempool_slot_data(slot);
    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu",
                     (unsigned long) length,
                     (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    b->length = length;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);
    assert(free_cb);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->per_type.user.free_cb = free_cb;
    b->pool = p;

    stat_add(b);
    return b;
}
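
/* Usage sketch (illustrative, not part of the original file): hand a
 * caller-allocated buffer to a block, to be released through the supplied
 * callback once the last reference is dropped. pa_xfree matches the
 * pa_xmalloc allocator used throughout this file:
 *
 *     void *buf = pa_xmalloc(4096);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 4096, pa_xfree, 0);
 *     pa_memblock_unref(b);   <- the final unref invokes pa_xfree(buf)
 */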

pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

void pa_memblock_unref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(b->data);

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;

            segment = b->per_type.imported.segment;
            assert(segment);
            assert(segment->import);

            pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);

            if (--segment->n_blocks <= 0)
                segment_detach(segment);

            pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;

            slot = mempool_slot_by_ptr(b->pool, b->data);
            assert(slot);

            PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);

            if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            abort();
    }
}

static void memblock_make_local(pa_memblock *b) {
    assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            new_data = mempool_slot_data(slot);
            memcpy(new_data, b->data, b->length);
            b->data = new_data;
            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->type = PA_MEMBLOCK_USER;
    b->per_type.user.free_cb = pa_xfree;
    b->read_only = 0;
    b->data = pa_xmemdup(b->data, b->length);

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b);
    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    assert(seg);
    assert(seg->import);

    pa_hashmap_remove(
        seg->import->blocks,
        PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (--seg->n_blocks <= 0)
        segment_detach(seg);
}

pa_mempool* pa_mempool_new(int shared) {
    size_t ps;
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

#ifdef HAVE_SYSCONF
    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
#else
    ps = 4096; /* Let's hope it's like x86. */
#endif

    /* Round the slot size down to a multiple of the page size */
    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    p->n_init = 0;

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);

    memset(&p->stat, 0, sizeof(p->stat));

    return p;
}
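
/* Worked example of the sizing above (illustrative): with ps = 4096 the
 * slot size stays (16384/4096)*4096 = 16384 bytes, and the pool maps
 * 128 * 16384 bytes = 2 MiB of (optionally shared) memory in a single
 * segment. On a hypothetical system with 64 KiB pages,
 * (16384/65536)*65536 = 0, so the "block_size < ps" clamp raises the
 * slot size to one full page. */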

void pa_mempool_free(pa_mempool *p) {
    assert(p);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    if (pa_atomic_load(&p->stat.n_allocated) > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_shm_free(&p->memory);
    pa_xfree(p);
}

const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    assert(p);

    return &p->stat;
}

void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;

    assert(p);

    for (slot = p->free_slots; slot; slot = slot->next)
        pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));
}

int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

int pa_mempool_is_shared(pa_mempool *p) {
    assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    assert(p);
    assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    return i;
}
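
/* Usage sketch (illustrative; release_here() is a hypothetical callback,
 * not part of the original file): the receiving side creates one import
 * object and then maps (block_id, shm_id, offset, size) tuples received
 * over the wire into local memory blocks via pa_memimport_get():
 *
 *     static void release_here(pa_memimport *imp, uint32_t id, void *userdata) {
 *         ... notify the sender that block "id" may be recycled ...
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_here, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */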

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment *seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

static void segment_detach(pa_memimport_segment *seg) {
    assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    assert(i);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
    pa_xfree(i);
}

pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b;
    pa_memimport_segment *seg;

    assert(i);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        return NULL;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            return NULL;

    if (offset+size > seg->memory.size)
        return NULL;

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    PA_REFCNT_INIT(b);
    b->length = size;
    b->data = (uint8_t*) seg->memory.ptr + offset;
    b->pool = i->pool;
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

    return b;
}

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    assert(i);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))
        return -1;

    memblock_replace_import(b);
    return 0;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    assert(p);
    assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    assert(e);

    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);

    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    assert(e);

    if (id >= e->n_init)
        return -1;

    if (!e->slots[id].block)
        return -1;

    /* pa_log("Processing release for %u", id); */

    assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) e->slots[id].block->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, e->slots[id].block->length);

    pa_memblock_unref(e->slots[id].block);
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    return 0;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    assert(e);
    assert(i);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }
}

static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    assert(p);
    assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(n->data, b->data, b->length);
    return n;
}

int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;

    assert(e);
    assert(b);
    assert(block_id);
    assert(shm_id);
    assert(offset);
    assert(size);
    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
        slot = &e->slots[e->n_init++];
    } else {
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    /* pa_log("Got block id %u", *block_id); */

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        assert(b->pool);
        memory = &b->pool->memory;
    }

    assert(b->data >= memory->ptr);
    assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
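
/* Usage sketch (illustrative; revoke_here() is a hypothetical callback,
 * not part of the original file): the sending side exports a block and
 * transmits the four identifiers instead of the payload. The peer later
 * acknowledges with a release message, which is fed back in through
 * pa_memexport_process_release():
 *
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_here, NULL);
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         ... send block_id, shm_id, offset, size to the peer ...
 */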