/* $Id$ */

/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <pulse/xmalloc.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we can
     * revoke the block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_shm memory;
    size_t block_size;
    unsigned n_blocks, n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    PA_LLIST_HEAD(struct mempool_slot, free_slots);
    PA_LLIST_HEAD(struct mempool_slot, used_slots);

    pa_mempool_stat stat;
};

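/* Layout sketch (illustrative): the pool is a single, optionally shared
 * memory segment carved into n_blocks slots of block_size bytes each.
 * For PA_MEMBLOCK_POOL blocks the pa_memblock header and its data live
 * in the same slot:
 *
 *     [ struct mempool_slot | pa_memblock | data ... ]
 *
 * For PA_MEMBLOCK_POOL_EXTERNAL blocks the header is allocated on the
 * heap and only the data occupies the slot. */
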
static void segment_detach(pa_memimport_segment *seg);

static void stat_add(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    b->pool->stat.n_allocated++;
    b->pool->stat.n_accumulated++;
    b->pool->stat.allocated_size += b->length;
    b->pool->stat.accumulated_size += b->length;

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        b->pool->stat.n_imported++;
        b->pool->stat.imported_size += b->length;
    }

    b->pool->stat.n_allocated_by_type[b->type]++;
    b->pool->stat.n_accumulated_by_type[b->type]++;
}

static void stat_remove(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    assert(b->pool->stat.n_allocated > 0);
    assert(b->pool->stat.allocated_size >= b->length);

    b->pool->stat.n_allocated--;
    b->pool->stat.allocated_size -= b->length;

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->pool->stat.n_imported > 0);
        assert(b->pool->stat.imported_size >= b->length);

        b->pool->stat.n_imported--;
        b->pool->stat.imported_size -= b->length;
    }

    b->pool->stat.n_allocated_by_type[b->type]--;
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
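
/* Illustrative usage (sketch; 'payload' is hypothetical):
 *
 *     pa_mempool *pool = pa_mempool_new(1);
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     memcpy(b->data, payload, 4096);
 *     pa_memblock_unref(b);
 *     pa_mempool_free(pool);
 */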

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    b = pa_xmalloc(sizeof(pa_memblock) + length);
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = (uint8_t*) b + sizeof(pa_memblock);
    b->pool = p;

    stat_add(b);
    return b;
}

static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    assert(p);

    if (p->free_slots) {
        slot = p->free_slots;
        PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
    } else if (p->n_init < p->n_blocks)
        slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));
    else {
        pa_log_debug("Pool full");
        p->stat.n_pool_full++;
        return NULL;
    }

    PA_LLIST_PREPEND(struct mempool_slot, p->used_slots, slot);
    return slot;
}

static void* mempool_slot_data(struct mempool_slot *slot) {
    assert(slot);

    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert(p);
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

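/* Allocation sketch: if header plus data fit into one slot we use
 * PA_MEMBLOCK_POOL; if only the data fits, the header goes to the heap
 * and the slot holds just the data (PA_MEMBLOCK_POOL_EXTERNAL);
 * otherwise we give up and the caller falls back to an appended heap
 * allocation (see pa_memblock_new()). */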
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    assert(p);
    assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        b->data = (uint8_t*) b + sizeof(pa_memblock);

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        b->data = mempool_slot_data(slot);
    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu",
                     (unsigned long) length,
                     (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        p->stat.n_too_large_for_pool++;
        return NULL;
    }

    b->length = length;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);
    assert(free_cb);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->per_type.user.free_cb = free_cb;
    b->pool = p;

    stat_add(b);
    return b;
}
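
/* Illustrative usage (sketch): hand a heap buffer over to the memblock;
 * the free_cb runs once the last reference is dropped:
 *
 *     void *d = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, d, 1024, pa_xfree, 0);
 *     ...
 *     pa_memblock_unref(b);    (invokes pa_xfree(d))
 */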

pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

void pa_memblock_unref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(b->data);

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;

            segment = b->per_type.imported.segment;
            assert(segment);
            assert(segment->import);

            pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);

            if (--segment->n_blocks <= 0)
                segment_detach(segment);

            pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;

            slot = mempool_slot_by_ptr(b->pool, b->data);
            assert(slot);

            PA_LLIST_REMOVE(struct mempool_slot, b->pool->used_slots, slot);
            PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);

            if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            abort();
    }
}

static void memblock_make_local(pa_memblock *b) {
    assert(b);

    b->pool->stat.n_allocated_by_type[b->type]--;

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            new_data = mempool_slot_data(slot);
            memcpy(new_data, b->data, b->length);
            b->data = new_data;
            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->type = PA_MEMBLOCK_USER;
    b->per_type.user.free_cb = pa_xfree;
    b->read_only = 0;
    b->data = pa_xmemdup(b->data, b->length);

finish:
    b->pool->stat.n_allocated_by_type[b->type]++;
    b->pool->stat.n_accumulated_by_type[b->type]++;
}

void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
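
/* Typical pattern (sketch): wrap caller-owned memory, then release it
 * with pa_memblock_unref_fixed() before the buffer goes out of scope.
 * If other references are still held, memblock_make_local() copies the
 * data out first:
 *
 *     uint8_t buf[512];
 *     pa_memblock *b = pa_memblock_new_fixed(pool, buf, sizeof(buf), 1);
 *     ...                          (consumers may take references here)
 *     pa_memblock_unref_fixed(b);  (safe: buf is no longer referenced)
 */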

static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b);
    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(b->pool->stat.n_imported > 0);
    assert(b->pool->stat.imported_size >= b->length);
    b->pool->stat.n_imported--;
    b->pool->stat.imported_size -= b->length;

    seg = b->per_type.imported.segment;
    assert(seg);
    assert(seg->import);

    pa_hashmap_remove(
        seg->import->blocks,
        PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (--seg->n_blocks <= 0)
        segment_detach(seg);
}

pa_mempool* pa_mempool_new(int shared) {
    size_t ps;
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

#ifdef HAVE_SYSCONF
    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
#else
    ps = 4096; /* Let's hope it's like x86. */
#endif

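    /* Round the slot size down to a whole multiple of the page size;
     * e.g. with 4 KiB pages the default 16 KiB slot gives a block_size
     * of 16384 bytes. */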
    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    p->n_init = 0;

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->used_slots);

    memset(&p->stat, 0, sizeof(p->stat));

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    assert(p);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    if (p->stat.n_allocated > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_shm_free(&p->memory);
    pa_xfree(p);
}

const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    assert(p);

    return &p->stat;
}

void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;

    assert(p);

    for (slot = p->free_slots; slot; slot = slot->next) {
        pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));
    }
}

int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

int pa_mempool_is_shared(pa_mempool *p) {
    assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    assert(p);
    assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

static void segment_detach(pa_memimport_segment *seg) {
    assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    assert(i);

    /* If we exported this block further, we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
    pa_xfree(i);
}

pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b;
    pa_memimport_segment *seg;

    assert(i);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        return NULL;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            return NULL;

    if (offset+size > seg->memory.size)
        return NULL;

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    PA_REFCNT_INIT(b);
    b->length = size;
    b->data = (uint8_t*) seg->memory.ptr + offset;
    b->pool = i->pool;
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

    return b;
}
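
/* Illustrative import flow (sketch; the callback signature is inferred
 * from the call site in pa_memblock_unref(), and the wire protocol that
 * delivers the IDs is outside this file):
 *
 *     static void release_cb(pa_memimport *i, uint32_t block_id, void *userdata) {
 *         (tell the exporting client that block_id is no longer needed)
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */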

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    assert(i);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))
        return -1;

    memblock_replace_import(b);
    return 0;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    assert(p);
    assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    assert(e);

    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);

    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    assert(e);

    if (id >= e->n_init)
        return -1;

    if (!e->slots[id].block)
        return -1;

    /* pa_log("Processing release for %u", id); */

    assert(e->pool->stat.n_exported > 0);
    assert(e->pool->stat.exported_size >= e->slots[id].block->length);

    e->pool->stat.n_exported--;
    e->pool->stat.exported_size -= e->slots[id].block->length;

    pa_memblock_unref(e->slots[id].block);
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    return 0;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    assert(e);
    assert(i);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }
}

static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    assert(p);
    assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(n->data, b->data, b->length);
    return n;
}

int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;

    assert(e);
    assert(b);
    assert(block_id);
    assert(shm_id);
    assert(offset);
    assert(size);
    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
        slot = &e->slots[e->n_init++];
    } else {
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    /* pa_log("Got block id %u", *block_id); */

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        assert(b->pool);
        memory = &b->pool->memory;
    }

    assert(b->data >= memory->ptr);
    assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;
    *size = b->length;

    e->pool->stat.n_exported++;
    e->pool->stat.exported_size += b->length;

    return 0;
}
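
/* Illustrative export flow (sketch; transporting the four IDs to the
 * peer is up to the caller):
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         (send block_id, shm_id, offset and size to the peer, which
 *         maps the block with pa_memimport_get(); once the peer is done
 *         it should trigger pa_memexport_process_release(e, block_id))
 *     }
 */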