[pulseaudio] / src / pulsecore / memblock.c
add new function pa_mempool_is_shared() to test whether a memory pool is suitable...
/* $Id$ */

/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <pulse/xmalloc.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client, from which we imported a memory block
     * that we in turn exported to another client, dies, so that we can
     * revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_shm memory;
    size_t block_size;
    unsigned n_blocks, n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    PA_LLIST_HEAD(struct mempool_slot, free_slots);
    PA_LLIST_HEAD(struct mempool_slot, used_slots);

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

static void stat_add(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    b->pool->stat.n_allocated++;
    b->pool->stat.n_accumulated++;
    b->pool->stat.allocated_size += b->length;
    b->pool->stat.accumulated_size += b->length;

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        b->pool->stat.n_imported++;
        b->pool->stat.imported_size += b->length;
    }
}

static void stat_remove(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    assert(b->pool->stat.n_allocated > 0);
    assert(b->pool->stat.allocated_size >= b->length);

    b->pool->stat.n_allocated--;
    b->pool->stat.allocated_size -= b->length;

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->pool->stat.n_imported > 0);
        assert(b->pool->stat.imported_size >= b->length);

        b->pool->stat.n_imported--;
        b->pool->stat.imported_size -= b->length;
    }
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
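
/* A minimal usage sketch (illustrative, not part of this file):
 * pa_memblock_new() prefers a slot from the shared pool and falls back
 * transparently to an appended malloc() allocation when the pool is
 * full or the request is too large. 'samples' is a hypothetical buffer.
 *
 *     pa_mempool *pool = pa_mempool_new(0);
 *     pa_memblock *blk = pa_memblock_new(pool, 4096);
 *     memcpy(blk->data, samples, 4096);
 *     pa_memblock_unref(blk);
 *     pa_mempool_free(pool);
 */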

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    b = pa_xmalloc(sizeof(pa_memblock) + length);
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    b->ref = 1;
    b->length = length;
    b->data = (uint8_t*) b + sizeof(pa_memblock);
    b->pool = p;

    stat_add(b);
    return b;
}

static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    assert(p);

    if (p->free_slots) {
        slot = p->free_slots;
        PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
    } else if (p->n_init < p->n_blocks)
        slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));
    else {
        pa_log_debug("Pool full");
        p->stat.n_pool_full++;
        return NULL;
    }

    PA_LLIST_PREPEND(struct mempool_slot, p->used_slots, slot);
    return slot;
}

static void* mempool_slot_data(struct mempool_slot *slot) {
    assert(slot);

    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert(p);
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    assert(p);
    assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        b->data = (uint8_t*) b + sizeof(pa_memblock);

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        b->data = mempool_slot_data(slot);
    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        p->stat.n_too_large_for_pool++;
        return NULL;
    }

    b->length = length;
    b->read_only = 0;
    b->ref = 1;
    b->pool = p;

    stat_add(b);
    return b;
}
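
/* Sketch of the slot arithmetic above ('pool' is hypothetical; sizes
 * assume the default 16 KiB slots): a slot offers block_size -
 * sizeof(struct mempool_slot) usable bytes. If the pa_memblock header
 * also fits in front of the data, the whole block lives in the slot;
 * if only the data fits, the header is allocated separately; anything
 * larger is rejected.
 *
 *     size_t usable = (16*1024) - sizeof(struct mempool_slot);
 *     pa_memblock *a = pa_memblock_new_pool(pool, 1024);        // PA_MEMBLOCK_POOL
 *     pa_memblock *b = pa_memblock_new_pool(pool, usable);      // PA_MEMBLOCK_POOL_EXTERNAL
 *     pa_memblock *c = pa_memblock_new_pool(pool, usable + 1);  // NULL: too large
 */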

pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->ref = 1;
    b->length = length;
    b->data = d;
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);
    assert(free_cb);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->ref = 1;
    b->length = length;
    b->data = d;
    b->per_type.user.free_cb = free_cb;
    b->pool = p;

    stat_add(b);
    return b;
}
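
/* A hedged sketch of wrapping caller-owned memory ('pool' is assumed
 * to exist): the free_cb is invoked from pa_memblock_unref() once the
 * last reference is dropped, so ownership of the buffer passes to the
 * block.
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *blk = pa_memblock_new_user(pool, buf, 1024, pa_xfree, 0);
 *     ...
 *     pa_memblock_unref(blk);   // calls pa_xfree(buf) here
 */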

pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(b);
    assert(b->ref >= 1);

    b->ref++;
    return b;
}

void pa_memblock_unref(pa_memblock *b) {
    assert(b);
    assert(b->ref >= 1);

    if ((--(b->ref)) > 0)
        return;

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(b->data);

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;

            segment = b->per_type.imported.segment;
            assert(segment);
            assert(segment->import);

            pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);

            if (--segment->n_blocks <= 0)
                segment_detach(segment);

            pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;

            slot = mempool_slot_by_ptr(b->pool, b->data);
            assert(slot);

            PA_LLIST_REMOVE(struct mempool_slot, b->pool->used_slots, slot);
            PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);

            if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)
                pa_xfree(b);
        }
    }
}
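
/* Reference-counting sketch (illustrative): every pa_memblock_ref()
 * must be balanced by one pa_memblock_unref(); the type-specific
 * cleanup above only runs when the count reaches zero.
 *
 *     pa_memblock *blk = pa_memblock_new(pool, 256);   // ref == 1
 *     pa_memblock_ref(blk);                            // ref == 2
 *     pa_memblock_unref(blk);                          // ref == 1, still alive
 *     pa_memblock_unref(blk);                          // ref == 0, freed/recycled
 */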

static void memblock_make_local(pa_memblock *b) {
    assert(b);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            new_data = mempool_slot_data(slot);
            memcpy(new_data, b->data, b->length);
            b->data = new_data;
            return;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->type = PA_MEMBLOCK_USER;
    b->per_type.user.free_cb = pa_xfree;
    b->read_only = 0;
    b->data = pa_xmemdup(b->data, b->length);
}

void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(b);
    assert(b->ref >= 1);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (b->ref > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
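
/* Sketch of the FIXED-block pattern, assuming 'stack_buf' is memory
 * whose lifetime ends when the caller returns: if other references
 * still exist at unref time, memblock_make_local() copies the data
 * away from the caller's buffer first, so outstanding refs stay valid.
 *
 *     uint8_t stack_buf[128];
 *     pa_memblock *blk = pa_memblock_new_fixed(pool, stack_buf, sizeof(stack_buf), 1);
 *     ...                             // blk may gain additional refs here
 *     pa_memblock_unref_fixed(blk);   // safe: data no longer points at stack_buf
 */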

static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b);
    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(b->pool->stat.n_imported > 0);
    assert(b->pool->stat.imported_size >= b->length);
    b->pool->stat.n_imported--;
    b->pool->stat.imported_size -= b->length;

    seg = b->per_type.imported.segment;
    assert(seg);
    assert(seg->import);

    pa_hashmap_remove(
        seg->import->blocks,
        PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (--seg->n_blocks <= 0)
        segment_detach(seg);
}

pa_mempool* pa_mempool_new(int shared) {
    size_t ps;
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    ps = (size_t) sysconf(_SC_PAGESIZE);

    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    p->n_init = 0;

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->used_slots);

    memset(&p->stat, 0, sizeof(p->stat));

    return p;
}
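
/* Worked example of the sizing above, assuming a 4096-byte page:
 * block_size = (16384 / 4096) * 4096 = 16384, i.e. the slot size is
 * rounded down to a multiple of the page size (and clamped up to one
 * page if PA_MEMPOOL_SLOT_SIZE were smaller than a page). The whole
 * pool then occupies n_blocks * block_size = 128 * 16384 = 2 MiB of
 * (optionally shared) memory.
 */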

void pa_mempool_free(pa_mempool *p) {
    assert(p);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    if (p->stat.n_allocated > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_shm_free(&p->memory);
    pa_xfree(p);
}

const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    assert(p);

    return &p->stat;
}

void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;

    assert(p);

    for (slot = p->free_slots; slot; slot = slot->next) {
        pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));
    }
}
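
/* A minimal sketch of when vacuuming pays off (hypothetical caller):
 * after a burst of activity, punching holes in the data pages of free
 * slots returns them to the kernel while the pool stays usable.
 *
 *     const pa_mempool_stat *st = pa_mempool_get_stat(pool);
 *     if (st->n_allocated == 0)
 *         pa_mempool_vacuum(pool);   // give free slot pages back to the kernel
 */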

int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

int pa_mempool_is_shared(pa_mempool *p) {
    assert(p);

    return !!p->memory.shared;
}
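
/* Usage sketch for the newly added pa_mempool_is_shared() (see the
 * commit message at the top): a caller might use it to decide whether
 * zero-copy SHM transfer can be offered to a peer at all.
 * 'offer_shm_to_client' is a hypothetical helper.
 *
 *     if (pa_mempool_is_shared(pool)) {
 *         uint32_t shm_id;
 *         if (pa_mempool_get_shm_id(pool, &shm_id) == 0)
 *             offer_shm_to_client(shm_id);
 *     }
 */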

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    assert(p);
    assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

static void segment_detach(pa_memimport_segment *seg) {
    assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    assert(i);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
    pa_xfree(i);
}

pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b;
    pa_memimport_segment *seg;

    assert(i);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        return NULL;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            return NULL;

    if (offset + size > seg->memory.size)
        return NULL;

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    b->ref = 1;
    b->length = size;
    b->data = (uint8_t*) seg->memory.ptr + offset;
    b->pool = i->pool;
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

    return b;
}
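
/* Import-side sketch, with block_id, shm_id, offset and size arriving
 * over the wire from the exporting peer (protocol values, not invented
 * locally); 'release_cb' is a caller-provided pa_memimport_release_cb_t.
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *blk = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     if (blk) {
 *         ...                      // read-only access to blk->data
 *         pa_memblock_unref(blk);  // eventually triggers release_cb
 *     }
 */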

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    assert(i);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))
        return -1;

    memblock_replace_import(b);
    return 0;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    assert(p);
    assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    assert(e);

    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);

    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    assert(e);

    if (id >= e->n_init)
        return -1;

    if (!e->slots[id].block)
        return -1;

    /* pa_log("Processing release for %u", id); */

    assert(e->pool->stat.n_exported > 0);
    assert(e->pool->stat.exported_size >= e->slots[id].block->length);

    e->pool->stat.n_exported--;
    e->pool->stat.exported_size -= e->slots[id].block->length;

    pa_memblock_unref(e->slots[id].block);
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    return 0;
}
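
/* Protocol sketch: when the importing peer signals that it is done
 * with a block id (a "release" message in the hypothetical transport),
 * the exporter drops its reference and recycles the slot:
 *
 *     if (pa_memexport_process_release(exp, id) < 0)
 *         pa_log_warn("peer released unknown block %u", id);
 */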

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    assert(e);
    assert(i);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }
}

static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    assert(p);
    assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(n->data, b->data, b->length);
    return n;
}

int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;

    assert(e);
    assert(b);
    assert(block_id);
    assert(shm_id);
    assert(offset);
    assert(size);
    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
        slot = &e->slots[e->n_init++];
    } else {
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    /* pa_log("Got block id %u", *block_id); */

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        assert(b->pool);
        memory = &b->pool->memory;
    }

    assert(b->data >= memory->ptr);
    assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;
    *size = b->length;

    e->pool->stat.n_exported++;
    e->pool->stat.exported_size += b->length;

    return 0;
}
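
/* Export-side sketch tying the pieces together ('exp' and 'blk' are
 * assumed to exist; the four out-parameters are what gets sent to the
 * peer, which feeds them into pa_memimport_get() on its side;
 * 'send_memblock_message' is a hypothetical transport helper):
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(exp, blk, &block_id, &shm_id, &offset, &size) == 0)
 *         send_memblock_message(block_id, shm_id, offset, size);
 */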