make pa_mempool_stat thread-safe/lock-free
/* $Id$ */

/***
  This file is part of PulseAudio.

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <atomic_ops.h> /* libatomic_ops: AO_t and the AO_* atomic primitives used below */

#include <pulse/xmalloc.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

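/* The pool is a single contiguous mapping (optionally SHM-backed, see
 * pa_mempool_new() below) carved into PA_MEMPOOL_SLOTS_MAX fixed-size
 * slots. Each slot begins with a struct mempool_slot header, so the
 * usable payload per slot is the slot size minus that header; the slot
 * size itself is rounded to the page size at pool creation time. */
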
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we can
     * revoke the block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_shm memory;
    size_t block_size;
    unsigned n_blocks, n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    PA_LLIST_HEAD(struct mempool_slot, free_slots);
    PA_LLIST_HEAD(struct mempool_slot, used_slots);

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

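/* All counters in pa_mempool_stat are manipulated exclusively with
 * libatomic_ops primitives (the AO_fetch_and_add and AO_load families)
 * rather than under a lock, which is what makes these statistics
 * thread-safe and lock-free. Readers therefore see each counter
 * individually consistent, not an atomic snapshot of the whole struct. */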
static void stat_add(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated);
    AO_fetch_and_add_release_write(&b->pool->stat.allocated_size, (AO_t) b->length);

    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated);
    AO_fetch_and_add_release_write(&b->pool->stat.accumulated_size, (AO_t) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        AO_fetch_and_add1_release_write(&b->pool->stat.n_imported);
        AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) b->length);
    }

    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);
    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated_by_type[b->type]);
}

static void stat_remove(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    assert(AO_load_acquire_read(&b->pool->stat.n_allocated) > 0);
    assert(AO_load_acquire_read(&b->pool->stat.allocated_size) >= (AO_t) b->length);

    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated);
    AO_fetch_and_add_release_write(&b->pool->stat.allocated_size, (AO_t) (-b->length));

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(AO_load_acquire_read(&b->pool->stat.n_imported) > 0);
        assert(AO_load_acquire_read(&b->pool->stat.imported_size) >= (AO_t) b->length);

        AO_fetch_and_sub1_release_write(&b->pool->stat.n_imported);
        AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) (-b->length));
    }

    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
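
/* A minimal usage sketch (illustrative only, `src' is a hypothetical
 * caller-side buffer):
 *
 *     pa_mempool *pool = pa_mempool_new(1);          // 1 = SHM-shared
 *     pa_memblock *b = pa_memblock_new(pool, 4096);  // pool slot, or appended on fallback
 *     memcpy(b->data, src, 4096);
 *     pa_memblock_unref(b);
 *     pa_mempool_free(pool);
 */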

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    b = pa_xmalloc(sizeof(pa_memblock) + length);
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = (uint8_t*) b + sizeof(pa_memblock);
    b->pool = p;

    stat_add(b);
    return b;
}

static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    assert(p);

    if (p->free_slots) {
        slot = p->free_slots;
        PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
    } else if (p->n_init < p->n_blocks) {
        slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));
    } else {
        pa_log_debug("Pool full");
        AO_fetch_and_add1_release_write(&p->stat.n_pool_full);
        return NULL;
    }

    PA_LLIST_PREPEND(struct mempool_slot, p->used_slots, slot);
    return slot;
}

static void* mempool_slot_data(struct mempool_slot *slot) {
    assert(slot);

    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert(p);
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

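/* Pool-backed blocks come in two layouts: if the pa_memblock header plus
 * the payload fit into one slot together, both live in the slot
 * (PA_MEMBLOCK_POOL); if only the payload fits, the header is allocated
 * separately with pa_xnew() and just the data lives in the slot
 * (PA_MEMBLOCK_POOL_EXTERNAL). */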
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    assert(p);
    assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        b->data = (uint8_t*) b + sizeof(pa_memblock);

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        b->data = mempool_slot_data(slot);
    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu",
                     (unsigned long) length,
                     (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        AO_fetch_and_add1_release_write(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    b->length = length;
    b->read_only = 0;
    PA_REFCNT_INIT(b);
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);
    assert(free_cb);

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    PA_REFCNT_INIT(b);
    b->length = length;
    b->data = d;
    b->per_type.user.free_cb = free_cb;
    b->pool = p;

    stat_add(b);
    return b;
}

pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

void pa_memblock_unref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(b->data);

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;

            segment = b->per_type.imported.segment;
            assert(segment);
            assert(segment->import);

            pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);

            if (--segment->n_blocks <= 0)
                segment_detach(segment);

            pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;

            slot = mempool_slot_by_ptr(b->pool, b->data);
            assert(slot);

            PA_LLIST_REMOVE(struct mempool_slot, b->pool->used_slots, slot);
            PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);

            if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            abort();
    }
}

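/* Rebase a block onto memory we own: preferably a fresh pool slot,
 * otherwise a malloc'd copy that is freed via pa_xfree(). Used when the
 * original backing memory is about to disappear. */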
static void memblock_make_local(pa_memblock *b) {
    assert(b);

    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            new_data = mempool_slot_data(slot);
            memcpy(new_data, b->data, b->length);
            b->data = new_data;
            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->type = PA_MEMBLOCK_USER;
    b->per_type.user.free_cb = pa_xfree;
    b->read_only = 0;
    b->data = pa_xmemdup(b->data, b->length);

finish:
    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);
    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated_by_type[b->type]);
}

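/* Use this instead of pa_memblock_unref() when the caller-owned memory
 * behind a PA_MEMBLOCK_FIXED block is about to become invalid: if other
 * references are still outstanding, the data is copied away first so
 * those references stay valid. */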
void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b);
    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(AO_load_acquire_read(&b->pool->stat.n_imported) > 0);
    assert(AO_load_acquire_read(&b->pool->stat.imported_size) >= (AO_t) b->length);
    AO_fetch_and_sub1_release_write(&b->pool->stat.n_imported);
    AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) (-b->length));

    seg = b->per_type.imported.segment;
    assert(seg);
    assert(seg->import);

    pa_hashmap_remove(
        seg->import->blocks,
        PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (--seg->n_blocks <= 0)
        segment_detach(seg);
}

pa_mempool* pa_mempool_new(int shared) {
    size_t ps;
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

#ifdef HAVE_SYSCONF
    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
#else
    ps = 4096; /* Let's hope it's like x86. */
#endif

    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    p->n_init = 0;

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->used_slots);

    memset(&p->stat, 0, sizeof(p->stat));

    return p;
}

void pa_mempool_free(pa_mempool *p) {
    assert(p);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    if (AO_load_acquire_read(&p->stat.n_allocated) > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_shm_free(&p->memory);
    pa_xfree(p);
}

const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    assert(p);

    return &p->stat;
}

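/* Give the payload pages of currently unused slots back to the kernel
 * via pa_shm_punch(), while keeping the address range itself mapped, so
 * that an idle pool does not pin physical memory. */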
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;

    assert(p);

    for (slot = p->free_slots; slot; slot = slot->next)
        pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));
}

int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

int pa_mempool_is_shared(pa_mempool *p) {
    assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    assert(p);
    assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

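/* A segment is our read-only mapping of one remote SHM region. Segments
 * are attached lazily in pa_memimport_get() and reference-counted
 * through n_blocks: releasing the last imported block of a segment
 * detaches and unmaps it again. */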
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment *seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

static void segment_detach(pa_memimport_segment *seg) {
    assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    assert(i);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
    pa_xfree(i);
}

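/* Create a local memblock for block `block_id' which a peer exported
 * from its SHM segment `shm_id' at the given offset/size. The segment is
 * attached on first use; the resulting block is marked read-only because
 * the underlying mapping is. */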
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b;
    pa_memimport_segment *seg;

    assert(i);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        return NULL;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            return NULL;

    if (offset + size > seg->memory.size)
        return NULL;

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    PA_REFCNT_INIT(b);
    b->length = size;
    b->data = (uint8_t*) seg->memory.ptr + offset;
    b->pool = i->pool;
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

    return b;
}

int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    assert(i);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))
        return -1;

    memblock_replace_import(b);
    return 0;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    assert(p);
    assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    assert(e);

    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);

    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    assert(e);

    if (id >= e->n_init)
        return -1;

    if (!e->slots[id].block)
        return -1;

    /* pa_log("Processing release for %u", id); */

    assert(AO_load_acquire_read(&e->pool->stat.n_exported) > 0);
    assert(AO_load_acquire_read(&e->pool->stat.exported_size) >= (AO_t) e->slots[id].block->length);

    AO_fetch_and_sub1_release_write(&e->pool->stat.n_exported);
    AO_fetch_and_add_release_write(&e->pool->stat.exported_size, (AO_t) (-e->slots[id].block->length));

    pa_memblock_unref(e->slots[id].block);
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    return 0;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    assert(e);
    assert(i);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }
}

static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    assert(p);
    assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(n->data, b->data, b->length);
    return n;
}

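/* Export a block to a peer: pin it into a free export slot (copying it
 * into our pool first if it does not already live in shared memory) and
 * return the (block_id, shm_id, offset, size) tuple the peer needs to
 * map it in via pa_memimport_get() on its side. */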
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;

    assert(e);
    assert(b);
    assert(block_id);
    assert(shm_id);
    assert(offset);
    assert(size);
    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
        slot = &e->slots[e->n_init++];
    } else {
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    /* pa_log("Got block id %u", *block_id); */

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        assert(b->pool);
        memory = &b->pool->memory;
    }

    assert(b->data >= memory->ptr);
    assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;
    *size = b->length;

    AO_fetch_and_add1_release_write(&e->pool->stat.n_exported);
    AO_fetch_and_add_release_write(&e->pool->stat.exported_size, (AO_t) b->length);

    return 0;
}