/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (long int);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
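
/* For example, assuming page_size == 4096 and MEM_ALIGN == 8
   (illustrative values; the real ones come from getpagesize ()
   and sizeof (double)):

     ROUNDUP (1)      == 4096
     ROUNDUP (4096)   == 4096
     ROUNDUP (4097)   == 8192
     MEM_ROUNDUP (13) == 16

   Both macros assume a power-of-two boundary, since they round by
   masking off the low bits.  */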

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (long int);
#endif


\f
/***********************************************************************
			 Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
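
/* An illustrative picture of one heap's range, inferred from the
   description above (not part of the original commentary):

   start           bloc_start                   free              end
     | malloc data    | relocatable blocs ...     | unused space    |

   The blocs are packed against bloc_start; the heap grows at `end'
   via real_morecore, and bloc_start moves upward when the malloc
   arena needs to grow.  */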

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */
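
/* For instance, three live blocs pack together like this (sizes
   hypothetical):

     b1->data == bloc_start        b1->size == 16
     b2->data == bloc_start + 16   b2->size == 32
     b3->data == bloc_start + 48   b3->size == 8

   so b->data + b->size == b->next->data holds at every link, and
   break_value == bloc_start + 56.  */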

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
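
/* A sketch of the contract r_alloc_sbrk implements (illustrative
   calls; the real caller is GNU malloc through the __morecore hook,
   not code like this):

     POINTER p = r_alloc_sbrk (16384);    grows the arena by 16 KB,
                                          returning zero-filled space
     POINTER brk = r_alloc_sbrk (0);      queries the current virtual
                                          break without changing it
     r_alloc_sbrk (-16384);               shrinks the arena, possibly
                                          relinquishing pages  */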


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
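
/* Usage sketch (illustrative; memory_full stands in for whatever
   error handling the caller does):

     static POINTER buf;
     if (r_alloc (&buf, 1024) == 0)
       memory_full ();

   ralloc keeps the address of `buf' and rewrites it whenever the
   bloc is relocated, so the caller must re-read `buf' after any
   call that can move blocs.  */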

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
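
/* Continuing the sketch after r_alloc above (hypothetical sizes):
   growing the bloc shifts every later bloc upward, updating their
   variables as it goes:

     if (r_re_alloc (&buf, 4096) == 0)
       memory_full ();

   On success the old contents are preserved and, in the grow path of
   resize_bloc, the added tail is zero-filled.  */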


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data.
   The bloc whose data is pointed to by *NEW must have been registered
   with OLD; afterwards it is registered with NEW instead.  Used by
   buffer-swap-text in Emacs to restore consistency after it swaps the
   buffer text between two buffer objects.  The OLD pointer is checked
   to ensure that memory corruption does not occur due to misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
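
/* Sketch of the intended use, mirroring what buffer-swap-text does
   (variable names hypothetical):

     POINTER a, b;       two variables previously set up by r_alloc
     POINTER tem = a; a = b; b = tem;    swap the data pointers
     r_alloc_reset_variable (&b, &a);    the bloc now in *a was b's
     r_alloc_reset_variable (&a, &b);    and vice versa  */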

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}