/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
21
22 #include <config.h>
23
24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
25 #define USE_PTHREAD
26 #endif
27
28 #include <stddef.h>
29 #include <string.h>
30 #include <limits.h>
31 #include <stdint.h>
32 #include <unistd.h>
33
34 #ifdef USE_PTHREAD
35 #include <pthread.h>
36 #endif
37
38 #ifdef WINDOWSNT
39 #include <w32heap.h> /* for sbrk */
40 #endif
41
42 #ifdef emacs
43 # include "lisp.h"
44 #endif
45
46 #ifdef HAVE_MALLOC_H
47 # if GNUC_PREREQ (4, 2, 0)
48 # pragma GCC diagnostic ignored "-Wdeprecated-declarations"
49 # endif
50 # include <malloc.h>
51 #endif
52 #ifndef __MALLOC_HOOK_VOLATILE
53 # define __MALLOC_HOOK_VOLATILE volatile
54 #endif
55 #ifndef HAVE_MALLOC_H
56 extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
57 extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
58 extern void *(*__morecore) (ptrdiff_t);
59 #endif
60
61 /* If HYBRID_MALLOC is defined, then temacs will use malloc,
62 realloc... as defined in this file (and renamed gmalloc,
63 grealloc... via the macros that follow). The dumped emacs,
64 however, will use the system malloc, realloc.... In other source
65 files, malloc, realloc... are renamed hybrid_malloc,
66 hybrid_realloc... via macros in conf_post.h. hybrid_malloc and
67 friends are wrapper functions defined later in this file. */
68 #undef malloc
69 #undef realloc
70 #undef calloc
71 #undef aligned_alloc
72 #undef free
73 #define malloc gmalloc
74 #define realloc grealloc
75 #define calloc gcalloc
76 #define aligned_alloc galigned_alloc
77 #define free gfree
78 #define malloc_info gmalloc_info
79
80 #ifdef HYBRID_MALLOC
81 # include "sheap.h"
82 # define DUMPED bss_sbrk_did_unexec
83 static bool
84 ALLOCATED_BEFORE_DUMPING (char *p)
85 {
86 return bss_sbrk_buffer <= p && p < bss_sbrk_buffer + STATIC_HEAP_SIZE;
87 }
88 #endif
89
90 #ifdef __cplusplus
91 extern "C"
92 {
93 #endif
94
95 #ifdef HYBRID_MALLOC
96 #define extern static
97 #endif
98
99 /* Allocate SIZE bytes of memory. */
100 extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
101 /* Re-allocate the previously allocated block
102 in ptr, making the new block SIZE bytes long. */
103 extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
104 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
105 extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
106 /* Free a block. */
107 extern void free (void *ptr);
108
109 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
110 extern void *aligned_alloc (size_t, size_t);
111 #ifdef MSDOS
112 extern void *memalign (size_t, size_t);
113 extern int posix_memalign (void **, size_t, size_t);
114 #endif
115
116 /* The allocator divides the heap into blocks of fixed size; large
117 requests receive one or more whole blocks, and small requests
118 receive a fragment of a block. Fragment sizes are powers of two,
119 and all fragments of a block are the same size. When all the
120 fragments in a block have been freed, the block itself is freed. */
121 #define INT_BIT (CHAR_BIT * sizeof (int))
122 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
123 #define BLOCKSIZE (1 << BLOCKLOG)
124 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
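
/* Illustrative sizes (added commentary, assuming INT_BIT >= 32 and
   hence BLOCKLOG == 12, BLOCKSIZE == 4096): a 100-byte request is
   "small" and receives a 128-byte fragment, the smallest power of
   two that fits, while a 10000-byte request is "large" and receives
   BLOCKIFY (10000) == 3 whole blocks.  */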

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
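
/* Worked example of the union above (added commentary): a busy
   object spanning three blocks whose first block has table index B
   is recorded as

     _heapinfo[B].busy.type == 0, _heapinfo[B].busy.info.size == 3,
     _heapinfo[B + 1].busy.info.size == -1,
     _heapinfo[B + 2].busy.info.size == -2,

   so the first block of an object is found from any interior block
   by adding the stored (negative) size.  A block carved into 64-byte
   fragments instead has busy.type == 6, with busy.info.frag counting
   its free fragments.  */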

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
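
/* Illustrative round trip (this #if 0 sketch is added commentary,
   not part of the allocator): block numbers are 1-based relative to
   _heapbase, so BLOCK (_heapbase) == 1 and ADDRESS inverts BLOCK for
   any address inside the heap.  */
#if 0
char *p = _heapbase + 5 * BLOCKSIZE + 10;  /* somewhere in block 6 */
size_t b = BLOCK (p);                      /* == 6 */
void *start = ADDRESS (b);                 /* == _heapbase + 5 * BLOCKSIZE */
#endif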

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif

#undef extern

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);

#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
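
/* Example of the adjustment above (added commentary): with
   BLOCKSIZE == 4096, if __morecore returns core at address 0x100010
   then ADJ == 0x10, so the break is extended by another
   4096 - 0x10 == 0xff0 bytes and the returned pointer is rounded up
   to 0x101000, the next block boundary.  */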

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are first used.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
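
/* Note on the allocation loop above (added commentary): a candidate
   info table is accepted only if it can describe its own storage,
   i.e. the block number just past the table must still be a valid
   index below NEWSIZE.  If the candidate lands too high in core for
   that to hold, most of it is handed back to the system and NEWSIZE
   doubles, so the loop makes forward progress and terminates.  */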

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
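
/* Worked example for the small-request path above (added
   commentary): SIZE == 100 gives LOG == 7 (after --size, 99 is
   halved to zero in six steps starting from LOG == 1), so the
   request is served from the 128-byte list _fraghead[7].  Breaking a
   fresh block yields BLOCKSIZE >> 7 == 32 fragments: one is returned
   to the caller and the other 31 are linked onto the free list,
   leaving busy.info.frag.nfree == 31.  */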

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
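
/* Coalescing example for the large-object case above (added
   commentary): suppose blocks 10-11 form a free cluster and the
   3-block object at blocks 12-14 is freed.  The predecessor search
   finds cluster 10; since 10 + 2 == 12, the two merge into a single
   5-block cluster at 10, and if the next free cluster starts at
   block 15 a second merge follows.  Core is returned to the system
   via __morecore only when the resulting cluster ends at _heaplimit
   and spans at least lesscore_threshold blocks.  */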

/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif

/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}

/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
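
/* The wraparound test above, worked through (added commentary): with
   a 32-bit size_t, calloc (65537, 65537) computes BYTES ==
   65537 * 65537 mod 2^32 == 131073; then 131073 / 65537 == 1, which
   differs from NMEMB == 65537, so the overflow is detected and the
   call fails with ENOMEM instead of returning a block far smaller
   than requested.  */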

/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;

/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
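
/* Example of the padding logic above (added commentary): a request
   for SIZE == 100 with ALIGNMENT == 64 first mallocs 100 + 63 == 163
   bytes.  If that block happens to start 16 bytes past a 64-byte
   boundary, ADJ == 48, so the block is re-allocated at 100 + 48 ==
   148 bytes; provided the new block's alignment is no worse, the
   caller receives its start plus ADJ, and the (exact, aligned) pair
   is recorded in _aligned_blocks so free can recover the exact
   pointer.  */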

/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
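
/* Illustrative caller (this #if 0 sketch is added commentary, not
   compiled): 64 is a power of two and a multiple of sizeof (void *),
   so the call succeeds and BUF is 64-byte aligned; an alignment of
   48 would fail the power-of-two test, since 48 & 47 == 32 != 0, and
   return EINVAL.  */
#if 0
void *buf;
int err = posix_memalign (&buf, 64, 1000);
#endif
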
#endif

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */

#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c.  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
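
/* Note on the size recovery above (added commentary): for storage
   that predates dumping, the old gmalloc heap table is still intact,
   so an upper bound on the original request can be read back from
   it: a multiblock object of N blocks yields N * BLOCKSIZE, and a
   fragment of type T yields 1 << T bytes (e.g. 128 for T == 7).
   Copying min (oldsize, size) bytes therefore never reads past the
   old object.  */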

#else /* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif /* HYBRID_MALLOC */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
{
  size_t size;		/* Exact size requested by user.  */
  size_t magic;		/* Magic number to check header integrity.  */
};
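
/* Layout of a block under mcheck (added commentary):

     [struct hdr: size, magic][SIZE user bytes][guard byte MAGICBYTE]

   mallochook allocates sizeof (struct hdr) + SIZE + 1 bytes, records
   the requested SIZE and MAGICWORD in the header, plants MAGICBYTE
   just past the user area, and floods the user area with MALLOCFLOOD.
   checkhdr later reports MCHECK_HEAD if the magic word was clobbered,
   MCHECK_TAIL if the guard byte was, and MCHECK_FREE if the header
   carries MAGICFREE from an earlier free.  */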

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  /* PTR is the pointer malloc returned; the header sits just before
     the user data, so step back over it before checking.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */