/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2015 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
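
/* Worked example (editor's note, not part of the original source):
   on a host with 32-bit int, INT_BIT is 32, so BLOCKLOG is 12 and
   BLOCKSIZE is 4096 bytes.  A request for 10000 bytes is then a
   "large" request covering BLOCKIFY (10000) == (10000 + 4095) / 4096
   == 3 whole blocks, while a 100-byte request is "small" and is
   served from a block split into 4096 / 128 == 32 fragments of 128
   bytes each (see the fragment-size computation in
   _malloc_internal_nolock below).  */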

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                size_t nfree;	/* Free frags in a fragmented block.  */
                size_t first;	/* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;	/* Size (in blocks) of a free cluster.  */
        size_t next;	/* Index of next free cluster.  */
        size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
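
/* Illustrative layout (editor's note, not part of the original
   source): a 3-block object starting at block index B is recorded as

     _heapinfo[B].busy.type = 0;  _heapinfo[B].busy.info.size = 3;
     _heapinfo[B + 1].busy.info.size = -1;
     _heapinfo[B + 2].busy.info.size = -2;

   so that from any interior block the first block is found by adding
   the (negative) size field to the block index.  A block split into
   64-byte fragments instead has busy.type == 6 and tracks its free
   fragments through busy.info.frag.  */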

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
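
/* Worked example (editor's note): with BLOCKSIZE == 4096, the address
   _heapbase + 8192 lies in block 8192 / 4096 + 1 == 3, and ADDRESS (3)
   maps back to _heapbase + 2 * 4096.  Block numbers are 1-based so
   that index 0 of _heapinfo can serve as the anchor of the free
   list (see _heapinfo[0].free below).  */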

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
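
/* Editor's note on the pattern above: the `do { ... } while (0)'
   wrapper makes each macro behave as a single statement, so a use like

     if (need_lock)
       LOCK ();
     else
       do_something ();

   still parses correctly even though the macro body contains an `if'.
   When pthreads are unavailable the macros expand to nothing and the
   allocator runs without locking (single-threaded).  */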

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
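
/* Illustrative usage (editor's sketch, compiled out; the caller is
   hypothetical, the API is the one declared above).  */
#if 0
static int
example_mcheck_usage (void)
{
  if (mcheck (NULL) != 0)	/* Must run before the first malloc.  */
    return -1;
  char *p = malloc (16);	/* Now carries a header and a trailing
				   magic byte (see GC_MCHECK below).  */
  free (p);			/* The free hook validates both.  */
  return 0;
}
#endif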

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
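
/* Illustrative usage (editor's sketch, compiled out; mstats is
   declared above but defined elsewhere, and stdio is assumed for the
   printout).  */
#if 0
#include <stdio.h>
static void
example_print_mstats (void)
{
  struct mstats s = mstats ();
  printf ("heap: %zu bytes total, %zu used in %zu chunks, %zu free\n",
          s.bytes_total, s.bytes_used, s.chunks_used, s.bytes_free);
}
#endif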

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap.  */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap.  */
#endif
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
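
/* Worked example (editor's note): with BLOCKSIZE == 4096, if
   __morecore returns 0x1000010, then adj = 0x1000010 % 4096 == 16,
   so align requests 4096 - 16 == 4080 further bytes and returns
   0x1001000, the next block boundary.  The padding bytes before the
   boundary are simply left unused.  */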

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
/* On Cygwin prior to 1.7.31, pthread_mutexes were ERRORCHECK mutexes
   by default.  When the default changed to NORMAL in Cygwin-1.7.31,
   deadlocks occurred (bug#18222).  As a temporary workaround, we
   explicitly set the mutexes to be of ERRORCHECK type, restoring the
   previous behavior.  */
#ifdef CYGWIN
pthread_mutex_t _malloc_mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else  /* not CYGWIN */
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif /* not CYGWIN */
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
#ifdef CYGWIN
  /* Use ERRORCHECK mutexes; see comment above.  */
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init (&_malloc_mutex, &attr);
  pthread_mutex_init (&_aligned_blocks_mutex, &attr);
#else  /* not CYGWIN */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
#endif /* not CYGWIN */
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* We're reinitializing the dumped emacs.  */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = _realloc_internal_nolock (_heapinfo,
                                              newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((size_t) BLOCK ((char *) newinfo
                              + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
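
      /* Worked example (editor's note): for a 100-byte request the
         code above sets size = 99 and halves it toward zero
         (49, 24, 12, 6, 3, 1, 0), incrementing log six times to end
         at 7; the request is served from a 1 << 7 == 128-byte
         fragment, and a 4096-byte block holds 32 such fragments.  */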

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first =
              (uintptr_t) next->next % BLOCKSIZE >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */


/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap.  */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
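
/* Worked example (editor's note): the division above detects unsigned
   wraparound in nmemb * size.  With 32-bit size_t,
   calloc (0x10000, 0x10001) would compute bytes = 0x100010000 mod 2^32
   == 0x10000; then bytes / size == 0 != nmemb, so the call fails with
   ENOMEM instead of returning a 64 KiB block for a ~4 GiB request.  */
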
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *(*__memalign_hook) (size_t size, size_t alignment);

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
        {
          /* Reallocate the block with only as much excess as it
             needs.  */
          free (result);
          result = malloc (size + adj);
          if (result == NULL)	/* Impossible unless interrupted.  */
            return NULL;

          lastadj = adj;
          adj = alignment - (uintptr_t) result % alignment;
          if (adj == alignment)
            adj = 0;
          /* It's conceivable we might have been so unlucky as to get
             a different block with weaker alignment.  If so, this
             block is too short to contain SIZE after alignment
             correction.  So we must try again and get another block,
             slightly larger.  */
        } while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = malloc (sizeof *l);
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          free (result);
          result = NULL;
        }
    }

  return result;
}
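
/* Worked example (editor's note): aligned_alloc (64, 100) first
   mallocs 100 + 63 bytes.  If malloc returns an address ending in
   0x28, then adj = 64 - 0x28 % 64 == 24, the block is remalloc'd with
   exactly 24 bytes of excess, and the returned pointer is the malloc
   result plus adj, now 64-byte aligned.  Because that pointer differs
   from the exact malloc address, the pair is recorded in
   _aligned_blocks so free can recover the exact pointer.  The
   `- size < alignment' test above rejects sizes so large that
   size + alignment - 1 would wrap around.  */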

/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
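
/* Illustrative usage (editor's sketch, compiled out; the caller is
   hypothetical).  Note that posix_memalign reports failure through
   its return value rather than errno, and requires the alignment to
   be a power of two that is a multiple of sizeof (void *).  */
#if 0
static void *
example_page_aligned (size_t nbytes)
{
  void *p = NULL;
  return posix_memalign (&p, 4096, nbytes) == 0 ? p : NULL;
}
#endif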

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
{
  size_t size;	/* Exact size requested by user.  */
  size_t magic;	/* Magic number to check header integrity.  */
};
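
/* Layout sketch (editor's note): each checked allocation of SIZE
   bytes is really sizeof (struct hdr) + SIZE + 1 bytes:

     [ size | magic ][ SIZE user bytes, flooded with 0x93 ][ MAGICBYTE ]

   The hooks hand the user a pointer just past the header; freehook
   and reallochook step back one struct hdr to validate `magic' and
   the trailing MAGICBYTE, and flood freed memory with FREEFLOOD so
   use-after-free shows up as 0x95 garbage.  */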

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
         to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == ptr)
          {
            l->aligned = NULL;	/* Mark the slot in the list as free.  */
            ptr = l->exact;
            break;
          }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */