/* Heap management routines for GNU Emacs on the Microsoft Windows API.
   Copyright (C) 1994, 2001-2015 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/*
   Geoff Voelker (voelker@cs.washington.edu) 7-29-94
*/

/*
   Heavily modified by Fabrice Popineau (fabrice.popineau@gmail.com) 28-02-2014
*/

/*
  Memory allocation scheme for w32/w64:

  - Buffers are mmap'ed using a very simple emulation of mmap/munmap
  - During the temacs phase:
    * we use a private heap declared to be stored in the `dumped_data' array
    * unfortunately, this heap cannot be made growable, so the size of
      blocks it can allocate is limited to (0x80000 - pagesize)
    * the blocks that are larger than this are allocated from the end
      of the `dumped_data' array; there are not so many of them.
      We use a very simple first-fit scheme to reuse those blocks.
    * we check that the private heap does not cross the area used
      by the bigger chunks.
  - During the emacs phase:
    * we create a private heap for new memory blocks
    * we make sure that we never free a block that has been dumped.
      Freeing a dumped block could work in principle, but may prove
      unreliable if we distribute binaries of emacs.exe: MS does not
      guarantee that the heap data structures are the same across all
      versions of their OS, even though the API has been available
      since XP.  */
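
/* To summarize, the scheme above plays out roughly as follows.  This
   is only an illustrative sketch of the dispatch that init_heap below
   sets up, not additional code:

     init_heap ()
       if (using_dynamic_heap)          // emacs phase, after dumping
         heap = HeapCreate (...);       // growable private heap
         the_malloc_fn  = malloc_after_dump;
         the_realloc_fn = realloc_after_dump;
         the_free_fn    = free_after_dump;
       else                             // temacs phase, before dumping
         heap = RtlCreateHeap (...);    // non-growable, inside dumped_data[]
         the_malloc_fn  = malloc_before_dump;
         the_realloc_fn = realloc_before_dump;
         the_free_fn    = free_before_dump;
*/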

#include <config.h>
#include <stdio.h>
#include <errno.h>

#include <sys/mman.h>
#include "w32common.h"
#include "w32heap.h"
#include "lisp.h"

/* We chose to leave these declarations here.  They are used only in
   this file.  RtlCreateHeap has been available since XP; it lives in
   ntdll.dll, and its declarations ship with the DDK.  People often
   complained that HeapCreate doesn't offer the ability to create a
   heap at a given place, which we need here, and which RtlCreateHeap
   provides.  We reproduce here the definitions available with the
   DDK.  */

typedef PVOID (WINAPI * RtlCreateHeap_Proc) (
  /* _In_ */      ULONG Flags,
  /* _In_opt_ */  PVOID HeapBase,
  /* _In_opt_ */  SIZE_T ReserveSize,
  /* _In_opt_ */  SIZE_T CommitSize,
  /* _In_opt_ */  PVOID Lock,
  /* _In_opt_ */  PVOID Parameters
  );

typedef LONG NTSTATUS;

typedef NTSTATUS
(NTAPI * PRTL_HEAP_COMMIT_ROUTINE)(
  IN PVOID Base,
  IN OUT PVOID *CommitAddress,
  IN OUT PSIZE_T CommitSize
  );

typedef struct _RTL_HEAP_PARAMETERS {
  ULONG Length;
  SIZE_T SegmentReserve;
  SIZE_T SegmentCommit;
  SIZE_T DeCommitFreeBlockThreshold;
  SIZE_T DeCommitTotalFreeThreshold;
  SIZE_T MaximumAllocationSize;
  SIZE_T VirtualMemoryThreshold;
  SIZE_T InitialCommit;
  SIZE_T InitialReserve;
  PRTL_HEAP_COMMIT_ROUTINE CommitRoutine;
  SIZE_T Reserved[ 2 ];
} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS;
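
/* For reference, a minimal sketch of how the definitions above fit
   together.  This mirrors what init_heap does below (the name
   desired_base_address is purely illustrative; the real code passes
   data_region_base):

     RTL_HEAP_PARAMETERS params;
     ZeroMemory (&params, sizeof (params));
     params.Length = sizeof (RTL_HEAP_PARAMETERS);
     params.InitialCommit = 0x1000;
     params.InitialReserve = sizeof (dumped_data);
     params.CommitRoutine = &dumped_data_commit;

     RtlCreateHeap_Proc create_heap
       = (RtlCreateHeap_Proc) GetProcAddress (LoadLibrary ("ntdll.dll"),
                                              "RtlCreateHeap");
     HANDLE h = create_heap (0, desired_base_address, 0, 0, NULL, &params);
*/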

/* We reserve space for dumping emacs lisp byte-code inside a static
   array.  By storing it in an array, the generic mechanism in
   unexecw32.c will be able to dump it without the need to add a
   special segment to the executable.  In order to be able to do this
   without losing too much space, we need to create a Windows heap at
   the specific address of the static array.  The RtlCreateHeap
   function, available inside the NT kernel since XP, will do this;
   it allows creating a non-growable heap at a specific address.  So
   before dumping, we create a non-growable heap at the address of the
   dumped_data[] array.  After dumping, we reuse memory allocated
   there without being able to free it (but most of it is not meant to
   be freed anyway), and we use a new private heap for all new
   allocations.  */

/* FIXME: Most of the space reserved for dumped_data[] is only used by
   the 1st bootstrap-emacs.exe built while bootstrapping.  Once the
   preloaded Lisp files are byte-compiled, the next loadup uses less
   than half of the size stated below.  It would be nice to find a way
   to build only the first bootstrap-emacs.exe with the large size,
   and reset that to a lower value afterwards.  */
#if defined _WIN64 || defined WIDE_EMACS_INT
# define DUMPED_HEAP_SIZE (20*1024*1024)
#else
# define DUMPED_HEAP_SIZE (12*1024*1024)
#endif

static unsigned char dumped_data[DUMPED_HEAP_SIZE];

/* Info for keeping track of our dynamic heap used after dumping.  */
unsigned char *data_region_base = NULL;
unsigned char *data_region_end = NULL;
static DWORD_PTR committed = 0;

/* The maximum block size that can be handled by a non-growable w32
   heap is limited by the MaxBlockSize value below.

   This point deserves an explanation.

   The W32 heap allocator can be used for a growable heap or a
   non-growable one.

   A growable heap is not compatible with a fixed base address for the
   heap.  Only a non-growable one is.  One drawback of non-growable
   heaps is that they can hold only objects smaller than a certain
   size (the one defined below).  Most of the largest blocks are GC'ed
   before dumping.  In any case and to be safe, we implement a simple
   first-fit allocation algorithm starting at the end of the
   dumped_data[] array as depicted below:

  ----------------------------------------------
  |               |              |             |
  | Private heap  |->          <-| Big chunks  |
  |               |              |             |
  ----------------------------------------------
  ^               ^              ^
  dumped_data     dumped_data    bc_limit
                  + committed

*/
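
/* A small worked example of the big-chunk side (see malloc_before_dump
   below; the sizes here are only illustrative):

   - A request for 0x90000 bytes exceeds MaxBlockSize, so it cannot go
     into the private heap.  bc_limit moves down by 0x90000 (rounded
     down to a multiple of 0x10), and the new block is recorded in
     blocks[] as occupied.
   - If that block is later freed and another request of at most
     0x90000 bytes arrives, the first-fit scan finds the free slot and
     reuses it instead of moving bc_limit further down.
   - Allocation fails hard (with a message) if bc_limit would cross
     dumped_data + committed, i.e. if the two areas would overlap.  */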

/* Info for managing our preload heap, which is essentially a fixed size
   data area in the executable.  */
#define PAGE_SIZE 0x1000
#define MaxBlockSize (0x80000 - PAGE_SIZE)

#define MAX_BLOCKS 0x40

static struct
{
  unsigned char *address;
  size_t size;
  DWORD occupied;
} blocks[MAX_BLOCKS];

static DWORD blocks_number = 0;
static unsigned char *bc_limit;

/* Handle for the private heap:
    - inside the dumped_data[] array before dump,
    - outside of it after dump.
*/
HANDLE heap = NULL;

/* We redirect the standard allocation functions.  */
malloc_fn the_malloc_fn;
realloc_fn the_realloc_fn;
free_fn the_free_fn;

/* It doesn't seem to be useful to allocate from a file mapping.
   It would be useful if the memory were shared.
   http://stackoverflow.com/questions/307060/what-is-the-purpose-of-allocating-pages-in-the-pagefile-with-createfilemapping  */

/* This is the function to commit memory when the heap allocator
   asks for more memory.  Before dumping, we allocate space
   from the fixed-size dumped_data[] array.
*/
NTSTATUS NTAPI
dumped_data_commit (PVOID Base, PVOID *CommitAddress, PSIZE_T CommitSize)
{
  /* This is used before dumping.

     The private heap is stored at dumped_data[] address.
     We commit contiguous areas of the dumped_data array
     as requests arrive.  */
  *CommitAddress = data_region_base + committed;
  committed += *CommitSize;
  /* Check that the private heap area does not overlap the big chunks area.  */
  if (((unsigned char *)(*CommitAddress)) + *CommitSize >= bc_limit)
    {
      fprintf (stderr,
               "dumped_data_commit: memory exhausted.\nEnlarge dumped_data[]!\n");
      exit (-1);
    }
  return 0;
}
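
/* For example (numbers purely illustrative): the heap is created with
   InitialCommit = 0x1000, so the first commit hands out the 0x1000
   bytes starting at data_region_base and leaves committed == 0x1000.
   A later request for 0x3000 more bytes returns data_region_base +
   0x1000 and advances committed to 0x4000.  The commit pointer only
   ever moves forward; if it would reach bc_limit (the lowest big
   chunk), we give up and ask for dumped_data[] to be enlarged.  */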

/* Heap creation.  */

/* We want to turn on Low Fragmentation Heap for XP and older systems.
   MinGW32 lacks those definitions.  */
#ifndef MINGW_W64
typedef enum _HEAP_INFORMATION_CLASS {
  HeapCompatibilityInformation
} HEAP_INFORMATION_CLASS;

typedef WINBASEAPI BOOL (WINAPI * HeapSetInformation_Proc)(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T);
#endif

void
init_heap (void)
{
  if (using_dynamic_heap)
    {
      unsigned long enable_lfh = 2;

      /* After dumping, use a new private heap.  We explicitly enable
         the low fragmentation heap (LFH) here, for the sake of pre
         Vista versions.  Note: this will harmlessly fail on Vista and
         later, where the low-fragmentation heap is enabled by
         default.  It will also fail on pre-Vista versions when Emacs
         is run under a debugger; set _NO_DEBUG_HEAP=1 in the
         environment before starting GDB to get low fragmentation heap
         on XP and older systems, for the price of losing "certain
         heap debug options"; for the details see
         http://msdn.microsoft.com/en-us/library/windows/desktop/aa366705%28v=vs.85%29.aspx.  */
      data_region_end = data_region_base;

      /* Create the private heap.  */
      heap = HeapCreate (0, 0, 0);

#ifndef MINGW_W64
      /* Set the low-fragmentation heap for OS before Vista.  */
      HMODULE hm_kernel32dll = LoadLibrary ("kernel32.dll");
      HeapSetInformation_Proc s_pfn_Heap_Set_Information
        = (HeapSetInformation_Proc) GetProcAddress (hm_kernel32dll,
                                                    "HeapSetInformation");
      if (s_pfn_Heap_Set_Information != NULL)
        {
          if (s_pfn_Heap_Set_Information ((PVOID) heap,
                                          HeapCompatibilityInformation,
                                          &enable_lfh, sizeof (enable_lfh)) == 0)
            DebPrint (("Enabling Low Fragmentation Heap failed: error %ld\n",
                       GetLastError ()));
        }
#endif

      the_malloc_fn = malloc_after_dump;
      the_realloc_fn = realloc_after_dump;
      the_free_fn = free_after_dump;
    }
  else
    {
      /* Find the RtlCreateHeap function.  Headers for this function
         are provided with the w32 ddk, but the function is available
         in ntdll.dll since XP.  */
      HMODULE hm_ntdll = LoadLibrary ("ntdll.dll");
      RtlCreateHeap_Proc s_pfn_Rtl_Create_Heap
        = (RtlCreateHeap_Proc) GetProcAddress (hm_ntdll, "RtlCreateHeap");
      /* Specific parameters for the private heap.  */
      RTL_HEAP_PARAMETERS params;
      ZeroMemory (&params, sizeof (params));
      params.Length = sizeof (RTL_HEAP_PARAMETERS);

      data_region_base = (unsigned char *)ROUND_UP (dumped_data, 0x1000);
      data_region_end = bc_limit = dumped_data + DUMPED_HEAP_SIZE;

      params.InitialCommit = committed = 0x1000;
      params.InitialReserve = sizeof (dumped_data);
      /* Use our own routine to commit memory from the dumped_data
         array.  */
      params.CommitRoutine = &dumped_data_commit;

      /* Create the private heap.  */
      if (s_pfn_Rtl_Create_Heap == NULL)
        {
          fprintf (stderr, "Cannot build Emacs without RtlCreateHeap being available; exiting.\n");
          exit (-1);
        }
      heap = s_pfn_Rtl_Create_Heap (0, data_region_base, 0, 0, NULL, &params);
      the_malloc_fn = malloc_before_dump;
      the_realloc_fn = realloc_before_dump;
      the_free_fn = free_before_dump;
    }

  /* Update system version information to match current system.  */
  cache_system_info ();
}

#undef malloc
#undef realloc
#undef free

/* FREEABLE_P checks if the block can be safely freed.  */
#define FREEABLE_P(addr)                                        \
  ((addr) != NULL                                               \
   && ((unsigned char *)(addr) < dumped_data                    \
       || (unsigned char *)(addr) >= dumped_data + DUMPED_HEAP_SIZE))
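
/* For instance, a pointer returned by malloc_after_dump (it comes
   from the new private heap, outside dumped_data[]) is freeable,
   while a pointer to an object that was dumped inside dumped_data[]
   is not: free_after_dump silently ignores the latter, since the
   dumped heap's internal structures cannot be trusted across Windows
   versions.  */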

void *
malloc_after_dump (size_t size)
{
  /* Use the new private heap.  */
  void *p = HeapAlloc (heap, 0, size);

  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  else
    errno = ENOMEM;
  return p;
}

void *
malloc_before_dump (size_t size)
{
  void *p;

  /* Before dumping.  The private heap can handle only requests for
     less than MaxBlockSize.  */
  if (size < MaxBlockSize)
    {
      /* Use the private heap if possible.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* Find the first big chunk that can hold the requested size.  */
      int i = 0;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].occupied == 0 && blocks[i].size >= size)
            break;
        }
      if (i < blocks_number)
        {
          /* If found, use it.  */
          p = blocks[i].address;
          blocks[i].occupied = TRUE;
        }
      else
        {
          /* Allocate a new big chunk from the end of the dumped_data
             array.  */
          if (blocks_number >= MAX_BLOCKS)
            {
              fprintf (stderr,
                       "malloc_before_dump: no more big chunks available.\nEnlarge MAX_BLOCKS!\n");
              exit (-1);
            }
          bc_limit -= size;
          bc_limit = (unsigned char *)ROUND_DOWN (bc_limit, 0x10);
          p = bc_limit;
          blocks[blocks_number].address = p;
          blocks[blocks_number].size = size;
          blocks[blocks_number].occupied = TRUE;
          blocks_number++;
          /* Check that areas do not overlap.  */
          if (bc_limit < dumped_data + committed)
            {
              fprintf (stderr,
                       "malloc_before_dump: memory exhausted.\nEnlarge dumped_data[]!\n");
              exit (-1);
            }
        }
    }
  return p;
}

/* Re-allocate the previously allocated block in PTR, making the new
   block SIZE bytes long.  */
void *
realloc_after_dump (void *ptr, size_t size)
{
  void *p;

  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Reallocate the block since it lies in the new heap.  */
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* If the block lies in the dumped data, do not free it.  Only
         allocate a new one.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
      else if (ptr)
        CopyMemory (p, ptr, size);
    }
  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  return p;
}

void *
realloc_before_dump (void *ptr, size_t size)
{
  void *p;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit && size <= MaxBlockSize)
    {
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* In this case, either the new block is too large for the heap,
         or the old block was already too large.  In both cases,
         malloc_before_dump() and free_before_dump() will take care of
         reallocation.  */
      p = malloc_before_dump (size);
      /* If SIZE is below MaxBlockSize, malloc_before_dump will try to
         allocate it in the fixed heap.  If that fails, we could have
         kept the block in its original place, above bc_limit, instead
         of failing the call as below.  But this doesn't seem to be
         worth the added complexity, as loadup allocates only a very
         small number of large blocks, and never reallocates them.  */
      if (p && ptr)
        {
          CopyMemory (p, ptr, size);
          free_before_dump (ptr);
        }
    }
  return p;
}

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
void
free_after_dump (void *ptr)
{
  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Free the block if it is in the new private heap.  */
      HeapFree (heap, 0, ptr);
    }
}

void
free_before_dump (void *ptr)
{
  if (!ptr)
    return;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit)
    {
      /* Free the block if it is allocated in the private heap.  */
      HeapFree (heap, 0, ptr);
    }
  else
    {
      /* Look for the big chunk.  */
      int i;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].address == ptr)
            {
              /* Reset block occupation if found.  */
              blocks[i].occupied = 0;
              break;
            }
        }
      /* The pointer must name one of the recorded big chunks; complain
         if it was not found.  */
      eassert (i < blocks_number);
    }
}

#ifdef ENABLE_CHECKING
void
report_temacs_memory_usage (void)
{
  DWORD blocks_used = 0;
  size_t large_mem_used = 0;
  int i;

  for (i = 0; i < blocks_number; i++)
    if (blocks[i].occupied)
      {
        blocks_used++;
        large_mem_used += blocks[i].size;
      }

  /* Emulate 'message', which writes to stderr in non-interactive
     sessions.  */
  fprintf (stderr,
           "Dump memory usage: Heap: %" PRIu64 " Large blocks(%lu/%lu): %" PRIu64 "/%" PRIu64 "\n",
           (unsigned long long)committed, blocks_used, blocks_number,
           (unsigned long long)large_mem_used,
           (unsigned long long)(dumped_data + DUMPED_HEAP_SIZE - bc_limit));
}
#endif

/* Emulate getpagesize.  */
int
getpagesize (void)
{
  return sysinfo_cache.dwPageSize;
}

void *
sbrk (ptrdiff_t increment)
{
  /* data_region_end is the address beyond the last allocated byte.
     The sbrk() function is not emulated at all, except for a 0 value
     of its parameter.  This is needed by the Emacs Lisp function
     `memory-limit'.  */
  eassert (increment == 0);
  return data_region_end;
}
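
/* Illustrative use (per the comment above): the Lisp-level call

     (memory-limit)

   ends up calling sbrk (0) and thus reports a value derived from
   data_region_end, i.e. the highest address handed out so far by the
   private heap, rather than a real Unix break value.  */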

#define MAX_BUFFER_SIZE (512 * 1024 * 1024)

/* MMAP allocation for buffers.  */
void *
mmap_alloc (void **var, size_t nbytes)
{
  void *p = NULL;

  /* We implement amortized allocation.  We start by reserving twice
     the size requested and commit only the size requested.  Then
     realloc could proceed and use the reserved pages, reallocating
     only if needed.  Buffer shrink would happen only so that we stay
     in the 2x range.  This is a big win when visiting compressed
     files, where the final size of the buffer is not known in
     advance, and the buffer is enlarged several times as the data is
     decompressed on the fly.  */
  if (nbytes < MAX_BUFFER_SIZE)
    p = VirtualAlloc (NULL, (nbytes * 2), MEM_RESERVE, PAGE_READWRITE);

  /* If it fails, or if the request is above 512MB, try with the
     requested size.  */
  if (p == NULL)
    p = VirtualAlloc (NULL, nbytes, MEM_RESERVE, PAGE_READWRITE);

  if (p != NULL)
    {
      /* Now, commit pages for NBYTES.  If committing fails, fall
         through to the error handling below instead of returning a
         reserved-but-uncommitted region.  */
      *var = VirtualAlloc (p, nbytes, MEM_COMMIT, PAGE_READWRITE);
      if (*var == NULL)
        p = *var;
    }

  if (!p)
    {
      if (GetLastError () == ERROR_NOT_ENOUGH_MEMORY)
        errno = ENOMEM;
      else
        {
          DebPrint (("mmap_alloc: error %ld\n", GetLastError ()));
          errno = EINVAL;
        }
    }

  return *var = p;
}
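
/* A worked example of the amortized scheme above (sizes illustrative):
   visiting a compressed file whose buffer first needs 1 MB reserves
   2 MB and commits 1 MB.  As decompression enlarges the buffer to
   1.5 MB and then 1.9 MB, mmap_realloc only commits more pages inside
   the already reserved 2 MB region, so the buffer text never moves.
   Only when the buffer outgrows the reservation does mmap_realloc
   fall back to allocating a new region and copying the contents.  */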

void
mmap_free (void **var)
{
  if (*var)
    {
      if (VirtualFree (*var, 0, MEM_RELEASE) == 0)
        DebPrint (("mmap_free: error %ld\n", GetLastError ()));
      *var = NULL;
    }
}
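
/* A minimal usage sketch of the mmap_* trio defined in this file,
   which Emacs uses for buffer text (the variable name and sizes are
   only illustrative):

     void *text = NULL;
     if (mmap_alloc (&text, 64 * 1024) == NULL)
       ...                                // reserve or commit failed, errno set
     if (mmap_realloc (&text, 256 * 1024) == NULL)
       ...                                // enlarging failed, old block kept
     mmap_free (&text);                   // releases region, sets text to NULL
*/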

void *
mmap_realloc (void **var, size_t nbytes)
{
  MEMORY_BASIC_INFORMATION memInfo, m2;

  if (*var == NULL)
    return mmap_alloc (var, nbytes);

  /* This case happens in init_buffer().  */
  if (nbytes == 0)
    {
      mmap_free (var);
      return mmap_alloc (var, nbytes);
    }

  if (VirtualQuery (*var, &memInfo, sizeof (memInfo)) == 0)
    DebPrint (("mmap_realloc: VirtualQuery error = %ld\n", GetLastError ()));

  /* We need to enlarge the block.  */
  if (memInfo.RegionSize < nbytes)
    {
      if (VirtualQuery (*var + memInfo.RegionSize, &m2, sizeof (m2)) == 0)
        DebPrint (("mmap_realloc: VirtualQuery error = %ld\n",
                   GetLastError ()));
      /* If there is enough room in the current reserved area, then
         commit more pages as needed.  */
      if (m2.State == MEM_RESERVE
          && nbytes <= memInfo.RegionSize + m2.RegionSize)
        {
          void *p;

          p = VirtualAlloc (*var + memInfo.RegionSize,
                            nbytes - memInfo.RegionSize,
                            MEM_COMMIT, PAGE_READWRITE);
          if (!p /* && GetLastError() != ERROR_NOT_ENOUGH_MEMORY */)
            {
              DebPrint (("realloc enlarge: VirtualAlloc error %ld\n",
                         GetLastError ()));
              errno = ENOMEM;
            }
          return *var;
        }
      else
        {
          /* Else we must actually enlarge the block by allocating a
             new one and copying previous contents from the old to the
             new one.  */
          void *old_ptr = *var;

          if (mmap_alloc (var, nbytes))
            {
              CopyMemory (*var, old_ptr, memInfo.RegionSize);
              mmap_free (&old_ptr);
              return *var;
            }
          else
            {
              /* We failed to enlarge the buffer.  */
              *var = old_ptr;
              return NULL;
            }
        }
    }

  /* If we are shrinking by more than one page...  */
  if (memInfo.RegionSize > nbytes + getpagesize())
    {
      /* If we are shrinking a lot...  */
      if ((memInfo.RegionSize / 2) > nbytes)
        {
          /* Let's give some memory back to the system and release
             some pages.  */
          void *old_ptr = *var;

          if (mmap_alloc (var, nbytes))
            {
              CopyMemory (*var, old_ptr, nbytes);
              mmap_free (&old_ptr);
              return *var;
            }
          else
            {
              /* In case we fail to shrink, try to go on with the old block.
                 But that means there is a lot of memory pressure.
                 We could also decommit pages.  */
              *var = old_ptr;
              return *var;
            }
        }

      /* We still can decommit pages.  */
      if (VirtualFree (*var + nbytes + getpagesize(),
                       memInfo.RegionSize - nbytes - getpagesize(),
                       MEM_DECOMMIT) == 0)
        DebPrint (("mmap_realloc: VirtualFree error %ld\n", GetLastError ()));
      return *var;
    }

  /* Not enlarging, not shrinking by more than one page.  */
  return *var;