/* Heap management routines for GNU Emacs on the Microsoft Windows API.
   Copyright (C) 1994, 2001-2016 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/*
   Geoff Voelker (voelker@cs.washington.edu)  7-29-94
*/

/*
   Heavily modified by Fabrice Popineau (fabrice.popineau@gmail.com)  28-02-2014
*/

/*
  Memory allocation scheme for w32/w64:

  - Buffers are mmap'ed using a very simple emulation of mmap/munmap
  - During the temacs phase:
    * we use a private heap declared to be stored into the `dumped_data'
    * unfortunately, this heap cannot be made growable, so the size of
      blocks it can allocate is limited to (0x80000 - pagesize)
    * the blocks that are larger than this are allocated from the end
      of the `dumped_data' array; there are not so many of them.
      We use a very simple first-fit scheme to reuse those blocks.
    * we check that the private heap does not cross the area used
      by the bigger chunks.
  - During the emacs phase:
    * we create a private heap for new memory blocks
    * we make sure that we never free a block that has been dumped.
      Freeing a dumped block could work in principle, but may prove
      unreliable if we distribute binaries of emacs.exe: MS does not
      guarantee that the heap data structures are the same across all
      versions of their OS, even though the API is available since XP.  */

#include <config.h>
#include <stdio.h>
#include <errno.h>

#include <sys/mman.h>
#include "w32common.h"
#include "w32heap.h"
#include "lisp.h"

/* We chose to leave these declarations here.  They are used only in
   this file.  RtlCreateHeap has been available since XP: it lives in
   ntdll.dll and is declared in the DDK headers.  People often
   complained that HeapCreate doesn't offer the ability to create a
   heap at a given place, which we need here, and which RtlCreateHeap
   provides.  We reproduce below the definitions from the DDK.  */

typedef PVOID (WINAPI * RtlCreateHeap_Proc) (
  /* _In_ */      ULONG Flags,
  /* _In_opt_ */  PVOID HeapBase,
  /* _In_opt_ */  SIZE_T ReserveSize,
  /* _In_opt_ */  SIZE_T CommitSize,
  /* _In_opt_ */  PVOID Lock,
  /* _In_opt_ */  PVOID Parameters
  );

typedef LONG NTSTATUS;

typedef NTSTATUS (NTAPI *PRTL_HEAP_COMMIT_ROUTINE) (
  IN PVOID Base,
  IN OUT PVOID *CommitAddress,
  IN OUT PSIZE_T CommitSize
  );

typedef struct _RTL_HEAP_PARAMETERS {
  ULONG Length;
  SIZE_T SegmentReserve;
  SIZE_T SegmentCommit;
  SIZE_T DeCommitFreeBlockThreshold;
  SIZE_T DeCommitTotalFreeThreshold;
  SIZE_T MaximumAllocationSize;
  SIZE_T VirtualMemoryThreshold;
  SIZE_T InitialCommit;
  SIZE_T InitialReserve;
  PRTL_HEAP_COMMIT_ROUTINE CommitRoutine;
  SIZE_T Reserved[2];
} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS;

/* We reserve space for dumping emacs lisp byte-code inside a static
   array.  By storing it in an array, the generic mechanism in
   unexecw32.c will be able to dump it without the need to add a
   special segment to the executable.  In order to be able to do this
   without losing too much space, we need to create a Windows heap at
   the specific address of the static array.  The RtlCreateHeap
   available inside the NT kernel since XP will do this.  It allows the
   creation of a non-growable heap at a specific address.  So before
   dumping, we create a non-growable heap at the address of the
   dumped_data[] array.  After dumping, we reuse memory allocated
   there without being able to free it (but most of it is not meant to
   be freed anyway), and we use a new private heap for all new
   allocations.  */

/* FIXME: Most of the space reserved for dumped_data[] is only used by
   the 1st bootstrap-emacs.exe built while bootstrapping.  Once the
   preloaded Lisp files are byte-compiled, the next loadup uses less
   than half of the size stated below.  It would be nice to find a way
   to build only the first bootstrap-emacs.exe with the large size,
   and reset that to a lower value afterwards.  */
#if defined _WIN64 || defined WIDE_EMACS_INT
# define DUMPED_HEAP_SIZE (20*1024*1024)
#else
# define DUMPED_HEAP_SIZE (12*1024*1024)
#endif

static unsigned char dumped_data[DUMPED_HEAP_SIZE];

/* Info for keeping track of our dynamic heap used after dumping.  */
unsigned char *data_region_base = NULL;
unsigned char *data_region_end = NULL;
static DWORD_PTR committed = 0;

/* The maximum block size that can be handled by a non-growable w32
   heap is limited by the MaxBlockSize value below.

   This point deserves an explanation.

   The W32 heap allocator can be used for a growable heap or a
   non-growable one.

   A growable heap is not compatible with a fixed base address for the
   heap.  Only a non-growable one is.  One drawback of non-growable
   heaps is that they can hold only objects smaller than a certain
   size (the one defined below).  Most of the largest blocks are GC'ed
   before dumping.  In any case and to be safe, we implement a simple
   first-fit allocation algorithm starting at the end of the
   dumped_data[] array, as depicted below:

   ----------------------------------------------
   |               |             |              |
   | Private heap  |->         <-|  Big chunks  |
   |               |             |              |
   ----------------------------------------------
   ^               ^             ^
   dumped_data     dumped_data   bc_limit
                   + committed

*/

/* Info for managing our preload heap, which is essentially a fixed-size
   data area in the executable.  */
#define PAGE_SIZE 0x1000
#define MaxBlockSize (0x80000 - PAGE_SIZE)

#define MAX_BLOCKS 0x40
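
/* Illustrative note (not in the original sources): with PAGE_SIZE
   equal to 0x1000, MaxBlockSize is 0x7F000 bytes, i.e. 508 KiB.  Any
   single request of that size or larger bypasses the non-growable
   private heap and is instead carved out of the top of dumped_data[]
   as one of at most MAX_BLOCKS (64) "big chunks".  */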

static struct
{
  unsigned char *address;
  size_t size;
  DWORD occupied;
} blocks[MAX_BLOCKS];

static DWORD blocks_number = 0;
static unsigned char *bc_limit;

/* Handle for the private heap:
    - inside the dumped_data[] array before dump,
    - outside of it after dump.
*/
HANDLE heap = NULL;

/* We redirect the standard allocation functions.  */
malloc_fn the_malloc_fn;
realloc_fn the_realloc_fn;
free_fn the_free_fn;

/* It doesn't seem to be useful to allocate from a file mapping.
   It would be if the memory was shared.
     http://stackoverflow.com/questions/307060/what-is-the-purpose-of-allocating-pages-in-the-pagefile-with-createfilemapping  */

/* This is the function used to commit memory when the heap allocator
   asks for new memory.  Before dumping, we allocate space from the
   fixed-size dumped_data[] array.
*/
NTSTATUS NTAPI
dumped_data_commit (PVOID Base, PVOID *CommitAddress, PSIZE_T CommitSize)
{
  /* This is used before dumping.

     The private heap is stored at dumped_data[] address.
     We commit contiguous areas of the dumped_data array
     as requests arrive.  */
  *CommitAddress = data_region_base + committed;
  committed += *CommitSize;
  /* Check that the private heap area does not overlap the big chunks area.  */
  if (((unsigned char *)(*CommitAddress)) + *CommitSize >= bc_limit)
    {
      fprintf (stderr,
               "dumped_data_commit: memory exhausted.\nEnlarge dumped_data[]!\n");
      exit (-1);
    }
  return 0;
}
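
/* Illustrative note (not in the original sources): if the heap has
   committed 0x1000 bytes so far and the heap allocator asks this
   routine for another 0x10000 bytes, it hands back
   data_region_base + 0x1000 as the commit address and advances
   `committed' to 0x11000.  temacs exits with an error only when the
   committed range would reach bc_limit, i.e. run into the big-chunk
   area.  */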

/* Heap creation.  */

/* We want to turn on Low Fragmentation Heap for XP and older systems.
   MinGW32 lacks those definitions.  */
#ifndef MINGW_W64
typedef enum _HEAP_INFORMATION_CLASS {
  HeapCompatibilityInformation
} HEAP_INFORMATION_CLASS;

typedef WINBASEAPI BOOL (WINAPI * HeapSetInformation_Proc)(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T);
#endif

void
init_heap (void)
{
  if (using_dynamic_heap)
    {
      unsigned long enable_lfh = 2;

      /* After dumping, use a new private heap.  We explicitly enable
         the low fragmentation heap (LFH) here, for the sake of pre
         Vista versions.  Note: this will harmlessly fail on Vista and
         later, where the low-fragmentation heap is enabled by
         default.  It will also fail on pre-Vista versions when Emacs
         is run under a debugger; set _NO_DEBUG_HEAP=1 in the
         environment before starting GDB to get low fragmentation heap
         on XP and older systems, for the price of losing "certain
         heap debug options"; for the details see
         http://msdn.microsoft.com/en-us/library/windows/desktop/aa366705%28v=vs.85%29.aspx.  */
      data_region_end = data_region_base;

      /* Create the private heap.  */
      heap = HeapCreate (0, 0, 0);

#ifndef MINGW_W64
      /* Set the low-fragmentation heap for OS before Vista.  */
      HMODULE hm_kernel32dll = LoadLibrary ("kernel32.dll");
      HeapSetInformation_Proc s_pfn_Heap_Set_Information
        = (HeapSetInformation_Proc) GetProcAddress (hm_kernel32dll,
                                                    "HeapSetInformation");
      if (s_pfn_Heap_Set_Information != NULL)
        {
          if (s_pfn_Heap_Set_Information ((PVOID) heap,
                                          HeapCompatibilityInformation,
                                          &enable_lfh, sizeof (enable_lfh)) == 0)
            DebPrint (("Enabling Low Fragmentation Heap failed: error %ld\n",
                       GetLastError ()));
        }
#endif

      if (os_subtype == OS_9X)
        {
          the_malloc_fn = malloc_after_dump_9x;
          the_realloc_fn = realloc_after_dump_9x;
          the_free_fn = free_after_dump_9x;
        }
      else
        {
          the_malloc_fn = malloc_after_dump;
          the_realloc_fn = realloc_after_dump;
          the_free_fn = free_after_dump;
        }
    }
  else
    {
      /* Find the RtlCreateHeap function.  Headers for this function
         are provided with the w32 ddk, but the function is available
         in ntdll.dll since XP.  */
      HMODULE hm_ntdll = LoadLibrary ("ntdll.dll");
      RtlCreateHeap_Proc s_pfn_Rtl_Create_Heap
        = (RtlCreateHeap_Proc) GetProcAddress (hm_ntdll, "RtlCreateHeap");
      /* Specific parameters for the private heap.  */
      RTL_HEAP_PARAMETERS params;
      ZeroMemory (&params, sizeof (params));
      params.Length = sizeof (RTL_HEAP_PARAMETERS);

      data_region_base = (unsigned char *) ROUND_UP (dumped_data, 0x1000);
      data_region_end = bc_limit = dumped_data + DUMPED_HEAP_SIZE;

      params.InitialCommit = committed = 0x1000;
      params.InitialReserve = sizeof (dumped_data);
      /* Use our own routine to commit memory from the dumped_data
         array.  */
      params.CommitRoutine = &dumped_data_commit;

      /* Create the private heap.  */
      if (s_pfn_Rtl_Create_Heap == NULL)
        {
          fprintf (stderr, "Cannot build Emacs without RtlCreateHeap being available; exiting.\n");
          exit (-1);
        }
      heap = s_pfn_Rtl_Create_Heap (0, data_region_base, 0, 0, NULL, &params);

      if (os_subtype == OS_9X)
        {
          fprintf (stderr, "Cannot dump Emacs on Windows 9X; exiting.\n");
          exit (-1);
        }
      else
        {
          the_malloc_fn = malloc_before_dump;
          the_realloc_fn = realloc_before_dump;
          the_free_fn = free_before_dump;
        }
    }

  /* Update system version information to match current system.  */
  cache_system_info ();
}
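
/* Illustrative usage sketch (not part of the original file, kept out
   of the build): once init_heap has installed the appropriate
   function pointers, callers can go through the_malloc_fn and friends
   without knowing whether they run before or after dumping.  */
#if 0
static void
heap_usage_example (void)
{
  void *p = the_malloc_fn (128);  /* dispatched to the per-phase allocator */
  if (p != NULL)
    {
      void *q = the_realloc_fn (p, 256);  /* may move the block */
      if (q != NULL)
        p = q;
      the_free_fn (p);  /* freed by the matching per-phase routine */
    }
}
#endif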

#undef malloc
#undef realloc
#undef free

/* FREEABLE_P checks if the block can be safely freed.  */
#define FREEABLE_P(addr)						\
  ((unsigned char *)(addr) > 0						\
   && ((unsigned char *)(addr) < dumped_data				\
       || (unsigned char *)(addr) >= dumped_data + DUMPED_HEAP_SIZE))
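
/* Illustrative note (not in the original sources): FREEABLE_P is
   false for a null pointer and for any address inside dumped_data[]
   (e.g. dumped_data + 0x100); it is true for addresses outside that
   array, such as blocks returned by HeapAlloc on the post-dump
   private heap.  */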

void *
malloc_after_dump (size_t size)
{
  /* Use the new private heap.  */
  void *p = HeapAlloc (heap, 0, size);

  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  else
    errno = ENOMEM;
  return p;
}

void *
malloc_before_dump (size_t size)
{
  void *p;

  /* Before dumping.  The private heap can handle only requests for
     less than MaxBlockSize.  */
  if (size < MaxBlockSize)
    {
      /* Use the private heap if possible.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* Find the first big chunk that can hold the requested size.  */
      int i = 0;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].occupied == 0 && blocks[i].size >= size)
            break;
        }
      if (i < blocks_number)
        {
          /* If found, use it.  */
          p = blocks[i].address;
          blocks[i].occupied = TRUE;
        }
      else
        {
          /* Allocate a new big chunk from the end of the dumped_data
             array.  */
          if (blocks_number >= MAX_BLOCKS)
            {
              fprintf (stderr,
                       "malloc_before_dump: no more big chunks available.\nEnlarge MAX_BLOCKS!\n");
              exit (-1);
            }
          bc_limit -= size;
          bc_limit = (unsigned char *) ROUND_DOWN (bc_limit, 0x10);
          p = bc_limit;
          blocks[blocks_number].address = p;
          blocks[blocks_number].size = size;
          blocks[blocks_number].occupied = TRUE;
          blocks_number++;
          /* Check that areas do not overlap.  */
          if (bc_limit < dumped_data + committed)
            {
              fprintf (stderr,
                       "malloc_before_dump: memory exhausted.\nEnlarge dumped_data[]!\n");
              exit (-1);
            }
        }
    }
  return p;
}
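
/* Worked example (illustrative, not in the original sources): a
   600000-byte request exceeds MaxBlockSize (520192 bytes), so it
   cannot be served by the private heap.  If no free big chunk of at
   least that size exists, bc_limit moves down by 600000 bytes and is
   then aligned down to a 16-byte boundary, and the new chunk is
   recorded in blocks[]; the build aborts only if bc_limit would then
   dip below the pages already committed for the private heap.  */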

/* Re-allocate the previously allocated block in ptr, making the new
   block SIZE bytes long.  */
void *
realloc_after_dump (void *ptr, size_t size)
{
  void *p;

  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Reallocate the block since it lies in the new heap.  */
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* If the block lies in the dumped data, do not free it.  Only
         allocate a new one.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
      else if (ptr)
        CopyMemory (p, ptr, size);
    }
  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  return p;
}

void *
realloc_before_dump (void *ptr, size_t size)
{
  void *p;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit && size <= MaxBlockSize)
    {
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* In this case, either the new block is too large for the heap,
         or the old block was already too large.  In both cases,
         malloc_before_dump() and free_before_dump() will take care of
         reallocation.  */
      p = malloc_before_dump (size);
      /* If SIZE is below MaxBlockSize, malloc_before_dump will try to
         allocate it in the fixed heap.  If that fails, we could have
         kept the block in its original place, above bc_limit, instead
         of failing the call as below.  But this doesn't seem to be
         worth the added complexity, as loadup allocates only a very
         small number of large blocks, and never reallocates them.  */
      if (p && ptr)
        {
          CopyMemory (p, ptr, size);
          free_before_dump (ptr);
        }
    }
  return p;
}

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
void
free_after_dump (void *ptr)
{
  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Free the block if it is in the new private heap.  */
      HeapFree (heap, 0, ptr);
    }
}

void
free_before_dump (void *ptr)
{
  if (!ptr)
    return;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit)
    {
      /* Free the block if it is allocated in the private heap.  */
      HeapFree (heap, 0, ptr);
    }
  else
    {
      /* Look for the big chunk.  */
      int i;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].address == ptr)
            {
              /* Reset block occupation if found.  */
              blocks[i].occupied = 0;
              break;
            }
        }
      /* The block should always be found; assert that we did not fall
         off the end of the table without finding it.  */
      eassert (i < blocks_number);
    }
}

/* On Windows 9X, HeapAlloc may return pointers that are not aligned
   on an 8-byte boundary, an alignment which is required by the Lisp
   memory management.  To circumvent this problem, manually enforce
   alignment on Windows 9X.  */

void *
malloc_after_dump_9x (size_t size)
{
  void *p = malloc_after_dump (size + 8);
  void *pa;
  if (p == NULL)
    return p;
  pa = (void*)(((intptr_t)p + 8) & ~7);
  *((void**)pa - 1) = p;
  return pa;
}
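
/* Worked example (illustrative, not in the original sources): on
   32-bit Windows 9X, if malloc_after_dump returns p == 0x20A004, then
   pa == ((0x20A004 + 8) & ~7) == 0x20A008.  The original pointer p is
   stored in the 4 bytes just below pa, and the caller receives the
   8-byte-aligned pa; the 8 extra bytes requested above cover both
   that stored pointer and the alignment slack.  */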

void *
realloc_after_dump_9x (void *ptr, size_t size)
{
  if (FREEABLE_P (ptr))
    {
      void *po = *((void**)ptr - 1);
      void *p;
      void *pa;
      p = realloc_after_dump (po, size + 8);
      if (p == NULL)
        return p;
      pa = (void*)(((intptr_t)p + 8) & ~7);
      if (ptr != NULL
          && (char*)pa - (char*)p != (char*)ptr - (char*)po)
        {
          /* Handle the case where alignment in pre-realloc and
             post-realloc blocks does not match.  */
          MoveMemory (pa, (void*)((char*)p + ((char*)ptr - (char*)po)), size);
        }
      *((void**)pa - 1) = p;
      return pa;
    }
  else
    {
      /* Non-freeable pointers have no alignment-enforcing header
         (since dumping is not allowed on Windows 9X).  */
      void *p = malloc_after_dump_9x (size);
      if (p != NULL)
        CopyMemory (p, ptr, size);
      return p;
    }
}

void
free_after_dump_9x (void *ptr)
{
  if (FREEABLE_P (ptr))
    {
      free_after_dump (*((void**)ptr - 1));
    }
}

#ifdef ENABLE_CHECKING
void
report_temacs_memory_usage (void)
{
  DWORD blocks_used = 0;
  size_t large_mem_used = 0;
  int i;

  for (i = 0; i < blocks_number; i++)
    if (blocks[i].occupied)
      {
        blocks_used++;
        large_mem_used += blocks[i].size;
      }

  /* Emulate 'message', which writes to stderr in non-interactive
     sessions.  */
  fprintf (stderr,
           "Dump memory usage: Heap: %" PRIu64 " Large blocks(%lu/%lu): %" PRIu64 "/%" PRIu64 "\n",
           (unsigned long long)committed, blocks_used, blocks_number,
           (unsigned long long)large_mem_used,
           (unsigned long long)(dumped_data + DUMPED_HEAP_SIZE - bc_limit));
}
#endif

/* Emulate getpagesize.  */
int
getpagesize (void)
{
  return sysinfo_cache.dwPageSize;
}

void *
sbrk (ptrdiff_t increment)
{
  /* data_region_end is the address beyond the last allocated byte.
     The sbrk() function is not emulated at all, except for a 0 value
     of its parameter.  This is needed by the Emacs Lisp function
     `memory-limit'.  */
  eassert (increment == 0);
  return data_region_end;
}

#define MAX_BUFFER_SIZE (512 * 1024 * 1024)

/* MMAP allocation for buffers.  */
void *
mmap_alloc (void **var, size_t nbytes)
{
  void *p = NULL;

  /* We implement amortized allocation.  We start by reserving twice
     the size requested and commit only the size requested.  Then
     realloc can proceed by using the reserved pages, committing more
     of them only when needed.  Buffer shrinking happens only so that
     we stay within the 2x range.  This is a big win when visiting
     compressed files, where the final size of the buffer is not known
     in advance, and the buffer is enlarged several times as the data
     is decompressed on the fly.  */
  if (nbytes < MAX_BUFFER_SIZE)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes * 2, get_allocation_unit ()),
                      MEM_RESERVE, PAGE_READWRITE);

  /* If that fails, or if the request is above 512MB, try with the
     requested size.  */
  if (p == NULL)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes, get_allocation_unit ()),
                      MEM_RESERVE, PAGE_READWRITE);

  if (p != NULL)
    {
      /* Now, commit pages for NBYTES.  */
      *var = VirtualAlloc (p, nbytes, MEM_COMMIT, PAGE_READWRITE);
      if (*var == NULL)
        p = *var;
    }

  if (!p)
    {
      DWORD e = GetLastError ();

      if (e == ERROR_NOT_ENOUGH_MEMORY)
        errno = ENOMEM;
      else
        {
          DebPrint (("mmap_alloc: error %ld\n", e));
          errno = EINVAL;
        }
    }

  return *var = p;
}

void
mmap_free (void **var)
{
  if (*var)
    {
      if (VirtualFree (*var, 0, MEM_RELEASE) == 0)
        DebPrint (("mmap_free: error %ld\n", GetLastError ()));
      *var = NULL;
    }
}

void *
mmap_realloc (void **var, size_t nbytes)
{
  MEMORY_BASIC_INFORMATION memInfo, m2;
  void *old_ptr;

  if (*var == NULL)
    return mmap_alloc (var, nbytes);

  /* This case happens in init_buffer().  */
  if (nbytes == 0)
    {
      mmap_free (var);
      return mmap_alloc (var, nbytes);
    }

  memset (&memInfo, 0, sizeof (memInfo));
  if (VirtualQuery (*var, &memInfo, sizeof (memInfo)) == 0)
    DebPrint (("mmap_realloc: VirtualQuery error = %ld\n", GetLastError ()));

  /* We need to enlarge the block.  */
  if (memInfo.RegionSize < nbytes)
    {
      memset (&m2, 0, sizeof (m2));
      if (VirtualQuery (*var + memInfo.RegionSize, &m2, sizeof (m2)) == 0)
        DebPrint (("mmap_realloc: VirtualQuery error = %ld\n",
                   GetLastError ()));
      /* If there is enough room in the current reserved area, then
         commit more pages as needed.  */
      if (m2.State == MEM_RESERVE
          && nbytes <= memInfo.RegionSize + m2.RegionSize)
        {
          void *p;

          p = VirtualAlloc (*var + memInfo.RegionSize,
                            nbytes - memInfo.RegionSize,
                            MEM_COMMIT, PAGE_READWRITE);
          if (!p /* && GetLastError() != ERROR_NOT_ENOUGH_MEMORY */)
            {
              DebPrint (("realloc enlarge: VirtualAlloc (%p + %I64x, %I64x) error %ld\n",
                         *var, (uint64_t)memInfo.RegionSize,
                         (uint64_t)(nbytes - memInfo.RegionSize),
                         GetLastError ()));
              DebPrint (("next region: %p %p %I64x %x\n", m2.BaseAddress,
                         m2.AllocationBase, m2.RegionSize, m2.AllocationProtect));
            }
          else
            return *var;
        }
      /* Else we must actually enlarge the block by allocating a new
         one and copying previous contents from the old to the new one.  */
      old_ptr = *var;

      if (mmap_alloc (var, nbytes))
        {
          CopyMemory (*var, old_ptr, memInfo.RegionSize);
          mmap_free (&old_ptr);
          return *var;
        }
      else
        {
          /* We failed to reallocate the buffer.  */
          *var = old_ptr;
          return NULL;
        }
    }

  /* If we are shrinking by more than one page...  */
  if (memInfo.RegionSize > nbytes + getpagesize ())
    {
      /* If we are shrinking a lot...  */
      if ((memInfo.RegionSize / 2) > nbytes)
        {
          /* Let's give some memory back to the system and release
             some pages.  */
          old_ptr = *var;

          if (mmap_alloc (var, nbytes))
            {
              CopyMemory (*var, old_ptr, nbytes);
              mmap_free (&old_ptr);
              return *var;
            }
          else
            {
              /* In case we fail to shrink, try to go on with the old block.
                 But that means there is a lot of memory pressure.
                 We could also decommit pages.  */
              *var = old_ptr;
              return *var;
            }
        }

      /* We can still decommit pages.  */
      if (VirtualFree (*var + nbytes + get_page_size (),
                       memInfo.RegionSize - nbytes - get_page_size (),
                       MEM_DECOMMIT) == 0)
        DebPrint (("mmap_realloc: VirtualFree error %ld\n", GetLastError ()));
      return *var;
    }

  /* Neither enlarging nor shrinking by more than one page: keep the
     block as it is.  */
  return *var;
}
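
/* Illustrative usage sketch (not part of the original file, kept out
   of the build): a buffer-text consumer is expected to reserve and
   commit through mmap_alloc, grow or shrink through mmap_realloc, and
   release through mmap_free, always passing the same VAR so these
   routines can track the region.  */
#if 0
static void
mmap_usage_example (void)
{
  void *text = NULL;

  /* Reserve about twice the request, commit 64K of it.  */
  if (mmap_alloc (&text, 64 * 1024) == NULL)
    return;

  /* Growing within the reserved area only commits more pages;
     growing beyond it falls back to allocate-and-copy.  */
  if (mmap_realloc (&text, 100 * 1024) == NULL)
    {
      mmap_free (&text);
      return;
    }

  mmap_free (&text);
}
#endif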