/* Heap management routines for GNU Emacs on the Microsoft Windows API.
   Copyright (C) 1994, 2001-2016 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/*
  Geoff Voelker (voelker@cs.washington.edu)  7-29-94
*/

/*
  Heavily modified by Fabrice Popineau (fabrice.popineau@gmail.com) 28-02-2014
*/
/*
  Memory allocation scheme for w32/w64:

  - Buffers are mmap'ed using a very simple emulation of mmap/munmap
  - During the temacs phase:
    * we use a private heap declared to be stored into the `dumped_data'
    * unfortunately, this heap cannot be made growable, so the size of
      blocks it can allocate is limited to (0x80000 - pagesize)
    * the blocks that are larger than this are allocated from the end
      of the `dumped_data' array; there are not so many of them.
      We use a very simple first-fit scheme to reuse those blocks.
    * we check that the private heap does not cross the area used
      by the bigger chunks.
  - During the emacs phase:
    * we create a private heap for new memory blocks
    * we make sure that we never free a block that has been dumped.
      Freeing a dumped block could work in principle, but may prove
      unreliable if we distribute binaries of emacs.exe: MS does not
      guarantee that the heap data structures are the same across all
      versions of their OS, even though the API has been available
      since XP.  */

#include <config.h>
#include <stdio.h>
#include <errno.h>

#include <sys/mman.h>
#include "w32common.h"
#include "w32heap.h"
#include "lisp.h"

/* We chose to leave these declarations here.  They are used only in
   this file.  RtlCreateHeap has been available in ntdll.dll since XP,
   and its declarations come with the DDK.  People often complain that
   HeapCreate doesn't offer the ability to create a heap at a given
   address, which is what we need here and what RtlCreateHeap
   provides.  We reproduce below the definitions available with the
   DDK.  */

typedef PVOID (WINAPI * RtlCreateHeap_Proc) (
  /* _In_     */ ULONG Flags,
  /* _In_opt_ */ PVOID HeapBase,
  /* _In_opt_ */ SIZE_T ReserveSize,
  /* _In_opt_ */ SIZE_T CommitSize,
  /* _In_opt_ */ PVOID Lock,
  /* _In_opt_ */ PVOID Parameters
  );

typedef LONG NTSTATUS;

typedef NTSTATUS
(NTAPI * PRTL_HEAP_COMMIT_ROUTINE) (
  IN PVOID Base,
  IN OUT PVOID *CommitAddress,
  IN OUT PSIZE_T CommitSize
  );

typedef struct _RTL_HEAP_PARAMETERS {
  ULONG Length;
  SIZE_T SegmentReserve;
  SIZE_T SegmentCommit;
  SIZE_T DeCommitFreeBlockThreshold;
  SIZE_T DeCommitTotalFreeThreshold;
  SIZE_T MaximumAllocationSize;
  SIZE_T VirtualMemoryThreshold;
  SIZE_T InitialCommit;
  SIZE_T InitialReserve;
  PRTL_HEAP_COMMIT_ROUTINE CommitRoutine;
  SIZE_T Reserved[2];
} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS;

/* We reserve space for dumping Emacs Lisp byte-code inside a static
   array.  By storing it in an array, the generic mechanism in
   unexecw32.c will be able to dump it without the need to add a
   special segment to the executable.  In order to be able to do this
   without losing too much space, we need to create a Windows heap at
   the specific address of the static array.  RtlCreateHeap, available
   from ntdll.dll since XP, does exactly this: it allows the creation
   of a non-growable heap at a specific address.  So before dumping,
   we create a non-growable heap at the address of the dumped_data[]
   array.  After dumping, we reuse memory allocated there without
   being able to free it (but most of it is not meant to be freed
   anyway), and we use a new private heap for all new allocations.  */

/* FIXME: Most of the space reserved for dumped_data[] is only used by
   the 1st bootstrap-emacs.exe built while bootstrapping.  Once the
   preloaded Lisp files are byte-compiled, the next loadup uses less
   than half of the size stated below.  It would be nice to find a way
   to build only the first bootstrap-emacs.exe with the large size,
   and reset that to a lower value afterwards.  */
#if defined _WIN64 || defined WIDE_EMACS_INT
# define DUMPED_HEAP_SIZE (20*1024*1024)
#else
# define DUMPED_HEAP_SIZE (12*1024*1024)
#endif

static unsigned char dumped_data[DUMPED_HEAP_SIZE];

/* Info for keeping track of our dynamic heap used after dumping.  */
unsigned char *data_region_base = NULL;
unsigned char *data_region_end = NULL;
static DWORD_PTR committed = 0;

/* The maximum block size that can be handled by a non-growable w32
   heap is limited by the MaxBlockSize value below.

   This point deserves an explanation.

   The W32 heap allocator can be used for a growable heap or a
   non-growable one.

   A growable heap is not compatible with a fixed base address for the
   heap.  Only a non-growable one is.  One drawback of non-growable
   heaps is that they can hold only objects smaller than a certain
   size (the one defined below).  Most of the largest blocks are GC'ed
   before dumping.  In any case and to be safe, we implement a simple
   first-fit allocation algorithm starting at the end of the
   dumped_data[] array as depicted below:

  ----------------------------------------------
  |               |              |              |
  | Private heap  |->          <-|  Big chunks  |
  |               |              |              |
  ----------------------------------------------
  ^               ^              ^
  dumped_data     dumped_data    bc_limit
                  + committed

*/

/* Info for managing our preload heap, which is essentially a fixed size
   data area in the executable.  */
#define PAGE_SIZE 0x1000
#define MaxBlockSize (0x80000 - PAGE_SIZE)

#define MAX_BLOCKS 0x40

static struct
{
  unsigned char *address;
  size_t size;
  DWORD occupied;
} blocks[MAX_BLOCKS];

static DWORD blocks_number = 0;
static unsigned char *bc_limit;
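
/* For illustration only: the layout depicted above boils down to a
   single invariant, namely that the committed part of the private
   heap, which grows upward from dumped_data, must never run into the
   big-chunk area, which grows downward from
   dumped_data + DUMPED_HEAP_SIZE towards bc_limit.  The hypothetical
   helper below is a sketch that merely restates that invariant in
   code; it is not used by the allocator itself.  */
static inline int
dumped_heap_layout_ok (void)
{
  return dumped_data + committed <= bc_limit
    && bc_limit <= dumped_data + DUMPED_HEAP_SIZE;
}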

/* Handle for the private heap:
    - inside the dumped_data[] array before dump,
    - outside of it after dump.
*/
HANDLE heap = NULL;

/* We redirect the standard allocation functions.  */
malloc_fn the_malloc_fn;
realloc_fn the_realloc_fn;
free_fn the_free_fn;

/* It doesn't seem to be useful to allocate from a file mapping.
   It would be, if the memory were shared.
     http://stackoverflow.com/questions/307060/what-is-the-purpose-of-allocating-pages-in-the-pagefile-with-createfilemapping  */

/* This is the function called to commit memory when the heap
   allocator asks for new memory.  Before dumping, we allocate space
   from the fixed-size dumped_data[] array.
*/
NTSTATUS NTAPI
dumped_data_commit (PVOID Base, PVOID *CommitAddress, PSIZE_T CommitSize)
{
  /* This is used before dumping.

     The private heap is stored at dumped_data[] address.
     We commit contiguous areas of the dumped_data array
     as requests arrive.  */
  *CommitAddress = data_region_base + committed;
  committed += *CommitSize;
  /* Check that the private heap area does not overlap the big chunks area.  */
  if (((unsigned char *)(*CommitAddress)) + *CommitSize >= bc_limit)
    {
      fprintf (stderr,
               "dumped_data_commit: memory exhausted.\nEnlarge dumped_data[]!\n");
      exit (-1);
    }
  return 0;
}
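
/* Worked illustration of the commit routine above, with made-up
   numbers: if the heap asks to commit 0x2000 bytes while COMMITTED is
   0x5000, the routine hands back data_region_base + 0x5000 (roughly
   dumped_data + 0x5000, since data_region_base is dumped_data rounded
   up to a page boundary) and bumps COMMITTED to 0x7000.  The
   comparison against BC_LIMIT then guarantees that the private heap
   never grows into the big-chunk area at the end of dumped_data[].  */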

/* Heap creation.  */

/* We want to turn on Low Fragmentation Heap for XP and older systems.
   MinGW32 lacks those definitions.  */
#ifndef MINGW_W64
typedef enum _HEAP_INFORMATION_CLASS {
  HeapCompatibilityInformation
} HEAP_INFORMATION_CLASS;

typedef WINBASEAPI BOOL (WINAPI * HeapSetInformation_Proc)(HANDLE,HEAP_INFORMATION_CLASS,PVOID,SIZE_T);
#endif


void
init_heap (void)
{
  if (using_dynamic_heap)
    {
      unsigned long enable_lfh = 2;

      /* After dumping, use a new private heap.  We explicitly enable
         the low fragmentation heap (LFH) here, for the sake of
         pre-Vista versions.  Note: this will harmlessly fail on Vista
         and later, where the low-fragmentation heap is enabled by
         default.  It will also fail on pre-Vista versions when Emacs
         is run under a debugger; set _NO_DEBUG_HEAP=1 in the
         environment before starting GDB to get low fragmentation heap
         on XP and older systems, for the price of losing "certain
         heap debug options"; for the details see
         http://msdn.microsoft.com/en-us/library/windows/desktop/aa366705%28v=vs.85%29.aspx.  */
      data_region_end = data_region_base;

      /* Create the private heap.  */
      heap = HeapCreate (0, 0, 0);

#ifndef MINGW_W64
      /* Set the low-fragmentation heap for OS before Vista.  */
      HMODULE hm_kernel32dll = LoadLibrary ("kernel32.dll");
      HeapSetInformation_Proc s_pfn_Heap_Set_Information
        = (HeapSetInformation_Proc) GetProcAddress (hm_kernel32dll,
                                                    "HeapSetInformation");
      if (s_pfn_Heap_Set_Information != NULL)
        {
          if (s_pfn_Heap_Set_Information ((PVOID) heap,
                                          HeapCompatibilityInformation,
                                          &enable_lfh, sizeof (enable_lfh)) == 0)
            DebPrint (("Enabling Low Fragmentation Heap failed: error %ld\n",
                       GetLastError ()));
        }
#endif

      if (os_subtype == OS_9X)
        {
          the_malloc_fn = malloc_after_dump_9x;
          the_realloc_fn = realloc_after_dump_9x;
          the_free_fn = free_after_dump_9x;
        }
      else
        {
          the_malloc_fn = malloc_after_dump;
          the_realloc_fn = realloc_after_dump;
          the_free_fn = free_after_dump;
        }
    }
  else
    {
      /* Find the RtlCreateHeap function.  Headers for this function
         are provided with the w32 ddk, but the function is available
         in ntdll.dll since XP.  */
      HMODULE hm_ntdll = LoadLibrary ("ntdll.dll");
      RtlCreateHeap_Proc s_pfn_Rtl_Create_Heap
        = (RtlCreateHeap_Proc) GetProcAddress (hm_ntdll, "RtlCreateHeap");
      /* Specific parameters for the private heap.  */
      RTL_HEAP_PARAMETERS params;
      ZeroMemory (&params, sizeof (params));
      params.Length = sizeof (RTL_HEAP_PARAMETERS);

      data_region_base = (unsigned char *) ROUND_UP (dumped_data, 0x1000);
      data_region_end = bc_limit = dumped_data + DUMPED_HEAP_SIZE;

      params.InitialCommit = committed = 0x1000;
      params.InitialReserve = sizeof (dumped_data);
      /* Use our own routine to commit memory from the dumped_data
         array.  */
      params.CommitRoutine = &dumped_data_commit;

      /* Create the private heap.  */
      if (s_pfn_Rtl_Create_Heap == NULL)
        {
          fprintf (stderr, "Cannot build Emacs without RtlCreateHeap being available; exiting.\n");
          exit (-1);
        }
      heap = s_pfn_Rtl_Create_Heap (0, data_region_base, 0, 0, NULL, &params);

      if (os_subtype == OS_9X)
        {
          fprintf (stderr, "Cannot dump Emacs on Windows 9X; exiting.\n");
          exit (-1);
        }
      else
        {
          the_malloc_fn = malloc_before_dump;
          the_realloc_fn = realloc_before_dump;
          the_free_fn = free_before_dump;
        }
    }

  /* Update system version information to match current system.  */
  cache_system_info ();
}

#undef malloc
#undef realloc
#undef free

/* FREEABLE_P checks if the block can be safely freed.  */
#define FREEABLE_P(addr)                                                \
  ((unsigned char *)(addr) > 0                                          \
   && ((unsigned char *)(addr) < dumped_data                            \
       || (unsigned char *)(addr) >= dumped_data + DUMPED_HEAP_SIZE))
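
/* For clarity, an equivalent of FREEABLE_P written as a function
   (illustration only; the code below uses the macro itself): a block
   may be handed to HeapFree only if it is non-NULL and does not live
   inside the dumped_data[] array, because blocks inside dumped_data[]
   belong to the dumped image and must never be freed.  The helper
   name is hypothetical.  */
static inline int
freeable_ptr_p (void *addr)
{
  unsigned char *p = (unsigned char *) addr;
  return p != NULL
    && (p < dumped_data || p >= dumped_data + DUMPED_HEAP_SIZE);
}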

void *
malloc_after_dump (size_t size)
{
  /* Use the new private heap.  */
  void *p = HeapAlloc (heap, 0, size);

  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  else
    errno = ENOMEM;
  return p;
}

void *
malloc_before_dump (size_t size)
{
  void *p;

  /* Before dumping.  The private heap can handle only requests for
     less than MaxBlockSize.  */
  if (size < MaxBlockSize)
    {
      /* Use the private heap if possible.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* Find the first big chunk that can hold the requested size.  */
      int i = 0;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].occupied == 0 && blocks[i].size >= size)
            break;
        }
      if (i < blocks_number)
        {
          /* If found, use it.  */
          p = blocks[i].address;
          blocks[i].occupied = TRUE;
        }
      else
        {
          /* Allocate a new big chunk from the end of the dumped_data
             array.  */
          if (blocks_number >= MAX_BLOCKS)
            {
              fprintf (stderr,
                       "malloc_before_dump: no more big chunks available.\nEnlarge MAX_BLOCKS!\n");
              exit (-1);
            }
          bc_limit -= size;
          bc_limit = (unsigned char *) ROUND_DOWN (bc_limit, 0x10);
          p = bc_limit;
          blocks[blocks_number].address = p;
          blocks[blocks_number].size = size;
          blocks[blocks_number].occupied = TRUE;
          blocks_number++;
          /* Check that areas do not overlap.  */
          if (bc_limit < dumped_data + committed)
            {
              fprintf (stderr,
                       "malloc_before_dump: memory exhausted.\nEnlarge dumped_data[]!\n");
              exit (-1);
            }
        }
    }
  return p;
}
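
/* Illustrative sketch (a hypothetical helper, not used by the
   allocator): while loadup runs, the space still available for big
   chunks at any moment is simply the gap between the private heap's
   commit frontier and bc_limit, which malloc_before_dump above keeps
   non-negative by aborting when the two areas would meet.  */
static inline size_t
big_chunk_space_left (void)
{
  return (size_t) (bc_limit - (dumped_data + committed));
}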

/* Re-allocate the previously allocated block in ptr, making the new
   block SIZE bytes long.  */
void *
realloc_after_dump (void *ptr, size_t size)
{
  void *p;

  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Reallocate the block since it lies in the new heap.  */
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* If the block lies in the dumped data, do not free it.  Only
         allocate a new one.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
        errno = ENOMEM;
      else if (ptr)
        CopyMemory (p, ptr, size);
    }
  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
        data_region_end = new_brk;
    }
  return p;
}

void *
realloc_before_dump (void *ptr, size_t size)
{
  void *p;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit && size <= MaxBlockSize)
    {
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
        errno = ENOMEM;
    }
  else
    {
      /* In this case, either the new block is too large for the heap,
         or the old block was already too large.  In both cases,
         malloc_before_dump() and free_before_dump() will take care of
         reallocation.  */
      p = malloc_before_dump (size);
      /* If SIZE is below MaxBlockSize, malloc_before_dump will try to
         allocate it in the fixed heap.  If that fails, we could have
         kept the block in its original place, above bc_limit, instead
         of failing the call as below.  But this doesn't seem to be
         worth the added complexity, as loadup allocates only a very
         small number of large blocks, and never reallocates them.  */
      if (p && ptr)
        {
          CopyMemory (p, ptr, size);
          free_before_dump (ptr);
        }
    }
  return p;
}

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
void
free_after_dump (void *ptr)
{
  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Free the block if it is in the new private heap.  */
      HeapFree (heap, 0, ptr);
    }
}

void
free_before_dump (void *ptr)
{
  if (!ptr)
    return;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit)
    {
      /* Free the block if it is allocated in the private heap.  */
      HeapFree (heap, 0, ptr);
    }
  else
    {
      /* Look for the big chunk.  */
      int i;

      for (i = 0; i < blocks_number; i++)
        {
          if (blocks[i].address == ptr)
            {
              /* Reset block occupation if found.  */
              blocks[i].occupied = 0;
              break;
            }
        }
      /* The block should always be found; if it is not, something is
         wrong, so report it with an assertion violation instead of
         silently ignoring the free.  */
      eassert (i < blocks_number);
    }
}

/* On Windows 9X, HeapAlloc may return pointers that are not aligned
   on an 8-byte boundary, an alignment which is required by the Lisp
   memory management.  To circumvent this problem, manually enforce
   alignment on Windows 9X.  */

void *
malloc_after_dump_9x (size_t size)
{
  void *p = malloc_after_dump (size + 8);
  void *pa;
  if (p == NULL)
    return p;
  pa = (void*)(((intptr_t)p + 8) & ~7);
  *((void**)pa-1) = p;
  return pa;
}
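
/* Worked illustration of the alignment arithmetic above, using a
   made-up address and assuming 32-bit pointers (the only case that
   matters, since Windows 9X is 32-bit only): if HeapAlloc returns
   p = 0x20A00C for a request of SIZE + 8 bytes, then
   pa = (0x20A00C + 8) & ~7 = 0x20A010, which is 8-byte aligned, and
   the original pointer is stashed in the 4 bytes just below pa,
   i.e. at 0x20A00C, still inside the allocated block.  */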

void *
realloc_after_dump_9x (void *ptr, size_t size)
{
  if (FREEABLE_P (ptr))
    {
      void *po = *((void**)ptr-1);
      void *p;
      void *pa;
      p = realloc_after_dump (po, size + 8);
      if (p == NULL)
        return p;
      pa = (void*)(((intptr_t)p + 8) & ~7);
      if (ptr != NULL
          && (char*)pa - (char*)p != (char*)ptr - (char*)po)
        {
          /* Handle the case where alignment in pre-realloc and
             post-realloc blocks does not match.  */
          MoveMemory (pa, (void*)((char*)p + ((char*)ptr - (char*)po)), size);
        }
      *((void**)pa-1) = p;
      return pa;
    }
  else
    {
      /* Non-freeable pointers have no alignment-enforcing header
         (since dumping is not allowed on Windows 9X).  */
      void* p = malloc_after_dump_9x (size);
      if (p != NULL)
        CopyMemory (p, ptr, size);
      return p;
    }
}

void
free_after_dump_9x (void *ptr)
{
  if (FREEABLE_P (ptr))
    {
      free_after_dump (*((void**)ptr-1));
    }
}

#ifdef ENABLE_CHECKING
void
report_temacs_memory_usage (void)
{
  DWORD blocks_used = 0;
  size_t large_mem_used = 0;
  int i;

  for (i = 0; i < blocks_number; i++)
    if (blocks[i].occupied)
      {
        blocks_used++;
        large_mem_used += blocks[i].size;
      }

  /* Emulate 'message', which writes to stderr in non-interactive
     sessions.  */
  fprintf (stderr,
           "Dump memory usage: Heap: %" PRIu64 "  Large blocks(%lu/%lu): %" PRIu64 "/%" PRIu64 "\n",
           (unsigned long long)committed, blocks_used, blocks_number,
           (unsigned long long)large_mem_used,
           (unsigned long long)(dumped_data + DUMPED_HEAP_SIZE - bc_limit));
}
#endif

/* Emulate getpagesize.  */
int
getpagesize (void)
{
  return sysinfo_cache.dwPageSize;
}

void *
sbrk (ptrdiff_t increment)
{
  /* data_region_end is the address beyond the last allocated byte.
     The sbrk() function is not emulated at all, except for a 0 value
     of its parameter.  This is needed by the Emacs Lisp function
     `memory-limit'.  */
  eassert (increment == 0);
  return data_region_end;
}

#define MAX_BUFFER_SIZE (512 * 1024 * 1024)

/* MMAP allocation for buffers.  */
void *
mmap_alloc (void **var, size_t nbytes)
{
  void *p = NULL;

  /* We implement amortized allocation.  We start by reserving twice
     the size requested and commit only the size requested.  Then
     realloc could proceed and use the reserved pages, reallocating
     only if needed.  Buffer shrink would happen only so that we stay
     in the 2x range.  This is a big win when visiting compressed
     files, where the final size of the buffer is not known in
     advance, and the buffer is enlarged several times as the data is
     decompressed on the fly.  */
  if (nbytes < MAX_BUFFER_SIZE)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes * 2, get_allocation_unit ()),
                      MEM_RESERVE, PAGE_READWRITE);

  /* If it fails, or if the request is above 512MB, try with the
     requested size.  */
  if (p == NULL)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes, get_allocation_unit ()),
                      MEM_RESERVE, PAGE_READWRITE);

  if (p != NULL)
    {
      /* Now, commit pages for NBYTES.  */
      *var = VirtualAlloc (p, nbytes, MEM_COMMIT, PAGE_READWRITE);
      if (*var == NULL)
        p = *var;
    }

  if (!p)
    {
      DWORD e = GetLastError ();

      if (e == ERROR_NOT_ENOUGH_MEMORY)
        errno = ENOMEM;
      else
        {
          DebPrint (("mmap_alloc: error %ld\n", e));
          errno = EINVAL;
        }
    }

  return *var = p;
}

void
mmap_free (void **var)
{
  if (*var)
    {
      if (VirtualFree (*var, 0, MEM_RELEASE) == 0)
        DebPrint (("mmap_free: error %ld\n", GetLastError ()));
      *var = NULL;
    }
}

void *
mmap_realloc (void **var, size_t nbytes)
{
  MEMORY_BASIC_INFORMATION memInfo, m2;
  void *old_ptr;

  if (*var == NULL)
    return mmap_alloc (var, nbytes);

  /* This case happens in init_buffer().  */
  if (nbytes == 0)
    {
      mmap_free (var);
      return mmap_alloc (var, nbytes);
    }

  memset (&memInfo, 0, sizeof (memInfo));
  if (VirtualQuery (*var, &memInfo, sizeof (memInfo)) == 0)
    DebPrint (("mmap_realloc: VirtualQuery error = %ld\n", GetLastError ()));

  /* We need to enlarge the block.  */
  if (memInfo.RegionSize < nbytes)
    {
      memset (&m2, 0, sizeof (m2));
      if (VirtualQuery (*var + memInfo.RegionSize, &m2, sizeof(m2)) == 0)
        DebPrint (("mmap_realloc: VirtualQuery error = %ld\n",
                   GetLastError ()));
      /* If there is enough room in the current reserved area, then
         commit more pages as needed.  */
      if (m2.State == MEM_RESERVE
          && nbytes <= memInfo.RegionSize + m2.RegionSize)
        {
          void *p;

          p = VirtualAlloc (*var + memInfo.RegionSize,
                            nbytes - memInfo.RegionSize,
                            MEM_COMMIT, PAGE_READWRITE);
          if (!p /* && GetLastError() != ERROR_NOT_ENOUGH_MEMORY */)
            {
              DebPrint (("realloc enlarge: VirtualAlloc (%p + %I64x, %I64x) error %ld\n",
                         *var, (uint64_t)memInfo.RegionSize,
                         (uint64_t)(nbytes - memInfo.RegionSize),
                         GetLastError ()));
              DebPrint (("next region: %p %p %I64x %x\n", m2.BaseAddress,
                         m2.AllocationBase, m2.RegionSize,
                         m2.AllocationProtect));
            }
          else
            return *var;
        }
      /* Else we must actually enlarge the block by allocating a new
         one and copying previous contents from the old to the new one.  */
      old_ptr = *var;

      if (mmap_alloc (var, nbytes))
        {
          CopyMemory (*var, old_ptr, memInfo.RegionSize);
          mmap_free (&old_ptr);
          return *var;
        }
      else
        {
          /* We failed to reallocate the buffer.  */
          *var = old_ptr;
          return NULL;
        }
    }

  /* If we are shrinking by more than one page...  */
  if (memInfo.RegionSize > nbytes + getpagesize())
    {
      /* If we are shrinking a lot...  */
      if ((memInfo.RegionSize / 2) > nbytes)
        {
          /* Let's give some memory back to the system and release
             some pages.  */
          old_ptr = *var;

          if (mmap_alloc (var, nbytes))
            {
              CopyMemory (*var, old_ptr, nbytes);
              mmap_free (&old_ptr);
              return *var;
            }
          else
            {
              /* In case we fail to shrink, try to go on with the old block.
                 But that means there is a lot of memory pressure.
                 We could also decommit pages.  */
              *var = old_ptr;
              return *var;
            }
        }

      /* We still can decommit pages.  */
      if (VirtualFree (*var + nbytes + get_page_size(),
                       memInfo.RegionSize - nbytes - get_page_size(),
                       MEM_DECOMMIT) == 0)
        DebPrint (("mmap_realloc: VirtualFree error %ld\n", GetLastError ()));
      return *var;
    }

  /* Not enlarging, not shrinking by more than one page.  */
  return *var;
}
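
/* Illustrative usage of the mmap_* emulation above (a sketch only,
   with a hypothetical helper name; the real callers are in Emacs's
   buffer allocation code).  The point of reserving twice the
   requested size in mmap_alloc is that a later mmap_realloc for up to
   roughly twice the original size can be satisfied by committing
   pages that are already reserved, without copying the buffer or
   changing its address.  */
static inline int
mmap_grow_example (void)
{
  void *beg = NULL;

  /* Reserve about 2 MB, commit 1 MB.  */
  if (mmap_alloc (&beg, 1024 * 1024) == NULL)
    return 0;

  /* Growing to 1.5 MB stays inside the reserved region, so
     mmap_realloc only commits the extra pages and BEG keeps its
     address.  */
  if (mmap_realloc (&beg, 3 * 1024 * 1024 / 2) == NULL)
    return 0;

  mmap_free (&beg);
  return 1;
}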