Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * generation.c
4 : * Generational allocator definitions.
5 : *
6 : * Generation is a custom MemoryContext implementation designed for cases of
7 : * chunks with similar lifespan.
8 : *
9 : * Portions Copyright (c) 2017-2026, PostgreSQL Global Development Group
10 : *
11 : * IDENTIFICATION
12 : * src/backend/utils/mmgr/generation.c
13 : *
14 : *
15 : * This memory context is based on the assumption that the chunks are freed
16 : * roughly in the same order as they were allocated (FIFO), or in groups with
17 : * similar lifespan (generations - hence the name of the context). This is
18 : * typical for various queue-like use cases, i.e. when tuples are constructed,
19 : * processed and then thrown away.
20 : *
21 : * The memory context uses a very simple approach to free space management.
22 : * Instead of a complex global freelist, each block tracks a number
23 : * of allocated and freed chunks. The block is classed as empty when the
24 : * number of free chunks is equal to the number of allocated chunks. When
25 : * this occurs, instead of freeing the block, we try to "recycle" it, i.e.
26 : * reuse it for new allocations. This is done by setting the block in the
27 : * context's 'freeblock' field. If the freeblock field is already occupied
28 : * by another free block we simply return the newly empty block to malloc.
29 : *
30 : * This approach to free blocks requires fewer malloc/free calls for truly
31 : * first allocated, first free'd allocation patterns.
32 : *
33 : *-------------------------------------------------------------------------
34 : */
35 :
36 : #include "postgres.h"
37 :
38 : #include "lib/ilist.h"
39 : #include "port/pg_bitutils.h"
40 : #include "utils/memdebug.h"
41 : #include "utils/memutils.h"
42 : #include "utils/memutils_internal.h"
43 : #include "utils/memutils_memorychunk.h"
44 :
45 :
/* Size of a GenerationBlock header, rounded up to a MAXALIGN boundary */
#define Generation_BLOCKHDRSZ	MAXALIGN(sizeof(GenerationBlock))
/* Size of the per-chunk header (maxaligned; see StaticAssert in create) */
#define Generation_CHUNKHDRSZ	sizeof(MemoryChunk)
/* Combined size of the context header plus the keeper block's header */
#define FIRST_BLOCKHDRSZ	(MAXALIGN(sizeof(GenerationContext)) + \
							 Generation_BLOCKHDRSZ)

/*
 * The chunk size limit is chosen so that at least this many maximum-sized
 * non-dedicated chunks fit onto a maximum-sized block.
 */
#define Generation_CHUNK_FRACTION	8

typedef struct GenerationBlock GenerationBlock; /* forward reference */

/* Pointer type handed back to palloc() callers */
typedef void *GenerationPointer;
56 :
/*
 * GenerationContext is a simple memory context not reusing allocated chunks,
 * and freeing blocks once all chunks are freed.
 */
typedef struct GenerationContext
{
	MemoryContextData header;	/* Standard memory-context fields */

	/* Generational context parameters */
	uint32		initBlockSize;	/* initial block size */
	uint32		maxBlockSize;	/* maximum block size */
	uint32		nextBlockSize;	/* next block size to allocate */
	uint32		allocChunkLimit;	/* effective chunk size limit; requests
									 * larger than this get a dedicated block */

	GenerationBlock *block;		/* current (most recently allocated) block */
	GenerationBlock *freeblock; /* pointer to an empty block that's being
								 * recycled, or NULL if there's no such block. */
	dlist_head	blocks;			/* list of blocks */
} GenerationContext;
76 :
/*
 * GenerationBlock
 *		GenerationBlock is the unit of memory that is obtained by generation.c
 *		from malloc().  It contains zero or more MemoryChunks, which are the
 *		units requested by palloc() and freed by pfree().  MemoryChunks cannot
 *		be returned to malloc() individually, instead pfree() updates the free
 *		counter of the block and when all chunks in a block are free the whole
 *		block can be returned to malloc().
 *
 * GenerationBlock is the header data for a block --- the usable space
 * within the block begins at the next alignment boundary.
 */
struct GenerationBlock
{
	dlist_node	node;			/* doubly-linked list of blocks */
	GenerationContext *context; /* pointer back to the owning context */
	Size		blksize;		/* allocated size of this block */
	int			nchunks;		/* number of chunks in the block */
	int			nfree;			/* number of free chunks */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
};
99 :
/*
 * GenerationIsValid
 *		True iff set is valid generation set.
 */
#define GenerationIsValid(set) \
	((set) && IsA(set, GenerationContext))

/*
 * GenerationBlockIsValid
 *		True iff block is valid block of generation set.
 */
#define GenerationBlockIsValid(block) \
	((block) && GenerationIsValid((block)->context))

/*
 * GenerationBlockIsEmpty
 *		True iff block contains no chunks
 */
#define GenerationBlockIsEmpty(b) ((b)->nchunks == 0)

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.
 *
 * Both the argument and the whole expansion are parenthesized so the macro
 * composes safely with any surrounding expression (e.g. member access).
 */
#define ExternalChunkGetBlock(chunk) \
	((GenerationBlock *) ((char *) (chunk) - Generation_BLOCKHDRSZ))

/* Obtain the keeper block for a generation context */
#define KeeperBlock(set) \
	((GenerationBlock *) (((char *) (set)) + \
						  MAXALIGN(sizeof(GenerationContext))))

/* Check if the block is the keeper block of the given generation context */
#define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
135 :
/* Inlined helper functions; see the definitions below for details */
static inline void GenerationBlockInit(GenerationContext *context,
									   GenerationBlock *block,
									   Size blksize);
static inline void GenerationBlockMarkEmpty(GenerationBlock *block);
static inline Size GenerationBlockFreeBytes(GenerationBlock *block);
static inline void GenerationBlockFree(GenerationContext *set,
									   GenerationBlock *block);
144 :
145 :
146 : /*
147 : * Public routines
148 : */
149 :
150 :
/*
 * GenerationContextCreate
 *		Create a new Generation context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * On allocation failure we ereport(ERROR), so a NULL return is not possible.
 */
MemoryContext
GenerationContextCreate(MemoryContext parent,
						const char *name,
						Size minContextSize,
						Size initBlockSize,
						Size maxBlockSize)
{
	Size		firstBlockSize;
	Size		allocSize;
	GenerationContext *set;
	GenerationBlock *block;

	/* ensure MemoryChunk's size is properly maxaligned */
	StaticAssertDecl(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
					 "sizeof(MemoryChunk) is not maxaligned");

	/*
	 * First, validate allocation parameters.  Asserts seem sufficient because
	 * nobody varies their parameters at runtime.  We somewhat arbitrarily
	 * enforce a minimum 1K block size.  We restrict the maximum block size to
	 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
	 * regards to addressing the offset between the chunk and the block that
	 * the chunk is stored on.  We would be unable to store the offset between
	 * the chunk and block for any chunks that were beyond
	 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
	 * larger than this.
	 */
	Assert(initBlockSize == MAXALIGN(initBlockSize) &&
		   initBlockSize >= 1024);
	Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
		   maxBlockSize >= initBlockSize &&
		   AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
	Assert(minContextSize == 0 ||
		   (minContextSize == MAXALIGN(minContextSize) &&
			minContextSize >= 1024 &&
			minContextSize <= maxBlockSize));
	Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);

	/* Determine size of initial block */
	allocSize = MAXALIGN(sizeof(GenerationContext)) +
		Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
	if (minContextSize != 0)
		allocSize = Max(allocSize, minContextSize);
	else
		allocSize = Max(allocSize, initBlockSize);

	/*
	 * Allocate the initial block.  Unlike other generation.c blocks, it
	 * starts with the context header and its block header follows that.
	 */
	set = (GenerationContext *) malloc(allocSize);
	if (set == NULL)
	{
		MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed while creating memory context \"%s\".",
						   name)));
	}

	/*
	 * Avoid writing code that can fail between here and MemoryContextCreate;
	 * we'd leak the header if we ereport in this stretch.
	 */

	/* See comments about Valgrind interactions in aset.c */
	VALGRIND_CREATE_MEMPOOL(set, 0, false);
	/* This vchunk covers the GenerationContext and the keeper block header */
	VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);

	dlist_init(&set->blocks);

	/* Fill in the initial block's block header */
	block = KeeperBlock(set);
	/* determine the block size and initialize it */
	firstBlockSize = allocSize - MAXALIGN(sizeof(GenerationContext));
	GenerationBlockInit(set, block, firstBlockSize);

	/* add it to the doubly-linked list of blocks */
	dlist_push_head(&set->blocks, &block->node);

	/* use it as the current allocation block */
	set->block = block;

	/* No free block, yet */
	set->freeblock = NULL;

	/* Fill in GenerationContext-specific header fields */
	set->initBlockSize = (uint32) initBlockSize;
	set->maxBlockSize = (uint32) maxBlockSize;
	set->nextBlockSize = (uint32) initBlockSize;

	/*
	 * Compute the allocation chunk size limit for this context.
	 *
	 * Limit the maximum size a non-dedicated chunk can be so that we can fit
	 * at least Generation_CHUNK_FRACTION of chunks this big onto the maximum
	 * sized block.  We must further limit this value so that it's no more
	 * than MEMORYCHUNK_MAX_VALUE.  We're unable to have non-external chunks
	 * larger than that value as we store the chunk size in the MemoryChunk
	 * 'value' field in the call to MemoryChunkSetHdrMask().
	 */
	set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE);
	while ((Size) (set->allocChunkLimit + Generation_CHUNKHDRSZ) >
		   (Size) ((Size) (maxBlockSize - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;

	/* Finally, do the type-independent part of context creation */
	MemoryContextCreate((MemoryContext) set,
						T_GenerationContext,
						MCTX_GENERATION_ID,
						parent,
						name);

	((MemoryContext) set)->mem_allocated = firstBlockSize;

	return (MemoryContext) set;
}
280 :
/*
 * GenerationReset
 *		Frees all memory which is allocated in the given set.
 *
 * The initial "keeper" block (which shares a malloc chunk with the context
 * header) is not given back to the operating system though.  In this way, we
 * don't thrash malloc() when a context is repeatedly reset after small
 * allocations.
 */
void
GenerationReset(MemoryContext context)
{
	GenerationContext *set = (GenerationContext *) context;
	dlist_mutable_iter miter;

	Assert(GenerationIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	GenerationCheck(context);
#endif

	/*
	 * NULLify the free block pointer.  We must do this before calling
	 * GenerationBlockFree as that function never expects to free the
	 * freeblock.
	 */
	set->freeblock = NULL;

	dlist_foreach_modify(miter, &set->blocks)
	{
		GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);

		/* Keeper block is emptied and retained; all others go back to malloc */
		if (IsKeeperBlock(set, block))
			GenerationBlockMarkEmpty(block);
		else
			GenerationBlockFree(set, block);
	}

	/*
	 * Instruct Valgrind to throw away all the vchunks associated with this
	 * context, except for the one covering the GenerationContext and
	 * keeper-block header.  This gets rid of the vchunks for whatever user
	 * data is getting discarded by the context reset.
	 */
	VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);

	/* set it so new allocations to make use of the keeper block */
	set->block = KeeperBlock(set);

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;

	/* Ensure there is only 1 item in the dlist (the keeper block) */
	Assert(!dlist_is_empty(&set->blocks));
	Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
}
338 :
/*
 * GenerationDelete
 *		Free all memory which is allocated in the given context.
 *
 * The context header (and with it the keeper block it shares a malloc chunk
 * with) is freed too, so 'context' must not be used after this call.
 */
void
GenerationDelete(MemoryContext context)
{
	/* Reset to release all releasable GenerationBlocks */
	GenerationReset(context);

	/* Destroy the vpool -- see notes in aset.c */
	VALGRIND_DESTROY_MEMPOOL(context);

	/* And free the context header and keeper block */
	free(context);
}
355 :
/*
 * Helper for GenerationAlloc() that allocates an entire dedicated block for
 * the chunk; used for requests exceeding the context's allocChunkLimit.
 *
 * GenerationAlloc()'s comment explains why this is separate.
 */
pg_noinline
static void *
GenerationAllocLarge(MemoryContext context, Size size, int flags)
{
	GenerationContext *set = (GenerationContext *) context;
	GenerationBlock *block;
	MemoryChunk *chunk;
	Size		chunk_size;
	Size		required_size;
	Size		blksize;

	/* validate 'size' is within the limits for the given 'flags' */
	MemoryContextCheckSize(context, size, flags);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's always space for the sentinel byte */
	chunk_size = MAXALIGN(size + 1);
#else
	chunk_size = MAXALIGN(size);
#endif
	required_size = chunk_size + Generation_CHUNKHDRSZ;
	blksize = required_size + Generation_BLOCKHDRSZ;

	block = (GenerationBlock *) malloc(blksize);
	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	/* block with a single (used) chunk */
	block->context = set;
	block->blksize = blksize;
	block->nchunks = 1;
	block->nfree = 0;

	/* the block is completely full */
	block->freeptr = block->endptr = ((char *) block) + blksize;

	chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);

	/* mark the MemoryChunk as externally managed */
	MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	Assert(size < chunk_size);
	set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/* add the block to the list of allocated blocks */
	dlist_push_head(&set->blocks, &block->node);

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
430 :
/*
 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
 * the code between GenerationAlloc() and GenerationAllocFromNewBlock().
 *
 * The caller must ensure 'block' has at least chunk_size plus a chunk header
 * worth of free space (asserted below).
 */
static inline void *
GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block,
							  Size size, Size chunk_size)
{
	MemoryChunk *chunk = (MemoryChunk *) (block->freeptr);

	/* validate we've been given a block with enough free space */
	Assert(block != NULL);
	Assert((block->endptr - block->freeptr) >=
		   Generation_CHUNKHDRSZ + chunk_size);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);

	/* consume the space: bump the chunk count and advance the free pointer */
	block->nchunks += 1;
	block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);

	Assert(block->freeptr <= block->endptr);

	MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	Assert(size < chunk_size);
	set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
475 :
/*
 * Helper for GenerationAlloc() that allocates a new block and returns a chunk
 * allocated from it.
 *
 * GenerationAlloc()'s comment explains why this is separate.
 */
pg_noinline
static void *
GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags,
							Size chunk_size)
{
	GenerationContext *set = (GenerationContext *) context;
	GenerationBlock *block;
	Size		blksize;
	Size		required_size;

	/*
	 * The first such block has size initBlockSize, and we double the space in
	 * each succeeding block, but not more than maxBlockSize.
	 */
	blksize = set->nextBlockSize;
	set->nextBlockSize <<= 1;
	if (set->nextBlockSize > set->maxBlockSize)
		set->nextBlockSize = set->maxBlockSize;

	/* we'll need space for the chunk, chunk hdr and block hdr */
	required_size = chunk_size + Generation_CHUNKHDRSZ + Generation_BLOCKHDRSZ;

	/*
	 * If the scheduled block size can't hold this chunk, use the required
	 * size rounded up to the next power of 2 instead.
	 */
	if (blksize < required_size)
		blksize = pg_nextpower2_size_t(required_size);

	block = (GenerationBlock *) malloc(blksize);

	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	/* initialize the new block */
	GenerationBlockInit(set, block, blksize);

	/* add it to the doubly-linked list of blocks */
	dlist_push_head(&set->blocks, &block->node);

	/* make this the current block */
	set->block = block;

	return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
}
529 :
/*
 * GenerationAlloc
 *		Returns a pointer to allocated memory of given size or raises an ERROR
 *		on allocation failure, or returns NULL when flags contains
 *		MCXT_ALLOC_NO_OOM.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Note: when using valgrind, it doesn't matter how the returned allocation
 * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
 * return space that is marked NOACCESS - GenerationRealloc has to beware!
 *
 * This function should only contain the most common code paths.  Everything
 * else should be in pg_noinline helper functions, thus avoiding the overhead
 * of creating a stack frame for the common cases.  Allocating memory is often
 * a bottleneck in many workloads, so avoiding stack frame setup is
 * worthwhile.  Helper functions should always directly return the newly
 * allocated memory so that we can just return that address directly as a tail
 * call.
 */
void *
GenerationAlloc(MemoryContext context, Size size, int flags)
{
	GenerationContext *set = (GenerationContext *) context;
	GenerationBlock *block;
	Size		chunk_size;
	Size		required_size;

	Assert(GenerationIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's always space for the sentinel byte */
	chunk_size = MAXALIGN(size + 1);
#else
	chunk_size = MAXALIGN(size);
#endif

	/*
	 * If requested size exceeds maximum for chunks we hand the request off to
	 * GenerationAllocLarge().
	 */
	if (chunk_size > set->allocChunkLimit)
		return GenerationAllocLarge(context, size, flags);

	required_size = chunk_size + Generation_CHUNKHDRSZ;

	/*
	 * Not an oversized chunk.  We try to first make use of the current block,
	 * but if there's not enough space in it, instead of allocating a new
	 * block, we look to see if the empty freeblock has enough space.  We
	 * don't try reusing the keeper block.  If it's become empty we'll reuse
	 * that again only if the context is reset.
	 *
	 * We only try reusing the freeblock if we've no space for this allocation
	 * on the current block.  When a freeblock exists, we'll switch to it once
	 * the first time we can't fit an allocation in the current block.  We
	 * avoid ping-ponging between the two as we need to be careful not to
	 * fragment differently sized consecutive allocations between several
	 * blocks.  Going between the two could cause fragmentation for FIFO
	 * workloads, which generation is meant to be good at.
	 */
	block = set->block;

	if (unlikely(GenerationBlockFreeBytes(block) < required_size))
	{
		GenerationBlock *freeblock = set->freeblock;

		/* freeblock, if set, must be empty */
		Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));

		/* check if we have a freeblock and if it's big enough */
		if (freeblock != NULL &&
			GenerationBlockFreeBytes(freeblock) >= required_size)
		{
			/* make the freeblock the current block */
			set->freeblock = NULL;
			set->block = freeblock;

			return GenerationAllocChunkFromBlock(context,
												 freeblock,
												 size,
												 chunk_size);
		}
		else
		{
			/*
			 * No freeblock, or it's not big enough for this allocation.  Make
			 * a new block.
			 */
			return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
		}
	}

	/* The current block has space, so just allocate chunk there. */
	return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
}
628 :
629 : /*
630 : * GenerationBlockInit
631 : * Initializes 'block' assuming 'blksize'. Does not update the context's
632 : * mem_allocated field.
633 : */
634 : static inline void
635 172554 : GenerationBlockInit(GenerationContext *context, GenerationBlock *block,
636 : Size blksize)
637 : {
638 172554 : block->context = context;
639 172554 : block->blksize = blksize;
640 172554 : block->nchunks = 0;
641 172554 : block->nfree = 0;
642 :
643 172554 : block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
644 172554 : block->endptr = ((char *) block) + blksize;
645 :
646 : /* Mark unallocated space NOACCESS. */
647 : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
648 : blksize - Generation_BLOCKHDRSZ);
649 172554 : }
650 :
/*
 * GenerationBlockMarkEmpty
 *		Set a block as empty.  Does not free the block.
 */
static inline void
GenerationBlockMarkEmpty(GenerationBlock *block)
{
#if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY)
	char	   *datastart = ((char *) block) + Generation_BLOCKHDRSZ;
#endif

#ifdef CLOBBER_FREED_MEMORY
	/* trash the formerly-used space to catch stale references */
	wipe_mem(datastart, block->freeptr - datastart);
#else
	/* wipe_mem() would have done this */
	VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif

	/* Reset the block, but don't return it to malloc */
	block->nchunks = 0;
	block->nfree = 0;
	block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
}
674 :
675 : /*
676 : * GenerationBlockFreeBytes
677 : * Returns the number of bytes free in 'block'
678 : */
679 : static inline Size
680 16131965 : GenerationBlockFreeBytes(GenerationBlock *block)
681 : {
682 16131965 : return (block->endptr - block->freeptr);
683 : }
684 :
/*
 * GenerationBlockFree
 *		Remove 'block' from 'set' and release the memory consumed by it.
 */
static inline void
GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
{
	/* Make sure nobody tries to free the keeper block */
	Assert(!IsKeeperBlock(set, block));
	/* We shouldn't be freeing the freeblock either */
	Assert(block != set->freeblock);

	/* release the block from the list of blocks */
	dlist_delete(&block->node);

	/* keep the context's memory accounting in sync */
	((MemoryContext) set)->mem_allocated -= block->blksize;

#ifdef CLOBBER_FREED_MEMORY
	wipe_mem(block, block->blksize);
#endif

	/* As in aset.c, free block-header vchunks explicitly */
	VALGRIND_MEMPOOL_FREE(set, block);

	free(block);
}
711 :
/*
 * GenerationFree
 *		Update number of chunks in the block, and consider freeing the block
 *		if it's become empty.
 */
void
GenerationFree(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	GenerationBlock *block;
	GenerationContext *set;
#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
	|| defined(CLOBBER_FREED_MEMORY)
	Size		chunksize;
#endif

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		/* external chunks sit alone on a dedicated block */
		block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference a generation context.
		 */
		if (!GenerationBlockIsValid(block))
			elog(ERROR, "could not find block containing chunk %p", chunk);

#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
	|| defined(CLOBBER_FREED_MEMORY)
		chunksize = block->endptr - (char *) pointer;
#endif
	}
	else
	{
		block = MemoryChunkGetBlock(chunk);

		/*
		 * In this path, for speed reasons we just Assert that the referenced
		 * block is good.  Future field experience may show that this Assert
		 * had better become a regular runtime test-and-elog check.
		 */
		Assert(GenerationBlockIsValid(block));

#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
	|| defined(CLOBBER_FREED_MEMORY)
		chunksize = MemoryChunkGetValue(chunk);
#endif
	}

#ifdef MEMORY_CONTEXT_CHECKING
	/* See comments in AllocSetFree about uses of ERROR and WARNING here */
	/* Test for previously-freed chunk */
	if (unlikely(chunk->requested_size == InvalidAllocSize))
		elog(ERROR, "detected double pfree in %s %p",
			 ((MemoryContext) block->context)->name, chunk);
	/* Test for someone scribbling on unused space in chunk */
	Assert(chunk->requested_size < chunksize);
	if (!sentinel_ok(pointer, chunk->requested_size))
		elog(WARNING, "detected write past chunk end in %s %p",
			 ((MemoryContext) block->context)->name, chunk);
#endif

#ifdef CLOBBER_FREED_MEMORY
	wipe_mem(pointer, chunksize);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
	/* Reset requested_size to InvalidAllocSize in freed chunks */
	chunk->requested_size = InvalidAllocSize;
#endif

	block->nfree += 1;

	Assert(block->nchunks > 0);
	Assert(block->nfree <= block->nchunks);
	Assert(block != block->context->freeblock);

	/* If there are still allocated chunks in the block, we're done. */
	if (likely(block->nfree < block->nchunks))
		return;

	set = block->context;

	/*-----------------------
	 * The block this allocation was on has now become completely empty of
	 * chunks.  In the general case, we can now return the memory for this
	 * block back to malloc.  However, there are cases where we don't want to
	 * do that:
	 *
	 *	1)	If it's the keeper block.  This block was malloc'd in the same
	 *		allocation as the context itself and can't be free'd without
	 *		freeing the context.
	 *	2)	If it's the current block.  We could free this, but doing so would
	 *		leave us nothing to set the current block to, so we just mark the
	 *		block as empty so new allocations can reuse it again.
	 *	3)	If we have no "freeblock" set, then we save a single block for
	 *		future allocations to avoid having to malloc a new block again.
	 *		This is useful for FIFO workloads as it avoids continual
	 *		free/malloc cycles.
	 */
	if (IsKeeperBlock(set, block) || set->block == block)
		GenerationBlockMarkEmpty(block);	/* case 1 and 2 */
	else if (set->freeblock == NULL)
	{
		/* case 3 */
		GenerationBlockMarkEmpty(block);
		set->freeblock = block;
	}
	else
		GenerationBlockFree(set, block);	/* Otherwise, free it */
}
826 :
/*
 * GenerationRealloc
 *		When handling repalloc, we simply allocate a new chunk, copy the data
 *		and discard the old one. The only exception is when the new size fits
 *		into the old chunk - in that case we just update chunk header.
 */
void *
GenerationRealloc(void *pointer, Size size, int flags)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	GenerationContext *set;
	GenerationBlock *block;
	GenerationPointer newPointer;
	Size		oldsize;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference a generation context.
		 */
		if (!GenerationBlockIsValid(block))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* An external chunk occupies its whole (dedicated) block. */
		oldsize = block->endptr - (char *) pointer;
	}
	else
	{
		block = MemoryChunkGetBlock(chunk);

		/*
		 * In this path, for speed reasons we just Assert that the referenced
		 * block is good. Future field experience may show that this Assert
		 * had better become a regular runtime test-and-elog check.
		 */
		Assert(GenerationBlockIsValid(block));

		/* Non-external chunks carry their size in the chunk header. */
		oldsize = MemoryChunkGetValue(chunk);
	}

	set = block->context;

#ifdef MEMORY_CONTEXT_CHECKING
	/* See comments in AllocSetFree about uses of ERROR and WARNING here */
	/* Test for previously-freed chunk */
	if (unlikely(chunk->requested_size == InvalidAllocSize))
		elog(ERROR, "detected realloc of freed chunk in %s %p",
			 ((MemoryContext) set)->name, chunk);
	/* Test for someone scribbling on unused space in chunk */
	Assert(chunk->requested_size < oldsize);
	if (!sentinel_ok(pointer, chunk->requested_size))
		elog(WARNING, "detected write past chunk end in %s %p",
			 ((MemoryContext) set)->name, chunk);
#endif

	/*
	 * Maybe the allocated area already big enough.  (In particular, we always
	 * fall out here if the requested size is a decrease.)
	 *
	 * This memory context does not use power-of-2 chunk sizing and instead
	 * carves the chunks to be as small as possible, so most repalloc() calls
	 * will end up in the palloc/memcpy/pfree branch.
	 *
	 * XXX Perhaps we should annotate this condition with unlikely()?
	 */
#ifdef MEMORY_CONTEXT_CHECKING
	/* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
	if (oldsize > size)
#else
	if (oldsize >= size)
#endif
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);

		return pointer;
	}

	/* allocate new chunk (this also checks size is valid) */
	newPointer = GenerationAlloc((MemoryContext) set, size, flags);

	/* leave immediately if request was not completed */
	if (newPointer == NULL)
	{
		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
		return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
	}

	/*
	 * GenerationAlloc() may have returned a region that is still NOACCESS.
	 * Change it to UNDEFINED for the moment; memcpy() will then transfer
	 * definedness from the old allocation to the new.  If we know the old
	 * allocation, copy just that much.  Otherwise, make the entire old chunk
	 * defined to avoid errors as we copy the currently-NOACCESS trailing
	 * bytes.
	 */
	VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
	oldsize = chunk->requested_size;
#else
	VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

	/* transfer existing data (certain to fit) */
	memcpy(newPointer, pointer, oldsize);

	/* free old chunk */
	GenerationFree(pointer);

	return newPointer;
}
980 :
981 : /*
982 : * GenerationGetChunkContext
983 : * Return the MemoryContext that 'pointer' belongs to.
984 : */
985 : MemoryContext
986 0 : GenerationGetChunkContext(void *pointer)
987 : {
988 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
989 : GenerationBlock *block;
990 :
991 : /* Allow access to the chunk header. */
992 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
993 :
994 0 : if (MemoryChunkIsExternal(chunk))
995 0 : block = ExternalChunkGetBlock(chunk);
996 : else
997 0 : block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
998 :
999 : /* Disallow access to the chunk header. */
1000 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
1001 :
1002 : Assert(GenerationBlockIsValid(block));
1003 0 : return &block->context->header;
1004 : }
1005 :
1006 : /*
1007 : * GenerationGetChunkSpace
1008 : * Given a currently-allocated chunk, determine the total space
1009 : * it occupies (including all memory-allocation overhead).
1010 : */
1011 : Size
1012 19842585 : GenerationGetChunkSpace(void *pointer)
1013 : {
1014 19842585 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1015 : Size chunksize;
1016 :
1017 : /* Allow access to the chunk header. */
1018 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
1019 :
1020 19842585 : if (MemoryChunkIsExternal(chunk))
1021 : {
1022 118 : GenerationBlock *block = ExternalChunkGetBlock(chunk);
1023 :
1024 : Assert(GenerationBlockIsValid(block));
1025 118 : chunksize = block->endptr - (char *) pointer;
1026 : }
1027 : else
1028 19842467 : chunksize = MemoryChunkGetValue(chunk);
1029 :
1030 : /* Disallow access to the chunk header. */
1031 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
1032 :
1033 19842585 : return Generation_CHUNKHDRSZ + chunksize;
1034 : }
1035 :
1036 : /*
1037 : * GenerationIsEmpty
1038 : * Is a GenerationContext empty of any allocated space?
1039 : */
1040 : bool
1041 0 : GenerationIsEmpty(MemoryContext context)
1042 : {
1043 0 : GenerationContext *set = (GenerationContext *) context;
1044 : dlist_iter iter;
1045 :
1046 : Assert(GenerationIsValid(set));
1047 :
1048 0 : dlist_foreach(iter, &set->blocks)
1049 : {
1050 0 : GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1051 :
1052 0 : if (block->nchunks > 0)
1053 0 : return false;
1054 : }
1055 :
1056 0 : return true;
1057 : }
1058 :
1059 : /*
1060 : * GenerationStats
1061 : * Compute stats about memory consumption of a Generation context.
1062 : *
1063 : * printfunc: if not NULL, pass a human-readable stats string to this.
1064 : * passthru: pass this pointer through to printfunc.
1065 : * totals: if not NULL, add stats about this context into *totals.
1066 : * print_to_stderr: print stats to stderr if true, elog otherwise.
1067 : *
1068 : * XXX freespace only accounts for empty space at the end of the block, not
1069 : * space of freed chunks (which is unknown).
1070 : */
1071 : void
1072 20 : GenerationStats(MemoryContext context,
1073 : MemoryStatsPrintFunc printfunc, void *passthru,
1074 : MemoryContextCounters *totals, bool print_to_stderr)
1075 : {
1076 20 : GenerationContext *set = (GenerationContext *) context;
1077 20 : Size nblocks = 0;
1078 20 : Size nchunks = 0;
1079 20 : Size nfreechunks = 0;
1080 : Size totalspace;
1081 20 : Size freespace = 0;
1082 : dlist_iter iter;
1083 :
1084 : Assert(GenerationIsValid(set));
1085 :
1086 : /* Include context header in totalspace */
1087 20 : totalspace = MAXALIGN(sizeof(GenerationContext));
1088 :
1089 72 : dlist_foreach(iter, &set->blocks)
1090 : {
1091 52 : GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1092 :
1093 52 : nblocks++;
1094 52 : nchunks += block->nchunks;
1095 52 : nfreechunks += block->nfree;
1096 52 : totalspace += block->blksize;
1097 52 : freespace += (block->endptr - block->freeptr);
1098 : }
1099 :
1100 20 : if (printfunc)
1101 : {
1102 : char stats_string[200];
1103 :
1104 0 : snprintf(stats_string, sizeof(stats_string),
1105 : "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1106 : totalspace, nblocks, nchunks, freespace,
1107 : nfreechunks, totalspace - freespace);
1108 0 : printfunc(context, passthru, stats_string, print_to_stderr);
1109 : }
1110 :
1111 20 : if (totals)
1112 : {
1113 20 : totals->nblocks += nblocks;
1114 20 : totals->freechunks += nfreechunks;
1115 20 : totals->totalspace += totalspace;
1116 20 : totals->freespace += freespace;
1117 : }
1118 20 : }
1119 :
1120 :
1121 : #ifdef MEMORY_CONTEXT_CHECKING
1122 :
/*
 * GenerationCheck
 *		Walk through chunks and check consistency of memory.
 *
 * Verifies, for every block in the context: the block's context back-link,
 * the per-block nchunks/nfree counters against an actual chunk walk, chunk
 * sizes against requested sizes, and the sentinel byte past each live
 * chunk's requested size.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
void
GenerationCheck(MemoryContext context)
{
	GenerationContext *gen = (GenerationContext *) context;
	const char *name = context->name;
	dlist_iter	iter;
	Size		total_allocated = 0;

	/* walk all blocks in this context */
	dlist_foreach(iter, &gen->blocks)
	{
		GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
		int			nfree,
					nchunks;
		char	   *ptr;
		bool		has_external_chunk = false;

		total_allocated += block->blksize;

		/*
		 * nfree > nchunks is surely wrong.  Equality is allowed as the block
		 * might be completely empty if it's the freeblock.
		 */
		if (block->nfree > block->nchunks)
			elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
				 name, block->nfree, block, block->nchunks);

		/* check block belongs to the correct context */
		if (block->context != gen)
			elog(WARNING, "problem in Generation %s: bogus context link in block %p",
				 name, block);

		/* Now walk through the chunks and count them. */
		nfree = 0;
		nchunks = 0;
		/* chunks are laid out contiguously after the block header */
		ptr = ((char *) block) + Generation_BLOCKHDRSZ;

		while (ptr < block->freeptr)
		{
			MemoryChunk *chunk = (MemoryChunk *) ptr;
			GenerationBlock *chunkblock;
			Size		chunksize;

			/* Allow access to the chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);

			if (MemoryChunkIsExternal(chunk))
			{
				/* external chunks occupy the whole rest of their block */
				chunkblock = ExternalChunkGetBlock(chunk);
				chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk);
				has_external_chunk = true;
			}
			else
			{
				chunkblock = MemoryChunkGetBlock(chunk);
				chunksize = MemoryChunkGetValue(chunk);
			}

			/* move to the next chunk */
			ptr += (chunksize + Generation_CHUNKHDRSZ);

			nchunks += 1;

			/* chunks have both block and context pointers, so check both */
			if (chunkblock != block)
				elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
					 name, block, chunk);


			/* is chunk allocated?  (freed chunks have InvalidAllocSize) */
			if (chunk->requested_size != InvalidAllocSize)
			{
				/* now make sure the chunk size is correct */
				if (chunksize < chunk->requested_size ||
					chunksize != MAXALIGN(chunksize))
					elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
						 name, block, chunk);

				/* check sentinel (byte just past the requested size) */
				Assert(chunk->requested_size < chunksize);
				if (!sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
					elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
						 name, block, chunk);
			}
			else
				nfree += 1;

			/* if chunk is allocated, disallow access to the chunk header */
			if (chunk->requested_size != InvalidAllocSize)
				VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
		}

		/*
		 * Make sure we got the expected number of allocated and free chunks
		 * (as tracked in the block header).
		 */
		if (nchunks != block->nchunks)
			elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
				 name, nchunks, block, block->nchunks);

		if (nfree != block->nfree)
			elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
				 name, nfree, block, block->nfree);

		/* external chunks must live alone on a dedicated block */
		if (has_external_chunk && nchunks > 1)
			elog(WARNING, "problem in Generation %s: external chunk on non-dedicated block %p",
				 name, block);

	}

	/* cross-check against the context-wide allocation accounting */
	Assert(total_allocated == context->mem_allocated);
}
1243 :
1244 : #endif /* MEMORY_CONTEXT_CHECKING */
|