Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * generation.c
4 : * Generational allocator definitions.
5 : *
6 : * Generation is a custom MemoryContext implementation designed for cases of
7 : * chunks with similar lifespan.
8 : *
9 : * Portions Copyright (c) 2017-2024, PostgreSQL Global Development Group
10 : *
11 : * IDENTIFICATION
12 : * src/backend/utils/mmgr/generation.c
13 : *
14 : *
15 : * This memory context is based on the assumption that the chunks are freed
16 : * roughly in the same order as they were allocated (FIFO), or in groups with
17 : * similar lifespan (generations - hence the name of the context). This is
18 : * typical for various queue-like use cases, e.g. when tuples are constructed,
19 : * processed and then thrown away.
20 : *
21 : * The memory context uses a very simple approach to free space management.
22 : * Instead of a complex global freelist, each block tracks the number
23 : * of allocated and freed chunks. The block is classed as empty when the
24 : * number of free chunks is equal to the number of allocated chunks. When
25 : * this occurs, instead of freeing the block, we try to "recycle" it, i.e.
26 : * reuse it for new allocations. This is done by setting the block in the
27 : * context's 'freeblock' field. If the freeblock field is already occupied
28 : * by another free block, we simply return the newly empty block to malloc.
29 : *
30 : * This approach to free blocks requires fewer malloc/free calls for truly
31 : * first-allocated, first-freed allocation patterns.
32 : *
33 : *-------------------------------------------------------------------------
34 : */
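/*
 * A minimal usage sketch (hypothetical caller, not part of this file), using
 * only the public entry points declared in utils/memutils.h and
 * utils/palloc.h; ALLOCSET_DEFAULT_SIZES is the usual convenience macro
 * supplying the three size parameters:
 *
 *     MemoryContext gcxt = GenerationContextCreate(CurrentMemoryContext,
 *                                                  "example tuples",
 *                                                  ALLOCSET_DEFAULT_SIZES);
 *     MemoryContext oldcxt = MemoryContextSwitchTo(gcxt);
 *
 *     void   *a = palloc(64);
 *     void   *b = palloc(64);
 *     ...
 *     pfree(a);               (chunks freed in roughly allocation order)
 *     pfree(b);
 *
 *     MemoryContextSwitchTo(oldcxt);
 *     MemoryContextDelete(gcxt);  (or MemoryContextReset() to keep reusing it)
 */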
35 :
36 : #include "postgres.h"
37 :
38 : #include "lib/ilist.h"
39 : #include "port/pg_bitutils.h"
40 : #include "utils/memdebug.h"
41 : #include "utils/memutils.h"
42 : #include "utils/memutils_internal.h"
43 : #include "utils/memutils_memorychunk.h"
44 :
45 :
46 : #define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
47 : #define Generation_CHUNKHDRSZ sizeof(MemoryChunk)
48 :
49 : #define Generation_CHUNK_FRACTION 8
50 :
51 : typedef struct GenerationBlock GenerationBlock; /* forward reference */
52 :
53 : typedef void *GenerationPointer;
54 :
55 : /*
56 : * GenerationContext is a simple memory context that does not reuse allocated
57 : * chunks, and frees each block once all chunks on it have been freed.
58 : */
59 : typedef struct GenerationContext
60 : {
61 : MemoryContextData header; /* Standard memory-context fields */
62 :
63 : /* Generational context parameters */
64 : uint32 initBlockSize; /* initial block size */
65 : uint32 maxBlockSize; /* maximum block size */
66 : uint32 nextBlockSize; /* next block size to allocate */
67 : uint32 allocChunkLimit; /* effective chunk size limit */
68 :
69 : GenerationBlock *block; /* current (most recently allocated) block */
70 : GenerationBlock *freeblock; /* pointer to an empty block that's being
71 : * recycled, or NULL if there's no such block. */
72 : dlist_head blocks; /* list of blocks */
73 : } GenerationContext;
74 :
75 : /*
76 : * GenerationBlock
77 : * GenerationBlock is the unit of memory that is obtained by generation.c
78 : * from malloc(). It contains zero or more MemoryChunks, which are the
79 : * units requested by palloc() and freed by pfree(). MemoryChunks cannot
80 : * be returned to malloc() individually; instead, pfree() updates the free
81 : * counter of the block, and when all chunks in a block are free, the whole
82 : * block can be returned to malloc().
83 : *
84 : * GenerationBlock is the header data for a block --- the usable space
85 : * within the block begins at the next alignment boundary.
86 : */
87 : struct GenerationBlock
88 : {
89 : dlist_node node; /* doubly-linked list of blocks */
90 : GenerationContext *context; /* pointer back to the owning context */
91 : Size blksize; /* allocated size of this block */
92 : int nchunks; /* number of chunks in the block */
93 : int nfree; /* number of free chunks */
94 : char *freeptr; /* start of free space in this block */
95 : char *endptr; /* end of space in this block */
96 : };
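/*
 * Rough sketch of a block's layout, derived from the code below (not to
 * scale):
 *
 *   [ block header | chunk hdr | user data | chunk hdr | user data | unused ]
 *     ^ block        ^ first chunk (block + Generation_BLOCKHDRSZ)   ^ freeptr ... ^ endptr
 *
 * Each allocation of 'size' bytes advances freeptr by Generation_CHUNKHDRSZ +
 * MAXALIGN(size) (MAXALIGN(size + 1) in MEMORY_CONTEXT_CHECKING builds, to
 * leave room for a sentinel byte).  pfree() never moves freeptr backwards; it
 * only increments nfree until the whole block can be recycled or released.
 */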
97 :
98 : /*
99 : * GenerationIsValid
100 : * True iff set is a valid generation set.
101 : */
102 : #define GenerationIsValid(set) \
103 : (PointerIsValid(set) && IsA(set, GenerationContext))
104 :
105 : /*
106 : * GenerationBlockIsValid
107 : * True iff block is a valid block of a generation set.
108 : */
109 : #define GenerationBlockIsValid(block) \
110 : (PointerIsValid(block) && GenerationIsValid((block)->context))
111 :
112 : /*
113 : * GenerationBlockIsEmpty
114 : * True iff block contains no chunks
115 : */
116 : #define GenerationBlockIsEmpty(b) ((b)->nchunks == 0)
117 :
118 : /*
119 : * We always store external chunks on a dedicated block. This makes fetching
120 : * the block from an external chunk easy since it's always the first and only
121 : * chunk on the block.
122 : */
123 : #define ExternalChunkGetBlock(chunk) \
124 : (GenerationBlock *) ((char *) chunk - Generation_BLOCKHDRSZ)
125 :
126 : /* Obtain the keeper block for a generation context */
127 : #define KeeperBlock(set) \
128 : ((GenerationBlock *) (((char *) set) + \
129 : MAXALIGN(sizeof(GenerationContext))))
130 :
131 : /* Check if the block is the keeper block of the given generation context */
132 : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
133 :
134 : /* Inlined helper functions */
135 : static inline void GenerationBlockInit(GenerationContext *context,
136 : GenerationBlock *block,
137 : Size blksize);
138 : static inline void GenerationBlockMarkEmpty(GenerationBlock *block);
139 : static inline Size GenerationBlockFreeBytes(GenerationBlock *block);
140 : static inline void GenerationBlockFree(GenerationContext *set,
141 : GenerationBlock *block);
142 :
143 :
144 : /*
145 : * Public routines
146 : */
147 :
148 :
149 : /*
150 : * GenerationContextCreate
151 : * Create a new Generation context.
152 : *
153 : * parent: parent context, or NULL if top-level context
154 : * name: name of context (must be statically allocated)
155 : * minContextSize: minimum context size
156 : * initBlockSize: initial allocation block size
157 : * maxBlockSize: maximum allocation block size
158 : */
159 : MemoryContext
160 256092 : GenerationContextCreate(MemoryContext parent,
161 : const char *name,
162 : Size minContextSize,
163 : Size initBlockSize,
164 : Size maxBlockSize)
165 : {
166 : Size firstBlockSize;
167 : Size allocSize;
168 : GenerationContext *set;
169 : GenerationBlock *block;
170 :
171 : /* ensure MemoryChunk's size is properly maxaligned */
172 : StaticAssertDecl(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
173 : "sizeof(MemoryChunk) is not maxaligned");
174 :
175 : /*
176 : * First, validate allocation parameters. Asserts seem sufficient because
177 : * nobody varies their parameters at runtime. We somewhat arbitrarily
178 : * enforce a minimum 1K block size. We restrict the maximum block size to
179 : * MEMORYCHUNK_MAX_BLOCKOFFSET, because that is the largest offset a
180 : * MemoryChunk is able to record between itself and the start of the
181 : * block it is stored on. If a block were allowed to be larger than
182 : * this, we would be unable to store the block offset for any chunk
183 : * that happened to be located more than MEMORYCHUNK_MAX_BLOCKOFFSET
184 : * bytes into the block.
185 : */
186 : Assert(initBlockSize == MAXALIGN(initBlockSize) &&
187 : initBlockSize >= 1024);
188 : Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
189 : maxBlockSize >= initBlockSize &&
190 : AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
191 : Assert(minContextSize == 0 ||
192 : (minContextSize == MAXALIGN(minContextSize) &&
193 : minContextSize >= 1024 &&
194 : minContextSize <= maxBlockSize));
195 : Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
196 :
197 : /* Determine size of initial block */
198 256092 : allocSize = MAXALIGN(sizeof(GenerationContext)) +
199 : Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
200 256092 : if (minContextSize != 0)
201 2096 : allocSize = Max(allocSize, minContextSize);
202 : else
203 253996 : allocSize = Max(allocSize, initBlockSize);
204 :
205 : /*
206 : * Allocate the initial block. Unlike other generation.c blocks, it
207 : * starts with the context header and its block header follows that.
208 : */
209 256092 : set = (GenerationContext *) malloc(allocSize);
210 256092 : if (set == NULL)
211 : {
212 0 : MemoryContextStats(TopMemoryContext);
213 0 : ereport(ERROR,
214 : (errcode(ERRCODE_OUT_OF_MEMORY),
215 : errmsg("out of memory"),
216 : errdetail("Failed while creating memory context \"%s\".",
217 : name)));
218 : }
219 :
220 : /*
221 : * Avoid writing code that can fail between here and MemoryContextCreate;
222 : * we'd leak the header if we ereport in this stretch.
223 : */
224 256092 : dlist_init(&set->blocks);
225 :
226 : /* Fill in the initial block's block header */
227 256092 : block = KeeperBlock(set);
228 : /* determine the block size and initialize it */
229 256092 : firstBlockSize = allocSize - MAXALIGN(sizeof(GenerationContext));
230 256092 : GenerationBlockInit(set, block, firstBlockSize);
231 :
232 : /* add it to the doubly-linked list of blocks */
233 256092 : dlist_push_head(&set->blocks, &block->node);
234 :
235 : /* use it as the current allocation block */
236 256092 : set->block = block;
237 :
238 : /* No free block, yet */
239 256092 : set->freeblock = NULL;
240 :
241 : /* Fill in GenerationContext-specific header fields */
242 256092 : set->initBlockSize = (uint32) initBlockSize;
243 256092 : set->maxBlockSize = (uint32) maxBlockSize;
244 256092 : set->nextBlockSize = (uint32) initBlockSize;
245 :
246 : /*
247 : * Compute the allocation chunk size limit for this context.
248 : *
249 : * Limit the maximum size a non-dedicated chunk can be so that we can fit
250 : * at least Generation_CHUNK_FRACTION chunks this big onto the maximum
251 : * sized block. We must further limit this value so that it's no more
252 : * than MEMORYCHUNK_MAX_VALUE. We're unable to have non-external chunks
253 : * larger than that value as we store the chunk size in the MemoryChunk
254 : * 'value' field in the call to MemoryChunkSetHdrMask().
255 : */
256 256092 : set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE);
257 256092 : while ((Size) (set->allocChunkLimit + Generation_CHUNKHDRSZ) >
258 1280460 : (Size) ((Size) (maxBlockSize - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION))
259 1024368 : set->allocChunkLimit >>= 1;
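	/*
	 * Worked example (illustrative): with the stock 8MB maxBlockSize from
	 * ALLOCSET_DEFAULT_SIZES and Generation_CHUNK_FRACTION = 8, a chunk plus
	 * its header may consume at most roughly 1MB of a maximum-sized block.
	 * Starting from 8MB, the loop above therefore halves the limit four
	 * times, leaving allocChunkLimit at 512kB; any request larger than that
	 * is sent to GenerationAllocLarge() and gets a dedicated block.
	 */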
260 :
261 : /* Finally, do the type-independent part of context creation */
262 256092 : MemoryContextCreate((MemoryContext) set,
263 : T_GenerationContext,
264 : MCTX_GENERATION_ID,
265 : parent,
266 : name);
267 :
268 256092 : ((MemoryContext) set)->mem_allocated = firstBlockSize;
269 :
270 256092 : return (MemoryContext) set;
271 : }
272 :
273 : /*
274 : * GenerationReset
275 : * Frees all memory which is allocated in the given set.
276 : *
277 : * The initial "keeper" block (which shares a malloc chunk with the context
278 : * header) is not given back to the operating system though. In this way, we
279 : * don't thrash malloc() when a context is repeatedly reset after small
280 : * allocations.
281 : */
282 : void
283 385130 : GenerationReset(MemoryContext context)
284 : {
285 385130 : GenerationContext *set = (GenerationContext *) context;
286 : dlist_mutable_iter miter;
287 :
288 : Assert(GenerationIsValid(set));
289 :
290 : #ifdef MEMORY_CONTEXT_CHECKING
291 : /* Check for corruption and leaks before freeing */
292 : GenerationCheck(context);
293 : #endif
294 :
295 : /*
296 : * NULLify the free block pointer. We must do this before calling
297 : * GenerationBlockFree as that function never expects to free the
298 : * freeblock.
299 : */
300 385130 : set->freeblock = NULL;
301 :
302 791970 : dlist_foreach_modify(miter, &set->blocks)
303 : {
304 406840 : GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
305 :
306 406840 : if (IsKeeperBlock(set, block))
307 385130 : GenerationBlockMarkEmpty(block);
308 : else
309 21710 : GenerationBlockFree(set, block);
310 : }
311 :
312 : /* set it so new allocations make use of the keeper block */
313 385130 : set->block = KeeperBlock(set);
314 :
315 : /* Reset block size allocation sequence, too */
316 385130 : set->nextBlockSize = set->initBlockSize;
317 :
318 : /* Ensure there is only 1 item in the dlist */
319 : Assert(!dlist_is_empty(&set->blocks));
320 : Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
321 385130 : }
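/*
 * A sketch of the reuse pattern this enables (hypothetical caller): resetting
 * between batches rewinds the keeper block and releases any other blocks, so
 * steady-state processing incurs no further malloc traffic as long as each
 * batch fits within the keeper block.
 *
 *     MemoryContext gcxt = GenerationContextCreate(CurrentMemoryContext,
 *                                                  "per-batch data",
 *                                                  ALLOCSET_DEFAULT_SIZES);
 *
 *     for (;;)                (once per batch)
 *     {
 *         MemoryContextReset(gcxt);
 *         ... palloc() the batch's working data inside gcxt ...
 *     }
 */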
322 :
323 : /*
324 : * GenerationDelete
325 : * Free all memory which is allocated in the given context.
326 : */
327 : void
328 255738 : GenerationDelete(MemoryContext context)
329 : {
330 : /* Reset to release all releasable GenerationBlocks */
331 255738 : GenerationReset(context);
332 : /* And free the context header and keeper block */
333 255738 : free(context);
334 255738 : }
335 :
336 : /*
337 : * Helper for GenerationAlloc() that allocates an entire block for the chunk.
338 : *
339 : * GenerationAlloc()'s comment explains why this is separate.
340 : */
341 : pg_noinline
342 : static void *
343 9236 : GenerationAllocLarge(MemoryContext context, Size size, int flags)
344 : {
345 9236 : GenerationContext *set = (GenerationContext *) context;
346 : GenerationBlock *block;
347 : MemoryChunk *chunk;
348 : Size chunk_size;
349 : Size required_size;
350 : Size blksize;
351 :
352 : /* validate 'size' is within the limits for the given 'flags' */
353 9236 : MemoryContextCheckSize(context, size, flags);
354 :
355 : #ifdef MEMORY_CONTEXT_CHECKING
356 : /* ensure there's always space for the sentinel byte */
357 : chunk_size = MAXALIGN(size + 1);
358 : #else
359 9236 : chunk_size = MAXALIGN(size);
360 : #endif
361 9236 : required_size = chunk_size + Generation_CHUNKHDRSZ;
362 9236 : blksize = required_size + Generation_BLOCKHDRSZ;
363 :
364 9236 : block = (GenerationBlock *) malloc(blksize);
365 9236 : if (block == NULL)
366 0 : return MemoryContextAllocationFailure(context, size, flags);
367 :
368 9236 : context->mem_allocated += blksize;
369 :
370 : /* block with a single (used) chunk */
371 9236 : block->context = set;
372 9236 : block->blksize = blksize;
373 9236 : block->nchunks = 1;
374 9236 : block->nfree = 0;
375 :
376 : /* the block is completely full */
377 9236 : block->freeptr = block->endptr = ((char *) block) + blksize;
378 :
379 9236 : chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
380 :
381 : /* mark the MemoryChunk as externally managed */
382 9236 : MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID);
383 :
384 : #ifdef MEMORY_CONTEXT_CHECKING
385 : chunk->requested_size = size;
386 : /* set mark to catch clobber of "unused" space */
387 : Assert(size < chunk_size);
388 : set_sentinel(MemoryChunkGetPointer(chunk), size);
389 : #endif
390 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
391 : /* fill the allocated space with junk */
392 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
393 : #endif
394 :
395 : /* add the block to the list of allocated blocks */
396 9236 : dlist_push_head(&set->blocks, &block->node);
397 :
398 : /* Ensure any padding bytes are marked NOACCESS. */
399 : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
400 : chunk_size - size);
401 :
402 : /* Disallow access to the chunk header. */
403 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
404 :
405 9236 : return MemoryChunkGetPointer(chunk);
406 : }
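/*
 * Illustrative numbers, assuming the roughly 512kB allocChunkLimit computed
 * in GenerationContextCreate() for the default block sizes: a palloc(600000)
 * exceeds that limit, so it ends up here and receives a dedicated block of
 * MAXALIGN(600000) + Generation_CHUNKHDRSZ + Generation_BLOCKHDRSZ bytes,
 * with its one chunk marked external.  When that chunk is pfree'd, nfree
 * reaches nchunks (both 1), and GenerationFree() either parks the block as
 * the context's freeblock (if that slot is empty) or returns it to free()
 * immediately.
 */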
407 :
408 : /*
409 : * Small helper for allocating a new chunk from a block, to avoid duplicating
410 : * the code between GenerationAlloc() and GenerationAllocFromNewBlock().
411 : */
412 : static inline void *
413 22530218 : GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block,
414 : Size size, Size chunk_size)
415 : {
416 22530218 : MemoryChunk *chunk = (MemoryChunk *) (block->freeptr);
417 :
418 : /* validate we've been given a block with enough free space */
419 : Assert(block != NULL);
420 : Assert((block->endptr - block->freeptr) >=
421 : Generation_CHUNKHDRSZ + chunk_size);
422 :
423 : /* Prepare to initialize the chunk header. */
424 : VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
425 :
426 22530218 : block->nchunks += 1;
427 22530218 : block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
428 :
429 : Assert(block->freeptr <= block->endptr);
430 :
431 22530218 : MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
432 : #ifdef MEMORY_CONTEXT_CHECKING
433 : chunk->requested_size = size;
434 : /* set mark to catch clobber of "unused" space */
435 : Assert(size < chunk_size);
436 : set_sentinel(MemoryChunkGetPointer(chunk), size);
437 : #endif
438 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
439 : /* fill the allocated space with junk */
440 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
441 : #endif
442 :
443 : /* Ensure any padding bytes are marked NOACCESS. */
444 : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
445 : chunk_size - size);
446 :
447 : /* Disallow access to the chunk header. */
448 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
449 :
450 22530218 : return MemoryChunkGetPointer(chunk);
451 : }
452 :
453 : /*
454 : * Helper for GenerationAlloc() that allocates a new block and returns a chunk
455 : * allocated from it.
456 : *
457 : * GenerationAlloc()'s comment explains why this is separate.
458 : */
459 : pg_noinline
460 : static void *
461 40480 : GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags,
462 : Size chunk_size)
463 : {
464 40480 : GenerationContext *set = (GenerationContext *) context;
465 : GenerationBlock *block;
466 : Size blksize;
467 : Size required_size;
468 :
469 : /*
470 : * The first such block has size initBlockSize, and we double the space in
471 : * each succeeding block, but not more than maxBlockSize.
472 : */
473 40480 : blksize = set->nextBlockSize;
474 40480 : set->nextBlockSize <<= 1;
475 40480 : if (set->nextBlockSize > set->maxBlockSize)
476 18868 : set->nextBlockSize = set->maxBlockSize;
477 :
478 : /* we'll need space for the chunk, chunk hdr and block hdr */
479 40480 : required_size = chunk_size + Generation_CHUNKHDRSZ + Generation_BLOCKHDRSZ;
480 :
481 : /* round the size up to the next power of 2 */
482 40480 : if (blksize < required_size)
483 148 : blksize = pg_nextpower2_size_t(required_size);
484 :
485 40480 : block = (GenerationBlock *) malloc(blksize);
486 :
487 40480 : if (block == NULL)
488 0 : return MemoryContextAllocationFailure(context, size, flags);
489 :
490 40480 : context->mem_allocated += blksize;
491 :
492 : /* initialize the new block */
493 40480 : GenerationBlockInit(set, block, blksize);
494 :
495 : /* add it to the doubly-linked list of blocks */
496 40480 : dlist_push_head(&set->blocks, &block->node);
497 :
498 : /* make this the current block */
499 40480 : set->block = block;
500 :
501 40480 : return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
502 : }
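/*
 * Illustrative growth sequence, assuming ALLOCSET_DEFAULT_SIZES (an 8kB
 * initBlockSize and an 8MB maxBlockSize): the first block allocated here is
 * 8kB, and each subsequent block doubles -- 16kB, 32kB, and so on -- until
 * the 8MB cap is reached, after which every new block is 8MB.  A request too
 * large for the scheduled size still gets a block, sized to the next power
 * of 2 above the space it needs.
 */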
503 :
504 : /*
505 : * GenerationAlloc
506 : * Returns a pointer to allocated memory of given size or raises an ERROR
507 : * on allocation failure, or returns NULL when flags contains
508 : * MCXT_ALLOC_NO_OOM.
509 : *
510 : * No request may exceed:
511 : * MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
512 : * All callers use a much-lower limit.
513 : *
514 : * Note: when using valgrind, it doesn't matter how the returned allocation
515 : * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
516 : * return space that is marked NOACCESS - GenerationRealloc has to beware!
517 : *
518 : * This function should only contain the most common code paths. Everything
519 : * else should be in pg_noinline helper functions, thus avoiding the overhead
520 : * of creating a stack frame for the common cases. Allocating memory is often
521 : * a bottleneck in many workloads, so avoiding stack frame setup is
522 : * worthwhile. Helper functions should always directly return the newly
523 : * allocated memory so that we can just return that address directly as a tail
524 : * call.
525 : */
526 : void *
527 22539454 : GenerationAlloc(MemoryContext context, Size size, int flags)
528 : {
529 22539454 : GenerationContext *set = (GenerationContext *) context;
530 : GenerationBlock *block;
531 : Size chunk_size;
532 : Size required_size;
533 :
534 : Assert(GenerationIsValid(set));
535 :
536 : #ifdef MEMORY_CONTEXT_CHECKING
537 : /* ensure there's always space for the sentinel byte */
538 : chunk_size = MAXALIGN(size + 1);
539 : #else
540 22539454 : chunk_size = MAXALIGN(size);
541 : #endif
542 :
543 : /*
544 : * If requested size exceeds maximum for chunks we hand the request off to
545 : * GenerationAllocLarge().
546 : */
547 22539454 : if (chunk_size > set->allocChunkLimit)
548 9236 : return GenerationAllocLarge(context, size, flags);
549 :
550 22530218 : required_size = chunk_size + Generation_CHUNKHDRSZ;
551 :
552 : /*
553 : * Not an oversized chunk. We first try to make use of the current block,
554 : * but if there's not enough space in it, instead of allocating a new
555 : * block, we look to see if the empty freeblock has enough space. We
556 : * don't try reusing the keeper block; if that has become empty, it is
557 : * reused again only after the context is reset.
558 : *
559 : * We only try reusing the freeblock if we've no space for this allocation
560 : * on the current block. When a freeblock exists, we'll switch to it the
561 : * first time we can't fit an allocation in the current block. We
562 : * avoid ping-ponging between the two as we need to be careful not to
563 : * fragment differently sized consecutive allocations between several
564 : * blocks. Going between the two could cause fragmentation for FIFO
565 : * workloads, which generation is meant to be good at.
566 : */
567 22530218 : block = set->block;
568 :
569 22530218 : if (unlikely(GenerationBlockFreeBytes(block) < required_size))
570 : {
571 49054 : GenerationBlock *freeblock = set->freeblock;
572 :
573 : /* freeblock, if set, must be empty */
574 : Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));
575 :
576 : /* check if we have a freeblock and if it's big enough */
577 57628 : if (freeblock != NULL &&
578 8574 : GenerationBlockFreeBytes(freeblock) >= required_size)
579 : {
580 : /* make the freeblock the current block */
581 8574 : set->freeblock = NULL;
582 8574 : set->block = freeblock;
583 :
584 8574 : return GenerationAllocChunkFromBlock(context,
585 : freeblock,
586 : size,
587 : chunk_size);
588 : }
589 : else
590 : {
591 : /*
592 : * No freeblock, or it's not big enough for this allocation. Make
593 : * a new block.
594 : */
595 40480 : return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
596 : }
597 : }
598 :
599 : /* The current block has space, so just allocate chunk there. */
600 22481164 : return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
601 : }
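/*
 * Apart from GenerationRealloc() below, callers reach this function through
 * the MemoryContextMethods dispatch in mcxt.c rather than calling it
 * directly.  A hedged sketch of how the 'flags' path is exercised from
 * ordinary backend code:
 *
 *     MemoryContextSwitchTo(gcxt);
 *     buf = palloc_extended(request_size, MCXT_ALLOC_NO_OOM);
 *     if (buf == NULL)
 *         ... recover gracefully instead of erroring out ...
 *
 * Plain palloc() passes flags = 0 and therefore raises ERROR on allocation
 * failure.
 */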
602 :
603 : /*
604 : * GenerationBlockInit
605 : * Initializes 'block' with size 'blksize'. Does not update the context's
606 : * mem_allocated field.
607 : */
608 : static inline void
609 296572 : GenerationBlockInit(GenerationContext *context, GenerationBlock *block,
610 : Size blksize)
611 : {
612 296572 : block->context = context;
613 296572 : block->blksize = blksize;
614 296572 : block->nchunks = 0;
615 296572 : block->nfree = 0;
616 :
617 296572 : block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
618 296572 : block->endptr = ((char *) block) + blksize;
619 :
620 : /* Mark unallocated space NOACCESS. */
621 : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
622 : blksize - Generation_BLOCKHDRSZ);
623 296572 : }
624 :
625 : /*
626 : * GenerationBlockMarkEmpty
627 : * Set a block as empty. Does not free the block.
628 : */
629 : static inline void
630 5503390 : GenerationBlockMarkEmpty(GenerationBlock *block)
631 : {
632 : #if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY)
633 : char *datastart = ((char *) block) + Generation_BLOCKHDRSZ;
634 : #endif
635 :
636 : #ifdef CLOBBER_FREED_MEMORY
637 : wipe_mem(datastart, block->freeptr - datastart);
638 : #else
639 : /* wipe_mem() would have done this */
640 : VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
641 : #endif
642 :
643 : /* Reset the block, but don't return it to malloc */
644 5503390 : block->nchunks = 0;
645 5503390 : block->nfree = 0;
646 5503390 : block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
647 5503390 : }
648 :
649 : /*
650 : * GenerationBlockFreeBytes
651 : * Returns the number of bytes free in 'block'
652 : */
653 : static inline Size
654 22538792 : GenerationBlockFreeBytes(GenerationBlock *block)
655 : {
656 22538792 : return (block->endptr - block->freeptr);
657 : }
658 :
659 : /*
660 : * GenerationBlockFree
661 : * Remove 'block' from 'set' and release the memory consumed by it.
662 : */
663 : static inline void
664 49658 : GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
665 : {
666 : /* Make sure nobody tries to free the keeper block */
667 : Assert(!IsKeeperBlock(set, block));
668 : /* We shouldn't be freeing the freeblock either */
669 : Assert(block != set->freeblock);
670 :
671 : /* release the block from the list of blocks */
672 49658 : dlist_delete(&block->node);
673 :
674 49658 : ((MemoryContext) set)->mem_allocated -= block->blksize;
675 :
676 : #ifdef CLOBBER_FREED_MEMORY
677 : wipe_mem(block, block->blksize);
678 : #endif
679 :
680 49658 : free(block);
681 49658 : }
682 :
683 : /*
684 : * GenerationFree
685 : * Update the number of free chunks in the block, and consider freeing
686 : * the block if it has become empty.
687 : */
688 : void
689 9689466 : GenerationFree(void *pointer)
690 : {
691 9689466 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
692 : GenerationBlock *block;
693 : GenerationContext *set;
694 : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
695 : || defined(CLOBBER_FREED_MEMORY)
696 : Size chunksize;
697 : #endif
698 :
699 : /* Allow access to the chunk header. */
700 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
701 :
702 9689466 : if (MemoryChunkIsExternal(chunk))
703 : {
704 9066 : block = ExternalChunkGetBlock(chunk);
705 :
706 : /*
707 : * Try to verify that we have a sane block pointer: the block header
708 : * should reference a generation context.
709 : */
710 9066 : if (!GenerationBlockIsValid(block))
711 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
712 :
713 : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
714 : || defined(CLOBBER_FREED_MEMORY)
715 : chunksize = block->endptr - (char *) pointer;
716 : #endif
717 : }
718 : else
719 : {
720 9680400 : block = MemoryChunkGetBlock(chunk);
721 :
722 : /*
723 : * In this path, for speed reasons we just Assert that the referenced
724 : * block is good. Future field experience may show that this Assert
725 : * had better become a regular runtime test-and-elog check.
726 : */
727 : Assert(GenerationBlockIsValid(block));
728 :
729 : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
730 : || defined(CLOBBER_FREED_MEMORY)
731 : chunksize = MemoryChunkGetValue(chunk);
732 : #endif
733 : }
734 :
735 : #ifdef MEMORY_CONTEXT_CHECKING
736 : /* Test for someone scribbling on unused space in chunk */
737 : Assert(chunk->requested_size < chunksize);
738 : if (!sentinel_ok(pointer, chunk->requested_size))
739 : elog(WARNING, "detected write past chunk end in %s %p",
740 : ((MemoryContext) block->context)->name, chunk);
741 : #endif
742 :
743 : #ifdef CLOBBER_FREED_MEMORY
744 : wipe_mem(pointer, chunksize);
745 : #endif
746 :
747 : #ifdef MEMORY_CONTEXT_CHECKING
748 : /* Reset requested_size to InvalidAllocSize in freed chunks */
749 : chunk->requested_size = InvalidAllocSize;
750 : #endif
751 :
752 9689466 : block->nfree += 1;
753 :
754 : Assert(block->nchunks > 0);
755 : Assert(block->nfree <= block->nchunks);
756 : Assert(block != block->context->freeblock);
757 :
758 : /* If there are still allocated chunks in the block, we're done. */
759 9689466 : if (likely(block->nfree < block->nchunks))
760 4543258 : return;
761 :
762 5146208 : set = block->context;
763 :
764 : /*-----------------------
765 : * The block this allocation was on has now become completely empty of
766 : * chunks. In the general case, we can now return the memory for this
767 : * block back to malloc. However, there are cases where we don't want to
768 : * do that:
769 : *
770 : * 1) If it's the keeper block. This block was malloc'd in the same
771 : * allocation as the context itself and can't be free'd without
772 : * freeing the context.
773 : * 2) If it's the current block. We could free this, but doing so would
774 : * leave us nothing to set the current block to, so we just mark the
775 : * block as empty so new allocations can reuse it again.
776 : * 3) If we have no "freeblock" set, then we save a single block for
777 : * future allocations to avoid having to malloc a new block again.
778 : * This is useful for FIFO workloads as it avoids continual
779 : * free/malloc cycles.
780 : */
781 5146208 : if (IsKeeperBlock(set, block) || set->block == block)
782 5109288 : GenerationBlockMarkEmpty(block); /* case 1 and 2 */
783 36920 : else if (set->freeblock == NULL)
784 : {
785 : /* case 3 */
786 8972 : GenerationBlockMarkEmpty(block);
787 8972 : set->freeblock = block;
788 : }
789 : else
790 27948 : GenerationBlockFree(set, block); /* Otherwise, free it */
791 : }
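/*
 * To make the cases above concrete for a FIFO workload (a sketch): suppose
 * block A filled up and block B is now current.  As the oldest chunks are
 * pfree'd, A eventually becomes empty; it is neither the keeper nor the
 * current block, so it is parked in set->freeblock (case 3).  When
 * allocations later overflow B, GenerationAlloc() switches to A instead of
 * calling malloc().  Should another block empty while A is still parked,
 * that block really is handed back to free().
 */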
792 :
793 : /*
794 : * GenerationRealloc
795 : * When handling repalloc, we simply allocate a new chunk, copy the data
796 : * and discard the old one. The only exception is when the new size fits
797 : * into the old chunk - in that case we just update the chunk header.
798 : */
799 : void *
800 0 : GenerationRealloc(void *pointer, Size size, int flags)
801 : {
802 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
803 : GenerationContext *set;
804 : GenerationBlock *block;
805 : GenerationPointer newPointer;
806 : Size oldsize;
807 :
808 : /* Allow access to the chunk header. */
809 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
810 :
811 0 : if (MemoryChunkIsExternal(chunk))
812 : {
813 0 : block = ExternalChunkGetBlock(chunk);
814 :
815 : /*
816 : * Try to verify that we have a sane block pointer: the block header
817 : * should reference a generation context.
818 : */
819 0 : if (!GenerationBlockIsValid(block))
820 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
821 :
822 0 : oldsize = block->endptr - (char *) pointer;
823 : }
824 : else
825 : {
826 0 : block = MemoryChunkGetBlock(chunk);
827 :
828 : /*
829 : * In this path, for speed reasons we just Assert that the referenced
830 : * block is good. Future field experience may show that this Assert
831 : * had better become a regular runtime test-and-elog check.
832 : */
833 : Assert(GenerationBlockIsValid(block));
834 :
835 0 : oldsize = MemoryChunkGetValue(chunk);
836 : }
837 :
838 0 : set = block->context;
839 :
840 : #ifdef MEMORY_CONTEXT_CHECKING
841 : /* Test for someone scribbling on unused space in chunk */
842 : Assert(chunk->requested_size < oldsize);
843 : if (!sentinel_ok(pointer, chunk->requested_size))
844 : elog(WARNING, "detected write past chunk end in %s %p",
845 : ((MemoryContext) set)->name, chunk);
846 : #endif
847 :
848 : /*
849 : * Maybe the allocated area is already big enough. (In particular, we always
850 : * fall out here if the requested size is a decrease.)
851 : *
852 : * This memory context does not use power-of-2 chunk sizing and instead
853 : * carves the chunks to be as small as possible, so most repalloc() calls
854 : * will end up in the palloc/memcpy/pfree branch.
855 : *
856 : * XXX Perhaps we should annotate this condition with unlikely()?
857 : */
858 : #ifdef MEMORY_CONTEXT_CHECKING
859 : /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
860 : if (oldsize > size)
861 : #else
862 0 : if (oldsize >= size)
863 : #endif
864 : {
865 : #ifdef MEMORY_CONTEXT_CHECKING
866 : Size oldrequest = chunk->requested_size;
867 :
868 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
869 : /* We can only fill the extra space if we know the prior request */
870 : if (size > oldrequest)
871 : randomize_mem((char *) pointer + oldrequest,
872 : size - oldrequest);
873 : #endif
874 :
875 : chunk->requested_size = size;
876 :
877 : /*
878 : * If this is an increase, mark any newly-available part UNDEFINED.
879 : * Otherwise, mark the obsolete part NOACCESS.
880 : */
881 : if (size > oldrequest)
882 : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
883 : size - oldrequest);
884 : else
885 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
886 : oldsize - size);
887 :
888 : /* set mark to catch clobber of "unused" space */
889 : set_sentinel(pointer, size);
890 : #else /* !MEMORY_CONTEXT_CHECKING */
891 :
892 : /*
893 : * We don't have the information to determine whether we're growing
894 : * the old request or shrinking it, so we conservatively mark the
895 : * entire new allocation DEFINED.
896 : */
897 : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
898 : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
899 : #endif
900 :
901 : /* Disallow access to the chunk header. */
902 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
903 :
904 0 : return pointer;
905 : }
906 :
907 : /* allocate new chunk (this also checks size is valid) */
908 0 : newPointer = GenerationAlloc((MemoryContext) set, size, flags);
909 :
910 : /* leave immediately if request was not completed */
911 0 : if (newPointer == NULL)
912 : {
913 : /* Disallow access to the chunk header. */
914 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
915 0 : return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
916 : }
917 :
918 : /*
919 : * GenerationAlloc() may have returned a region that is still NOACCESS.
920 : * Change it to UNDEFINED for the moment; memcpy() will then transfer
921 : * definedness from the old allocation to the new. If we know the old
922 : * allocation, copy just that much. Otherwise, make the entire old chunk
923 : * defined to avoid errors as we copy the currently-NOACCESS trailing
924 : * bytes.
925 : */
926 : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
927 : #ifdef MEMORY_CONTEXT_CHECKING
928 : oldsize = chunk->requested_size;
929 : #else
930 : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
931 : #endif
932 :
933 : /* transfer existing data (certain to fit) */
934 0 : memcpy(newPointer, pointer, oldsize);
935 :
936 : /* free old chunk */
937 0 : GenerationFree(pointer);
938 :
939 0 : return newPointer;
940 : }
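/*
 * A usage sketch (hypothetical caller): because generation chunks are carved
 * as tightly as possible, growing an allocation almost always takes the
 * palloc/memcpy/pfree path above, while a request that still fits in the old
 * chunk simply returns the same pointer:
 *
 *     p = repalloc(p, new_size);
 */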
941 :
942 : /*
943 : * GenerationGetChunkContext
944 : * Return the MemoryContext that 'pointer' belongs to.
945 : */
946 : MemoryContext
947 0 : GenerationGetChunkContext(void *pointer)
948 : {
949 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
950 : GenerationBlock *block;
951 :
952 : /* Allow access to the chunk header. */
953 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
954 :
955 0 : if (MemoryChunkIsExternal(chunk))
956 0 : block = ExternalChunkGetBlock(chunk);
957 : else
958 0 : block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
959 :
960 : /* Disallow access to the chunk header. */
961 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
962 :
963 : Assert(GenerationBlockIsValid(block));
964 0 : return &block->context->header;
965 : }
966 :
967 : /*
968 : * GenerationGetChunkSpace
969 : * Given a currently-allocated chunk, determine the total space
970 : * it occupies (including all memory-allocation overhead).
971 : */
972 : Size
973 26342526 : GenerationGetChunkSpace(void *pointer)
974 : {
975 26342526 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
976 : Size chunksize;
977 :
978 : /* Allow access to the chunk header. */
979 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
980 :
981 26342526 : if (MemoryChunkIsExternal(chunk))
982 : {
983 170 : GenerationBlock *block = ExternalChunkGetBlock(chunk);
984 :
985 : Assert(GenerationBlockIsValid(block));
986 170 : chunksize = block->endptr - (char *) pointer;
987 : }
988 : else
989 26342356 : chunksize = MemoryChunkGetValue(chunk);
990 :
991 : /* Disallow access to the chunk header. */
992 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
993 :
994 26342526 : return Generation_CHUNKHDRSZ + chunksize;
995 : }
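/*
 * Example arithmetic (a sketch for a typical 64-bit build without
 * MEMORY_CONTEXT_CHECKING, where Generation_CHUNKHDRSZ is 8 and MAXALIGN
 * rounds to 8): a palloc(100) in a generation context occupies
 * MAXALIGN(100) = 104 bytes of chunk space, so GetMemoryChunkSpace() on that
 * pointer reports 8 + 104 = 112 bytes.
 */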
996 :
997 : /*
998 : * GenerationIsEmpty
999 : * Is a GenerationContext empty of any allocated space?
1000 : */
1001 : bool
1002 0 : GenerationIsEmpty(MemoryContext context)
1003 : {
1004 0 : GenerationContext *set = (GenerationContext *) context;
1005 : dlist_iter iter;
1006 :
1007 : Assert(GenerationIsValid(set));
1008 :
1009 0 : dlist_foreach(iter, &set->blocks)
1010 : {
1011 0 : GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1012 :
1013 0 : if (block->nchunks > 0)
1014 0 : return false;
1015 : }
1016 :
1017 0 : return true;
1018 : }
1019 :
1020 : /*
1021 : * GenerationStats
1022 : * Compute stats about memory consumption of a Generation context.
1023 : *
1024 : * printfunc: if not NULL, pass a human-readable stats string to this.
1025 : * passthru: pass this pointer through to printfunc.
1026 : * totals: if not NULL, add stats about this context into *totals.
1027 : * print_to_stderr: print stats to stderr if true, elog otherwise.
1028 : *
1029 : * XXX freespace only accounts for empty space at the end of the block, not
1030 : * the space of freed chunks (which is unknown).
1031 : */
1032 : void
1033 30 : GenerationStats(MemoryContext context,
1034 : MemoryStatsPrintFunc printfunc, void *passthru,
1035 : MemoryContextCounters *totals, bool print_to_stderr)
1036 : {
1037 30 : GenerationContext *set = (GenerationContext *) context;
1038 30 : Size nblocks = 0;
1039 30 : Size nchunks = 0;
1040 30 : Size nfreechunks = 0;
1041 : Size totalspace;
1042 30 : Size freespace = 0;
1043 : dlist_iter iter;
1044 :
1045 : Assert(GenerationIsValid(set));
1046 :
1047 : /* Include context header in totalspace */
1048 30 : totalspace = MAXALIGN(sizeof(GenerationContext));
1049 :
1050 100 : dlist_foreach(iter, &set->blocks)
1051 : {
1052 70 : GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1053 :
1054 70 : nblocks++;
1055 70 : nchunks += block->nchunks;
1056 70 : nfreechunks += block->nfree;
1057 70 : totalspace += block->blksize;
1058 70 : freespace += (block->endptr - block->freeptr);
1059 : }
1060 :
1061 30 : if (printfunc)
1062 : {
1063 : char stats_string[200];
1064 :
1065 0 : snprintf(stats_string, sizeof(stats_string),
1066 : "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1067 : totalspace, nblocks, nchunks, freespace,
1068 : nfreechunks, totalspace - freespace);
1069 0 : printfunc(context, passthru, stats_string, print_to_stderr);
1070 : }
1071 :
1072 30 : if (totals)
1073 : {
1074 30 : totals->nblocks += nblocks;
1075 30 : totals->freechunks += nfreechunks;
1076 30 : totals->totalspace += totalspace;
1077 30 : totals->freespace += freespace;
1078 : }
1079 30 : }
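/*
 * This is reached via MemoryContextStats(), e.g. invoked on TopMemoryContext
 * from a debugger, and (with printfunc == NULL) when the
 * pg_backend_memory_contexts view gathers per-context totals; a hedged
 * sketch of the former:
 *
 *     MemoryContextStats(TopMemoryContext);
 */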
1080 :
1081 :
1082 : #ifdef MEMORY_CONTEXT_CHECKING
1083 :
1084 : /*
1085 : * GenerationCheck
1086 : * Walk through chunks and check consistency of memory.
1087 : *
1088 : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1089 : * find yourself in an infinite loop when trouble occurs, because this
1090 : * routine will be entered again when elog cleanup tries to release memory!
1091 : */
1092 : void
1093 : GenerationCheck(MemoryContext context)
1094 : {
1095 : GenerationContext *gen = (GenerationContext *) context;
1096 : const char *name = context->name;
1097 : dlist_iter iter;
1098 : Size total_allocated = 0;
1099 :
1100 : /* walk all blocks in this context */
1101 : dlist_foreach(iter, &gen->blocks)
1102 : {
1103 : GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1104 : int nfree,
1105 : nchunks;
1106 : char *ptr;
1107 : bool has_external_chunk = false;
1108 :
1109 : total_allocated += block->blksize;
1110 :
1111 : /*
1112 : * nfree > nchunks is surely wrong. Equality is allowed as the block
1113 : * might be completely empty if it's the freeblock.
1114 : */
1115 : if (block->nfree > block->nchunks)
1116 : elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
1117 : name, block->nfree, block, block->nchunks);
1118 :
1119 : /* check block belongs to the correct context */
1120 : if (block->context != gen)
1121 : elog(WARNING, "problem in Generation %s: bogus context link in block %p",
1122 : name, block);
1123 :
1124 : /* Now walk through the chunks and count them. */
1125 : nfree = 0;
1126 : nchunks = 0;
1127 : ptr = ((char *) block) + Generation_BLOCKHDRSZ;
1128 :
1129 : while (ptr < block->freeptr)
1130 : {
1131 : MemoryChunk *chunk = (MemoryChunk *) ptr;
1132 : GenerationBlock *chunkblock;
1133 : Size chunksize;
1134 :
1135 : /* Allow access to the chunk header. */
1136 : VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
1137 :
1138 : if (MemoryChunkIsExternal(chunk))
1139 : {
1140 : chunkblock = ExternalChunkGetBlock(chunk);
1141 : chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk);
1142 : has_external_chunk = true;
1143 : }
1144 : else
1145 : {
1146 : chunkblock = MemoryChunkGetBlock(chunk);
1147 : chunksize = MemoryChunkGetValue(chunk);
1148 : }
1149 :
1150 : /* move to the next chunk */
1151 : ptr += (chunksize + Generation_CHUNKHDRSZ);
1152 :
1153 : nchunks += 1;
1154 :
1155 : /* chunks have both block and context pointers, so check both */
1156 : if (chunkblock != block)
1157 : elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
1158 : name, block, chunk);
1159 :
1160 :
1161 : /* is chunk allocated? */
1162 : if (chunk->requested_size != InvalidAllocSize)
1163 : {
1164 : /* now make sure the chunk size is correct */
1165 : if (chunksize < chunk->requested_size ||
1166 : chunksize != MAXALIGN(chunksize))
1167 : elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
1168 : name, block, chunk);
1169 :
1170 : /* check sentinel */
1171 : Assert(chunk->requested_size < chunksize);
1172 : if (!sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
1173 : elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
1174 : name, block, chunk);
1175 : }
1176 : else
1177 : nfree += 1;
1178 :
1179 : /* if chunk is allocated, disallow access to the chunk header */
1180 : if (chunk->requested_size != InvalidAllocSize)
1181 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
1182 : }
1183 :
1184 : /*
1185 : * Make sure we got the expected number of allocated and free chunks
1186 : * (as tracked in the block header).
1187 : */
1188 : if (nchunks != block->nchunks)
1189 : elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
1190 : name, nchunks, block, block->nchunks);
1191 :
1192 : if (nfree != block->nfree)
1193 : elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
1194 : name, nfree, block, block->nfree);
1195 :
1196 : if (has_external_chunk && nchunks > 1)
1197 : elog(WARNING, "problem in Generation %s: external chunk on non-dedicated block %p",
1198 : name, block);
1199 :
1200 : }
1201 :
1202 : Assert(total_allocated == context->mem_allocated);
1203 : }
1204 :
1205 : #endif /* MEMORY_CONTEXT_CHECKING */