LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - generation.c
Test: PostgreSQL 19devel          Lines:     159 of 197 hit (80.7 %)
Date: 2025-08-17 01:17:32         Functions:  14 of  17 hit (82.4 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * generation.c
       4             :  *    Generational allocator definitions.
       5             :  *
       6             :  * Generation is a custom MemoryContext implementation designed for cases of
       7             :  * chunks with similar lifespan.
       8             :  *
       9             :  * Portions Copyright (c) 2017-2025, PostgreSQL Global Development Group
      10             :  *
      11             :  * IDENTIFICATION
      12             :  *    src/backend/utils/mmgr/generation.c
      13             :  *
      14             :  *
      15             :  *  This memory context is based on the assumption that the chunks are freed
      16             :  *  roughly in the same order as they were allocated (FIFO), or in groups with
      17             :  *  similar lifespan (generations - hence the name of the context). This is
      18             :  *  typical for various queue-like use cases, e.g. when tuples are constructed,
      19             :  *  processed and then thrown away.
      20             :  *
      21             :  *  The memory context uses a very simple approach to free space management.
      22             :  *  Instead of a complex global freelist, each block tracks the number
      23             :  *  of allocated and freed chunks.  The block is classed as empty when the
      24             :  *  number of free chunks is equal to the number of allocated chunks.  When
      25             :  *  this occurs, instead of freeing the block, we try to "recycle" it, i.e.
      26             :  *  reuse it for new allocations.  This is done by storing the block in the
      27             :  *  context's 'freeblock' field.  If the freeblock field is already occupied
      28             :  *  by another free block, we simply return the newly empty block to malloc.
      29             :  *
      30             :  *  This approach to free blocks requires fewer malloc/free calls for
      31             :  *  allocation patterns that are truly first-allocated, first-freed (FIFO).
      32             :  *
      33             :  *-------------------------------------------------------------------------
      34             :  */
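A minimal usage sketch of the FIFO pattern described above (illustrative only, not part of generation.c; the context parameters, sizes, and variable names are arbitrary assumptions, not taken from any PostgreSQL caller):

    MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                                 "example",
                                                 0,               /* minContextSize */
                                                 8 * 1024,        /* initBlockSize */
                                                 1024 * 1024);    /* maxBlockSize */
    void       *items[1000];

    for (int i = 0; i < 1000; i++)
        items[i] = MemoryContextAlloc(gen, 64);     /* fills blocks front to back */
    for (int i = 0; i < 1000; i++)
        pfree(items[i]);    /* freed in allocation order, so each block empties
                             * as a whole and is recycled or returned to malloc */
    MemoryContextDelete(gen);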
      35             : 
      36             : #include "postgres.h"
      37             : 
      38             : #include "lib/ilist.h"
      39             : #include "port/pg_bitutils.h"
      40             : #include "utils/memdebug.h"
      41             : #include "utils/memutils.h"
      42             : #include "utils/memutils_internal.h"
      43             : #include "utils/memutils_memorychunk.h"
      44             : 
      45             : 
      46             : #define Generation_BLOCKHDRSZ   MAXALIGN(sizeof(GenerationBlock))
      47             : #define Generation_CHUNKHDRSZ   sizeof(MemoryChunk)
      48             : #define FIRST_BLOCKHDRSZ        (MAXALIGN(sizeof(GenerationContext)) + \
      49             :                                  Generation_BLOCKHDRSZ)
      50             : 
      51             : #define Generation_CHUNK_FRACTION   8
      52             : 
      53             : typedef struct GenerationBlock GenerationBlock; /* forward reference */
      54             : 
      55             : typedef void *GenerationPointer;
      56             : 
      57             : /*
      58             :  * GenerationContext is a simple memory context that never reuses individual
      59             :  * freed chunks; it frees a whole block once all chunks on it have been freed.
      60             :  */
      61             : typedef struct GenerationContext
      62             : {
      63             :     MemoryContextData header;   /* Standard memory-context fields */
      64             : 
      65             :     /* Generational context parameters */
      66             :     uint32      initBlockSize;  /* initial block size */
      67             :     uint32      maxBlockSize;   /* maximum block size */
      68             :     uint32      nextBlockSize;  /* next block size to allocate */
      69             :     uint32      allocChunkLimit;    /* effective chunk size limit */
      70             : 
      71             :     GenerationBlock *block;     /* current (most recently allocated) block */
      72             :     GenerationBlock *freeblock; /* pointer to an empty block that's being
      73             :                                  * recycled, or NULL if there's no such block. */
      74             :     dlist_head  blocks;         /* list of blocks */
      75             : } GenerationContext;
      76             : 
      77             : /*
      78             :  * GenerationBlock
      79             :  *      GenerationBlock is the unit of memory that is obtained by generation.c
      80             :  *      from malloc().  It contains zero or more MemoryChunks, which are the
      81             :  *      units requested by palloc() and freed by pfree().  MemoryChunks cannot
      82             :  *      be returned to malloc() individually, instead pfree() updates the free
      83             :  *      counter of the block and when all chunks in a block are free the whole
      84             :  *      block can be returned to malloc().
      85             :  *
      86             :  *      GenerationBlock is the header data for a block --- the usable space
      87             :  *      within the block begins at the next alignment boundary.
      88             :  */
      89             : struct GenerationBlock
      90             : {
      91             :     dlist_node  node;           /* doubly-linked list of blocks */
      92             :     GenerationContext *context; /* pointer back to the owning context */
      93             :     Size        blksize;        /* allocated size of this block */
      94             :     int         nchunks;        /* number of chunks in the block */
      95             :     int         nfree;          /* number of free chunks */
      96             :     char       *freeptr;        /* start of free space in this block */
      97             :     char       *endptr;         /* end of space in this block */
      98             : };
      99             : 
     100             : /*
     101             :  * GenerationIsValid
     102             :  *      True iff set is a valid generation set.
     103             :  */
     104             : #define GenerationIsValid(set) \
     105             :     (PointerIsValid(set) && IsA(set, GenerationContext))
     106             : 
     107             : /*
     108             :  * GenerationBlockIsValid
     109             :  *      True iff block is a valid block of a generation set.
     110             :  */
     111             : #define GenerationBlockIsValid(block) \
     112             :     (PointerIsValid(block) && GenerationIsValid((block)->context))
     113             : 
     114             : /*
     115             :  * GenerationBlockIsEmpty
     116             :  *      True iff block contains no chunks
     117             :  */
     118             : #define GenerationBlockIsEmpty(b) ((b)->nchunks == 0)
     119             : 
     120             : /*
     121             :  * We always store external chunks on a dedicated block.  This makes fetching
     122             :  * the block from an external chunk easy since it's always the first and only
     123             :  * chunk on the block.
     124             :  */
     125             : #define ExternalChunkGetBlock(chunk) \
     126             :     (GenerationBlock *) ((char *) chunk - Generation_BLOCKHDRSZ)
     127             : 
     128             : /* Obtain the keeper block for a generation context */
     129             : #define KeeperBlock(set) \
     130             :     ((GenerationBlock *) (((char *) set) + \
     131             :     MAXALIGN(sizeof(GenerationContext))))
     132             : 
     133             : /* Check if the block is the keeper block of the given generation context */
     134             : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
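For orientation, a sketch of the initial malloc() chunk layout that KeeperBlock() and FIRST_BLOCKHDRSZ assume (boundaries are MAXALIGN'ed; the drawing is illustrative, not to scale):

    +------------------------------+ <- set, start of the malloc() chunk
    | GenerationContext header     |
    +------------------------------+ <- KeeperBlock(set)
    | GenerationBlock header       |    = set + MAXALIGN(sizeof(GenerationContext))
    +------------------------------+ <- keeper block's freeptr after GenerationBlockInit()
    | chunk headers + chunk data   |
    +------------------------------+ <- keeper block's endptr, end of the malloc() chunk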
     135             : 
     136             : /* Inlined helper functions */
     137             : static inline void GenerationBlockInit(GenerationContext *context,
     138             :                                        GenerationBlock *block,
     139             :                                        Size blksize);
     140             : static inline void GenerationBlockMarkEmpty(GenerationBlock *block);
     141             : static inline Size GenerationBlockFreeBytes(GenerationBlock *block);
     142             : static inline void GenerationBlockFree(GenerationContext *set,
     143             :                                        GenerationBlock *block);
     144             : 
     145             : 
     146             : /*
     147             :  * Public routines
     148             :  */
     149             : 
     150             : 
     151             : /*
     152             :  * GenerationContextCreate
     153             :  *      Create a new Generation context.
     154             :  *
     155             :  * parent: parent context, or NULL if top-level context
     156             :  * name: name of context (must be statically allocated)
     157             :  * minContextSize: minimum context size
     158             :  * initBlockSize: initial allocation block size
     159             :  * maxBlockSize: maximum allocation block size
     160             :  */
     161             : MemoryContext
     162      233008 : GenerationContextCreate(MemoryContext parent,
     163             :                         const char *name,
     164             :                         Size minContextSize,
     165             :                         Size initBlockSize,
     166             :                         Size maxBlockSize)
     167             : {
     168             :     Size        firstBlockSize;
     169             :     Size        allocSize;
     170             :     GenerationContext *set;
     171             :     GenerationBlock *block;
     172             : 
     173             :     /* ensure MemoryChunk's size is properly maxaligned */
     174             :     StaticAssertDecl(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
     175             :                      "sizeof(MemoryChunk) is not maxaligned");
     176             : 
     177             :     /*
     178             :      * First, validate allocation parameters.  Asserts seem sufficient because
     179             :      * nobody varies their parameters at runtime.  We somewhat arbitrarily
     180             :      * enforce a minimum 1K block size.  We restrict the maximum block size to
     181             :      * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
     182             :      * MEMORYCHUNK_MAX_BLOCKOFFSET, as a MemoryChunk can record at most that
     183             :      * large an offset between the chunk and the block that the chunk is
     184             :      * stored on.  We would be unable to store the offset between
     185             :      * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
     186             :      * larger than this.
     187             :      */
     188             :     Assert(initBlockSize == MAXALIGN(initBlockSize) &&
     189             :            initBlockSize >= 1024);
     190             :     Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
     191             :            maxBlockSize >= initBlockSize &&
     192             :            AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
     193             :     Assert(minContextSize == 0 ||
     194             :            (minContextSize == MAXALIGN(minContextSize) &&
     195             :             minContextSize >= 1024 &&
     196             :             minContextSize <= maxBlockSize));
     197             :     Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
     198             : 
     199             :     /* Determine size of initial block */
     200      233008 :     allocSize = MAXALIGN(sizeof(GenerationContext)) +
     201             :         Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
     202      233008 :     if (minContextSize != 0)
     203        2238 :         allocSize = Max(allocSize, minContextSize);
     204             :     else
     205      230770 :         allocSize = Max(allocSize, initBlockSize);
     206             : 
     207             :     /*
     208             :      * Allocate the initial block.  Unlike other generation.c blocks, it
     209             :      * starts with the context header and its block header follows that.
     210             :      */
     211      233008 :     set = (GenerationContext *) malloc(allocSize);
     212      233008 :     if (set == NULL)
     213             :     {
     214           0 :         MemoryContextStats(TopMemoryContext);
     215           0 :         ereport(ERROR,
     216             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     217             :                  errmsg("out of memory"),
     218             :                  errdetail("Failed while creating memory context \"%s\".",
     219             :                            name)));
     220             :     }
     221             : 
     222             :     /*
     223             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     224             :      * we'd leak the header if we ereport in this stretch.
     225             :      */
     226             : 
     227             :     /* See comments about Valgrind interactions in aset.c */
     228             :     VALGRIND_CREATE_MEMPOOL(set, 0, false);
     229             :     /* This vchunk covers the GenerationContext and the keeper block header */
     230             :     VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
     231             : 
     232      233008 :     dlist_init(&set->blocks);
     233             : 
     234             :     /* Fill in the initial block's block header */
     235      233008 :     block = KeeperBlock(set);
     236             :     /* determine the block size and initialize it */
     237      233008 :     firstBlockSize = allocSize - MAXALIGN(sizeof(GenerationContext));
     238      233008 :     GenerationBlockInit(set, block, firstBlockSize);
     239             : 
     240             :     /* add it to the doubly-linked list of blocks */
     241      233008 :     dlist_push_head(&set->blocks, &block->node);
     242             : 
     243             :     /* use it as the current allocation block */
     244      233008 :     set->block = block;
     245             : 
     246             :     /* No free block, yet */
     247      233008 :     set->freeblock = NULL;
     248             : 
     249             :     /* Fill in GenerationContext-specific header fields */
     250      233008 :     set->initBlockSize = (uint32) initBlockSize;
     251      233008 :     set->maxBlockSize = (uint32) maxBlockSize;
     252      233008 :     set->nextBlockSize = (uint32) initBlockSize;
     253             : 
     254             :     /*
     255             :      * Compute the allocation chunk size limit for this context.
     256             :      *
     257             :      * Limit the maximum size a non-dedicated chunk can be so that we can fit
     258             :      * at least Generation_CHUNK_FRACTION of chunks this big onto the maximum
     259             :      * sized block.  We must further limit this value so that it's no more
     260             :      * than MEMORYCHUNK_MAX_VALUE.  We're unable to have non-external chunks
     261             :      * larger than that value as we store the chunk size in the MemoryChunk
     262             :      * 'value' field in the call to MemoryChunkSetHdrMask().
     263             :      */
     264      233008 :     set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE);
     265      233008 :     while ((Size) (set->allocChunkLimit + Generation_CHUNKHDRSZ) >
     266     1165040 :            (Size) ((Size) (maxBlockSize - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION))
     267      932032 :         set->allocChunkLimit >>= 1;
     268             : 
     269             :     /* Finally, do the type-independent part of context creation */
     270      233008 :     MemoryContextCreate((MemoryContext) set,
     271             :                         T_GenerationContext,
     272             :                         MCTX_GENERATION_ID,
     273             :                         parent,
     274             :                         name);
     275             : 
     276      233008 :     ((MemoryContext) set)->mem_allocated = firstBlockSize;
     277             : 
     278      233008 :     return (MemoryContext) set;
     279             : }
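As a worked example of the allocChunkLimit loop above, assume maxBlockSize is 8 MB (8388608 bytes) and that MEMORYCHUNK_MAX_VALUE is larger than that, as on typical builds; the block and chunk header sizes are small and do not change the outcome:

    budget per chunk:  (8388608 - Generation_BLOCKHDRSZ) / 8  ~= 1048568 bytes, so that
                       at least Generation_CHUNK_FRACTION (8) such chunks fit on a block
    starting limit:    Min(8388608, MEMORYCHUNK_MAX_VALUE) = 8388608
    halvings:          8388608 -> 4194304 -> 2097152 -> 1048576 -> 524288
    result:            524288 + Generation_CHUNKHDRSZ fits the budget, so
                       allocChunkLimit = 524288 (512 kB)

Four halvings per creation also matches the hit counts recorded above (932032 = 4 x 233008). Requests whose maxaligned size exceeds the resulting limit are handed to GenerationAllocLarge() as dedicated blocks.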
     280             : 
     281             : /*
     282             :  * GenerationReset
     283             :  *      Frees all memory which is allocated in the given set.
     284             :  *
     285             :  * The initial "keeper" block (which shares a malloc chunk with the context
     286             :  * header) is not given back to the operating system though.  In this way, we
     287             :  * don't thrash malloc() when a context is repeatedly reset after small
     288             :  * allocations.
     289             :  */
     290             : void
     291      242892 : GenerationReset(MemoryContext context)
     292             : {
     293      242892 :     GenerationContext *set = (GenerationContext *) context;
     294             :     dlist_mutable_iter miter;
     295             : 
     296             :     Assert(GenerationIsValid(set));
     297             : 
     298             : #ifdef MEMORY_CONTEXT_CHECKING
     299             :     /* Check for corruption and leaks before freeing */
     300             :     GenerationCheck(context);
     301             : #endif
     302             : 
     303             :     /*
     304             :      * NULLify the free block pointer.  We must do this before calling
     305             :      * GenerationBlockFree as that function never expects to free the
     306             :      * freeblock.
     307             :      */
     308      242892 :     set->freeblock = NULL;
     309             : 
     310      508416 :     dlist_foreach_modify(miter, &set->blocks)
     311             :     {
     312      265524 :         GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
     313             : 
     314      265524 :         if (IsKeeperBlock(set, block))
     315      242892 :             GenerationBlockMarkEmpty(block);
     316             :         else
     317       22632 :             GenerationBlockFree(set, block);
     318             :     }
     319             : 
     320             :     /*
     321             :      * Instruct Valgrind to throw away all the vchunks associated with this
     322             :      * context, except for the one covering the GenerationContext and
     323             :      * keeper-block header.  This gets rid of the vchunks for whatever user
     324             :      * data is getting discarded by the context reset.
     325             :      */
     326             :     VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
     327             : 
     328             :     /* set it so new allocations make use of the keeper block */
     329      242892 :     set->block = KeeperBlock(set);
     330             : 
     331             :     /* Reset block size allocation sequence, too */
     332      242892 :     set->nextBlockSize = set->initBlockSize;
     333             : 
     334             :     /* Ensure there is only 1 item in the dlist */
     335             :     Assert(!dlist_is_empty(&set->blocks));
     336             :     Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
     337      242892 : }
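A hedged sketch of the reuse pattern this serves (illustrative only; gen is a hypothetical Generation context whose per-cycle working set fits in the keeper block) - repeated resets then cause no malloc()/free() traffic at all:

    for (;;)
    {
        void   *buf = MemoryContextAlloc(gen, 128);     /* served from the keeper block */

        /* ... fill and use buf ... */

        MemoryContextReset(gen);    /* GenerationReset() just marks the keeper
                                     * block empty; nothing is returned to malloc */
    }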
     338             : 
     339             : /*
     340             :  * GenerationDelete
     341             :  *      Free all memory which is allocated in the given context.
     342             :  */
     343             : void
     344      232584 : GenerationDelete(MemoryContext context)
     345             : {
     346             :     /* Reset to release all releasable GenerationBlocks */
     347      232584 :     GenerationReset(context);
     348             : 
     349             :     /* Destroy the vpool -- see notes in aset.c */
     350             :     VALGRIND_DESTROY_MEMPOOL(context);
     351             : 
     352             :     /* And free the context header and keeper block */
     353      232584 :     free(context);
     354      232584 : }
     355             : 
     356             : /*
     357             :  * Helper for GenerationAlloc() that allocates an entire block for the chunk.
     358             :  *
     359             :  * GenerationAlloc()'s comment explains why this is separate.
     360             :  */
     361             : pg_noinline
     362             : static void *
     363        9234 : GenerationAllocLarge(MemoryContext context, Size size, int flags)
     364             : {
     365        9234 :     GenerationContext *set = (GenerationContext *) context;
     366             :     GenerationBlock *block;
     367             :     MemoryChunk *chunk;
     368             :     Size        chunk_size;
     369             :     Size        required_size;
     370             :     Size        blksize;
     371             : 
     372             :     /* validate 'size' is within the limits for the given 'flags' */
     373        9234 :     MemoryContextCheckSize(context, size, flags);
     374             : 
     375             : #ifdef MEMORY_CONTEXT_CHECKING
     376             :     /* ensure there's always space for the sentinel byte */
     377             :     chunk_size = MAXALIGN(size + 1);
     378             : #else
     379        9234 :     chunk_size = MAXALIGN(size);
     380             : #endif
     381        9234 :     required_size = chunk_size + Generation_CHUNKHDRSZ;
     382        9234 :     blksize = required_size + Generation_BLOCKHDRSZ;
     383             : 
     384        9234 :     block = (GenerationBlock *) malloc(blksize);
     385        9234 :     if (block == NULL)
     386           0 :         return MemoryContextAllocationFailure(context, size, flags);
     387             : 
     388             :     /* Make a vchunk covering the new block's header */
     389             :     VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
     390             : 
     391        9234 :     context->mem_allocated += blksize;
     392             : 
     393             :     /* block with a single (used) chunk */
     394        9234 :     block->context = set;
     395        9234 :     block->blksize = blksize;
     396        9234 :     block->nchunks = 1;
     397        9234 :     block->nfree = 0;
     398             : 
     399             :     /* the block is completely full */
     400        9234 :     block->freeptr = block->endptr = ((char *) block) + blksize;
     401             : 
     402        9234 :     chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
     403             : 
     404             :     /* mark the MemoryChunk as externally managed */
     405        9234 :     MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID);
     406             : 
     407             : #ifdef MEMORY_CONTEXT_CHECKING
     408             :     chunk->requested_size = size;
     409             :     /* set mark to catch clobber of "unused" space */
     410             :     Assert(size < chunk_size);
     411             :     set_sentinel(MemoryChunkGetPointer(chunk), size);
     412             : #endif
     413             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     414             :     /* fill the allocated space with junk */
     415             :     randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
     416             : #endif
     417             : 
     418             :     /* add the block to the list of allocated blocks */
     419        9234 :     dlist_push_head(&set->blocks, &block->node);
     420             : 
     421             :     /* Ensure any padding bytes are marked NOACCESS. */
     422             :     VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
     423             :                                chunk_size - size);
     424             : 
     425             :     /* Disallow access to the chunk header. */
     426             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
     427             : 
     428        9234 :     return MemoryChunkGetPointer(chunk);
     429             : }
     430             : 
     431             : /*
     432             :  * Small helper for allocating a new chunk from a block, to avoid duplicating
     433             :  * the code between GenerationAlloc() and GenerationAllocFromNewBlock().
     434             :  */
     435             : static inline void *
     436    26321414 : GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block,
     437             :                               Size size, Size chunk_size)
     438             : {
     439    26321414 :     MemoryChunk *chunk = (MemoryChunk *) (block->freeptr);
     440             : 
     441             :     /* validate we've been given a block with enough free space */
     442             :     Assert(block != NULL);
     443             :     Assert((block->endptr - block->freeptr) >=
     444             :            Generation_CHUNKHDRSZ + chunk_size);
     445             : 
     446             :     /* Prepare to initialize the chunk header. */
     447             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
     448             : 
     449    26321414 :     block->nchunks += 1;
     450    26321414 :     block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
     451             : 
     452             :     Assert(block->freeptr <= block->endptr);
     453             : 
     454    26321414 :     MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
     455             : #ifdef MEMORY_CONTEXT_CHECKING
     456             :     chunk->requested_size = size;
     457             :     /* set mark to catch clobber of "unused" space */
     458             :     Assert(size < chunk_size);
     459             :     set_sentinel(MemoryChunkGetPointer(chunk), size);
     460             : #endif
     461             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     462             :     /* fill the allocated space with junk */
     463             :     randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
     464             : #endif
     465             : 
     466             :     /* Ensure any padding bytes are marked NOACCESS. */
     467             :     VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
     468             :                                chunk_size - size);
     469             : 
     470             :     /* Disallow access to the chunk header. */
     471             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
     472             : 
     473    26321414 :     return MemoryChunkGetPointer(chunk);
     474             : }
     475             : 
     476             : /*
     477             :  * Helper for GenerationAlloc() that allocates a new block and returns a chunk
     478             :  * allocated from it.
     479             :  *
     480             :  * GenerationAlloc()'s comment explains why this is separate.
     481             :  */
     482             : pg_noinline
     483             : static void *
     484       45504 : GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags,
     485             :                             Size chunk_size)
     486             : {
     487       45504 :     GenerationContext *set = (GenerationContext *) context;
     488             :     GenerationBlock *block;
     489             :     Size        blksize;
     490             :     Size        required_size;
     491             : 
     492             :     /*
     493             :      * The first such block has size initBlockSize, and we double the space in
     494             :      * each succeeding block, but not more than maxBlockSize.
     495             :      */
     496       45504 :     blksize = set->nextBlockSize;
     497       45504 :     set->nextBlockSize <<= 1;
     498       45504 :     if (set->nextBlockSize > set->maxBlockSize)
     499       22970 :         set->nextBlockSize = set->maxBlockSize;
     500             : 
     501             :     /* we'll need space for the chunk, chunk hdr and block hdr */
     502       45504 :     required_size = chunk_size + Generation_CHUNKHDRSZ + Generation_BLOCKHDRSZ;
     503             : 
     504             :     /* if that's not enough for this chunk, round required_size up to a power of 2 */
     505       45504 :     if (blksize < required_size)
     506         126 :         blksize = pg_nextpower2_size_t(required_size);
     507             : 
     508       45504 :     block = (GenerationBlock *) malloc(blksize);
     509             : 
     510       45504 :     if (block == NULL)
     511           0 :         return MemoryContextAllocationFailure(context, size, flags);
     512             : 
     513             :     /* Make a vchunk covering the new block's header */
     514             :     VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
     515             : 
     516       45504 :     context->mem_allocated += blksize;
     517             : 
     518             :     /* initialize the new block */
     519       45504 :     GenerationBlockInit(set, block, blksize);
     520             : 
     521             :     /* add it to the doubly-linked list of blocks */
     522       45504 :     dlist_push_head(&set->blocks, &block->node);
     523             : 
     524             :     /* make this the current block */
     525       45504 :     set->block = block;
     526             : 
     527       45504 :     return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
     528             : }
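As a concrete trace of the sizing above (assuming initBlockSize = 8 kB and maxBlockSize = 1 MB, and that each chunk fits the scheduled size), the overflow blocks a context allocates grow like this:

    1st new block:   8 kB    (nextBlockSize becomes 16 kB)
    2nd new block:  16 kB    (nextBlockSize becomes 32 kB)
    ...
    8th new block:   1 MB    (nextBlockSize stays clamped at maxBlockSize)
    later blocks:    1 MB each, until the context is reset

If a single request needs more than the scheduled blksize (while still being under allocChunkLimit), that one block is instead sized to pg_nextpower2_size_t(required_size); the doubling schedule itself is unaffected.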
     529             : 
     530             : /*
     531             :  * GenerationAlloc
     532             :  *      Returns a pointer to allocated memory of given size or raises an ERROR
     533             :  *      on allocation failure, or returns NULL when flags contains
     534             :  *      MCXT_ALLOC_NO_OOM.
     535             :  *
     536             :  * No request may exceed:
     537             :  *      MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
     538             :  * All callers use a much-lower limit.
     539             :  *
     540             :  * Note: when using valgrind, it doesn't matter how the returned allocation
     541             :  * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
     542             :  * return space that is marked NOACCESS - GenerationRealloc has to beware!
     543             :  *
     544             :  * This function should only contain the most common code paths.  Everything
     545             :  * else should be in pg_noinline helper functions, thus avoiding the overhead
     546             :  * of creating a stack frame for the common cases.  Allocating memory is often
     547             :  * a bottleneck in many workloads, so avoiding stack frame setup is
     548             :  * worthwhile.  Helper functions should always directly return the newly
     549             :  * allocated memory so that we can just return that address directly as a tail
     550             :  * call.
     551             :  */
     552             : void *
     553    26330648 : GenerationAlloc(MemoryContext context, Size size, int flags)
     554             : {
     555    26330648 :     GenerationContext *set = (GenerationContext *) context;
     556             :     GenerationBlock *block;
     557             :     Size        chunk_size;
     558             :     Size        required_size;
     559             : 
     560             :     Assert(GenerationIsValid(set));
     561             : 
     562             : #ifdef MEMORY_CONTEXT_CHECKING
     563             :     /* ensure there's always space for the sentinel byte */
     564             :     chunk_size = MAXALIGN(size + 1);
     565             : #else
     566    26330648 :     chunk_size = MAXALIGN(size);
     567             : #endif
     568             : 
     569             :     /*
     570             :      * If requested size exceeds maximum for chunks we hand the request off to
     571             :      * GenerationAllocLarge().
     572             :      */
     573    26330648 :     if (chunk_size > set->allocChunkLimit)
     574        9234 :         return GenerationAllocLarge(context, size, flags);
     575             : 
     576    26321414 :     required_size = chunk_size + Generation_CHUNKHDRSZ;
     577             : 
     578             :     /*
     579             :      * Not an oversized chunk.  We first try to make use of the current block,
     580             :      * but if there's not enough space in it, instead of allocating a new
     581             :      * block, we look to see if the empty freeblock has enough space.  We
     582             :      * don't try reusing the keeper block.  If it's become empty we'll reuse
     583             :      * don't try reusing the keeper block.  If it has become empty, we'll reuse
     584             :      * it again only after the context is reset.
     585             :      * We only try reusing the freeblock if we've no space for this allocation
     586             :      * on the current block.  When a freeblock exists, we'll switch to it the
     587             :      * first time we can't fit an allocation in the current block.  We
     588             :      * avoid ping-ponging between the two as we need to be careful not to
     589             :      * fragment differently sized consecutive allocations between several
     590             :      * blocks.  Going between the two could cause fragmentation for FIFO
     591             :      * workloads, which generation is meant to be good at.
     592             :      */
     593    26321414 :     block = set->block;
     594             : 
     595    26321414 :     if (unlikely(GenerationBlockFreeBytes(block) < required_size))
     596             :     {
     597       55756 :         GenerationBlock *freeblock = set->freeblock;
     598             : 
     599             :         /* freeblock, if set, must be empty */
     600             :         Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));
     601             : 
     602             :         /* check if we have a freeblock and if it's big enough */
     603       66008 :         if (freeblock != NULL &&
     604       10252 :             GenerationBlockFreeBytes(freeblock) >= required_size)
     605             :         {
     606             :             /* make the freeblock the current block */
     607       10252 :             set->freeblock = NULL;
     608       10252 :             set->block = freeblock;
     609             : 
     610       10252 :             return GenerationAllocChunkFromBlock(context,
     611             :                                                  freeblock,
     612             :                                                  size,
     613             :                                                  chunk_size);
     614             :         }
     615             :         else
     616             :         {
     617             :             /*
     618             :              * No freeblock, or it's not big enough for this allocation.  Make
     619             :              * a new block.
     620             :              */
     621       45504 :             return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
     622             :         }
     623             :     }
     624             : 
     625             :     /* The current block has space, so just allocate chunk there. */
     626    26265658 :     return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
     627             : }
     628             : 
     629             : /*
     630             :  * GenerationBlockInit
     631             :  *      Initializes 'block' assuming 'blksize'.  Does not update the context's
     632             :  *      mem_allocated field.
     633             :  */
     634             : static inline void
     635      278512 : GenerationBlockInit(GenerationContext *context, GenerationBlock *block,
     636             :                     Size blksize)
     637             : {
     638      278512 :     block->context = context;
     639      278512 :     block->blksize = blksize;
     640      278512 :     block->nchunks = 0;
     641      278512 :     block->nfree = 0;
     642             : 
     643      278512 :     block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
     644      278512 :     block->endptr = ((char *) block) + blksize;
     645             : 
     646             :     /* Mark unallocated space NOACCESS. */
     647             :     VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
     648             :                                blksize - Generation_BLOCKHDRSZ);
     649      278512 : }
     650             : 
     651             : /*
     652             :  * GenerationBlockMarkEmpty
     653             :  *      Set a block as empty.  Does not free the block.
     654             :  */
     655             : static inline void
     656     7376138 : GenerationBlockMarkEmpty(GenerationBlock *block)
     657             : {
     658             : #if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY)
     659             :     char       *datastart = ((char *) block) + Generation_BLOCKHDRSZ;
     660             : #endif
     661             : 
     662             : #ifdef CLOBBER_FREED_MEMORY
     663             :     wipe_mem(datastart, block->freeptr - datastart);
     664             : #else
     665             :     /* wipe_mem() would have done this */
     666             :     VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
     667             : #endif
     668             : 
     669             :     /* Reset the block, but don't return it to malloc */
     670     7376138 :     block->nchunks = 0;
     671     7376138 :     block->nfree = 0;
     672     7376138 :     block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
     673     7376138 : }
     674             : 
     675             : /*
     676             :  * GenerationBlockFreeBytes
     677             :  *      Returns the number of bytes free in 'block'
     678             :  */
     679             : static inline Size
     680    26331666 : GenerationBlockFreeBytes(GenerationBlock *block)
     681             : {
     682    26331666 :     return (block->endptr - block->freeptr);
     683             : }
     684             : 
     685             : /*
     686             :  * GenerationBlockFree
     687             :  *      Remove 'block' from 'set' and release the memory consumed by it.
     688             :  */
     689             : static inline void
     690       54678 : GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
     691             : {
     692             :     /* Make sure nobody tries to free the keeper block */
     693             :     Assert(!IsKeeperBlock(set, block));
     694             :     /* We shouldn't be freeing the freeblock either */
     695             :     Assert(block != set->freeblock);
     696             : 
     697             :     /* release the block from the list of blocks */
     698       54678 :     dlist_delete(&block->node);
     699             : 
     700       54678 :     ((MemoryContext) set)->mem_allocated -= block->blksize;
     701             : 
     702             : #ifdef CLOBBER_FREED_MEMORY
     703             :     wipe_mem(block, block->blksize);
     704             : #endif
     705             : 
     706             :     /* As in aset.c, free block-header vchunks explicitly */
     707             :     VALGRIND_MEMPOOL_FREE(set, block);
     708             : 
     709       54678 :     free(block);
     710       54678 : }
     711             : 
     712             : /*
     713             :  * GenerationFree
     714             :  *      Update the count of free chunks in the block, and free or recycle the
     715             :  *      block if it has become empty.
     716             :  */
     717             : void
     718    12345112 : GenerationFree(void *pointer)
     719             : {
     720    12345112 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
     721             :     GenerationBlock *block;
     722             :     GenerationContext *set;
     723             : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
     724             :     || defined(CLOBBER_FREED_MEMORY)
     725             :     Size        chunksize;
     726             : #endif
     727             : 
     728             :     /* Allow access to the chunk header. */
     729             :     VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
     730             : 
     731    12345112 :     if (MemoryChunkIsExternal(chunk))
     732             :     {
     733        9066 :         block = ExternalChunkGetBlock(chunk);
     734             : 
     735             :         /*
     736             :          * Try to verify that we have a sane block pointer: the block header
     737             :          * should reference a generation context.
     738             :          */
     739        9066 :         if (!GenerationBlockIsValid(block))
     740           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
     741             : 
     742             : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
     743             :     || defined(CLOBBER_FREED_MEMORY)
     744             :         chunksize = block->endptr - (char *) pointer;
     745             : #endif
     746             :     }
     747             :     else
     748             :     {
     749    12336046 :         block = MemoryChunkGetBlock(chunk);
     750             : 
     751             :         /*
     752             :          * In this path, for speed reasons we just Assert that the referenced
     753             :          * block is good.  Future field experience may show that this Assert
     754             :          * had better become a regular runtime test-and-elog check.
     755             :          */
     756             :         Assert(GenerationBlockIsValid(block));
     757             : 
     758             : #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
     759             :     || defined(CLOBBER_FREED_MEMORY)
     760             :         chunksize = MemoryChunkGetValue(chunk);
     761             : #endif
     762             :     }
     763             : 
     764             : #ifdef MEMORY_CONTEXT_CHECKING
     765             :     /* Test for someone scribbling on unused space in chunk */
     766             :     Assert(chunk->requested_size < chunksize);
     767             :     if (!sentinel_ok(pointer, chunk->requested_size))
     768             :         elog(WARNING, "detected write past chunk end in %s %p",
     769             :              ((MemoryContext) block->context)->name, chunk);
     770             : #endif
     771             : 
     772             : #ifdef CLOBBER_FREED_MEMORY
     773             :     wipe_mem(pointer, chunksize);
     774             : #endif
     775             : 
     776             : #ifdef MEMORY_CONTEXT_CHECKING
     777             :     /* Reset requested_size to InvalidAllocSize in freed chunks */
     778             :     chunk->requested_size = InvalidAllocSize;
     779             : #endif
     780             : 
     781    12345112 :     block->nfree += 1;
     782             : 
     783             :     Assert(block->nchunks > 0);
     784             :     Assert(block->nfree <= block->nchunks);
     785             :     Assert(block != block->context->freeblock);
     786             : 
     787             :     /* If there are still allocated chunks in the block, we're done. */
     788    12345112 :     if (likely(block->nfree < block->nchunks))
     789     5179820 :         return;
     790             : 
     791     7165292 :     set = block->context;
     792             : 
     793             :     /*-----------------------
     794             :      * The block this allocation was on has now become completely empty of
     795             :      * chunks.  In the general case, we can now return the memory for this
     796             :      * block back to malloc.  However, there are cases where we don't want to
     797             :      * do that:
     798             :      *
     799             :      * 1)   If it's the keeper block.  This block was malloc'd in the same
     800             :      *      allocation as the context itself and can't be free'd without
     801             :      *      freeing the context.
     802             :      * 2)   If it's the current block.  We could free this, but doing so would
     803             :      *      leave us nothing to set the current block to, so we just mark the
     804             :      *      block as empty so new allocations can reuse it again.
     805             :      * 3)   If we have no "freeblock" set, then we save a single block for
     806             :      *      future allocations to avoid having to malloc a new block again.
     807             :      *      This is useful for FIFO workloads as it avoids continual
     808             :      *      free/malloc cycles.
     809             :      */
     810     7165292 :     if (IsKeeperBlock(set, block) || set->block == block)
     811     7122588 :         GenerationBlockMarkEmpty(block);    /* case 1 and 2 */
     812       42704 :     else if (set->freeblock == NULL)
     813             :     {
     814             :         /* case 3 */
     815       10658 :         GenerationBlockMarkEmpty(block);
     816       10658 :         set->freeblock = block;
     817             :     }
     818             :     else
     819       32046 :         GenerationBlockFree(set, block);    /* Otherwise, free it */
     820             : }
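To make the three cases above concrete, a hypothetical trace (block letters are illustrative): a context holds the keeper block K, an older full block A, and the current block B, with no freeblock set:

    last chunk on A freed:  A is neither keeper nor current and freeblock is NULL,
                            so A is marked empty and saved in set->freeblock    (case 3)
    last chunk on B freed:  B is the current block, so it is only marked empty  (case 2)
    last chunk on K freed:  keeper block, only marked empty                     (case 1)
    another block C empties while A still occupies freeblock:
                            C is handed back to malloc via GenerationBlockFree()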
     821             : 
     822             : /*
     823             :  * GenerationRealloc
     824             :  *      When handling repalloc, we simply allocate a new chunk, copy the data
     825             :  *      and discard the old one.  The only exception is when the new size fits
     826             :  *      into the old chunk - in that case we just update the chunk header.
     827             :  */
     828             : void *
     829           0 : GenerationRealloc(void *pointer, Size size, int flags)
     830             : {
     831           0 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
     832             :     GenerationContext *set;
     833             :     GenerationBlock *block;
     834             :     GenerationPointer newPointer;
     835             :     Size        oldsize;
     836             : 
     837             :     /* Allow access to the chunk header. */
     838             :     VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
     839             : 
     840           0 :     if (MemoryChunkIsExternal(chunk))
     841             :     {
     842           0 :         block = ExternalChunkGetBlock(chunk);
     843             : 
     844             :         /*
     845             :          * Try to verify that we have a sane block pointer: the block header
     846             :          * should reference a generation context.
     847             :          */
     848           0 :         if (!GenerationBlockIsValid(block))
     849           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
     850             : 
     851           0 :         oldsize = block->endptr - (char *) pointer;
     852             :     }
     853             :     else
     854             :     {
     855           0 :         block = MemoryChunkGetBlock(chunk);
     856             : 
     857             :         /*
     858             :          * In this path, for speed reasons we just Assert that the referenced
     859             :          * block is good.  Future field experience may show that this Assert
     860             :          * had better become a regular runtime test-and-elog check.
     861             :          */
     862             :         Assert(GenerationBlockIsValid(block));
     863             : 
     864           0 :         oldsize = MemoryChunkGetValue(chunk);
     865             :     }
     866             : 
     867           0 :     set = block->context;
     868             : 
     869             : #ifdef MEMORY_CONTEXT_CHECKING
     870             :     /* Test for someone scribbling on unused space in chunk */
     871             :     Assert(chunk->requested_size < oldsize);
     872             :     if (!sentinel_ok(pointer, chunk->requested_size))
     873             :         elog(WARNING, "detected write past chunk end in %s %p",
     874             :              ((MemoryContext) set)->name, chunk);
     875             : #endif
     876             : 
     877             :     /*
     878             :      * Maybe the allocated area is already big enough.  (In particular, we always
     879             :      * fall out here if the requested size is a decrease.)
     880             :      *
     881             :      * This memory context does not use power-of-2 chunk sizing and instead
     882             :      * carves the chunks to be as small as possible, so most repalloc() calls
     883             :      * will end up in the palloc/memcpy/pfree branch.
     884             :      *
     885             :      * XXX Perhaps we should annotate this condition with unlikely()?
     886             :      */
     887             : #ifdef MEMORY_CONTEXT_CHECKING
     888             :     /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
     889             :     if (oldsize > size)
     890             : #else
     891           0 :     if (oldsize >= size)
     892             : #endif
     893             :     {
     894             : #ifdef MEMORY_CONTEXT_CHECKING
     895             :         Size        oldrequest = chunk->requested_size;
     896             : 
     897             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     898             :         /* We can only fill the extra space if we know the prior request */
     899             :         if (size > oldrequest)
     900             :             randomize_mem((char *) pointer + oldrequest,
     901             :                           size - oldrequest);
     902             : #endif
     903             : 
     904             :         chunk->requested_size = size;
     905             : 
     906             :         /*
     907             :          * If this is an increase, mark any newly-available part UNDEFINED.
     908             :          * Otherwise, mark the obsolete part NOACCESS.
     909             :          */
     910             :         if (size > oldrequest)
     911             :             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
     912             :                                         size - oldrequest);
     913             :         else
     914             :             VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
     915             :                                        oldsize - size);
     916             : 
     917             :         /* set mark to catch clobber of "unused" space */
     918             :         set_sentinel(pointer, size);
     919             : #else                           /* !MEMORY_CONTEXT_CHECKING */
     920             : 
     921             :         /*
     922             :          * We don't have the information to determine whether we're growing
     923             :          * the old request or shrinking it, so we conservatively mark the
     924             :          * entire new allocation DEFINED.
     925             :          */
     926             :         VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
     927             :         VALGRIND_MAKE_MEM_DEFINED(pointer, size);
     928             : #endif
     929             : 
     930             :         /* Disallow access to the chunk header. */
     931             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
     932             : 
     933           0 :         return pointer;
     934             :     }
     935             : 
     936             :     /* allocate new chunk (this also checks size is valid) */
     937           0 :     newPointer = GenerationAlloc((MemoryContext) set, size, flags);
     938             : 
     939             :     /* leave immediately if request was not completed */
     940           0 :     if (newPointer == NULL)
     941             :     {
     942             :         /* Disallow access to the chunk header. */
     943             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
     944           0 :         return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
     945             :     }
     946             : 
     947             :     /*
     948             :      * GenerationAlloc() may have returned a region that is still NOACCESS.
     949             :      * Change it to UNDEFINED for the moment; memcpy() will then transfer
     950             :      * definedness from the old allocation to the new.  If we know the old
     951             :      * allocation, copy just that much.  Otherwise, make the entire old chunk
     952             :      * defined to avoid errors as we copy the currently-NOACCESS trailing
     953             :      * bytes.
     954             :      */
     955             :     VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
     956             : #ifdef MEMORY_CONTEXT_CHECKING
     957             :     oldsize = chunk->requested_size;
     958             : #else
     959             :     VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
     960             : #endif
     961             : 
     962             :     /* transfer existing data (certain to fit) */
     963           0 :     memcpy(newPointer, pointer, oldsize);
     964             : 
     965             :     /* free old chunk */
     966           0 :     GenerationFree(pointer);
     967             : 
     968           0 :     return newPointer;
     969             : }
     970             : 
     971             : /*
     972             :  * GenerationGetChunkContext
     973             :  *      Return the MemoryContext that 'pointer' belongs to.
     974             :  */
     975             : MemoryContext
     976           0 : GenerationGetChunkContext(void *pointer)
     977             : {
     978           0 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
     979             :     GenerationBlock *block;
     980             : 
     981             :     /* Allow access to the chunk header. */
     982             :     VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
     983             : 
     984           0 :     if (MemoryChunkIsExternal(chunk))
     985           0 :         block = ExternalChunkGetBlock(chunk);
     986             :     else
     987           0 :         block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
     988             : 
     989             :     /* Disallow access to the chunk header. */
     990             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
     991             : 
     992             :     Assert(GenerationBlockIsValid(block));
     993           0 :     return &block->context->header;
     994             : }
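
The block/context lookup above is what lets callers operate on a chunk without naming the context it came from: the chunk header identifies the block (directly for an oversized "external" chunk, via the encoded offset otherwise), and the block points back at its GenerationContext.  A minimal sketch, assuming ordinary backend code; the context name is illustrative:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    generation_chunk_context_sketch(void)
    {
        MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                                    "chunk-context sketch",
                                                    ALLOCSET_DEFAULT_SIZES);
        char       *item = MemoryContextAlloc(gen, 64);

        /*
         * No context argument is needed here: pfree() locates the owning
         * allocator from the chunk header, the same information
         * GenerationGetChunkContext() decodes when the MemoryContext
         * itself is wanted.
         */
        pfree(item);

        MemoryContextDelete(gen);
    }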
     995             : 
     996             : /*
     997             :  * GenerationGetChunkSpace
     998             :  *      Given a currently-allocated chunk, determine the total space
     999             :  *      it occupies (including all memory-allocation overhead).
    1000             :  */
    1001             : Size
    1002    31569386 : GenerationGetChunkSpace(void *pointer)
    1003             : {
    1004    31569386 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
    1005             :     Size        chunksize;
    1006             : 
    1007             :     /* Allow access to the chunk header. */
    1008             :     VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
    1009             : 
    1010    31569386 :     if (MemoryChunkIsExternal(chunk))
    1011             :     {
    1012         168 :         GenerationBlock *block = ExternalChunkGetBlock(chunk);
    1013             : 
    1014             :         Assert(GenerationBlockIsValid(block));
    1015         168 :         chunksize = block->endptr - (char *) pointer;
    1016             :     }
    1017             :     else
    1018    31569218 :         chunksize = MemoryChunkGetValue(chunk);
    1019             : 
    1020             :     /* Disallow access to the chunk header. */
    1021             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
    1022             : 
    1023    31569386 :     return Generation_CHUNKHDRSZ + chunksize;
    1024             : }
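
The value computed here backs the public GetMemoryChunkSpace() wrapper, which memory-accounting code uses to charge an allocation's full footprint (header overhead included) rather than just the bytes requested.  A short sketch, assuming ordinary backend code; the 200-byte request and context name are arbitrary:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    generation_chunk_space_sketch(void)
    {
        MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                                    "chunk-space sketch",
                                                    ALLOCSET_DEFAULT_SIZES);
        char       *tup = MemoryContextAlloc(gen, 200);
        Size        footprint;

        /*
         * For a normal chunk: Generation_CHUNKHDRSZ plus the chunk size
         * stored in the header; for an oversized "external" chunk the size
         * is measured up to the dedicated block's endptr.  Either way it is
         * somewhat more than the 200 bytes requested.
         */
        footprint = GetMemoryChunkSpace(tup);

        elog(DEBUG1, "chunk footprint: %zu bytes", footprint);
        MemoryContextDelete(gen);
    }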
    1025             : 
    1026             : /*
    1027             :  * GenerationIsEmpty
    1028             :  *      Is a GenerationContext empty of any allocated space?
    1029             :  */
    1030             : bool
    1031           0 : GenerationIsEmpty(MemoryContext context)
    1032             : {
    1033           0 :     GenerationContext *set = (GenerationContext *) context;
    1034             :     dlist_iter  iter;
    1035             : 
    1036             :     Assert(GenerationIsValid(set));
    1037             : 
    1038           0 :     dlist_foreach(iter, &set->blocks)
    1039             :     {
    1040           0 :         GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
    1041             : 
    1042           0 :         if (block->nchunks > 0)
    1043           0 :             return false;
    1044             :     }
    1045             : 
    1046           0 :     return true;
    1047             : }
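
Note that "empty" here means no live chunks: blocks may still be attached (the keeper block, or a recycled freeblock) while the context reports empty.  A brief sketch through the mcxt.c wrapper MemoryContextIsEmpty(), assuming the keeper block is marked empty when its last chunk is freed, as GenerationFree() does earlier in this file; the context name is made up:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    generation_is_empty_sketch(void)
    {
        MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                                    "is-empty sketch",
                                                    ALLOCSET_DEFAULT_SIZES);
        char       *p = MemoryContextAlloc(gen, 32);

        Assert(!MemoryContextIsEmpty(gen)); /* one live chunk */

        /*
         * Freeing the only chunk leaves every remaining block with
         * nchunks == 0, so the context is considered empty again even
         * though its blocks have not been returned to malloc.
         */
        pfree(p);
        Assert(MemoryContextIsEmpty(gen));

        MemoryContextDelete(gen);
    }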
    1048             : 
    1049             : /*
    1050             :  * GenerationStats
    1051             :  *      Compute stats about memory consumption of a Generation context.
    1052             :  *
    1053             :  * printfunc: if not NULL, pass a human-readable stats string to this.
    1054             :  * passthru: pass this pointer through to printfunc.
    1055             :  * totals: if not NULL, add stats about this context into *totals.
    1056             :  * print_to_stderr: print stats to stderr if true, elog otherwise.
    1057             :  *
    1058             :  * XXX freespace only accounts for empty space at the end of the block, not
    1059             :  * space of freed chunks (which is unknown).
    1060             :  */
    1061             : void
    1062          30 : GenerationStats(MemoryContext context,
    1063             :                 MemoryStatsPrintFunc printfunc, void *passthru,
    1064             :                 MemoryContextCounters *totals, bool print_to_stderr)
    1065             : {
    1066          30 :     GenerationContext *set = (GenerationContext *) context;
    1067          30 :     Size        nblocks = 0;
    1068          30 :     Size        nchunks = 0;
    1069          30 :     Size        nfreechunks = 0;
    1070             :     Size        totalspace;
    1071          30 :     Size        freespace = 0;
    1072             :     dlist_iter  iter;
    1073             : 
    1074             :     Assert(GenerationIsValid(set));
    1075             : 
    1076             :     /* Include context header in totalspace */
    1077          30 :     totalspace = MAXALIGN(sizeof(GenerationContext));
    1078             : 
    1079          92 :     dlist_foreach(iter, &set->blocks)
    1080             :     {
    1081          62 :         GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
    1082             : 
    1083          62 :         nblocks++;
    1084          62 :         nchunks += block->nchunks;
    1085          62 :         nfreechunks += block->nfree;
    1086          62 :         totalspace += block->blksize;
    1087          62 :         freespace += (block->endptr - block->freeptr);
    1088             :     }
    1089             : 
    1090          30 :     if (printfunc)
    1091             :     {
    1092             :         char        stats_string[200];
    1093             : 
    1094           0 :         snprintf(stats_string, sizeof(stats_string),
    1095             :                  "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
    1096             :                  totalspace, nblocks, nchunks, freespace,
    1097             :                  nfreechunks, totalspace - freespace);
    1098           0 :         printfunc(context, passthru, stats_string, print_to_stderr);
    1099             :     }
    1100             : 
    1101          30 :     if (totals)
    1102             :     {
    1103          30 :         totals->nblocks += nblocks;
    1104          30 :         totals->freechunks += nfreechunks;
    1105          30 :         totals->totalspace += totalspace;
    1106          30 :         totals->freespace += freespace;
    1107             :     }
    1108          30 : }
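
The line assembled by the snprintf above is what appears, one line per context, in the output of MemoryContextStats() (callable from a debugger) and of pg_log_backend_memory_contexts().  A sketch of triggering it, with an output line whose numbers are invented purely to show the format:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    dump_memory_stats_sketch(void)
    {
        /* From gdb: call MemoryContextStats(TopMemoryContext) */
        MemoryContextStats(TopMemoryContext);

        /*
         * A Generation context then reports, e.g. (values invented):
         *
         *   Tuples: 32768 total in 3 blocks (120 chunks); 2048 free (10 chunks); 30720 used
         */
    }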
    1109             : 
    1110             : 
    1111             : #ifdef MEMORY_CONTEXT_CHECKING
    1112             : 
    1113             : /*
    1114             :  * GenerationCheck
    1115             :  *      Walk through chunks and check consistency of memory.
    1116             :  *
    1117             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
    1118             :  * find yourself in an infinite loop when trouble occurs, because this
    1119             :  * routine will be entered again when elog cleanup tries to release memory!
    1120             :  */
    1121             : void
    1122             : GenerationCheck(MemoryContext context)
    1123             : {
    1124             :     GenerationContext *gen = (GenerationContext *) context;
    1125             :     const char *name = context->name;
    1126             :     dlist_iter  iter;
    1127             :     Size        total_allocated = 0;
    1128             : 
    1129             :     /* walk all blocks in this context */
    1130             :     dlist_foreach(iter, &gen->blocks)
    1131             :     {
    1132             :         GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
    1133             :         int         nfree,
    1134             :                     nchunks;
    1135             :         char       *ptr;
    1136             :         bool        has_external_chunk = false;
    1137             : 
    1138             :         total_allocated += block->blksize;
    1139             : 
    1140             :         /*
    1141             :          * nfree > nchunks is surely wrong.  Equality is allowed as the block
    1142             :          * might be completely empty if it's the freeblock.
    1143             :          */
    1144             :         if (block->nfree > block->nchunks)
    1145             :             elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
    1146             :                  name, block->nfree, block, block->nchunks);
    1147             : 
    1148             :         /* check block belongs to the correct context */
    1149             :         if (block->context != gen)
    1150             :             elog(WARNING, "problem in Generation %s: bogus context link in block %p",
    1151             :                  name, block);
    1152             : 
    1153             :         /* Now walk through the chunks and count them. */
    1154             :         nfree = 0;
    1155             :         nchunks = 0;
    1156             :         ptr = ((char *) block) + Generation_BLOCKHDRSZ;
    1157             : 
    1158             :         while (ptr < block->freeptr)
    1159             :         {
    1160             :             MemoryChunk *chunk = (MemoryChunk *) ptr;
    1161             :             GenerationBlock *chunkblock;
    1162             :             Size        chunksize;
    1163             : 
    1164             :             /* Allow access to the chunk header. */
    1165             :             VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
    1166             : 
    1167             :             if (MemoryChunkIsExternal(chunk))
    1168             :             {
    1169             :                 chunkblock = ExternalChunkGetBlock(chunk);
    1170             :                 chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk);
    1171             :                 has_external_chunk = true;
    1172             :             }
    1173             :             else
    1174             :             {
    1175             :                 chunkblock = MemoryChunkGetBlock(chunk);
    1176             :                 chunksize = MemoryChunkGetValue(chunk);
    1177             :             }
    1178             : 
    1179             :             /* move to the next chunk */
    1180             :             ptr += (chunksize + Generation_CHUNKHDRSZ);
    1181             : 
    1182             :             nchunks += 1;
    1183             : 
    1184             :             /* chunks have both block and context pointers, so check both */
    1185             :             if (chunkblock != block)
    1186             :                 elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
    1187             :                      name, block, chunk);
    1188             : 
    1189             : 
    1190             :             /* is chunk allocated? */
    1191             :             if (chunk->requested_size != InvalidAllocSize)
    1192             :             {
    1193             :                 /* now make sure the chunk size is correct */
    1194             :                 if (chunksize < chunk->requested_size ||
    1195             :                     chunksize != MAXALIGN(chunksize))
    1196             :                     elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
    1197             :                          name, block, chunk);
    1198             : 
    1199             :                 /* check sentinel */
    1200             :                 Assert(chunk->requested_size < chunksize);
    1201             :                 if (!sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
    1202             :                     elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
    1203             :                          name, block, chunk);
    1204             :             }
    1205             :             else
    1206             :                 nfree += 1;
    1207             : 
    1208             :             /* if chunk is allocated, disallow access to the chunk header */
    1209             :             if (chunk->requested_size != InvalidAllocSize)
    1210             :                 VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
    1211             :         }
    1212             : 
    1213             :         /*
    1214             :          * Make sure we got the expected number of allocated and free chunks
    1215             :          * (as tracked in the block header).
    1216             :          */
    1217             :         if (nchunks != block->nchunks)
    1218             :             elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
    1219             :                  name, nchunks, block, block->nchunks);
    1220             : 
    1221             :         if (nfree != block->nfree)
    1222             :             elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
    1223             :                  name, nfree, block, block->nfree);
    1224             : 
    1225             :         if (has_external_chunk && nchunks > 1)
    1226             :             elog(WARNING, "problem in Generation %s: external chunk on non-dedicated block %p",
    1227             :                  name, block);
    1228             : 
    1229             :     }
    1230             : 
    1231             :     Assert(total_allocated == context->mem_allocated);
    1232             : }
    1233             : 
    1234             : #endif                          /* MEMORY_CONTEXT_CHECKING */
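
GenerationCheck() is compiled only into MEMORY_CONTEXT_CHECKING (assert-enabled) builds and is reached through the context's check method.  A hedged sketch of exercising it via the MemoryContextCheck() tree walker in mcxt.c:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    check_all_contexts_sketch(void)
    {
    #ifdef MEMORY_CONTEXT_CHECKING
        /*
         * Recurses over the whole context tree; for each Generation context
         * this ends up in GenerationCheck() above, which reports problems
         * as WARNINGs (never ERRORs, per the NOTE in its header comment).
         */
        MemoryContextCheck(TopMemoryContext);
    #endif
    }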

Generated by: LCOV version 1.16