LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - aset.c
Test: PostgreSQL 19devel            Lines:     260 hit / 287 total =  90.6 %
Date: 2025-08-17 01:17:32           Functions:  14 hit /  14 total = 100.0 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * aset.c
       4             :  *    Allocation set definitions.
       5             :  *
       6             :  * AllocSet is our standard implementation of the abstract MemoryContext
       7             :  * type.
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
      11             :  * Portions Copyright (c) 1994, Regents of the University of California
      12             :  *
      13             :  * IDENTIFICATION
      14             :  *    src/backend/utils/mmgr/aset.c
      15             :  *
      16             :  * NOTE:
      17             :  *  This is a new (Feb. 05, 1999) implementation of the allocation set
      18             :  *  routines. AllocSet...() does not use OrderedSet...() any more.
      19             :  *  Instead it manages allocations in a block pool by itself, combining
      20             :  *  many small allocations in a few bigger blocks. AllocSetFree() normally
       21             :  *  doesn't really free() memory. It just adds the free'd area to some
      22             :  *  list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
      23             :  *  at once on AllocSetReset(), which happens when the memory context gets
      24             :  *  destroyed.
      25             :  *              Jan Wieck
      26             :  *
      27             :  *  Performance improvement from Tom Lane, 8/99: for extremely large request
      28             :  *  sizes, we do want to be able to give the memory back to free() as soon
      29             :  *  as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
       30             :  *  freelist entries that might never be usable.  This is especially needed
      31             :  *  when the caller is repeatedly repalloc()'ing a block bigger and bigger;
      32             :  *  the previous instances of the block were guaranteed to be wasted until
      33             :  *  AllocSetReset() under the old way.
      34             :  *
      35             :  *  Further improvement 12/00: as the code stood, request sizes in the
      36             :  *  midrange between "small" and "large" were handled very inefficiently,
      37             :  *  because any sufficiently large free chunk would be used to satisfy a
      38             :  *  request, even if it was much larger than necessary.  This led to more
      39             :  *  and more wasted space in allocated chunks over time.  To fix, get rid
      40             :  *  of the midrange behavior: we now handle only "small" power-of-2-size
      41             :  *  chunks as chunks.  Anything "large" is passed off to malloc().  Change
      42             :  *  the number of freelists to change the small/large boundary.
      43             :  *
      44             :  *-------------------------------------------------------------------------
      45             :  */
      46             : 
      47             : #include "postgres.h"
      48             : 
      49             : #include "port/pg_bitutils.h"
      50             : #include "utils/memdebug.h"
      51             : #include "utils/memutils.h"
      52             : #include "utils/memutils_internal.h"
      53             : #include "utils/memutils_memorychunk.h"
      54             : 
      55             : /*--------------------
      56             :  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
      57             :  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
      58             :  *
      59             :  * Note that all chunks in the freelists have power-of-2 sizes.  This
      60             :  * improves recyclability: we may waste some space, but the wasted space
      61             :  * should stay pretty constant as requests are made and released.
      62             :  *
      63             :  * A request too large for the last freelist is handled by allocating a
      64             :  * dedicated block from malloc().  The block still has a block header and
      65             :  * chunk header, but when the chunk is freed we'll return the whole block
      66             :  * to malloc(), not put it on our freelists.
      67             :  *
      68             :  * CAUTION: ALLOC_MINBITS must be large enough so that
      69             :  * 1<<ALLOC_MINBITS is at least MAXALIGN,
      70             :  * or we may fail to align the smallest chunks adequately.
      71             :  * 8-byte alignment is enough on all currently known machines.  This 8-byte
      72             :  * minimum also allows us to store a pointer to the next freelist item within
      73             :  * the chunk of memory itself.
      74             :  *
      75             :  * With the current parameters, request sizes up to 8K are treated as chunks,
      76             :  * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
      77             :  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
      78             :  * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
      79             :  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
      80             :  *--------------------
      81             :  */
      82             : 
      83             : #define ALLOC_MINBITS       3   /* smallest chunk size is 8 bytes */
      84             : #define ALLOCSET_NUM_FREELISTS  11
      85             : #define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
      86             : /* Size of largest chunk that we use a fixed size for */
      87             : #define ALLOC_CHUNK_FRACTION    4
      88             : /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
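
To make the small/large boundary concrete, here is a minimal standalone sketch (not part of aset.c; it only re-derives the constants above) that prints the chunk size served by each freelist and the resulting ALLOC_CHUNK_LIMIT of 8 kB.

#include <stdio.h>

#define ALLOC_MINBITS           3
#define ALLOCSET_NUM_FREELISTS  11
#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS - 1 + ALLOC_MINBITS))

int
main(void)
{
    /* freelist k serves chunks of size 1 << (k + ALLOC_MINBITS) */
    for (int k = 0; k < ALLOCSET_NUM_FREELISTS; k++)
        printf("freelist %2d: %5d-byte chunks\n", k, 1 << (k + ALLOC_MINBITS));

    /* requests larger than this go to a dedicated malloc() block */
    printf("ALLOC_CHUNK_LIMIT = %d bytes\n", ALLOC_CHUNK_LIMIT);
    return 0;
}
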
      89             : 
      90             : /*--------------------
      91             :  * The first block allocated for an allocset has size initBlockSize.
      92             :  * Each time we have to allocate another block, we double the block size
      93             :  * (if possible, and without exceeding maxBlockSize), so as to reduce
      94             :  * the bookkeeping load on malloc().
      95             :  *
      96             :  * Blocks allocated to hold oversize chunks do not follow this rule, however;
      97             :  * they are just however big they need to be to hold that single chunk.
      98             :  *
      99             :  * Also, if a minContextSize is specified, the first block has that size,
     100             :  * and then initBlockSize is used for the next one.
     101             :  *--------------------
     102             :  */
     103             : 
     104             : #define ALLOC_BLOCKHDRSZ    MAXALIGN(sizeof(AllocBlockData))
     105             : #define ALLOC_CHUNKHDRSZ    sizeof(MemoryChunk)
     106             : #define FIRST_BLOCKHDRSZ    (MAXALIGN(sizeof(AllocSetContext)) + \
     107             :                              ALLOC_BLOCKHDRSZ)
     108             : 
     109             : typedef struct AllocBlockData *AllocBlock;  /* forward reference */
     110             : 
     111             : /*
     112             :  * AllocPointer
     113             :  *      Aligned pointer which may be a member of an allocation set.
     114             :  */
     115             : typedef void *AllocPointer;
     116             : 
     117             : /*
     118             :  * AllocFreeListLink
     119             :  *      When pfreeing memory, if we maintain a freelist for the given chunk's
      120             :  *      size then we use an AllocFreeListLink to point to the current item in
     121             :  *      the AllocSetContext's freelist and then set the given freelist element
     122             :  *      to point to the chunk being freed.
     123             :  */
     124             : typedef struct AllocFreeListLink
     125             : {
     126             :     MemoryChunk *next;
     127             : } AllocFreeListLink;
     128             : 
     129             : /*
      130             :  * Obtain an AllocFreeListLink for the given chunk.  Allocation sizes are
     131             :  * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
     132             :  * itself to store the freelist link.
     133             :  */
     134             : #define GetFreeListLink(chkptr) \
     135             :     (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
     136             : 
     137             : /* Validate a freelist index retrieved from a chunk header */
     138             : #define FreeListIdxIsValid(fidx) \
     139             :     ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
     140             : 
     141             : /* Determine the size of the chunk based on the freelist index */
     142             : #define GetChunkSizeFromFreeListIdx(fidx) \
     143             :     ((((Size) 1) << ALLOC_MINBITS) << (fidx))
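
A minimal sketch of the freelist-link trick described above, using hypothetical Demo* types rather than the real MemoryChunk machinery: because every chunk payload is at least 8 bytes, the freed chunk's own memory can hold the pointer to the next free chunk, so pushing and popping freelist entries needs no extra storage.

#include <stdio.h>

typedef struct DemoChunk
{
    unsigned long hdr;          /* stand-in for the MemoryChunk header */
    char        payload[8];     /* smallest allocation: 1 << ALLOC_MINBITS bytes */
} DemoChunk;

typedef struct DemoFreeListLink
{
    DemoChunk  *next;
} DemoFreeListLink;

/* reuse the chunk's payload to store the link, as GetFreeListLink() does */
#define DemoGetFreeListLink(chk)  ((DemoFreeListLink *) (chk)->payload)

int
main(void)
{
    DemoChunk   a = {0}, b = {0};
    DemoChunk  *freelist = NULL;

    /* "pfree" a, then b: each freed chunk records the previous list head */
    DemoGetFreeListLink(&a)->next = freelist;
    freelist = &a;
    DemoGetFreeListLink(&b)->next = freelist;
    freelist = &b;

    /* a later "palloc" of a matching size pops the head */
    DemoChunk  *reused = freelist;
    freelist = DemoGetFreeListLink(reused)->next;

    printf("reused chunk b? %d; new head is a? %d\n",
           reused == &b, freelist == &a);
    return 0;
}
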
     144             : 
     145             : /*
     146             :  * AllocSetContext is our standard implementation of MemoryContext.
     147             :  *
     148             :  * Note: header.isReset means there is nothing for AllocSetReset to do.
     149             :  * This is different from the aset being physically empty (empty blocks list)
     150             :  * because we will still have a keeper block.  It's also different from the set
     151             :  * being logically empty, because we don't attempt to detect pfree'ing the
     152             :  * last active chunk.
     153             :  */
     154             : typedef struct AllocSetContext
     155             : {
     156             :     MemoryContextData header;   /* Standard memory-context fields */
     157             :     /* Info about storage allocated in this context: */
     158             :     AllocBlock  blocks;         /* head of list of blocks in this set */
     159             :     MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS];  /* free chunk lists */
     160             :     /* Allocation parameters for this context: */
     161             :     uint32      initBlockSize;  /* initial block size */
     162             :     uint32      maxBlockSize;   /* maximum block size */
     163             :     uint32      nextBlockSize;  /* next block size to allocate */
     164             :     uint32      allocChunkLimit;    /* effective chunk size limit */
     165             :     /* freelist this context could be put in, or -1 if not a candidate: */
     166             :     int         freeListIndex;  /* index in context_freelists[], or -1 */
     167             : } AllocSetContext;
     168             : 
     169             : typedef AllocSetContext *AllocSet;
     170             : 
     171             : /*
     172             :  * AllocBlock
     173             :  *      An AllocBlock is the unit of memory that is obtained by aset.c
     174             :  *      from malloc().  It contains one or more MemoryChunks, which are
     175             :  *      the units requested by palloc() and freed by pfree(). MemoryChunks
      176             :  *      cannot be returned to malloc() individually; instead they are put
     177             :  *      on freelists by pfree() and re-used by the next palloc() that has
     178             :  *      a matching request size.
     179             :  *
     180             :  *      AllocBlockData is the header data for a block --- the usable space
     181             :  *      within the block begins at the next alignment boundary.
     182             :  */
     183             : typedef struct AllocBlockData
     184             : {
     185             :     AllocSet    aset;           /* aset that owns this block */
     186             :     AllocBlock  prev;           /* prev block in aset's blocks list, if any */
     187             :     AllocBlock  next;           /* next block in aset's blocks list, if any */
     188             :     char       *freeptr;        /* start of free space in this block */
     189             :     char       *endptr;         /* end of space in this block */
     190             : }           AllocBlockData;
     191             : 
     192             : /*
     193             :  * AllocPointerIsValid
     194             :  *      True iff pointer is valid allocation pointer.
     195             :  */
     196             : #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
     197             : 
     198             : /*
     199             :  * AllocSetIsValid
     200             :  *      True iff set is valid allocation set.
     201             :  */
     202             : #define AllocSetIsValid(set) \
     203             :     (PointerIsValid(set) && IsA(set, AllocSetContext))
     204             : 
     205             : /*
     206             :  * AllocBlockIsValid
     207             :  *      True iff block is valid block of allocation set.
     208             :  */
     209             : #define AllocBlockIsValid(block) \
     210             :     (PointerIsValid(block) && AllocSetIsValid((block)->aset))
     211             : 
     212             : /*
     213             :  * We always store external chunks on a dedicated block.  This makes fetching
     214             :  * the block from an external chunk easy since it's always the first and only
     215             :  * chunk on the block.
     216             :  */
     217             : #define ExternalChunkGetBlock(chunk) \
     218             :     (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
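
As a rough standalone illustration (hypothetical DEMO_* names, not the real header sizes) of the address arithmetic ExternalChunkGetBlock() relies on: an oversize allocation lives on its own block, with the chunk header placed immediately after the block header, so subtracting the block header size recovers the block.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_BLOCKHDRSZ 32          /* stand-in for ALLOC_BLOCKHDRSZ */
#define DEMO_CHUNKHDRSZ 8           /* stand-in for ALLOC_CHUNKHDRSZ */

int
main(void)
{
    /* dedicated block: block header, chunk header, then the user data */
    char       *block = malloc(DEMO_BLOCKHDRSZ + DEMO_CHUNKHDRSZ + 16384);
    char       *chunk = block + DEMO_BLOCKHDRSZ;    /* first and only chunk */

    /* recover the owning block from the chunk, as the macro above does */
    char       *recovered = chunk - DEMO_BLOCKHDRSZ;

    printf("block %p, recovered %p, match = %d\n",
           (void *) block, (void *) recovered, recovered == block);
    free(block);
    return 0;
}
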
     219             : 
     220             : /*
     221             :  * Rather than repeatedly creating and deleting memory contexts, we keep some
     222             :  * freed contexts in freelists so that we can hand them out again with little
     223             :  * work.  Before putting a context in a freelist, we reset it so that it has
     224             :  * only its initial malloc chunk and no others.  To be a candidate for a
     225             :  * freelist, a context must have the same minContextSize/initBlockSize as
     226             :  * other contexts in the list; but its maxBlockSize is irrelevant since that
     227             :  * doesn't affect the size of the initial chunk.
     228             :  *
     229             :  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
     230             :  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
     231             :  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
     232             :  *
     233             :  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
     234             :  * hopes of improving locality of reference.  But if there get to be too
     235             :  * many contexts in the list, we'd prefer to drop the most-recently-created
     236             :  * contexts in hopes of keeping the process memory map compact.
     237             :  * We approximate that by simply deleting all existing entries when the list
     238             :  * overflows, on the assumption that queries that allocate a lot of contexts
     239             :  * will probably free them in more or less reverse order of allocation.
     240             :  *
     241             :  * Contexts in a freelist are chained via their nextchild pointers.
     242             :  */
     243             : #define MAX_FREE_CONTEXTS 100   /* arbitrary limit on freelist length */
     244             : 
     245             : /* Obtain the keeper block for an allocation set */
     246             : #define KeeperBlock(set) \
     247             :     ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
     248             : 
     249             : /* Check if the block is the keeper block of the given allocation set */
     250             : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
     251             : 
     252             : typedef struct AllocSetFreeList
     253             : {
     254             :     int         num_free;       /* current list length */
     255             :     AllocSetContext *first_free;    /* list header */
     256             : } AllocSetFreeList;
     257             : 
     258             : /* context_freelists[0] is for default params, [1] for small params */
     259             : static AllocSetFreeList context_freelists[2] =
     260             : {
     261             :     {
     262             :         0, NULL
     263             :     },
     264             :     {
     265             :         0, NULL
     266             :     }
     267             : };
     268             : 
     269             : 
     270             : /* ----------
     271             :  * AllocSetFreeIndex -
     272             :  *
      273             :  *      Depending on the size of an allocation, compute which freechunk
     274             :  *      list of the alloc set it belongs to.  Caller must have verified
     275             :  *      that size <= ALLOC_CHUNK_LIMIT.
     276             :  * ----------
     277             :  */
     278             : static inline int
     279  1347319464 : AllocSetFreeIndex(Size size)
     280             : {
     281             :     int         idx;
     282             : 
     283  1347319464 :     if (size > (1 << ALLOC_MINBITS))
     284             :     {
     285             :         /*----------
     286             :          * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
     287             :          * This is the same as
     288             :          *      pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
     289             :          * or equivalently
     290             :          *      pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
     291             :          *
     292             :          * However, for platforms without intrinsic support, we duplicate the
     293             :          * logic here, allowing an additional optimization.  It's reasonable
     294             :          * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
     295             :          * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
     296             :          * the last two bytes.
     297             :          *
     298             :          * Yes, this function is enough of a hot-spot to make it worth this
     299             :          * much trouble.
     300             :          *----------
     301             :          */
     302             : #ifdef HAVE_BITSCAN_REVERSE
     303  1175074968 :         idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
     304             : #else
     305             :         uint32      t,
     306             :                     tsize;
     307             : 
     308             :         /* Statically assert that we only have a 16-bit input value. */
     309             :         StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
     310             :                          "ALLOC_CHUNK_LIMIT must be less than 64kB");
     311             : 
     312             :         tsize = size - 1;
     313             :         t = tsize >> 8;
     314             :         idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
     315             :         idx -= ALLOC_MINBITS - 1;
     316             : #endif
     317             : 
     318             :         Assert(idx < ALLOCSET_NUM_FREELISTS);
     319             :     }
     320             :     else
     321   172244496 :         idx = 0;
     322             : 
     323  1347319464 :     return idx;
     324             : }
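
A standalone sketch (using a portable stand-in for pg_leftmost_one_pos32) that checks the comment's formula: for every request size in (8, 8192], the computed index selects the smallest power-of-2 chunk that can hold the request.

#include <assert.h>
#include <stdio.h>

#define ALLOC_MINBITS 3

/* portable stand-in for pg_leftmost_one_pos32 (position of highest set bit) */
static int
leftmost_one_pos32(unsigned int x)
{
    int         pos = 0;

    while (x >>= 1)
        pos++;
    return pos;
}

int
main(void)
{
    for (unsigned int size = (1 << ALLOC_MINBITS) + 1; size <= 8192; size++)
    {
        int         idx = leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1;
        unsigned int chunk_size = 1u << (idx + ALLOC_MINBITS);

        /* the chosen chunk is the smallest power of 2 that fits the request */
        assert(chunk_size >= size && chunk_size / 2 < size);
    }
    printf("index formula verified for sizes 9..8192\n");
    return 0;
}
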
     325             : 
     326             : 
     327             : /*
     328             :  * Public routines
     329             :  */
     330             : 
     331             : 
     332             : /*
     333             :  * AllocSetContextCreateInternal
     334             :  *      Create a new AllocSet context.
     335             :  *
     336             :  * parent: parent context, or NULL if top-level context
     337             :  * name: name of context (must be statically allocated)
     338             :  * minContextSize: minimum context size
     339             :  * initBlockSize: initial allocation block size
     340             :  * maxBlockSize: maximum allocation block size
     341             :  *
     342             :  * Most callers should abstract the context size parameters using a macro
     343             :  * such as ALLOCSET_DEFAULT_SIZES.
     344             :  *
     345             :  * Note: don't call this directly; go through the wrapper macro
     346             :  * AllocSetContextCreate.
     347             :  */
     348             : MemoryContext
     349    13200054 : AllocSetContextCreateInternal(MemoryContext parent,
     350             :                               const char *name,
     351             :                               Size minContextSize,
     352             :                               Size initBlockSize,
     353             :                               Size maxBlockSize)
     354             : {
     355             :     int         freeListIndex;
     356             :     Size        firstBlockSize;
     357             :     AllocSet    set;
     358             :     AllocBlock  block;
     359             : 
     360             :     /* ensure MemoryChunk's size is properly maxaligned */
     361             :     StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
     362             :                      "sizeof(MemoryChunk) is not maxaligned");
     363             :     /* check we have enough space to store the freelist link */
     364             :     StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
     365             :                      "sizeof(AllocFreeListLink) larger than minimum allocation size");
     366             : 
     367             :     /*
     368             :      * First, validate allocation parameters.  Once these were regular runtime
     369             :      * tests and elog's, but in practice Asserts seem sufficient because
     370             :      * nobody varies their parameters at runtime.  We somewhat arbitrarily
     371             :      * enforce a minimum 1K block size.  We restrict the maximum block size to
     372             :      * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
     373             :      * regards to addressing the offset between the chunk and the block that
     374             :      * the chunk is stored on.  We would be unable to store the offset between
     375             :      * the chunk and block for any chunks that were beyond
     376             :      * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
     377             :      * larger than this.
     378             :      */
     379             :     Assert(initBlockSize == MAXALIGN(initBlockSize) &&
     380             :            initBlockSize >= 1024);
     381             :     Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
     382             :            maxBlockSize >= initBlockSize &&
     383             :            AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
     384             :     Assert(minContextSize == 0 ||
     385             :            (minContextSize == MAXALIGN(minContextSize) &&
     386             :             minContextSize >= 1024 &&
     387             :             minContextSize <= maxBlockSize));
     388             :     Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
     389             : 
     390             :     /*
     391             :      * Check whether the parameters match either available freelist.  We do
     392             :      * not need to demand a match of maxBlockSize.
     393             :      */
     394    13200054 :     if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
     395             :         initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
     396     8484942 :         freeListIndex = 0;
     397     4715112 :     else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
     398             :              initBlockSize == ALLOCSET_SMALL_INITSIZE)
     399     4676128 :         freeListIndex = 1;
     400             :     else
     401       38984 :         freeListIndex = -1;
     402             : 
     403             :     /*
     404             :      * If a suitable freelist entry exists, just recycle that context.
     405             :      */
     406    13200054 :     if (freeListIndex >= 0)
     407             :     {
     408    13161070 :         AllocSetFreeList *freelist = &context_freelists[freeListIndex];
     409             : 
     410    13161070 :         if (freelist->first_free != NULL)
     411             :         {
     412             :             /* Remove entry from freelist */
     413     9034510 :             set = freelist->first_free;
     414     9034510 :             freelist->first_free = (AllocSet) set->header.nextchild;
     415     9034510 :             freelist->num_free--;
     416             : 
     417             :             /* Update its maxBlockSize; everything else should be OK */
     418     9034510 :             set->maxBlockSize = maxBlockSize;
     419             : 
     420             :             /* Reinitialize its header, installing correct name and parent */
     421     9034510 :             MemoryContextCreate((MemoryContext) set,
     422             :                                 T_AllocSetContext,
     423             :                                 MCTX_ASET_ID,
     424             :                                 parent,
     425             :                                 name);
     426             : 
     427     9034510 :             ((MemoryContext) set)->mem_allocated =
     428     9034510 :                 KeeperBlock(set)->endptr - ((char *) set);
     429             : 
     430     9034510 :             return (MemoryContext) set;
     431             :         }
     432             :     }
     433             : 
     434             :     /* Determine size of initial block */
     435     4165544 :     firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
     436             :         ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     437     4165544 :     if (minContextSize != 0)
     438       38984 :         firstBlockSize = Max(firstBlockSize, minContextSize);
     439             :     else
     440     4126560 :         firstBlockSize = Max(firstBlockSize, initBlockSize);
     441             : 
     442             :     /*
     443             :      * Allocate the initial block.  Unlike other aset.c blocks, it starts with
     444             :      * the context header and its block header follows that.
     445             :      */
     446     4165544 :     set = (AllocSet) malloc(firstBlockSize);
     447     4165544 :     if (set == NULL)
     448             :     {
     449           0 :         if (TopMemoryContext)
     450           0 :             MemoryContextStats(TopMemoryContext);
     451           0 :         ereport(ERROR,
     452             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     453             :                  errmsg("out of memory"),
     454             :                  errdetail("Failed while creating memory context \"%s\".",
     455             :                            name)));
     456             :     }
     457             : 
     458             :     /*
     459             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     460             :      * we'd leak the header/initial block if we ereport in this stretch.
     461             :      */
     462             : 
     463             :     /* Create a vpool associated with the context */
     464             :     VALGRIND_CREATE_MEMPOOL(set, 0, false);
     465             : 
     466             :     /*
     467             :      * Create a vchunk covering both the AllocSetContext struct and the keeper
     468             :      * block's header.  (Perhaps it would be more sensible for these to be two
     469             :      * separate vchunks, but doing that seems to tickle bugs in some versions
     470             :      * of Valgrind.)  We must have these vchunks, and also a vchunk for each
     471             :      * subsequently-added block header, so that Valgrind considers the
     472             :      * pointers within them while checking for leaked memory.  Note that
     473             :      * Valgrind doesn't distinguish between these vchunks and those created by
     474             :      * mcxt.c for the user-accessible-data chunks we allocate.
     475             :      */
     476             :     VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
     477             : 
     478             :     /* Fill in the initial block's block header */
     479     4165544 :     block = KeeperBlock(set);
     480     4165544 :     block->aset = set;
     481     4165544 :     block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
     482     4165544 :     block->endptr = ((char *) set) + firstBlockSize;
     483     4165544 :     block->prev = NULL;
     484     4165544 :     block->next = NULL;
     485             : 
     486             :     /* Mark unallocated space NOACCESS; leave the block header alone. */
     487             :     VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
     488             : 
     489             :     /* Remember block as part of block list */
     490     4165544 :     set->blocks = block;
     491             : 
     492             :     /* Finish filling in aset-specific parts of the context header */
     493    49986528 :     MemSetAligned(set->freelist, 0, sizeof(set->freelist));
     494             : 
     495     4165544 :     set->initBlockSize = (uint32) initBlockSize;
     496     4165544 :     set->maxBlockSize = (uint32) maxBlockSize;
     497     4165544 :     set->nextBlockSize = (uint32) initBlockSize;
     498     4165544 :     set->freeListIndex = freeListIndex;
     499             : 
     500             :     /*
     501             :      * Compute the allocation chunk size limit for this context.  It can't be
     502             :      * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
     503             :      * If maxBlockSize is small then requests exceeding the maxBlockSize, or
     504             :      * even a significant fraction of it, should be treated as large chunks
     505             :      * too.  For the typical case of maxBlockSize a power of 2, the chunk size
     506             :      * limit will be at most 1/8th maxBlockSize, so that given a stream of
     507             :      * requests that are all the maximum chunk size we will waste at most
     508             :      * 1/8th of the allocated space.
     509             :      *
     510             :      * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
     511             :      */
     512             :     StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
     513             :                      "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
     514             : 
     515             :     /*
     516             :      * Determine the maximum size that a chunk can be before we allocate an
     517             :      * entire AllocBlock dedicated for that chunk.  We set the absolute limit
     518             :      * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
     519             :      * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
     520             :      * sized block.  (We opt to keep allocChunkLimit a power-of-2 value
     521             :      * primarily for legacy reasons rather than calculating it so that exactly
     522             :      * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
     523             :      */
     524     4165544 :     set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
     525     4165544 :     while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
     526    13983408 :            (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
     527     9817864 :         set->allocChunkLimit >>= 1;
     528             : 
     529             :     /* Finally, do the type-independent part of context creation */
     530     4165544 :     MemoryContextCreate((MemoryContext) set,
     531             :                         T_AllocSetContext,
     532             :                         MCTX_ASET_ID,
     533             :                         parent,
     534             :                         name);
     535             : 
     536     4165544 :     ((MemoryContext) set)->mem_allocated = firstBlockSize;
     537             : 
     538     4165544 :     return (MemoryContext) set;
     539             : }
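
For context, a sketch of how backend or extension code typically reaches this function, through the AllocSetContextCreate() wrapper macro and the standard size macros (fragment assumes a running backend; demo_context_use is a hypothetical caller, not part of aset.c):

#include "postgres.h"
#include "utils/memutils.h"

static void
demo_context_use(void)
{
    MemoryContext mycxt;
    MemoryContext oldcxt;
    char       *buf;

    /* goes through AllocSetContextCreateInternal() with the default sizes */
    mycxt = AllocSetContextCreate(CurrentMemoryContext,
                                  "demo working data",
                                  ALLOCSET_DEFAULT_SIZES);

    oldcxt = MemoryContextSwitchTo(mycxt);
    buf = palloc(256);          /* small request, carved from the keeper block */
    snprintf(buf, 256, "scratch");
    MemoryContextSwitchTo(oldcxt);

    /* releases buf and everything else allocated in mycxt in one call */
    MemoryContextDelete(mycxt);
}
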
     540             : 
     541             : /*
     542             :  * AllocSetReset
     543             :  *      Frees all memory which is allocated in the given set.
     544             :  *
     545             :  * Actually, this routine has some discretion about what to do.
     546             :  * It should mark all allocated chunks freed, but it need not necessarily
     547             :  * give back all the resources the set owns.  Our actual implementation is
     548             :  * that we give back all but the "keeper" block (which we must keep, since
     549             :  * it shares a malloc chunk with the context header).  In this way, we don't
     550             :  * thrash malloc() when a context is repeatedly reset after small allocations,
     551             :  * which is typical behavior for per-tuple contexts.
     552             :  */
     553             : void
     554    51416198 : AllocSetReset(MemoryContext context)
     555             : {
     556    51416198 :     AllocSet    set = (AllocSet) context;
     557             :     AllocBlock  block;
     558             :     Size        keepersize PG_USED_FOR_ASSERTS_ONLY;
     559             : 
     560             :     Assert(AllocSetIsValid(set));
     561             : 
     562             : #ifdef MEMORY_CONTEXT_CHECKING
     563             :     /* Check for corruption and leaks before freeing */
     564             :     AllocSetCheck(context);
     565             : #endif
     566             : 
     567             :     /* Remember keeper block size for Assert below */
     568    51416198 :     keepersize = KeeperBlock(set)->endptr - ((char *) set);
     569             : 
     570             :     /* Clear chunk freelists */
     571   616994376 :     MemSetAligned(set->freelist, 0, sizeof(set->freelist));
     572             : 
     573    51416198 :     block = set->blocks;
     574             : 
     575             :     /* New blocks list will be just the keeper block */
     576    51416198 :     set->blocks = KeeperBlock(set);
     577             : 
     578   113581890 :     while (block != NULL)
     579             :     {
     580    62165692 :         AllocBlock  next = block->next;
     581             : 
     582    62165692 :         if (IsKeeperBlock(set, block))
     583             :         {
     584             :             /* Reset the block, but don't return it to malloc */
     585    51416198 :             char       *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
     586             : 
     587             : #ifdef CLOBBER_FREED_MEMORY
     588             :             wipe_mem(datastart, block->freeptr - datastart);
     589             : #else
     590             :             /* wipe_mem() would have done this */
     591             :             VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
     592             : #endif
     593    51416198 :             block->freeptr = datastart;
     594    51416198 :             block->prev = NULL;
     595    51416198 :             block->next = NULL;
     596             :         }
     597             :         else
     598             :         {
     599             :             /* Normal case, release the block */
     600    10749494 :             context->mem_allocated -= block->endptr - ((char *) block);
     601             : 
     602             : #ifdef CLOBBER_FREED_MEMORY
     603             :             wipe_mem(block, block->freeptr - ((char *) block));
     604             : #endif
     605             : 
     606             :             /*
     607             :              * We need to free the block header's vchunk explicitly, although
     608             :              * the user-data vchunks within will go away in the TRIM below.
     609             :              * Otherwise Valgrind complains about leaked allocations.
     610             :              */
     611             :             VALGRIND_MEMPOOL_FREE(set, block);
     612             : 
     613    10749494 :             free(block);
     614             :         }
     615    62165692 :         block = next;
     616             :     }
     617             : 
     618             :     Assert(context->mem_allocated == keepersize);
     619             : 
     620             :     /*
     621             :      * Instruct Valgrind to throw away all the vchunks associated with this
     622             :      * context, except for the one covering the AllocSetContext and
     623             :      * keeper-block header.  This gets rid of the vchunks for whatever user
     624             :      * data is getting discarded by the context reset.
     625             :      */
     626             :     VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
     627             : 
     628             :     /* Reset block size allocation sequence, too */
     629    51416198 :     set->nextBlockSize = set->initBlockSize;
     630    51416198 : }
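
A sketch of the per-tuple usage pattern this comment has in mind (fragment only; ntuples and process_one_tuple are hypothetical): resetting the context after each tuple stays cheap precisely because the keeper block is retained rather than returned to malloc().

MemoryContext per_tuple_cxt = AllocSetContextCreate(CurrentMemoryContext,
                                                    "per-tuple scratch",
                                                    ALLOCSET_DEFAULT_SIZES);

for (int i = 0; i < ntuples; i++)
{
    MemoryContext oldcxt = MemoryContextSwitchTo(per_tuple_cxt);

    process_one_tuple(i);       /* may palloc() freely in per_tuple_cxt */

    MemoryContextSwitchTo(oldcxt);

    /* keeps the keeper block, returns any additional blocks to malloc() */
    MemoryContextReset(per_tuple_cxt);
}

MemoryContextDelete(per_tuple_cxt);
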
     631             : 
     632             : /*
     633             :  * AllocSetDelete
     634             :  *      Frees all memory which is allocated in the given set,
     635             :  *      in preparation for deletion of the set.
     636             :  *
     637             :  * Unlike AllocSetReset, this *must* free all resources of the set.
     638             :  */
     639             : void
     640     9502548 : AllocSetDelete(MemoryContext context)
     641             : {
     642     9502548 :     AllocSet    set = (AllocSet) context;
     643     9502548 :     AllocBlock  block = set->blocks;
     644             :     Size        keepersize PG_USED_FOR_ASSERTS_ONLY;
     645             : 
     646             :     Assert(AllocSetIsValid(set));
     647             : 
     648             : #ifdef MEMORY_CONTEXT_CHECKING
     649             :     /* Check for corruption and leaks before freeing */
     650             :     AllocSetCheck(context);
     651             : #endif
     652             : 
     653             :     /* Remember keeper block size for Assert below */
     654     9502548 :     keepersize = KeeperBlock(set)->endptr - ((char *) set);
     655             : 
     656             :     /*
     657             :      * If the context is a candidate for a freelist, put it into that freelist
     658             :      * instead of destroying it.
     659             :      */
     660     9502548 :     if (set->freeListIndex >= 0)
     661             :     {
     662     9502548 :         AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
     663             : 
     664             :         /*
     665             :          * Reset the context, if it needs it, so that we aren't hanging on to
     666             :          * more than the initial malloc chunk.
     667             :          */
     668     9502548 :         if (!context->isReset)
     669     5876460 :             MemoryContextResetOnly(context);
     670             : 
     671             :         /*
     672             :          * If the freelist is full, just discard what's already in it.  See
     673             :          * comments with context_freelists[].
     674             :          */
     675     9502548 :         if (freelist->num_free >= MAX_FREE_CONTEXTS)
     676             :         {
     677       59590 :             while (freelist->first_free != NULL)
     678             :             {
     679       59000 :                 AllocSetContext *oldset = freelist->first_free;
     680             : 
     681       59000 :                 freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
     682       59000 :                 freelist->num_free--;
     683             : 
     684             :                 /* Destroy the context's vpool --- see notes below */
     685             :                 VALGRIND_DESTROY_MEMPOOL(oldset);
     686             : 
     687             :                 /* All that remains is to free the header/initial block */
     688       59000 :                 free(oldset);
     689             :             }
     690             :             Assert(freelist->num_free == 0);
     691             :         }
     692             : 
     693             :         /* Now add the just-deleted context to the freelist. */
     694     9502548 :         set->header.nextchild = (MemoryContext) freelist->first_free;
     695     9502548 :         freelist->first_free = set;
     696     9502548 :         freelist->num_free++;
     697             : 
     698     9502548 :         return;
     699             :     }
     700             : 
     701             :     /* Free all blocks, except the keeper which is part of context header */
     702           0 :     while (block != NULL)
     703             :     {
     704           0 :         AllocBlock  next = block->next;
     705             : 
     706           0 :         if (!IsKeeperBlock(set, block))
     707           0 :             context->mem_allocated -= block->endptr - ((char *) block);
     708             : 
     709             : #ifdef CLOBBER_FREED_MEMORY
     710             :         wipe_mem(block, block->freeptr - ((char *) block));
     711             : #endif
     712             : 
     713           0 :         if (!IsKeeperBlock(set, block))
     714             :         {
     715             :             /* As in AllocSetReset, free block-header vchunks explicitly */
     716             :             VALGRIND_MEMPOOL_FREE(set, block);
     717           0 :             free(block);
     718             :         }
     719             : 
     720           0 :         block = next;
     721             :     }
     722             : 
     723             :     Assert(context->mem_allocated == keepersize);
     724             : 
     725             :     /*
     726             :      * Destroy the vpool.  We don't seem to need to explicitly free the
     727             :      * initial block's header vchunk, nor any user-data vchunks that Valgrind
     728             :      * still knows about; they'll all go away automatically.
     729             :      */
     730             :     VALGRIND_DESTROY_MEMPOOL(set);
     731             : 
     732             :     /* Finally, free the context header, including the keeper block */
     733           0 :     free(set);
     734             : }
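
The context freelist described near context_freelists[] is what makes a pattern like the following cheap: repeatedly creating and deleting a default-size context mostly just moves the same header back and forth between the freelist and the caller (fragment; assumes a backend environment, and do_some_work is a hypothetical helper).

for (int i = 0; i < 1000; i++)
{
    /* after the first iteration, this usually recycles a freelisted context */
    MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                              "short-lived context",
                                              ALLOCSET_DEFAULT_SIZES);

    do_some_work(cxt);          /* hypothetical: pallocs within cxt */

    /*
     * Puts the reset context back on context_freelists[0] rather than
     * returning its header/keeper block to free().
     */
    MemoryContextDelete(cxt);
}
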
     735             : 
     736             : /*
     737             :  * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
     738             :  *
     739             :  * AllocSetAlloc()'s comment explains why this is separate.
     740             :  */
     741             : pg_noinline
     742             : static void *
     743    20925548 : AllocSetAllocLarge(MemoryContext context, Size size, int flags)
     744             : {
     745    20925548 :     AllocSet    set = (AllocSet) context;
     746             :     AllocBlock  block;
     747             :     MemoryChunk *chunk;
     748             :     Size        chunk_size;
     749             :     Size        blksize;
     750             : 
     751             :     /* validate 'size' is within the limits for the given 'flags' */
     752    20925548 :     MemoryContextCheckSize(context, size, flags);
     753             : 
     754             : #ifdef MEMORY_CONTEXT_CHECKING
     755             :     /* ensure there's always space for the sentinel byte */
     756             :     chunk_size = MAXALIGN(size + 1);
     757             : #else
     758    20925548 :     chunk_size = MAXALIGN(size);
     759             : #endif
     760             : 
     761    20925548 :     blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     762    20925548 :     block = (AllocBlock) malloc(blksize);
     763    20925548 :     if (block == NULL)
     764           0 :         return MemoryContextAllocationFailure(context, size, flags);
     765             : 
     766             :     /* Make a vchunk covering the new block's header */
     767             :     VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
     768             : 
     769    20925548 :     context->mem_allocated += blksize;
     770             : 
     771    20925548 :     block->aset = set;
     772    20925548 :     block->freeptr = block->endptr = ((char *) block) + blksize;
     773             : 
     774    20925548 :     chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
     775             : 
     776             :     /* mark the MemoryChunk as externally managed */
     777    20925548 :     MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
     778             : 
     779             : #ifdef MEMORY_CONTEXT_CHECKING
     780             :     chunk->requested_size = size;
     781             :     /* set mark to catch clobber of "unused" space */
     782             :     Assert(size < chunk_size);
     783             :     set_sentinel(MemoryChunkGetPointer(chunk), size);
     784             : #endif
     785             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     786             :     /* fill the allocated space with junk */
     787             :     randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
     788             : #endif
     789             : 
     790             :     /*
     791             :      * Stick the new block underneath the active allocation block, if any, so
     792             :      * that we don't lose the use of the space remaining therein.
     793             :      */
     794    20925548 :     if (set->blocks != NULL)
     795             :     {
     796    20925548 :         block->prev = set->blocks;
     797    20925548 :         block->next = set->blocks->next;
     798    20925548 :         if (block->next)
     799    16243824 :             block->next->prev = block;
     800    20925548 :         set->blocks->next = block;
     801             :     }
     802             :     else
     803             :     {
     804           0 :         block->prev = NULL;
     805           0 :         block->next = NULL;
     806           0 :         set->blocks = block;
     807             :     }
     808             : 
     809             :     /* Ensure any padding bytes are marked NOACCESS. */
     810             :     VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
     811             :                                chunk_size - size);
     812             : 
     813             :     /* Disallow access to the chunk header. */
     814             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
     815             : 
     816    20925548 :     return MemoryChunkGetPointer(chunk);
     817             : }
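
A small fragment (backend environment assumed) showing the path above: with the default parameters, a request larger than allocChunkLimit (8 kB) bypasses the freelists, gets a dedicated block from malloc(), and pfree() hands that block straight back to free().

/* 16 kB > allocChunkLimit, so this goes through AllocSetAllocLarge() */
char   *big = palloc(16 * 1024);

/* ... use big ... */

/* the dedicated block is returned to free() immediately, not freelisted */
pfree(big);
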
     818             : 
     819             : /*
     820             :  * Small helper for allocating a new chunk from a chunk, to avoid duplicating
     821             :  * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
     822             :  */
     823             : static inline void *
     824   910190524 : AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
     825             :                             Size size, Size chunk_size, int fidx)
     826             : {
     827             :     MemoryChunk *chunk;
     828             : 
     829   910190524 :     chunk = (MemoryChunk *) (block->freeptr);
     830             : 
     831             :     /* Prepare to initialize the chunk header. */
     832             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
     833             : 
     834   910190524 :     block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
     835             :     Assert(block->freeptr <= block->endptr);
     836             : 
     837             :     /* store the free list index in the value field */
     838   910190524 :     MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
     839             : 
     840             : #ifdef MEMORY_CONTEXT_CHECKING
     841             :     chunk->requested_size = size;
     842             :     /* set mark to catch clobber of "unused" space */
     843             :     if (size < chunk_size)
     844             :         set_sentinel(MemoryChunkGetPointer(chunk), size);
     845             : #endif
     846             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     847             :     /* fill the allocated space with junk */
     848             :     randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
     849             : #endif
     850             : 
     851             :     /* Ensure any padding bytes are marked NOACCESS. */
     852             :     VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
     853             :                                chunk_size - size);
     854             : 
     855             :     /* Disallow access to the chunk header. */
     856             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
     857             : 
     858   910190524 :     return MemoryChunkGetPointer(chunk);
     859             : }
     860             : 
     861             : /*
     862             :  * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
     863             :  * allocated from it.
     864             :  *
     865             :  * AllocSetAlloc()'s comment explains why this is separate.
     866             :  */
     867             : pg_noinline
     868             : static void *
     869    13056186 : AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
     870             :                           int fidx)
     871             : {
     872    13056186 :     AllocSet    set = (AllocSet) context;
     873             :     AllocBlock  block;
     874             :     Size        availspace;
     875             :     Size        blksize;
     876             :     Size        required_size;
     877             :     Size        chunk_size;
     878             : 
     879             :     /* due to the keeper block set->blocks should always be valid */
     880             :     Assert(set->blocks != NULL);
     881    13056186 :     block = set->blocks;
     882    13056186 :     availspace = block->endptr - block->freeptr;
     883             : 
     884             :     /*
     885             :      * The existing active (top) block does not have enough room for the
     886             :      * requested allocation, but it might still have a useful amount of space
     887             :      * in it.  Once we push it down in the block list, we'll never try to
     888             :      * allocate more space from it. So, before we do that, carve up its free
     889             :      * space into chunks that we can put on the set's freelists.
     890             :      *
     891             :      * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
     892             :      * left in the block, this loop cannot iterate more than
     893             :      * ALLOCSET_NUM_FREELISTS-1 times.
     894             :      */
     895    51494214 :     while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
     896             :     {
     897             :         AllocFreeListLink *link;
     898             :         MemoryChunk *chunk;
     899    38438028 :         Size        availchunk = availspace - ALLOC_CHUNKHDRSZ;
     900    38438028 :         int         a_fidx = AllocSetFreeIndex(availchunk);
     901             : 
     902             :         /*
     903             :          * In most cases, we'll get back the index of the next larger freelist
     904             :          * than the one we need to put this chunk on.  The exception is when
     905             :          * availchunk is exactly a power of 2.
     906             :          */
     907    38438028 :         if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
     908             :         {
     909    28941826 :             a_fidx--;
     910             :             Assert(a_fidx >= 0);
     911    28941826 :             availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
     912             :         }
     913             : 
     914    38438028 :         chunk = (MemoryChunk *) (block->freeptr);
     915             : 
     916             :         /* Prepare to initialize the chunk header. */
     917             :         VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
     918    38438028 :         block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
     919    38438028 :         availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
     920             : 
     921             :         /* store the freelist index in the value field */
     922    38438028 :         MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
     923             : #ifdef MEMORY_CONTEXT_CHECKING
     924             :         chunk->requested_size = InvalidAllocSize;    /* mark it free */
     925             : #endif
     926             :         /* push this chunk onto the free list */
     927    38438028 :         link = GetFreeListLink(chunk);
     928             : 
     929             :         VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
     930    38438028 :         link->next = set->freelist[a_fidx];
     931             :         VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
     932             : 
     933    38438028 :         set->freelist[a_fidx] = chunk;
     934             :     }
     935             : 
     936             :     /*
     937             :      * The first such block has size initBlockSize, and we double the space in
     938             :      * each succeeding block, but not more than maxBlockSize.
     939             :      */
     940    13056186 :     blksize = set->nextBlockSize;
     941    13056186 :     set->nextBlockSize <<= 1;
     942    13056186 :     if (set->nextBlockSize > set->maxBlockSize)
     943      696816 :         set->nextBlockSize = set->maxBlockSize;
     944             : 
     945             :     /* Choose the actual chunk size to allocate */
     946    13056186 :     chunk_size = GetChunkSizeFromFreeListIdx(fidx);
     947             :     Assert(chunk_size >= size);
     948             : 
     949             :     /*
     950             :      * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
     951             :      * space... but try to keep it a power of 2.
     952             :      */
     953    13056186 :     required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     954    18822024 :     while (blksize < required_size)
     955     5765838 :         blksize <<= 1;
     956             : 
     957             :     /* Try to allocate it */
     958    13056186 :     block = (AllocBlock) malloc(blksize);
     959             : 
     960             :     /*
     961             :      * We could be asking for pretty big blocks here, so cope if malloc fails.
     962             :      * But give up if there's less than 1 MB or so available...
     963             :      */
     964    13056186 :     while (block == NULL && blksize > 1024 * 1024)
     965             :     {
     966           0 :         blksize >>= 1;
     967           0 :         if (blksize < required_size)
     968           0 :             break;
     969           0 :         block = (AllocBlock) malloc(blksize);
     970             :     }
     971             : 
     972    13056186 :     if (block == NULL)
     973           0 :         return MemoryContextAllocationFailure(context, size, flags);
     974             : 
     975             :     /* Make a vchunk covering the new block's header */
     976             :     VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
     977             : 
     978    13056186 :     context->mem_allocated += blksize;
     979             : 
     980    13056186 :     block->aset = set;
     981    13056186 :     block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
     982    13056186 :     block->endptr = ((char *) block) + blksize;
     983             : 
     984             :     /* Mark unallocated space NOACCESS. */
     985             :     VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
     986             :                                blksize - ALLOC_BLOCKHDRSZ);
     987             : 
     988    13056186 :     block->prev = NULL;
     989    13056186 :     block->next = set->blocks;
     990    13056186 :     if (block->next)
     991    13056186 :         block->next->prev = block;
     992    13056186 :     set->blocks = block;
     993             : 
     994    13056186 :     return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
     995             : }
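
The block-growth policy above is worth seeing in isolation: each new block starts at the previously doubled size (clamped to maxBlockSize) and is then doubled again until the chunk that triggered the allocation fits, headers included.  A minimal standalone sketch of that policy, using illustrative sizes and a made-up HDR_OVERHEAD constant rather than the backend's own header math:

#include <stdio.h>
#include <stddef.h>

#define HDR_OVERHEAD 64                /* stand-in for block + chunk header size */

/*
 * Return the size to malloc for the next block, and advance the doubling
 * sequence used for the block after that, clamped at maxBlockSize.
 */
static size_t
next_block_size(size_t *nextBlockSize, size_t maxBlockSize, size_t chunk_size)
{
    size_t      blksize = *nextBlockSize;

    *nextBlockSize <<= 1;
    if (*nextBlockSize > maxBlockSize)
        *nextBlockSize = maxBlockSize;

    /* grow this block until the requested chunk plus headers fits */
    while (blksize < chunk_size + HDR_OVERHEAD)
        blksize <<= 1;

    return blksize;
}

int
main(void)
{
    size_t      next = 8 * 1024;       /* an initBlockSize-style starting point */
    size_t      max = 1024 * 1024;     /* a maxBlockSize-style cap */

    for (int i = 0; i < 10; i++)
        printf("block %d: %zu bytes\n", i, next_block_size(&next, max, 512));
    return 0;
}

Under these assumptions the printed sizes double from 8 kB up to the 1 MB cap and then stay there, which is the geometric growth the comment above describes.
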
     996             : 
     997             : /*
     998             :  * AllocSetAlloc
     999             :  *      Returns a pointer to allocated memory of given size or raises an ERROR
     1000             :  *      Returns a pointer to allocated memory of the given size.  Raises an
     1001             :  *      ERROR on allocation failure, or returns NULL instead when flags
     1002             :  *      contains MCXT_ALLOC_NO_OOM.
    1003             :  * No request may exceed:
    1004             :  *      MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
    1005             :  * All callers use a much-lower limit.
     1006             :  * All callers use a much lower limit.
    1007             :  * Note: when using valgrind, it doesn't matter how the returned allocation
    1008             :  * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
    1009             :  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
    1010             :  *
    1011             :  * This function should only contain the most common code paths.  Everything
    1012             :  * else should be in pg_noinline helper functions, thus avoiding the overhead
    1013             :  * of creating a stack frame for the common cases.  Allocating memory is often
    1014             :  * a bottleneck in many workloads, so avoiding stack frame setup is
    1015             :  * worthwhile.  Helper functions should always directly return the newly
    1016             :  * allocated memory so that we can just return that address directly as a tail
    1017             :  * call.
    1018             :  */
    1019             : void *
    1020  1329806984 : AllocSetAlloc(MemoryContext context, Size size, int flags)
    1021             : {
    1022  1329806984 :     AllocSet    set = (AllocSet) context;
    1023             :     AllocBlock  block;
    1024             :     MemoryChunk *chunk;
    1025             :     int         fidx;
    1026             :     Size        chunk_size;
    1027             :     Size        availspace;
    1028             : 
    1029             :     Assert(AllocSetIsValid(set));
    1030             : 
     1031             :     /* due to the keeper block, set->blocks should never be NULL */
    1032             :     Assert(set->blocks != NULL);
    1033             : 
    1034             :     /*
    1035             :      * If requested size exceeds maximum for chunks we hand the request off to
    1036             :      * AllocSetAllocLarge().
    1037             :      */
    1038  1329806984 :     if (size > set->allocChunkLimit)
    1039    20925548 :         return AllocSetAllocLarge(context, size, flags);
    1040             : 
    1041             :     /*
    1042             :      * Request is small enough to be treated as a chunk.  Look in the
    1043             :      * corresponding free list to see if there is a free chunk we could reuse.
    1044             :      * If one is found, remove it from the free list, make it again a member
    1045             :      * of the alloc set and return its data address.
    1046             :      *
    1047             :      * Note that we don't attempt to ensure there's space for the sentinel
    1048             :      * byte here.  We expect a large proportion of allocations to be for sizes
    1049             :      * which are already a power of 2.  If we were to always make space for a
    1050             :      * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
    1051             :      * doubling the memory requirements for such allocations.
    1052             :      */
    1053  1308881436 :     fidx = AllocSetFreeIndex(size);
    1054  1308881436 :     chunk = set->freelist[fidx];
    1055  1308881436 :     if (chunk != NULL)
    1056             :     {
    1057   398690912 :         AllocFreeListLink *link = GetFreeListLink(chunk);
    1058             : 
    1059             :         /* Allow access to the chunk header. */
    1060             :         VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1061             : 
    1062             :         Assert(fidx == MemoryChunkGetValue(chunk));
    1063             : 
    1064             :         /* pop this chunk off the freelist */
    1065             :         VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
    1066   398690912 :         set->freelist[fidx] = link->next;
    1067             :         VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
    1068             : 
    1069             : #ifdef MEMORY_CONTEXT_CHECKING
    1070             :         chunk->requested_size = size;
    1071             :         /* set mark to catch clobber of "unused" space */
    1072             :         if (size < GetChunkSizeFromFreeListIdx(fidx))
    1073             :             set_sentinel(MemoryChunkGetPointer(chunk), size);
    1074             : #endif
    1075             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
    1076             :         /* fill the allocated space with junk */
    1077             :         randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
    1078             : #endif
    1079             : 
    1080             :         /* Ensure any padding bytes are marked NOACCESS. */
    1081             :         VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
    1082             :                                    GetChunkSizeFromFreeListIdx(fidx) - size);
    1083             : 
    1084             :         /* Disallow access to the chunk header. */
    1085             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1086             : 
    1087   398690912 :         return MemoryChunkGetPointer(chunk);
    1088             :     }
    1089             : 
    1090             :     /*
    1091             :      * Choose the actual chunk size to allocate.
    1092             :      */
    1093   910190524 :     chunk_size = GetChunkSizeFromFreeListIdx(fidx);
    1094             :     Assert(chunk_size >= size);
    1095             : 
    1096   910190524 :     block = set->blocks;
    1097   910190524 :     availspace = block->endptr - block->freeptr;
    1098             : 
    1099             :     /*
    1100             :      * If there is enough room in the active allocation block, we will put the
    1101             :      * chunk into that block.  Else must start a new one.
    1102             :      */
    1103   910190524 :     if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
    1104    13056186 :         return AllocSetAllocFromNewBlock(context, size, flags, fidx);
    1105             : 
    1106             :     /* There's enough space on the current block, so allocate from that */
    1107   897134338 :     return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
    1108             : }
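
A minimal sketch of the size-to-freelist mapping that AllocSetAlloc depends on, assuming the smallest chunk is 1 << 3 = 8 bytes; the loop-based free_index() helper here is purely illustrative, since the backend computes this with bit operations inside AllocSetFreeIndex():

#include <stdio.h>
#include <stddef.h>

#define MINBITS 3                      /* smallest chunk: 1 << 3 = 8 bytes */

/* Freelist i holds chunks of size 1 << (MINBITS + i). */
static int
free_index(size_t size)
{
    int         idx = 0;

    while (((size_t) 1 << (MINBITS + idx)) < size)
        idx++;                         /* next power-of-2 size class */
    return idx;
}

int
main(void)
{
    size_t      sizes[] = {1, 8, 9, 16, 33, 64, 100};

    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
    {
        int         idx = free_index(sizes[i]);

        printf("request %3zu -> freelist %d (chunk size %zu)\n",
               sizes[i], idx, (size_t) 1 << (MINBITS + idx));
    }
    return 0;
}

The same mapping explains the adjustment in AllocSetAllocFromNewBlock above: when the leftover space being carved up is not exactly a power of 2, this rounding-up behaviour returns the next larger class, so the index must be decremented to obtain a chunk that actually fits.
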
    1109             : 
    1110             : /*
    1111             :  * AllocSetFree
    1112             :  *      Frees allocated memory; memory is removed from the set.
    1113             :  */
    1114             : void
    1115   491842106 : AllocSetFree(void *pointer)
    1116             : {
    1117             :     AllocSet    set;
    1118   491842106 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
    1119             : 
    1120             :     /* Allow access to the chunk header. */
    1121             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1122             : 
    1123   491842106 :     if (MemoryChunkIsExternal(chunk))
    1124             :     {
    1125             :         /* Release single-chunk block. */
    1126    19551110 :         AllocBlock  block = ExternalChunkGetBlock(chunk);
    1127             : 
    1128             :         /*
    1129             :          * Try to verify that we have a sane block pointer: the block header
    1130             :          * should reference an aset and the freeptr should match the endptr.
    1131             :          */
    1132    19551110 :         if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
    1133           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
    1134             : 
    1135    19551110 :         set = block->aset;
    1136             : 
    1137             : #ifdef MEMORY_CONTEXT_CHECKING
    1138             :         {
    1139             :             /* Test for someone scribbling on unused space in chunk */
    1140             :             Assert(chunk->requested_size < (block->endptr - (char *) pointer));
    1141             :             if (!sentinel_ok(pointer, chunk->requested_size))
    1142             :                 elog(WARNING, "detected write past chunk end in %s %p",
    1143             :                      set->header.name, chunk);
    1144             :         }
    1145             : #endif
    1146             : 
    1147             :         /* OK, remove block from aset's list and free it */
    1148    19551110 :         if (block->prev)
    1149    19551110 :             block->prev->next = block->next;
    1150             :         else
    1151           0 :             set->blocks = block->next;
    1152    19551110 :         if (block->next)
    1153    15239118 :             block->next->prev = block->prev;
    1154             : 
    1155    19551110 :         set->header.mem_allocated -= block->endptr - ((char *) block);
    1156             : 
    1157             : #ifdef CLOBBER_FREED_MEMORY
    1158             :         wipe_mem(block, block->freeptr - ((char *) block));
    1159             : #endif
    1160             : 
    1161             :         /* As in AllocSetReset, free block-header vchunks explicitly */
    1162             :         VALGRIND_MEMPOOL_FREE(set, block);
    1163             : 
    1164    19551110 :         free(block);
    1165             :     }
    1166             :     else
    1167             :     {
    1168   472290996 :         AllocBlock  block = MemoryChunkGetBlock(chunk);
    1169             :         int         fidx;
    1170             :         AllocFreeListLink *link;
    1171             : 
    1172             :         /*
    1173             :          * In this path, for speed reasons we just Assert that the referenced
    1174             :          * block is good.  We can also Assert that the value field is sane.
    1175             :          * Future field experience may show that these Asserts had better
    1176             :          * become regular runtime test-and-elog checks.
    1177             :          */
    1178             :         Assert(AllocBlockIsValid(block));
    1179   472290996 :         set = block->aset;
    1180             : 
    1181   472290996 :         fidx = MemoryChunkGetValue(chunk);
    1182             :         Assert(FreeListIdxIsValid(fidx));
    1183   472290996 :         link = GetFreeListLink(chunk);
    1184             : 
    1185             : #ifdef MEMORY_CONTEXT_CHECKING
    1186             :         /* Test for someone scribbling on unused space in chunk */
    1187             :         if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
    1188             :             if (!sentinel_ok(pointer, chunk->requested_size))
    1189             :                 elog(WARNING, "detected write past chunk end in %s %p",
    1190             :                      set->header.name, chunk);
    1191             : #endif
    1192             : 
    1193             : #ifdef CLOBBER_FREED_MEMORY
    1194             :         wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
    1195             : #endif
    1196             :         /* push this chunk onto the top of the free list */
    1197             :         VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
    1198   472290996 :         link->next = set->freelist[fidx];
    1199             :         VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
    1200   472290996 :         set->freelist[fidx] = chunk;
    1201             : 
    1202             : #ifdef MEMORY_CONTEXT_CHECKING
    1203             : 
    1204             :         /*
    1205             :          * Reset requested_size to InvalidAllocSize in chunks that are on free
    1206             :          * list.
    1207             :          */
    1208             :         chunk->requested_size = InvalidAllocSize;
    1209             : #endif
    1210             :     }
    1211   491842106 : }
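
The freelist push performed here costs no extra memory because the link lives in the freed chunk's own data area.  A minimal standalone sketch of that trick, with a plain pointer and malloc()'d buffers standing in for MemoryChunk, AllocFreeListLink, and the per-size-class freelist array:

#include <stdio.h>
#include <stdlib.h>

/* The "next" pointer simply overlays the first bytes of a freed chunk. */
typedef struct FreeListLink
{
    void       *next;
} FreeListLink;

static void *freelist = NULL;          /* head of one size class */

static void
push_free(void *chunk_data)
{
    FreeListLink *link = (FreeListLink *) chunk_data;

    link->next = freelist;
    freelist = chunk_data;
}

static void *
pop_free(void)
{
    void       *chunk_data = freelist;

    if (chunk_data != NULL)
        freelist = ((FreeListLink *) chunk_data)->next;
    return chunk_data;
}

int
main(void)
{
    void       *a = malloc(64);
    void       *b = malloc(64);

    push_free(a);
    push_free(b);                      /* freelist is now b -> a */
    printf("%s\n", pop_free() == b && pop_free() == a ? "LIFO reuse" : "oops");
    free(a);
    free(b);
    return 0;
}

The VALGRIND_MAKE_MEM_DEFINED/NOACCESS pairs around the real pushes exist precisely because this overlay writes into memory that is otherwise kept NOACCESS while the chunk sits on a freelist.
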
    1212             : 
    1213             : /*
    1214             :  * AllocSetRealloc
    1215             :  *      Returns new pointer to allocated memory of given size or NULL if
    1216             :  *      request could not be completed; this memory is added to the set.
    1217             :  *      Memory associated with given pointer is copied into the new memory,
    1218             :  *      and the old memory is freed.
    1219             :  *
    1220             :  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
    1221             :  * makes our Valgrind client requests less-precise, hazarding false negatives.
    1222             :  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
    1223             :  * request size.)
    1224             :  */
    1225             : void *
    1226     9043732 : AllocSetRealloc(void *pointer, Size size, int flags)
    1227             : {
    1228             :     AllocBlock  block;
    1229             :     AllocSet    set;
    1230     9043732 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
    1231             :     Size        oldchksize;
    1232             :     int         fidx;
    1233             : 
    1234             :     /* Allow access to the chunk header. */
    1235             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1236             : 
    1237     9043732 :     if (MemoryChunkIsExternal(chunk))
    1238             :     {
    1239             :         /*
    1240             :          * The chunk must have been allocated as a single-chunk block.  Use
    1241             :          * realloc() to make the containing block bigger, or smaller, with
    1242             :          * minimum space wastage.
    1243             :          */
    1244             :         AllocBlock  newblock;
    1245             :         Size        chksize;
    1246             :         Size        blksize;
    1247             :         Size        oldblksize;
    1248             : 
    1249      262630 :         block = ExternalChunkGetBlock(chunk);
    1250             : 
    1251             :         /*
    1252             :          * Try to verify that we have a sane block pointer: the block header
    1253             :          * should reference an aset and the freeptr should match the endptr.
    1254             :          */
    1255      262630 :         if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
    1256           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
    1257             : 
    1258      262630 :         set = block->aset;
    1259             : 
    1260             :         /* only check size in paths where the limits could be hit */
    1261      262630 :         MemoryContextCheckSize((MemoryContext) set, size, flags);
    1262             : 
    1263      262630 :         oldchksize = block->endptr - (char *) pointer;
    1264             : 
    1265             : #ifdef MEMORY_CONTEXT_CHECKING
    1266             :         /* Test for someone scribbling on unused space in chunk */
    1267             :         Assert(chunk->requested_size < oldchksize);
    1268             :         if (!sentinel_ok(pointer, chunk->requested_size))
    1269             :             elog(WARNING, "detected write past chunk end in %s %p",
    1270             :                  set->header.name, chunk);
    1271             : #endif
    1272             : 
    1273             : #ifdef MEMORY_CONTEXT_CHECKING
    1274             :         /* ensure there's always space for the sentinel byte */
    1275             :         chksize = MAXALIGN(size + 1);
    1276             : #else
    1277      262630 :         chksize = MAXALIGN(size);
    1278             : #endif
    1279             : 
    1280             :         /* Do the realloc */
    1281      262630 :         blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
    1282      262630 :         oldblksize = block->endptr - ((char *) block);
    1283             : 
    1284      262630 :         newblock = (AllocBlock) realloc(block, blksize);
    1285      262630 :         if (newblock == NULL)
    1286             :         {
    1287             :             /* Disallow access to the chunk header. */
    1288             :             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1289           0 :             return MemoryContextAllocationFailure(&set->header, size, flags);
    1290             :         }
    1291             : 
    1292             :         /*
    1293             :          * Move the block-header vchunk explicitly.  (mcxt.c will take care of
    1294             :          * moving the vchunk for the user data.)
    1295             :          */
    1296             :         VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
    1297      262630 :         block = newblock;
    1298             : 
     1299             :     /* updated separately, so as not to underflow when (oldblksize > blksize) */
    1300      262630 :         set->header.mem_allocated -= oldblksize;
    1301      262630 :         set->header.mem_allocated += blksize;
    1302             : 
    1303      262630 :         block->freeptr = block->endptr = ((char *) block) + blksize;
    1304             : 
    1305             :         /* Update pointers since block has likely been moved */
    1306      262630 :         chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
    1307      262630 :         pointer = MemoryChunkGetPointer(chunk);
    1308      262630 :         if (block->prev)
    1309      262630 :             block->prev->next = block;
    1310             :         else
    1311           0 :             set->blocks = block;
    1312      262630 :         if (block->next)
    1313      237754 :             block->next->prev = block;
    1314             : 
    1315             : #ifdef MEMORY_CONTEXT_CHECKING
    1316             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
    1317             : 
    1318             :         /*
    1319             :          * We can only randomize the extra space if we know the prior request.
    1320             :          * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
    1321             :          */
    1322             :         if (size > chunk->requested_size)
    1323             :             randomize_mem((char *) pointer + chunk->requested_size,
    1324             :                           size - chunk->requested_size);
    1325             : #else
    1326             : 
    1327             :         /*
    1328             :          * If this is an increase, realloc() will have marked any
    1329             :          * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
    1330             :          * also need to adjust trailing bytes from the old allocation (from
    1331             :          * chunk->requested_size to oldchksize) as they are marked NOACCESS.
    1332             :          * Make sure not to mark too many bytes in case chunk->requested_size
    1333             :          * < size < oldchksize.
    1334             :          */
    1335             : #ifdef USE_VALGRIND
    1336             :         if (Min(size, oldchksize) > chunk->requested_size)
    1337             :             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
    1338             :                                         Min(size, oldchksize) - chunk->requested_size);
    1339             : #endif
    1340             : #endif
    1341             : 
    1342             :         chunk->requested_size = size;
    1343             :         /* set mark to catch clobber of "unused" space */
    1344             :         Assert(size < chksize);
    1345             :         set_sentinel(pointer, size);
    1346             : #else                           /* !MEMORY_CONTEXT_CHECKING */
    1347             : 
    1348             :         /*
    1349             :          * We may need to adjust marking of bytes from the old allocation as
    1350             :          * some of them may be marked NOACCESS.  We don't know how much of the
    1351             :          * old chunk size was the requested size; it could have been as small
    1352             :          * as one byte.  We have to be conservative and just mark the entire
    1353             :          * old portion DEFINED.  Make sure not to mark memory beyond the new
    1354             :          * allocation in case it's smaller than the old one.
    1355             :          */
    1356             :         VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
    1357             : #endif
    1358             : 
    1359             :         /* Ensure any padding bytes are marked NOACCESS. */
    1360             :         VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
    1361             : 
    1362             :         /* Disallow access to the chunk header. */
    1363             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1364             : 
    1365      262630 :         return pointer;
    1366             :     }
    1367             : 
    1368     8781102 :     block = MemoryChunkGetBlock(chunk);
    1369             : 
    1370             :     /*
    1371             :      * In this path, for speed reasons we just Assert that the referenced
    1372             :      * block is good. We can also Assert that the value field is sane. Future
    1373             :      * field experience may show that these Asserts had better become regular
    1374             :      * runtime test-and-elog checks.
    1375             :      */
    1376             :     Assert(AllocBlockIsValid(block));
    1377     8781102 :     set = block->aset;
    1378             : 
    1379     8781102 :     fidx = MemoryChunkGetValue(chunk);
    1380             :     Assert(FreeListIdxIsValid(fidx));
    1381     8781102 :     oldchksize = GetChunkSizeFromFreeListIdx(fidx);
    1382             : 
    1383             : #ifdef MEMORY_CONTEXT_CHECKING
    1384             :     /* Test for someone scribbling on unused space in chunk */
    1385             :     if (chunk->requested_size < oldchksize)
    1386             :         if (!sentinel_ok(pointer, chunk->requested_size))
    1387             :             elog(WARNING, "detected write past chunk end in %s %p",
    1388             :                  set->header.name, chunk);
    1389             : #endif
    1390             : 
    1391             :     /*
     1392             :      * Chunk sizes are aligned to a power of 2 in AllocSetAlloc(), so the
     1393             :      * allocated area may already be >= the new size.  (In particular, we will
    1394             :      * fall out here if the requested size is a decrease.)
    1395             :      */
    1396     8781102 :     if (oldchksize >= size)
    1397             :     {
    1398             : #ifdef MEMORY_CONTEXT_CHECKING
    1399             :         Size        oldrequest = chunk->requested_size;
    1400             : 
    1401             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
    1402             :         /* We can only fill the extra space if we know the prior request */
    1403             :         if (size > oldrequest)
    1404             :             randomize_mem((char *) pointer + oldrequest,
    1405             :                           size - oldrequest);
    1406             : #endif
    1407             : 
    1408             :         chunk->requested_size = size;
    1409             : 
    1410             :         /*
    1411             :          * If this is an increase, mark any newly-available part UNDEFINED.
    1412             :          * Otherwise, mark the obsolete part NOACCESS.
    1413             :          */
    1414             :         if (size > oldrequest)
    1415             :             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
    1416             :                                         size - oldrequest);
    1417             :         else
    1418             :             VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
    1419             :                                        oldchksize - size);
    1420             : 
    1421             :         /* set mark to catch clobber of "unused" space */
    1422             :         if (size < oldchksize)
    1423             :             set_sentinel(pointer, size);
    1424             : #else                           /* !MEMORY_CONTEXT_CHECKING */
    1425             : 
    1426             :         /*
    1427             :          * We don't have the information to determine whether we're growing
    1428             :          * the old request or shrinking it, so we conservatively mark the
    1429             :          * entire new allocation DEFINED.
    1430             :          */
    1431             :         VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
    1432             :         VALGRIND_MAKE_MEM_DEFINED(pointer, size);
    1433             : #endif
    1434             : 
    1435             :         /* Disallow access to the chunk header. */
    1436             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1437             : 
    1438     2580584 :         return pointer;
    1439             :     }
    1440             :     else
    1441             :     {
    1442             :         /*
    1443             :          * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
    1444             :          * allocate a new chunk and copy the data.  Since we know the existing
    1445             :          * data isn't huge, this won't involve any great memcpy expense, so
    1446             :          * it's not worth being smarter.  (At one time we tried to avoid
    1447             :          * memcpy when it was possible to enlarge the chunk in-place, but that
    1448             :          * turns out to misbehave unpleasantly for repeated cycles of
    1449             :          * palloc/repalloc/pfree: the eventually freed chunks go into the
    1450             :          * wrong freelist for the next initial palloc request, and so we leak
    1451             :          * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
    1452             :          */
    1453             :         AllocPointer newPointer;
    1454             :         Size        oldsize;
    1455             : 
    1456             :         /* allocate new chunk (this also checks size is valid) */
    1457     6200518 :         newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
    1458             : 
    1459             :         /* leave immediately if request was not completed */
    1460     6200518 :         if (newPointer == NULL)
    1461             :         {
    1462             :             /* Disallow access to the chunk header. */
    1463             :             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1464           0 :             return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
    1465             :         }
    1466             : 
    1467             :         /*
    1468             :          * AllocSetAlloc() may have returned a region that is still NOACCESS.
    1469             :          * Change it to UNDEFINED for the moment; memcpy() will then transfer
    1470             :          * definedness from the old allocation to the new.  If we know the old
    1471             :          * allocation, copy just that much.  Otherwise, make the entire old
    1472             :          * chunk defined to avoid errors as we copy the currently-NOACCESS
    1473             :          * trailing bytes.
    1474             :          */
    1475             :         VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
    1476             : #ifdef MEMORY_CONTEXT_CHECKING
    1477             :         oldsize = chunk->requested_size;
    1478             : #else
    1479     6200518 :         oldsize = oldchksize;
    1480             :         VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
    1481             : #endif
    1482             : 
    1483             :         /* transfer existing data (certain to fit) */
    1484     6200518 :         memcpy(newPointer, pointer, oldsize);
    1485             : 
    1486             :         /* free old chunk */
    1487     6200518 :         AllocSetFree(pointer);
    1488             : 
    1489     6200518 :         return newPointer;
    1490             :     }
    1491             : }
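
A minimal sketch of the small-chunk repalloc strategy above: if the new size still fits the chunk's existing power-of-2 capacity, the chunk is reused in place; otherwise a new chunk is allocated, the old data copied, and the old chunk freed.  Here malloc()/free() stand in for AllocSetAlloc()/AllocSetFree(), and the capacity is passed in rather than decoded from a chunk header:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Grow an allocation holding oldsize bytes of data, stored in a chunk of
 * oldcapacity bytes, to newsize bytes.
 */
static void *
grow_chunk(void *old, size_t oldsize, size_t oldcapacity, size_t newsize)
{
    void       *new_chunk;

    if (oldcapacity >= newsize)
        return old;                    /* already big enough: no copy needed */

    new_chunk = malloc(newsize);       /* stand-in for AllocSetAlloc() */
    if (new_chunk == NULL)
        return NULL;
    memcpy(new_chunk, old, oldsize);   /* existing data is certain to fit */
    free(old);                         /* stand-in for AllocSetFree() */
    return new_chunk;
}

int
main(void)
{
    char       *p = malloc(16);

    strcpy(p, "hello");
    p = grow_chunk(p, 6, 16, 100);     /* 6 bytes of data in a 16-byte chunk */
    if (p != NULL)
        printf("%s\n", p);             /* the data survived the copy */
    free(p);
    return 0;
}

As the comment explains, earlier attempts to enlarge chunks in place were abandoned because repeated palloc/repalloc/pfree cycles left chunks on the wrong freelist and leaked memory.
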
    1492             : 
    1493             : /*
    1494             :  * AllocSetGetChunkContext
    1495             :  *      Return the MemoryContext that 'pointer' belongs to.
    1496             :  */
    1497             : MemoryContext
    1498     3527908 : AllocSetGetChunkContext(void *pointer)
    1499             : {
    1500     3527908 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
    1501             :     AllocBlock  block;
    1502             :     AllocSet    set;
    1503             : 
    1504             :     /* Allow access to the chunk header. */
    1505             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1506             : 
    1507     3527908 :     if (MemoryChunkIsExternal(chunk))
    1508           0 :         block = ExternalChunkGetBlock(chunk);
    1509             :     else
    1510     3527908 :         block = (AllocBlock) MemoryChunkGetBlock(chunk);
    1511             : 
    1512             :     /* Disallow access to the chunk header. */
    1513             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1514             : 
    1515             :     Assert(AllocBlockIsValid(block));
    1516     3527908 :     set = block->aset;
    1517             : 
    1518     3527908 :     return &set->header;
    1519             : }
    1520             : 
    1521             : /*
    1522             :  * AllocSetGetChunkSpace
    1523             :  *      Given a currently-allocated chunk, determine the total space
    1524             :  *      it occupies (including all memory-allocation overhead).
    1525             :  */
    1526             : Size
    1527     8784558 : AllocSetGetChunkSpace(void *pointer)
    1528             : {
    1529     8784558 :     MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
    1530             :     int         fidx;
    1531             : 
    1532             :     /* Allow access to the chunk header. */
    1533             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1534             : 
    1535     8784558 :     if (MemoryChunkIsExternal(chunk))
    1536             :     {
    1537      966316 :         AllocBlock  block = ExternalChunkGetBlock(chunk);
    1538             : 
    1539             :         /* Disallow access to the chunk header. */
    1540             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1541             : 
    1542             :         Assert(AllocBlockIsValid(block));
    1543             : 
    1544      966316 :         return block->endptr - (char *) chunk;
    1545             :     }
    1546             : 
    1547     7818242 :     fidx = MemoryChunkGetValue(chunk);
    1548             :     Assert(FreeListIdxIsValid(fidx));
    1549             : 
    1550             :     /* Disallow access to the chunk header. */
    1551             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1552             : 
    1553     7818242 :     return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
    1554             : }
    1555             : 
    1556             : /*
    1557             :  * AllocSetIsEmpty
    1558             :  *      Is an allocset empty of any allocated space?
    1559             :  */
    1560             : bool
    1561       10720 : AllocSetIsEmpty(MemoryContext context)
    1562             : {
    1563             :     Assert(AllocSetIsValid(context));
    1564             : 
    1565             :     /*
    1566             :      * For now, we say "empty" only if the context is new or just reset. We
    1567             :      * could examine the freelists to determine if all space has been freed,
    1568             :      * but it's not really worth the trouble for present uses of this
    1569             :      * functionality.
    1570             :      */
    1571       10720 :     if (context->isReset)
    1572       10696 :         return true;
    1573          24 :     return false;
    1574             : }
    1575             : 
    1576             : /*
    1577             :  * AllocSetStats
    1578             :  *      Compute stats about memory consumption of an allocset.
    1579             :  *
    1580             :  * printfunc: if not NULL, pass a human-readable stats string to this.
    1581             :  * passthru: pass this pointer through to printfunc.
    1582             :  * totals: if not NULL, add stats about this context into *totals.
    1583             :  * print_to_stderr: print stats to stderr if true, elog otherwise.
    1584             :  */
    1585             : void
    1586        4208 : AllocSetStats(MemoryContext context,
    1587             :               MemoryStatsPrintFunc printfunc, void *passthru,
    1588             :               MemoryContextCounters *totals, bool print_to_stderr)
    1589             : {
    1590        4208 :     AllocSet    set = (AllocSet) context;
    1591        4208 :     Size        nblocks = 0;
    1592        4208 :     Size        freechunks = 0;
    1593             :     Size        totalspace;
    1594        4208 :     Size        freespace = 0;
    1595             :     AllocBlock  block;
    1596             :     int         fidx;
    1597             : 
    1598             :     Assert(AllocSetIsValid(set));
    1599             : 
    1600             :     /* Include context header in totalspace */
    1601        4208 :     totalspace = MAXALIGN(sizeof(AllocSetContext));
    1602             : 
    1603       12978 :     for (block = set->blocks; block != NULL; block = block->next)
    1604             :     {
    1605        8770 :         nblocks++;
    1606        8770 :         totalspace += block->endptr - ((char *) block);
    1607        8770 :         freespace += block->endptr - block->freeptr;
    1608             :     }
    1609       50496 :     for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
    1610             :     {
    1611       46288 :         Size        chksz = GetChunkSizeFromFreeListIdx(fidx);
    1612       46288 :         MemoryChunk *chunk = set->freelist[fidx];
    1613             : 
    1614       69538 :         while (chunk != NULL)
    1615             :         {
    1616       23250 :             AllocFreeListLink *link = GetFreeListLink(chunk);
    1617             : 
    1618             :             /* Allow access to the chunk header. */
    1619             :             VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1620             :             Assert(MemoryChunkGetValue(chunk) == fidx);
    1621             :             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1622             : 
    1623       23250 :             freechunks++;
    1624       23250 :             freespace += chksz + ALLOC_CHUNKHDRSZ;
    1625             : 
    1626             :             VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
    1627       23250 :             chunk = link->next;
    1628             :             VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
    1629             :         }
    1630             :     }
    1631             : 
    1632        4208 :     if (printfunc)
    1633             :     {
    1634             :         char        stats_string[200];
    1635             : 
    1636        1620 :         snprintf(stats_string, sizeof(stats_string),
    1637             :                  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
    1638             :                  totalspace, nblocks, freespace, freechunks,
    1639             :                  totalspace - freespace);
    1640        1620 :         printfunc(context, passthru, stats_string, print_to_stderr);
    1641             :     }
    1642             : 
    1643        4208 :     if (totals)
    1644             :     {
    1645        4208 :         totals->nblocks += nblocks;
    1646        4208 :         totals->freechunks += freechunks;
    1647        4208 :         totals->totalspace += totalspace;
    1648        4208 :         totals->freespace += freespace;
    1649             :     }
    1650        4208 : }
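
The accounting above reduces to two running sums.  A compact sketch, with a simplified Block struct standing in for AllocBlockData and the freelist contribution collapsed into a single byte count:

#include <stdio.h>
#include <stddef.h>

typedef struct Block
{
    struct Block *next;
    char       *freeptr;               /* first unused byte in the block */
    char       *endptr;                /* one past the end of the block */
    char        data[1024];
} Block;

/* total = context header + whole blocks; free = unused tails + freelist chunks */
static void
aset_stats(size_t header_size, Block *blocks, size_t freelist_bytes,
           size_t *totalspace, size_t *freespace)
{
    *totalspace = header_size;
    *freespace = freelist_bytes;

    for (Block *b = blocks; b != NULL; b = b->next)
    {
        *totalspace += (size_t) (b->endptr - (char *) b);
        *freespace += (size_t) (b->endptr - b->freeptr);
    }
}

int
main(void)
{
    Block       b = {NULL, NULL, NULL, {0}};
    size_t      total,
                freesp;

    b.freeptr = b.data + 300;          /* pretend 300 bytes have been handed out */
    b.endptr = b.data + sizeof(b.data);

    aset_stats(128, &b, 48, &total, &freesp);
    printf("%zu total in 1 block; %zu free; %zu used\n",
           total, freesp, total - freesp);
    return 0;
}
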
    1651             : 
    1652             : 
    1653             : #ifdef MEMORY_CONTEXT_CHECKING
    1654             : 
    1655             : /*
    1656             :  * AllocSetCheck
    1657             :  *      Walk through chunks and check consistency of memory.
    1658             :  *
    1659             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
    1660             :  * find yourself in an infinite loop when trouble occurs, because this
    1661             :  * routine will be entered again when elog cleanup tries to release memory!
    1662             :  */
    1663             : void
    1664             : AllocSetCheck(MemoryContext context)
    1665             : {
    1666             :     AllocSet    set = (AllocSet) context;
    1667             :     const char *name = set->header.name;
    1668             :     AllocBlock  prevblock;
    1669             :     AllocBlock  block;
    1670             :     Size        total_allocated = 0;
    1671             : 
    1672             :     for (prevblock = NULL, block = set->blocks;
    1673             :          block != NULL;
    1674             :          prevblock = block, block = block->next)
    1675             :     {
    1676             :         char       *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
    1677             :         long        blk_used = block->freeptr - bpoz;
    1678             :         long        blk_data = 0;
    1679             :         long        nchunks = 0;
    1680             :         bool        has_external_chunk = false;
    1681             : 
    1682             :         if (IsKeeperBlock(set, block))
    1683             :             total_allocated += block->endptr - ((char *) set);
    1684             :         else
    1685             :             total_allocated += block->endptr - ((char *) block);
    1686             : 
     1687             :          * Empty block - only the keeper block is allowed to be empty
    1688             :          * Empty block - empty can be keeper-block only
    1689             :          */
    1690             :         if (!blk_used)
    1691             :         {
    1692             :             if (!IsKeeperBlock(set, block))
    1693             :                 elog(WARNING, "problem in alloc set %s: empty block %p",
    1694             :                      name, block);
    1695             :         }
    1696             : 
    1697             :         /*
    1698             :          * Check block header fields
    1699             :          */
    1700             :         if (block->aset != set ||
    1701             :             block->prev != prevblock ||
    1702             :             block->freeptr < bpoz ||
    1703             :             block->freeptr > block->endptr)
    1704             :             elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
    1705             :                  name, block);
    1706             : 
    1707             :         /*
    1708             :          * Chunk walker
    1709             :          */
    1710             :         while (bpoz < block->freeptr)
    1711             :         {
    1712             :             MemoryChunk *chunk = (MemoryChunk *) bpoz;
    1713             :             Size        chsize,
    1714             :                         dsize;
    1715             : 
    1716             :             /* Allow access to the chunk header. */
    1717             :             VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
    1718             : 
    1719             :             if (MemoryChunkIsExternal(chunk))
    1720             :             {
    1721             :                 chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
    1722             :                 has_external_chunk = true;
    1723             : 
    1724             :                 /* make sure this chunk consumes the entire block */
    1725             :                 if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
    1726             :                     elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
    1727             :                          name, chunk, block);
    1728             :             }
    1729             :             else
    1730             :             {
    1731             :                 int         fidx = MemoryChunkGetValue(chunk);
    1732             : 
    1733             :                 if (!FreeListIdxIsValid(fidx))
    1734             :                     elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
    1735             :                          name, chunk, block);
    1736             : 
    1737             :                 chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
    1738             : 
    1739             :                 /*
     1740             :                  * Check that the stored block offset correctly references this
    1741             :                  * block.
    1742             :                  */
    1743             :                 if (block != MemoryChunkGetBlock(chunk))
    1744             :                     elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
    1745             :                          name, chunk, block);
    1746             :             }
    1747             :             dsize = chunk->requested_size;   /* real data */
    1748             : 
    1749             :             /* an allocated chunk's requested size must be <= the chsize */
    1750             :             if (dsize != InvalidAllocSize && dsize > chsize)
    1751             :                 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
    1752             :                      name, chunk, block);
    1753             : 
    1754             :             /* chsize must not be smaller than the first freelist's size */
    1755             :             if (chsize < (1 << ALLOC_MINBITS))
    1756             :                 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
    1757             :                      name, chsize, chunk, block);
    1758             : 
    1759             :             /*
    1760             :              * Check for overwrite of padding space in an allocated chunk.
    1761             :              */
    1762             :             if (dsize != InvalidAllocSize && dsize < chsize &&
    1763             :                 !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
    1764             :                 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
    1765             :                      name, block, chunk);
    1766             : 
    1767             :             /* if chunk is allocated, disallow access to the chunk header */
    1768             :             if (dsize != InvalidAllocSize)
    1769             :                 VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
    1770             : 
    1771             :             blk_data += chsize;
    1772             :             nchunks++;
    1773             : 
    1774             :             bpoz += ALLOC_CHUNKHDRSZ + chsize;
    1775             :         }
    1776             : 
    1777             :         if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
    1778             :             elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
    1779             :                  name, block);
    1780             : 
    1781             :         if (has_external_chunk && nchunks > 1)
    1782             :             elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
    1783             :                  name, block);
    1784             :     }
    1785             : 
    1786             :     Assert(total_allocated == context->mem_allocated);
    1787             : }
    1788             : 
    1789             : #endif                          /* MEMORY_CONTEXT_CHECKING */
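
The chunk walk at the heart of AllocSetCheck works because chunks are laid out back to back: starting just past the block header, adding the chunk header size plus the chunk's size-class size lands exactly on the next header, until the block's free pointer is reached.  A minimal sketch under the simplifying assumption that the header stores the chunk size directly:

#include <stdio.h>
#include <stddef.h>

#define CHUNKHDRSZ sizeof(size_t)      /* stand-in header: just the chunk size */

/* Count the chunks laid out between start and freeptr. */
static int
walk_chunks(const char *start, const char *freeptr)
{
    int         nchunks = 0;
    const char *pos = start;

    while (pos < freeptr)
    {
        size_t      chsize = *(const size_t *) pos;    /* size from the header */

        pos += CHUNKHDRSZ + chsize;    /* step over header + data */
        nchunks++;
    }
    return nchunks;
}

int
main(void)
{
    size_t      storage[64];           /* aligned backing for a demo "block" */
    char       *pos = (char *) storage;
    size_t      sizes[] = {16, 32, 64};

    /* lay out three chunks, each preceded by a size header */
    for (int i = 0; i < 3; i++)
    {
        *(size_t *) pos = sizes[i];
        pos += CHUNKHDRSZ + sizes[i];
    }

    printf("found %d chunks\n", walk_chunks((char *) storage, pos));
    return 0;
}

Each header visited this way can then be validated (sane freelist index, requested size not larger than the chunk, intact sentinel byte) exactly as the real walker does.
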

Generated by: LCOV version 1.16