LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - aset.c (source / functions)
Test:     PostgreSQL 15devel
Date:     2021-12-05 02:08:31
Coverage: Lines: 238 of 264 hit (90.2 %)    Functions: 10 of 10 hit (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * aset.c
       4             :  *    Allocation set definitions.
       5             :  *
       6             :  * AllocSet is our standard implementation of the abstract MemoryContext
       7             :  * type.
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
      11             :  * Portions Copyright (c) 1994, Regents of the University of California
      12             :  *
      13             :  * IDENTIFICATION
      14             :  *    src/backend/utils/mmgr/aset.c
      15             :  *
      16             :  * NOTE:
      17             :  *  This is a new (Feb. 05, 1999) implementation of the allocation set
      18             :  *  routines. AllocSet...() does not use OrderedSet...() any more.
      19             :  *  Instead it manages allocations in a block pool by itself, combining
      20             :  *  many small allocations in a few bigger blocks. AllocSetFree() normally
      21             :  *  doesn't free() memory really. It just adds the free'd area to some
      22             :  *  list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
      23             :  *  at once on AllocSetReset(), which happens when the memory context gets
      24             :  *  destroyed.
      25             :  *              Jan Wieck
      26             :  *
      27             :  *  Performance improvement from Tom Lane, 8/99: for extremely large request
      28             :  *  sizes, we do want to be able to give the memory back to free() as soon
      29             :  *  as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
      30             :  *  freelist entries that might never be usable.  This is especially needed
      31             :  *  when the caller is repeatedly repalloc()'ing a block bigger and bigger;
      32             :  *  the previous instances of the block were guaranteed to be wasted until
      33             :  *  AllocSetReset() under the old way.
      34             :  *
      35             :  *  Further improvement 12/00: as the code stood, request sizes in the
      36             :  *  midrange between "small" and "large" were handled very inefficiently,
      37             :  *  because any sufficiently large free chunk would be used to satisfy a
      38             :  *  request, even if it was much larger than necessary.  This led to more
      39             :  *  and more wasted space in allocated chunks over time.  To fix, get rid
      40             :  *  of the midrange behavior: we now handle only "small" power-of-2-size
      41             :  *  chunks as chunks.  Anything "large" is passed off to malloc().  Change
      42             :  *  the number of freelists to change the small/large boundary.
      43             :  *
      44             :  *-------------------------------------------------------------------------
      45             :  */
      46             : 
      47             : #include "postgres.h"
      48             : 
      49             : #include "port/pg_bitutils.h"
      50             : #include "utils/memdebug.h"
      51             : #include "utils/memutils.h"
      52             : 
      53             : /*--------------------
      54             :  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
      55             :  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
      56             :  *
      57             :  * Note that all chunks in the freelists have power-of-2 sizes.  This
      58             :  * improves recyclability: we may waste some space, but the wasted space
      59             :  * should stay pretty constant as requests are made and released.
      60             :  *
      61             :  * A request too large for the last freelist is handled by allocating a
      62             :  * dedicated block from malloc().  The block still has a block header and
      63             :  * chunk header, but when the chunk is freed we'll return the whole block
      64             :  * to malloc(), not put it on our freelists.
      65             :  *
      66             :  * CAUTION: ALLOC_MINBITS must be large enough so that
      67             :  * 1<<ALLOC_MINBITS is at least MAXALIGN,
      68             :  * or we may fail to align the smallest chunks adequately.
      69             :  * 8-byte alignment is enough on all currently known machines.
      70             :  *
      71             :  * With the current parameters, request sizes up to 8K are treated as chunks,
      72             :  * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
      73             :  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
      74             :  * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
      75             :  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
      76             :  *--------------------
      77             :  */
      78             : 
      79             : #define ALLOC_MINBITS       3   /* smallest chunk size is 8 bytes */
      80             : #define ALLOCSET_NUM_FREELISTS  11
      81             : #define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
      82             : /* Size of largest chunk that we use a fixed size for */
      83             : #define ALLOC_CHUNK_FRACTION    4
      84             : /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
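
To make the freelist geometry concrete, here is a minimal standalone sketch (not part of aset.c) that prints the chunk size each freelist serves, reusing the two constants just defined; with ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS = 11 the sizes run from 8 bytes up to the 8K small/large boundary described in the comment above.

    /* Standalone sketch: chunk size served by each freelist, using the
     * constants defined above.  Not part of aset.c. */
    #include <stdio.h>

    #define ALLOC_MINBITS           3
    #define ALLOCSET_NUM_FREELISTS  11

    int
    main(void)
    {
        for (int k = 0; k < ALLOCSET_NUM_FREELISTS; k++)
            printf("freelist %2d serves chunks of %5d bytes\n",
                   k, 1 << (k + ALLOC_MINBITS));
        return 0;               /* last line printed is 8192 bytes (8K) */
    }
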
      85             : 
      86             : /*--------------------
      87             :  * The first block allocated for an allocset has size initBlockSize.
      88             :  * Each time we have to allocate another block, we double the block size
      89             :  * (if possible, and without exceeding maxBlockSize), so as to reduce
      90             :  * the bookkeeping load on malloc().
      91             :  *
      92             :  * Blocks allocated to hold oversize chunks do not follow this rule, however;
      93             :  * they are just however big they need to be to hold that single chunk.
      94             :  *
      95             :  * Also, if a minContextSize is specified, the first block has that size,
      96             :  * and then initBlockSize is used for the next one.
      97             :  *--------------------
      98             :  */
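
As a worked example of the doubling rule just described (a sketch; the 8 kB initBlockSize and 8 MB maxBlockSize are the ALLOCSET_DEFAULT_SIZES values from memutils.h and are assumptions here), successive regular blocks come out as 8 kB, 16 kB, 32 kB, and so on, capped at 8 MB.  Blocks holding oversize chunks bypass this sequence entirely.

    /* Standalone sketch of the block-size doubling rule; the 8 kB / 8 MB
     * figures are assumed defaults, not read from any particular build. */
    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t      initBlockSize = 8 * 1024;
        size_t      maxBlockSize = 8 * 1024 * 1024;
        size_t      nextBlockSize = initBlockSize;

        for (int n = 1; n <= 12; n++)
        {
            printf("regular block %2d: %8zu bytes\n", n, nextBlockSize);
            nextBlockSize <<= 1;
            if (nextBlockSize > maxBlockSize)
                nextBlockSize = maxBlockSize;   /* clamp, as aset.c does */
        }
        return 0;
    }
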
      99             : 
     100             : #define ALLOC_BLOCKHDRSZ    MAXALIGN(sizeof(AllocBlockData))
     101             : #define ALLOC_CHUNKHDRSZ    sizeof(struct AllocChunkData)
     102             : 
     103             : typedef struct AllocBlockData *AllocBlock;  /* forward reference */
     104             : typedef struct AllocChunkData *AllocChunk;
     105             : 
     106             : /*
     107             :  * AllocPointer
     108             :  *      Aligned pointer which may be a member of an allocation set.
     109             :  */
     110             : typedef void *AllocPointer;
     111             : 
     112             : /*
     113             :  * AllocSetContext is our standard implementation of MemoryContext.
     114             :  *
     115             :  * Note: header.isReset means there is nothing for AllocSetReset to do.
     116             :  * This is different from the aset being physically empty (empty blocks list)
     117             :  * because we will still have a keeper block.  It's also different from the set
     118             :  * being logically empty, because we don't attempt to detect pfree'ing the
     119             :  * last active chunk.
     120             :  */
     121             : typedef struct AllocSetContext
     122             : {
     123             :     MemoryContextData header;   /* Standard memory-context fields */
     124             :     /* Info about storage allocated in this context: */
     125             :     AllocBlock  blocks;         /* head of list of blocks in this set */
     126             :     AllocChunk  freelist[ALLOCSET_NUM_FREELISTS];   /* free chunk lists */
     127             :     /* Allocation parameters for this context: */
     128             :     Size        initBlockSize;  /* initial block size */
     129             :     Size        maxBlockSize;   /* maximum block size */
     130             :     Size        nextBlockSize;  /* next block size to allocate */
     131             :     Size        allocChunkLimit;    /* effective chunk size limit */
     132             :     AllocBlock  keeper;         /* keep this block over resets */
     133             :     /* freelist this context could be put in, or -1 if not a candidate: */
     134             :     int         freeListIndex;  /* index in context_freelists[], or -1 */
     135             : } AllocSetContext;
     136             : 
     137             : typedef AllocSetContext *AllocSet;
     138             : 
     139             : /*
     140             :  * AllocBlock
     141             :  *      An AllocBlock is the unit of memory that is obtained by aset.c
     142             :  *      from malloc().  It contains one or more AllocChunks, which are
     143             :  *      the units requested by palloc() and freed by pfree().  AllocChunks
     144             :  *      cannot be returned to malloc() individually; instead they are put
     145             :  *      on freelists by pfree() and re-used by the next palloc() that has
     146             :  *      a matching request size.
     147             :  *
     148             :  *      AllocBlockData is the header data for a block --- the usable space
     149             :  *      within the block begins at the next alignment boundary.
     150             :  */
     151             : typedef struct AllocBlockData
     152             : {
     153             :     AllocSet    aset;           /* aset that owns this block */
     154             :     AllocBlock  prev;           /* prev block in aset's blocks list, if any */
     155             :     AllocBlock  next;           /* next block in aset's blocks list, if any */
     156             :     char       *freeptr;        /* start of free space in this block */
     157             :     char       *endptr;         /* end of space in this block */
     158             : }           AllocBlockData;
     159             : 
     160             : /*
     161             :  * AllocChunk
     162             :  *      The prefix of each piece of memory in an AllocBlock
     163             :  *
     164             :  * Note: to meet the memory context APIs, the payload area of the chunk must
     165             :  * be maxaligned, and the "aset" link must be immediately adjacent to the
     166             :  * payload area (cf. GetMemoryChunkContext).  We simplify matters for this
     167             :  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
     168             :  * we can ensure things work by adding any required alignment padding before
     169             :  * the "aset" field.  There is a static assertion below that the alignment
     170             :  * is done correctly.
     171             :  */
     172             : typedef struct AllocChunkData
     173             : {
     174             :     /* size is always the size of the usable space in the chunk */
     175             :     Size        size;
     176             : #ifdef MEMORY_CONTEXT_CHECKING
     177             :     /* when debugging memory usage, also store actual requested size */
     178             :     /* this is zero in a free chunk */
     179             :     Size        requested_size;
     180             : 
     181             : #define ALLOCCHUNK_RAWSIZE  (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
     182             : #else
     183             : #define ALLOCCHUNK_RAWSIZE  (SIZEOF_SIZE_T + SIZEOF_VOID_P)
     184             : #endif                          /* MEMORY_CONTEXT_CHECKING */
     185             : 
     186             :     /* ensure proper alignment by adding padding if needed */
     187             : #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
     188             :     char        padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
     189             : #endif
     190             : 
     191             :     /* aset is the owning aset if allocated, or the freelist link if free */
     192             :     void       *aset;
     193             :     /* there must not be any padding to reach a MAXALIGN boundary here! */
     194             : }           AllocChunkData;
     195             : 
     196             : /*
     197             :  * Only the "aset" field should be accessed outside this module.
     198             :  * We keep the rest of an allocated chunk's header marked NOACCESS when using
     199             :  * valgrind.  But note that chunk headers that are in a freelist are kept
     200             :  * accessible, for simplicity.
     201             :  */
     202             : #define ALLOCCHUNK_PRIVATE_LEN  offsetof(AllocChunkData, aset)
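
To illustrate the padding arithmetic described above, a small standalone check; the 64-bit values SIZEOF_SIZE_T = SIZEOF_VOID_P = MAXIMUM_ALIGNOF = 8 are assumptions, not taken from any particular build.

    /* Standalone check of the ALLOCCHUNK_RAWSIZE padding rule; all sizes here
     * are assumed 64-bit values, not read from a real build. */
    #include <stdio.h>

    int
    main(void)
    {
        int         sizeof_size_t = 8;
        int         sizeof_void_p = 8;
        int         maxalign = 8;
        int         raw_checking = sizeof_size_t * 2 + sizeof_void_p;  /* with MEMORY_CONTEXT_CHECKING */
        int         raw_plain = sizeof_size_t + sizeof_void_p;         /* without */

        printf("checking build: raw %d, padding %d\n", raw_checking,
               raw_checking % maxalign ? maxalign - raw_checking % maxalign : 0);
        printf("plain build:    raw %d, padding %d\n", raw_plain,
               raw_plain % maxalign ? maxalign - raw_plain % maxalign : 0);
        return 0;               /* both print padding 0 on this assumed layout */
    }
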
     203             : 
     204             : /*
     205             :  * AllocPointerIsValid
     206             :  *      True iff pointer is valid allocation pointer.
     207             :  */
     208             : #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
     209             : 
     210             : /*
     211             :  * AllocSetIsValid
     212             :  *      True iff set is valid allocation set.
     213             :  */
     214             : #define AllocSetIsValid(set) PointerIsValid(set)
     215             : 
     216             : #define AllocPointerGetChunk(ptr)   \
     217             :                     ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
     218             : #define AllocChunkGetPointer(chk)   \
     219             :                     ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
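
The two macros above are inverses of each other; a simplified round-trip sketch follows (FakeChunk is a stand-in for AllocChunkData, not the real header type).

    /* Simplified round trip for the chunk/pointer macros above.  FakeChunk is
     * a made-up stand-in header type, not the real AllocChunkData. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct FakeChunk
    {
        size_t      size;
        void       *aset;
    } FakeChunk;

    #define HDRSZ sizeof(FakeChunk)

    int
    main(void)
    {
        char       *block = malloc(HDRSZ + 64);
        FakeChunk  *chunk = (FakeChunk *) block;
        void       *ptr = (char *) chunk + HDRSZ;               /* cf. AllocChunkGetPointer */
        FakeChunk  *back = (FakeChunk *) ((char *) ptr - HDRSZ); /* cf. AllocPointerGetChunk */

        assert(back == chunk);
        printf("user pointer sits %zu bytes past the chunk header\n", HDRSZ);
        free(block);
        return 0;
    }
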
     220             : 
     221             : /*
     222             :  * Rather than repeatedly creating and deleting memory contexts, we keep some
     223             :  * freed contexts in freelists so that we can hand them out again with little
     224             :  * work.  Before putting a context in a freelist, we reset it so that it has
     225             :  * only its initial malloc chunk and no others.  To be a candidate for a
     226             :  * freelist, a context must have the same minContextSize/initBlockSize as
     227             :  * other contexts in the list; but its maxBlockSize is irrelevant since that
     228             :  * doesn't affect the size of the initial chunk.
     229             :  *
     230             :  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
     231             :  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
     232             :  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
     233             :  *
     234             :  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
     235             :  * hopes of improving locality of reference.  But if there get to be too
     236             :  * many contexts in the list, we'd prefer to drop the most-recently-created
     237             :  * contexts in hopes of keeping the process memory map compact.
     238             :  * We approximate that by simply deleting all existing entries when the list
     239             :  * overflows, on the assumption that queries that allocate a lot of contexts
     240             :  * will probably free them in more or less reverse order of allocation.
     241             :  *
     242             :  * Contexts in a freelist are chained via their nextchild pointers.
     243             :  */
     244             : #define MAX_FREE_CONTEXTS 100   /* arbitrary limit on freelist length */
     245             : 
     246             : typedef struct AllocSetFreeList
     247             : {
     248             :     int         num_free;       /* current list length */
     249             :     AllocSetContext *first_free;    /* list header */
     250             : } AllocSetFreeList;
     251             : 
     252             : /* context_freelists[0] is for default params, [1] for small params */
     253             : static AllocSetFreeList context_freelists[2] =
     254             : {
     255             :     {
     256             :         0, NULL
     257             :     },
     258             :     {
     259             :         0, NULL
     260             :     }
     261             : };
     262             : 
     263             : /*
     264             :  * These functions implement the MemoryContext API for AllocSet contexts.
     265             :  */
     266             : static void *AllocSetAlloc(MemoryContext context, Size size);
     267             : static void AllocSetFree(MemoryContext context, void *pointer);
     268             : static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
     269             : static void AllocSetReset(MemoryContext context);
     270             : static void AllocSetDelete(MemoryContext context);
     271             : static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
     272             : static bool AllocSetIsEmpty(MemoryContext context);
     273             : static void AllocSetStats(MemoryContext context,
     274             :                           MemoryStatsPrintFunc printfunc, void *passthru,
     275             :                           MemoryContextCounters *totals,
     276             :                           bool print_to_stderr);
     277             : 
     278             : #ifdef MEMORY_CONTEXT_CHECKING
     279             : static void AllocSetCheck(MemoryContext context);
     280             : #endif
     281             : 
     282             : /*
     283             :  * This is the virtual function table for AllocSet contexts.
     284             :  */
     285             : static const MemoryContextMethods AllocSetMethods = {
     286             :     AllocSetAlloc,
     287             :     AllocSetFree,
     288             :     AllocSetRealloc,
     289             :     AllocSetReset,
     290             :     AllocSetDelete,
     291             :     AllocSetGetChunkSpace,
     292             :     AllocSetIsEmpty,
     293             :     AllocSetStats
     294             : #ifdef MEMORY_CONTEXT_CHECKING
     295             :     ,AllocSetCheck
     296             : #endif
     297             : };
     298             : 
     299             : 
     300             : /* ----------
     301             :  * AllocSetFreeIndex -
     302             :  *
     303             :  *      Depending on the size of an allocation, compute which freechunk
     304             :  *      list of the alloc set it belongs to.  Caller must have verified
     305             :  *      that size <= ALLOC_CHUNK_LIMIT.
     306             :  * ----------
     307             :  */
     308             : static inline int
     309  1835592910 : AllocSetFreeIndex(Size size)
     310             : {
     311             :     int         idx;
     312             : 
     313  1835592910 :     if (size > (1 << ALLOC_MINBITS))
     314             :     {
     315             :         /*----------
     316             :          * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
     317             :          * This is the same as
     318             :          *      pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
     319             :          * or equivalently
     320             :          *      pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
     321             :          *
     322             :          * However, rather than just calling that function, we duplicate the
     323             :          * logic here, allowing an additional optimization.  It's reasonable
     324             :          * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
     325             :          * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
     326             :          * the last two bytes.
     327             :          *
     328             :          * Yes, this function is enough of a hot-spot to make it worth this
     329             :          * much trouble.
     330             :          *----------
     331             :          */
     332             : #ifdef HAVE__BUILTIN_CLZ
     333  1635384896 :         idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
     334             : #else
     335             :         uint32      t,
     336             :                     tsize;
     337             : 
     338             :         /* Statically assert that we only have a 16-bit input value. */
     339             :         StaticAssertStmt(ALLOC_CHUNK_LIMIT < (1 << 16),
     340             :                          "ALLOC_CHUNK_LIMIT must be less than 64kB");
     341             : 
     342             :         tsize = size - 1;
     343             :         t = tsize >> 8;
     344             :         idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
     345             :         idx -= ALLOC_MINBITS - 1;
     346             : #endif
     347             : 
     348             :         Assert(idx < ALLOCSET_NUM_FREELISTS);
     349             :     }
     350             :     else
     351   200208014 :         idx = 0;
     352             : 
     353  1835592910 :     return idx;
     354             : }
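
A standalone sketch of the mapping this function computes (mirroring the portable, non-__builtin_clz branch, with ALLOC_MINBITS = 3 assumed): requests of 1-8 bytes land on freelist 0, 9-16 on freelist 1, and so on up to 4097-8192 on freelist 10.

    /* Standalone sketch of the size -> freelist mapping; ALLOC_MINBITS is
     * assumed to be 3, as defined earlier in this file. */
    #include <stdio.h>
    #include <stddef.h>

    #define ALLOC_MINBITS 3

    static int
    free_index(size_t size)
    {
        int         idx = 0;

        if (size > (1 << ALLOC_MINBITS))
        {
            /* ceil(log2(size >> ALLOC_MINBITS)), by counting shifts */
            size_t      t = (size - 1) >> ALLOC_MINBITS;

            while (t > 0)
            {
                idx++;
                t >>= 1;
            }
        }
        return idx;
    }

    int
    main(void)
    {
        size_t      probes[] = {1, 8, 9, 16, 17, 100, 4096, 4097, 8192};

        for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
        {
            int         idx = free_index(probes[i]);

            printf("request %5zu bytes -> freelist %2d (chunk %5d bytes)\n",
                   probes[i], idx, 1 << (idx + ALLOC_MINBITS));
        }
        return 0;
    }
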
     355             : 
     356             : 
     357             : /*
     358             :  * Public routines
     359             :  */
     360             : 
     361             : 
     362             : /*
     363             :  * AllocSetContextCreateInternal
     364             :  *      Create a new AllocSet context.
     365             :  *
     366             :  * parent: parent context, or NULL if top-level context
     367             :  * name: name of context (must be statically allocated)
     368             :  * minContextSize: minimum context size
     369             :  * initBlockSize: initial allocation block size
     370             :  * maxBlockSize: maximum allocation block size
     371             :  *
     372             :  * Most callers should abstract the context size parameters using a macro
     373             :  * such as ALLOCSET_DEFAULT_SIZES.
     374             :  *
     375             :  * Note: don't call this directly; go through the wrapper macro
     376             :  * AllocSetContextCreate.
     377             :  */
     378             : MemoryContext
     379    17568330 : AllocSetContextCreateInternal(MemoryContext parent,
     380             :                               const char *name,
     381             :                               Size minContextSize,
     382             :                               Size initBlockSize,
     383             :                               Size maxBlockSize)
     384             : {
     385             :     int         freeListIndex;
     386             :     Size        firstBlockSize;
     387             :     AllocSet    set;
     388             :     AllocBlock  block;
     389             : 
     390             :     /* Assert we padded AllocChunkData properly */
     391             :     StaticAssertStmt(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
     392             :                      "sizeof(AllocChunkData) is not maxaligned");
     393             :     StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
     394             :                      ALLOC_CHUNKHDRSZ,
     395             :                      "padding calculation in AllocChunkData is wrong");
     396             : 
     397             :     /*
     398             :      * First, validate allocation parameters.  Once these were regular runtime
     399             :      * tests and elog's, but in practice Asserts seem sufficient because nobody
     400             :      * varies their parameters at runtime.  We somewhat arbitrarily enforce a
     401             :      * minimum 1K block size.
     402             :      */
     403             :     Assert(initBlockSize == MAXALIGN(initBlockSize) &&
     404             :            initBlockSize >= 1024);
     405             :     Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
     406             :            maxBlockSize >= initBlockSize &&
     407             :            AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
     408             :     Assert(minContextSize == 0 ||
     409             :            (minContextSize == MAXALIGN(minContextSize) &&
     410             :             minContextSize >= 1024 &&
     411             :             minContextSize <= maxBlockSize));
     412             : 
     413             :     /*
     414             :      * Check whether the parameters match either available freelist.  We do
     415             :      * not need to demand a match of maxBlockSize.
     416             :      */
     417    17568330 :     if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
     418             :         initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
     419    13186716 :         freeListIndex = 0;
     420     4381614 :     else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
     421             :              initBlockSize == ALLOCSET_SMALL_INITSIZE)
     422     4361594 :         freeListIndex = 1;
     423             :     else
     424       20020 :         freeListIndex = -1;
     425             : 
     426             :     /*
     427             :      * If a suitable freelist entry exists, just recycle that context.
     428             :      */
     429    17568330 :     if (freeListIndex >= 0)
     430             :     {
     431    17548310 :         AllocSetFreeList *freelist = &context_freelists[freeListIndex];
     432             : 
     433    17548310 :         if (freelist->first_free != NULL)
     434             :         {
     435             :             /* Remove entry from freelist */
     436    15740064 :             set = freelist->first_free;
     437    15740064 :             freelist->first_free = (AllocSet) set->header.nextchild;
     438    15740064 :             freelist->num_free--;
     439             : 
     440             :             /* Update its maxBlockSize; everything else should be OK */
     441    15740064 :             set->maxBlockSize = maxBlockSize;
     442             : 
     443             :             /* Reinitialize its header, installing correct name and parent */
     444    15740064 :             MemoryContextCreate((MemoryContext) set,
     445             :                                 T_AllocSetContext,
     446             :                                 &AllocSetMethods,
     447             :                                 parent,
     448             :                                 name);
     449             : 
     450    15740064 :             ((MemoryContext) set)->mem_allocated =
     451    15740064 :                 set->keeper->endptr - ((char *) set);
     452             : 
     453    15740064 :             return (MemoryContext) set;
     454             :         }
     455             :     }
     456             : 
     457             :     /* Determine size of initial block */
     458     1828266 :     firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
     459             :         ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     460     1828266 :     if (minContextSize != 0)
     461       20020 :         firstBlockSize = Max(firstBlockSize, minContextSize);
     462             :     else
     463     1808246 :         firstBlockSize = Max(firstBlockSize, initBlockSize);
     464             : 
     465             :     /*
     466             :      * Allocate the initial block.  Unlike other aset.c blocks, it starts with
     467             :      * the context header and its block header follows that.
     468             :      */
     469     1828266 :     set = (AllocSet) malloc(firstBlockSize);
     470     1828266 :     if (set == NULL)
     471             :     {
     472           0 :         if (TopMemoryContext)
     473           0 :             MemoryContextStats(TopMemoryContext);
     474           0 :         ereport(ERROR,
     475             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     476             :                  errmsg("out of memory"),
     477             :                  errdetail("Failed while creating memory context \"%s\".",
     478             :                            name)));
     479             :     }
     480             : 
     481             :     /*
     482             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     483             :      * we'd leak the header/initial block if we ereport in this stretch.
     484             :      */
     485             : 
     486             :     /* Fill in the initial block's block header */
     487     1828266 :     block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
     488     1828266 :     block->aset = set;
     489     1828266 :     block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
     490     1828266 :     block->endptr = ((char *) set) + firstBlockSize;
     491     1828266 :     block->prev = NULL;
     492     1828266 :     block->next = NULL;
     493             : 
     494             :     /* Mark unallocated space NOACCESS; leave the block header alone. */
     495             :     VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
     496             : 
     497             :     /* Remember block as part of block list */
     498     1828266 :     set->blocks = block;
     499             :     /* Mark block as not to be released at reset time */
     500     1828266 :     set->keeper = block;
     501             : 
     502             :     /* Finish filling in aset-specific parts of the context header */
     503    21939192 :     MemSetAligned(set->freelist, 0, sizeof(set->freelist));
     504             : 
     505     1828266 :     set->initBlockSize = initBlockSize;
     506     1828266 :     set->maxBlockSize = maxBlockSize;
     507     1828266 :     set->nextBlockSize = initBlockSize;
     508     1828266 :     set->freeListIndex = freeListIndex;
     509             : 
     510             :     /*
     511             :      * Compute the allocation chunk size limit for this context.  It can't be
     512             :      * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
     513             :      * If maxBlockSize is small then requests exceeding the maxBlockSize, or
     514             :      * even a significant fraction of it, should be treated as large chunks
     515             :      * too.  For the typical case of maxBlockSize a power of 2, the chunk size
     516             :      * limit will be at most 1/8th maxBlockSize, so that given a stream of
     517             :      * requests that are all the maximum chunk size we will waste at most
     518             :      * 1/8th of the allocated space.
     519             :      *
     520             :      * We have to have allocChunkLimit a power of two, because the requested
     521             :      * and actually-allocated sizes of any chunk must be on the same side of
     522             :      * the limit, else we get confused about whether the chunk is "big".
     523             :      *
     524             :      * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
     525             :      */
     526             :     StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
     527             :                      "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
     528             : 
     529     1828266 :     set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
     530     1828266 :     while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
     531     5902248 :            (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
     532     4073982 :         set->allocChunkLimit >>= 1;
     533             : 
     534             :     /* Finally, do the type-independent part of context creation */
     535     1828266 :     MemoryContextCreate((MemoryContext) set,
     536             :                         T_AllocSetContext,
     537             :                         &AllocSetMethods,
     538             :                         parent,
     539             :                         name);
     540             : 
     541     1828266 :     ((MemoryContext) set)->mem_allocated = firstBlockSize;
     542             : 
     543     1828266 :     return (MemoryContext) set;
     544             : }
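
For context, this is how backend code typically reaches this function, via the public memory-context API (a usage sketch; names other than the API calls are made up, and error handling is omitted).

    /* Usage sketch: how callers normally create and use an AllocSet context.
     * example_use_of_aset and "example context" are made-up names. */
    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    example_use_of_aset(void)
    {
        MemoryContext mycxt;
        MemoryContext oldcxt;
        char       *buf;

        /* Goes through the AllocSetContextCreate wrapper macro */
        mycxt = AllocSetContextCreate(CurrentMemoryContext,
                                      "example context",
                                      ALLOCSET_DEFAULT_SIZES);

        oldcxt = MemoryContextSwitchTo(mycxt);
        buf = palloc(100);          /* dispatches to AllocSetAlloc() below */
        /* ... use buf ... */
        pfree(buf);                 /* AllocSetFree() recycles it onto a freelist */
        MemoryContextSwitchTo(oldcxt);

        /* Frees everything; the context itself may land on context_freelists[] */
        MemoryContextDelete(mycxt);
    }
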
     545             : 
     546             : /*
     547             :  * AllocSetReset
     548             :  *      Frees all memory which is allocated in the given set.
     549             :  *
     550             :  * Actually, this routine has some discretion about what to do.
     551             :  * It should mark all allocated chunks freed, but it need not necessarily
     552             :  * give back all the resources the set owns.  Our actual implementation is
     553             :  * that we give back all but the "keeper" block (which we must keep, since
     554             :  * it shares a malloc chunk with the context header).  In this way, we don't
     555             :  * thrash malloc() when a context is repeatedly reset after small allocations,
     556             :  * which is typical behavior for per-tuple contexts.
     557             :  */
     558             : static void
     559    44161938 : AllocSetReset(MemoryContext context)
     560             : {
     561    44161938 :     AllocSet    set = (AllocSet) context;
     562             :     AllocBlock  block;
     563    44161938 :     Size        keepersize PG_USED_FOR_ASSERTS_ONLY
     564    44161938 :     = set->keeper->endptr - ((char *) set);
     565             : 
     566             :     AssertArg(AllocSetIsValid(set));
     567             : 
     568             : #ifdef MEMORY_CONTEXT_CHECKING
     569             :     /* Check for corruption and leaks before freeing */
     570             :     AllocSetCheck(context);
     571             : #endif
     572             : 
     573             :     /* Clear chunk freelists */
     574   529943256 :     MemSetAligned(set->freelist, 0, sizeof(set->freelist));
     575             : 
     576    44161938 :     block = set->blocks;
     577             : 
     578             :     /* New blocks list will be just the keeper block */
     579    44161938 :     set->blocks = set->keeper;
     580             : 
     581    97666352 :     while (block != NULL)
     582             :     {
     583    53504414 :         AllocBlock  next = block->next;
     584             : 
     585    53504414 :         if (block == set->keeper)
     586             :         {
     587             :             /* Reset the block, but don't return it to malloc */
     588    44161938 :             char       *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
     589             : 
     590             : #ifdef CLOBBER_FREED_MEMORY
     591             :             wipe_mem(datastart, block->freeptr - datastart);
     592             : #else
     593             :             /* wipe_mem() would have done this */
     594             :             VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
     595             : #endif
     596    44161938 :             block->freeptr = datastart;
     597    44161938 :             block->prev = NULL;
     598    44161938 :             block->next = NULL;
     599             :         }
     600             :         else
     601             :         {
     602             :             /* Normal case, release the block */
     603     9342476 :             context->mem_allocated -= block->endptr - ((char *) block);
     604             : 
     605             : #ifdef CLOBBER_FREED_MEMORY
     606             :             wipe_mem(block, block->freeptr - ((char *) block));
     607             : #endif
     608     9342476 :             free(block);
     609             :         }
     610    53504414 :         block = next;
     611             :     }
     612             : 
     613             :     Assert(context->mem_allocated == keepersize);
     614             : 
     615             :     /* Reset block size allocation sequence, too */
     616    44161938 :     set->nextBlockSize = set->initBlockSize;
     617    44161938 : }
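
The per-tuple pattern the comment above refers to looks roughly like this (a sketch; the function and context names are invented).

    /* Sketch of the reset-heavy usage AllocSetReset is optimized for: a small
     * context reset once per iteration.  Names here are invented. */
    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    example_per_tuple_loop(int ntuples)
    {
        MemoryContext tupcxt = AllocSetContextCreate(CurrentMemoryContext,
                                                     "per-tuple workspace",
                                                     ALLOCSET_SMALL_SIZES);

        for (int i = 0; i < ntuples; i++)
        {
            MemoryContext oldcxt = MemoryContextSwitchTo(tupcxt);

            (void) palloc(64);      /* per-iteration scratch space */
            MemoryContextSwitchTo(oldcxt);

            /* Dispatches to AllocSetReset(): keeper block stays, rest is freed */
            MemoryContextReset(tupcxt);
        }
        MemoryContextDelete(tupcxt);
    }
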
     618             : 
     619             : /*
     620             :  * AllocSetDelete
     621             :  *      Frees all memory which is allocated in the given set,
     622             :  *      in preparation for deletion of the set.
     623             :  *
     624             :  * Unlike AllocSetReset, this *must* free all resources of the set.
     625             :  */
     626             : static void
     627    16063956 : AllocSetDelete(MemoryContext context)
     628             : {
     629    16063956 :     AllocSet    set = (AllocSet) context;
     630    16063956 :     AllocBlock  block = set->blocks;
     631    16063956 :     Size        keepersize PG_USED_FOR_ASSERTS_ONLY
     632    16063956 :     = set->keeper->endptr - ((char *) set);
     633             : 
     634             :     AssertArg(AllocSetIsValid(set));
     635             : 
     636             : #ifdef MEMORY_CONTEXT_CHECKING
     637             :     /* Check for corruption and leaks before freeing */
     638             :     AllocSetCheck(context);
     639             : #endif
     640             : 
     641             :     /*
     642             :      * If the context is a candidate for a freelist, put it into that freelist
     643             :      * instead of destroying it.
     644             :      */
     645    16063956 :     if (set->freeListIndex >= 0)
     646             :     {
     647    16063956 :         AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
     648             : 
     649             :         /*
     650             :          * Reset the context, if it needs it, so that we aren't hanging on to
     651             :          * more than the initial malloc chunk.
     652             :          */
     653    16063956 :         if (!context->isReset)
     654     9858800 :             MemoryContextResetOnly(context);
     655             : 
     656             :         /*
     657             :          * If the freelist is full, just discard what's already in it.  See
     658             :          * comments with context_freelists[].
     659             :          */
     660    16063956 :         if (freelist->num_free >= MAX_FREE_CONTEXTS)
     661             :         {
     662       32926 :             while (freelist->first_free != NULL)
     663             :             {
     664       32600 :                 AllocSetContext *oldset = freelist->first_free;
     665             : 
     666       32600 :                 freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
     667       32600 :                 freelist->num_free--;
     668             : 
     669             :                 /* All that remains is to free the header/initial block */
     670       32600 :                 free(oldset);
     671             :             }
     672             :             Assert(freelist->num_free == 0);
     673             :         }
     674             : 
     675             :         /* Now add the just-deleted context to the freelist. */
     676    16063956 :         set->header.nextchild = (MemoryContext) freelist->first_free;
     677    16063956 :         freelist->first_free = set;
     678    16063956 :         freelist->num_free++;
     679             : 
     680    16063956 :         return;
     681             :     }
     682             : 
     683             :     /* Free all blocks, except the keeper which is part of context header */
     684           0 :     while (block != NULL)
     685             :     {
     686           0 :         AllocBlock  next = block->next;
     687             : 
     688           0 :         if (block != set->keeper)
     689           0 :             context->mem_allocated -= block->endptr - ((char *) block);
     690             : 
     691             : #ifdef CLOBBER_FREED_MEMORY
     692             :         wipe_mem(block, block->freeptr - ((char *) block));
     693             : #endif
     694             : 
     695           0 :         if (block != set->keeper)
     696           0 :             free(block);
     697             : 
     698           0 :         block = next;
     699             :     }
     700             : 
     701             :     Assert(context->mem_allocated == keepersize);
     702             : 
     703             :     /* Finally, free the context header, including the keeper block */
     704           0 :     free(set);
     705             : }
     706             : 
     707             : /*
     708             :  * AllocSetAlloc
     709             :  *      Returns pointer to allocated memory of given size or NULL if
     710             :  *      request could not be completed; memory is added to the set.
     711             :  *
     712             :  * No request may exceed:
     713             :  *      MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
     714             :  * All callers use a much-lower limit.
     715             :  *
     716             :  * Note: when using valgrind, it doesn't matter how the returned allocation
     717             :  * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
     718             :  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
     719             :  */
     720             : static void *
     721  1449043872 : AllocSetAlloc(MemoryContext context, Size size)
     722             : {
     723  1449043872 :     AllocSet    set = (AllocSet) context;
     724             :     AllocBlock  block;
     725             :     AllocChunk  chunk;
     726             :     int         fidx;
     727             :     Size        chunk_size;
     728             :     Size        blksize;
     729             : 
     730             :     AssertArg(AllocSetIsValid(set));
     731             : 
     732             :     /*
     733             :      * If requested size exceeds maximum for chunks, allocate an entire block
     734             :      * for this request.
     735             :      */
     736  1449043872 :     if (size > set->allocChunkLimit)
     737             :     {
     738    18803482 :         chunk_size = MAXALIGN(size);
     739    18803482 :         blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     740    18803482 :         block = (AllocBlock) malloc(blksize);
     741    18803482 :         if (block == NULL)
     742           0 :             return NULL;
     743             : 
     744    18803482 :         context->mem_allocated += blksize;
     745             : 
     746    18803482 :         block->aset = set;
     747    18803482 :         block->freeptr = block->endptr = ((char *) block) + blksize;
     748             : 
     749    18803482 :         chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
     750    18803482 :         chunk->aset = set;
     751    18803482 :         chunk->size = chunk_size;
     752             : #ifdef MEMORY_CONTEXT_CHECKING
     753             :         chunk->requested_size = size;
     754             :         /* set mark to catch clobber of "unused" space */
     755             :         if (size < chunk_size)
     756             :             set_sentinel(AllocChunkGetPointer(chunk), size);
     757             : #endif
     758             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     759             :         /* fill the allocated space with junk */
     760             :         randomize_mem((char *) AllocChunkGetPointer(chunk), size);
     761             : #endif
     762             : 
     763             :         /*
     764             :          * Stick the new block underneath the active allocation block, if any,
     765             :          * so that we don't lose the use of the space remaining therein.
     766             :          */
     767    18803482 :         if (set->blocks != NULL)
     768             :         {
     769    18803482 :             block->prev = set->blocks;
     770    18803482 :             block->next = set->blocks->next;
     771    18803482 :             if (block->next)
     772    13856812 :                 block->next->prev = block;
     773    18803482 :             set->blocks->next = block;
     774             :         }
     775             :         else
     776             :         {
     777           0 :             block->prev = NULL;
     778           0 :             block->next = NULL;
     779           0 :             set->blocks = block;
     780             :         }
     781             : 
     782             :         /* Ensure any padding bytes are marked NOACCESS. */
     783             :         VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
     784             :                                    chunk_size - size);
     785             : 
     786             :         /* Disallow external access to private part of chunk header. */
     787             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
     788             : 
     789    18803482 :         return AllocChunkGetPointer(chunk);
     790             :     }
     791             : 
     792             :     /*
     793             :      * Request is small enough to be treated as a chunk.  Look in the
     794             :      * corresponding free list to see if there is a free chunk we could reuse.
     795             :      * If one is found, remove it from the free list, make it again a member
     796             :      * of the alloc set and return its data address.
     797             :      */
     798  1430240390 :     fidx = AllocSetFreeIndex(size);
     799  1430240390 :     chunk = set->freelist[fidx];
     800  1430240390 :     if (chunk != NULL)
     801             :     {
     802             :         Assert(chunk->size >= size);
     803             : 
     804   338380218 :         set->freelist[fidx] = (AllocChunk) chunk->aset;
     805             : 
     806   338380218 :         chunk->aset = (void *) set;
     807             : 
     808             : #ifdef MEMORY_CONTEXT_CHECKING
     809             :         chunk->requested_size = size;
     810             :         /* set mark to catch clobber of "unused" space */
     811             :         if (size < chunk->size)
     812             :             set_sentinel(AllocChunkGetPointer(chunk), size);
     813             : #endif
     814             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     815             :         /* fill the allocated space with junk */
     816             :         randomize_mem((char *) AllocChunkGetPointer(chunk), size);
     817             : #endif
     818             : 
     819             :         /* Ensure any padding bytes are marked NOACCESS. */
     820             :         VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
     821             :                                    chunk->size - size);
     822             : 
     823             :         /* Disallow external access to private part of chunk header. */
     824             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
     825             : 
     826   338380218 :         return AllocChunkGetPointer(chunk);
     827             :     }
     828             : 
     829             :     /*
     830             :      * Choose the actual chunk size to allocate.
     831             :      */
     832  1091860172 :     chunk_size = (1 << ALLOC_MINBITS) << fidx;
     833             :     Assert(chunk_size >= size);
     834             : 
     835             :     /*
     836             :      * If there is enough room in the active allocation block, we will put the
     837             :      * chunk into that block.  Else must start a new one.
     838             :      */
     839  1091860172 :     if ((block = set->blocks) != NULL)
     840             :     {
     841  1091860172 :         Size        availspace = block->endptr - block->freeptr;
     842             : 
     843  1091860172 :         if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
     844             :         {
     845             :             /*
     846             :              * The existing active (top) block does not have enough room for
     847             :              * the requested allocation, but it might still have a useful
     848             :              * amount of space in it.  Once we push it down in the block list,
     849             :              * we'll never try to allocate more space from it. So, before we
     850             :              * do that, carve up its free space into chunks that we can put on
     851             :              * the set's freelists.
     852             :              *
     853             :              * Because we can only get here when there's less than
     854             :              * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
     855             :              * more than ALLOCSET_NUM_FREELISTS-1 times.
     856             :              */
     857    20150344 :             while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
     858             :             {
     859    13114468 :                 Size        availchunk = availspace - ALLOC_CHUNKHDRSZ;
     860    13114468 :                 int         a_fidx = AllocSetFreeIndex(availchunk);
     861             : 
     862             :                 /*
     863             :                  * In most cases, we'll get back the index of the next larger
     864             :                  * freelist than the one we need to put this chunk on.  The
     865             :                  * exception is when availchunk is exactly a power of 2.
     866             :                  */
     867    13114468 :                 if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
     868             :                 {
     869    10815404 :                     a_fidx--;
     870             :                     Assert(a_fidx >= 0);
     871    10815404 :                     availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
     872             :                 }
     873             : 
     874    13114468 :                 chunk = (AllocChunk) (block->freeptr);
     875             : 
     876             :                 /* Prepare to initialize the chunk header. */
     877             :                 VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
     878             : 
     879    13114468 :                 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
     880    13114468 :                 availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
     881             : 
     882    13114468 :                 chunk->size = availchunk;
     883             : #ifdef MEMORY_CONTEXT_CHECKING
     884             :                 chunk->requested_size = 0;   /* mark it free */
     885             : #endif
     886    13114468 :                 chunk->aset = (void *) set->freelist[a_fidx];
     887    13114468 :                 set->freelist[a_fidx] = chunk;
     888             :             }
     889             : 
     890             :             /* Mark that we need to create a new block */
     891     7035876 :             block = NULL;
     892             :         }
     893             :     }
     894             : 
     895             :     /*
     896             :      * Time to create a new regular (multi-chunk) block?
     897             :      */
     898  1091860172 :     if (block == NULL)
     899             :     {
     900             :         Size        required_size;
     901             : 
     902             :         /*
     903             :          * The first such block has size initBlockSize, and we double the
     904             :          * space in each succeeding block, but not more than maxBlockSize.
     905             :          */
     906     7035876 :         blksize = set->nextBlockSize;
     907     7035876 :         set->nextBlockSize <<= 1;
     908     7035876 :         if (set->nextBlockSize > set->maxBlockSize)
     909     1318988 :             set->nextBlockSize = set->maxBlockSize;
     910             : 
     911             :         /*
     912             :          * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
     913             :          * space... but try to keep it a power of 2.
     914             :          */
     915     7035876 :         required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
     916     7894976 :         while (blksize < required_size)
     917      859100 :             blksize <<= 1;
     918             : 
     919             :         /* Try to allocate it */
     920     7035876 :         block = (AllocBlock) malloc(blksize);
     921             : 
     922             :         /*
     923             :          * We could be asking for pretty big blocks here, so cope if malloc
     924             :          * fails.  But give up if there's less than 1 MB or so available...
     925             :          */
     926     7035876 :         while (block == NULL && blksize > 1024 * 1024)
     927             :         {
     928           0 :             blksize >>= 1;
     929           0 :             if (blksize < required_size)
     930           0 :                 break;
     931           0 :             block = (AllocBlock) malloc(blksize);
     932             :         }
     933             : 
     934     7035876 :         if (block == NULL)
     935           0 :             return NULL;
     936             : 
     937     7035876 :         context->mem_allocated += blksize;
     938             : 
     939     7035876 :         block->aset = set;
     940     7035876 :         block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
     941     7035876 :         block->endptr = ((char *) block) + blksize;
     942             : 
     943             :         /* Mark unallocated space NOACCESS. */
     944             :         VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
     945             :                                    blksize - ALLOC_BLOCKHDRSZ);
     946             : 
     947     7035876 :         block->prev = NULL;
     948     7035876 :         block->next = set->blocks;
     949     7035876 :         if (block->next)
     950     7035876 :             block->next->prev = block;
     951     7035876 :         set->blocks = block;
     952             :     }
     953             : 
     954             :     /*
     955             :      * OK, do the allocation
     956             :      */
     957  1091860172 :     chunk = (AllocChunk) (block->freeptr);
     958             : 
     959             :     /* Prepare to initialize the chunk header. */
     960             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
     961             : 
     962  1091860172 :     block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
     963             :     Assert(block->freeptr <= block->endptr);
     964             : 
     965  1091860172 :     chunk->aset = (void *) set;
     966  1091860172 :     chunk->size = chunk_size;
     967             : #ifdef MEMORY_CONTEXT_CHECKING
     968             :     chunk->requested_size = size;
     969             :     /* set mark to catch clobber of "unused" space */
     970             :     if (size < chunk->size)
     971             :         set_sentinel(AllocChunkGetPointer(chunk), size);
     972             : #endif
     973             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     974             :     /* fill the allocated space with junk */
     975             :     randomize_mem((char *) AllocChunkGetPointer(chunk), size);
     976             : #endif
     977             : 
     978             :     /* Ensure any padding bytes are marked NOACCESS. */
     979             :     VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
     980             :                                chunk_size - size);
     981             : 
     982             :     /* Disallow external access to private part of chunk header. */
     983             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
     984             : 
     985  1091860172 :     return AllocChunkGetPointer(chunk);
     986             : }
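
/*
 * Illustrative sketch, not part of aset.c or the coverage data above: a
 * stripped-down model of the block strategy AllocSetAlloc() uses.  Block
 * sizes double up to a cap, a block is grown further when a single request
 * would not fit, malloc() failures are retried at halved sizes, and chunks
 * are carved off the active block with a bump pointer.  All names and sizes
 * below are assumptions for this example; chunk headers, alignment, and the
 * freelists of the real allocator are deliberately omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct ToyBlock
{
    struct ToyBlock *next;
    char       *freeptr;        /* next free byte within the block */
    char       *endptr;         /* one past the end of the block */
} ToyBlock;

typedef struct ToySet
{
    ToyBlock   *blocks;         /* newest block is kept at the head */
    size_t      next_block_size;
    size_t      max_block_size;
} ToySet;

static void *
toy_alloc(ToySet *set, size_t size)
{
    ToyBlock   *block = set->blocks;

    /* If there is no block yet, or the head block is too full, make one. */
    if (block == NULL || (size_t) (block->endptr - block->freeptr) < size)
    {
        size_t      blksize = set->next_block_size;

        /* Double the size for the next block, but never exceed the cap. */
        set->next_block_size *= 2;
        if (set->next_block_size > set->max_block_size)
            set->next_block_size = set->max_block_size;

        /* Grow this block if even the chosen size cannot hold the request. */
        while (blksize < size + sizeof(ToyBlock))
            blksize *= 2;

        block = malloc(blksize);

        /* On failure, retry with progressively smaller blocks. */
        while (block == NULL && blksize > 1024 * 1024)
        {
            blksize /= 2;
            if (blksize < size + sizeof(ToyBlock))
                break;
            block = malloc(blksize);
        }
        if (block == NULL)
            return NULL;

        block->freeptr = (char *) block + sizeof(ToyBlock);
        block->endptr = (char *) block + blksize;
        block->next = set->blocks;
        set->blocks = block;
    }

    /* Carve the chunk off the front of the block's free space. */
    {
        char       *chunk = block->freeptr;

        block->freeptr += size;
        return chunk;
    }
}

int
main(void)
{
    ToySet      set = {NULL, 8 * 1024, 8 * 1024 * 1024};
    char       *p = toy_alloc(&set, 100);

    if (p != NULL)
    {
        strcpy(p, "hello, aset");
        puts(p);
    }
    return 0;
}
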
     987             : 
     988             : /*
     989             :  * AllocSetFree
     990             :  *      Frees allocated memory; memory is removed from the set.
     991             :  */
     992             : static void
     993   407531848 : AllocSetFree(MemoryContext context, void *pointer)
     994             : {
     995   407531848 :     AllocSet    set = (AllocSet) context;
     996   407531848 :     AllocChunk  chunk = AllocPointerGetChunk(pointer);
     997             : 
     998             :     /* Allow access to private part of chunk header. */
     999             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1000             : 
    1001             : #ifdef MEMORY_CONTEXT_CHECKING
    1002             :     /* Test for someone scribbling on unused space in chunk */
    1003             :     if (chunk->requested_size < chunk->size)
    1004             :         if (!sentinel_ok(pointer, chunk->requested_size))
    1005             :             elog(WARNING, "detected write past chunk end in %s %p",
    1006             :                  set->header.name, chunk);
    1007             : #endif
    1008             : 
    1009   407531848 :     if (chunk->size > set->allocChunkLimit)
    1010             :     {
    1011             :         /*
    1012             :          * Big chunks are certain to have been allocated as single-chunk
    1013             :          * blocks.  Just unlink that block and return it to malloc().
    1014             :          */
    1015    15293796 :         AllocBlock  block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
    1016             : 
    1017             :         /*
    1018             :          * Try to verify that we have a sane block pointer: it should
    1019             :          * reference the correct aset, and freeptr and endptr should point
    1020             :          * just past the chunk.
    1021             :          */
    1022    15293796 :         if (block->aset != set ||
    1023    15293796 :             block->freeptr != block->endptr ||
    1024    15293796 :             block->freeptr != ((char *) block) +
    1025    15293796 :             (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
    1026           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
    1027             : 
    1028             :         /* OK, remove block from aset's list and free it */
    1029    15293796 :         if (block->prev)
    1030    15293796 :             block->prev->next = block->next;
    1031             :         else
    1032           0 :             set->blocks = block->next;
    1033    15293796 :         if (block->next)
    1034    12469000 :             block->next->prev = block->prev;
    1035             : 
    1036    15293796 :         context->mem_allocated -= block->endptr - ((char *) block);
    1037             : 
    1038             : #ifdef CLOBBER_FREED_MEMORY
    1039             :         wipe_mem(block, block->freeptr - ((char *) block));
    1040             : #endif
    1041    15293796 :         free(block);
    1042             :     }
    1043             :     else
    1044             :     {
    1045             :         /* Normal case, put the chunk into appropriate freelist */
    1046   392238052 :         int         fidx = AllocSetFreeIndex(chunk->size);
    1047             : 
    1048   392238052 :         chunk->aset = (void *) set->freelist[fidx];
    1049             : 
    1050             : #ifdef CLOBBER_FREED_MEMORY
    1051             :         wipe_mem(pointer, chunk->size);
    1052             : #endif
    1053             : 
    1054             : #ifdef MEMORY_CONTEXT_CHECKING
    1055             :         /* Reset requested_size to 0 in chunks that are on freelist */
    1056             :         chunk->requested_size = 0;
    1057             : #endif
    1058   392238052 :         set->freelist[fidx] = chunk;
    1059             :     }
    1060   407531848 : }
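
/*
 * Illustrative sketch, not part of aset.c: the freelist trick used by
 * AllocSetFree() above.  A freed chunk's "aset" header field is reused as
 * the next-link of a singly linked freelist, so maintaining the list costs
 * no extra memory.  The structure and helper names are simplified stand-ins
 * assumed for this example only.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct ToyChunk
{
    void       *aset_or_link;   /* owning set while in use, next free chunk once freed */
    size_t      size;
} ToyChunk;

/* Push a freed chunk onto the head of a freelist. */
static void
freelist_push(ToyChunk **freelist, ToyChunk *chunk)
{
    chunk->aset_or_link = *freelist;
    *freelist = chunk;
}

/* Pop a chunk for reuse; returns NULL if the list is empty. */
static ToyChunk *
freelist_pop(ToyChunk **freelist)
{
    ToyChunk   *chunk = *freelist;

    if (chunk != NULL)
        *freelist = chunk->aset_or_link;
    return chunk;
}

int
main(void)
{
    ToyChunk    a = {NULL, 32};
    ToyChunk    b = {NULL, 64};
    ToyChunk   *freelist = NULL;

    freelist_push(&freelist, &a);
    freelist_push(&freelist, &b);
    /* LIFO reuse: the most recently freed chunk comes back first. */
    printf("first reuse candidate: %zu bytes\n", freelist_pop(&freelist)->size);
    return 0;
}
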
    1061             : 
    1062             : /*
    1063             :  * AllocSetRealloc
    1064             :  *      Returns new pointer to allocated memory of given size or NULL if
    1065             :  *      request could not be completed; this memory is added to the set.
    1066             :  *      Memory associated with given pointer is copied into the new memory,
    1067             :  *      and the old memory is freed.
    1068             :  *
    1069             :  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
    1070             :  * makes our Valgrind client requests less-precise, hazarding false negatives.
    1071             :  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
    1072             :  * request size.)
    1073             :  */
    1074             : static void *
    1075     9829046 : AllocSetRealloc(MemoryContext context, void *pointer, Size size)
    1076             : {
    1077     9829046 :     AllocSet    set = (AllocSet) context;
    1078     9829046 :     AllocChunk  chunk = AllocPointerGetChunk(pointer);
    1079             :     Size        oldsize;
    1080             : 
    1081             :     /* Allow access to private part of chunk header. */
    1082             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1083             : 
    1084     9829046 :     oldsize = chunk->size;
    1085             : 
    1086             : #ifdef MEMORY_CONTEXT_CHECKING
    1087             :     /* Test for someone scribbling on unused space in chunk */
    1088             :     if (chunk->requested_size < oldsize)
    1089             :         if (!sentinel_ok(pointer, chunk->requested_size))
    1090             :             elog(WARNING, "detected write past chunk end in %s %p",
    1091             :                  set->header.name, chunk);
    1092             : #endif
    1093             : 
    1094     9829046 :     if (oldsize > set->allocChunkLimit)
    1095             :     {
    1096             :         /*
    1097             :          * The chunk must have been allocated as a single-chunk block.  Use
    1098             :          * realloc() to make the containing block bigger, or smaller, with
    1099             :          * minimum space wastage.
    1100             :          */
    1101      588604 :         AllocBlock  block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
    1102             :         Size        chksize;
    1103             :         Size        blksize;
    1104             :         Size        oldblksize;
    1105             : 
    1106             :         /*
    1107             :          * Try to verify that we have a sane block pointer: it should
    1108             :          * reference the correct aset, and freeptr and endptr should point
    1109             :          * just past the chunk.
    1110             :          */
    1111      588604 :         if (block->aset != set ||
    1112      588604 :             block->freeptr != block->endptr ||
    1113      588604 :             block->freeptr != ((char *) block) +
    1114             :             (oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
    1115           0 :             elog(ERROR, "could not find block containing chunk %p", chunk);
    1116             : 
    1117             :         /*
    1118             :          * Even if the new request is less than set->allocChunkLimit, we stick
    1119             :          * with the single-chunk block approach.  Therefore we need
    1120             :          * chunk->size to be bigger than set->allocChunkLimit, so we don't get
    1121             :          * confused about the chunk's status in future calls.
    1122             :          */
    1123      588604 :         chksize = Max(size, set->allocChunkLimit + 1);
    1124      588604 :         chksize = MAXALIGN(chksize);
    1125             : 
    1126             :         /* Do the realloc */
    1127      588604 :         blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
    1128      588604 :         oldblksize = block->endptr - ((char *) block);
    1129             : 
    1130      588604 :         block = (AllocBlock) realloc(block, blksize);
    1131      588604 :         if (block == NULL)
    1132             :         {
    1133             :             /* Disallow external access to private part of chunk header. */
    1134             :             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1135           0 :             return NULL;
    1136             :         }
    1137             : 
     1138             :         /* update in two steps, so mem_allocated cannot underflow when (oldblksize > blksize) */
    1139      588604 :         context->mem_allocated -= oldblksize;
    1140      588604 :         context->mem_allocated += blksize;
    1141             : 
    1142      588604 :         block->freeptr = block->endptr = ((char *) block) + blksize;
    1143             : 
    1144             :         /* Update pointers since block has likely been moved */
    1145      588604 :         chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
    1146      588604 :         pointer = AllocChunkGetPointer(chunk);
    1147      588604 :         if (block->prev)
    1148      588604 :             block->prev->next = block;
    1149             :         else
    1150           0 :             set->blocks = block;
    1151      588604 :         if (block->next)
    1152      570206 :             block->next->prev = block;
    1153      588604 :         chunk->size = chksize;
    1154             : 
    1155             : #ifdef MEMORY_CONTEXT_CHECKING
    1156             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
    1157             :         /* We can only fill the extra space if we know the prior request */
    1158             :         if (size > chunk->requested_size)
    1159             :             randomize_mem((char *) pointer + chunk->requested_size,
    1160             :                           size - chunk->requested_size);
    1161             : #endif
    1162             : 
    1163             :         /*
    1164             :          * realloc() (or randomize_mem()) will have left any newly-allocated
    1165             :          * part UNDEFINED, but we may need to adjust trailing bytes from the
    1166             :          * old allocation.
    1167             :          */
    1168             : #ifdef USE_VALGRIND
    1169             :         if (oldsize > chunk->requested_size)
    1170             :             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
    1171             :                                         oldsize - chunk->requested_size);
    1172             : #endif
    1173             : 
    1174             :         chunk->requested_size = size;
    1175             : 
    1176             :         /* set mark to catch clobber of "unused" space */
    1177             :         if (size < chunk->size)
    1178             :             set_sentinel(pointer, size);
    1179             : #else                           /* !MEMORY_CONTEXT_CHECKING */
    1180             : 
    1181             :         /*
    1182             :          * We don't know how much of the old chunk size was the actual
    1183             :          * allocation; it could have been as small as one byte.  We have to be
    1184             :          * conservative and just mark the entire old portion DEFINED.
    1185             :          */
    1186             :         VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
    1187             : #endif
    1188             : 
    1189             :         /* Ensure any padding bytes are marked NOACCESS. */
    1190             :         VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
    1191             : 
    1192             :         /* Disallow external access to private part of chunk header. */
    1193             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1194             : 
    1195      588604 :         return pointer;
    1196             :     }
    1197             : 
    1198             :     /*
     1199             :      * Chunk sizes are aligned to power of 2 in AllocSetAlloc(), so the
     1200             :      * allocated area may already be >= the new size.  (In particular, we
     1201             :      * will fall out here if the requested size is a decrease.)
    1202             :      */
    1203     9240442 :     else if (oldsize >= size)
    1204             :     {
    1205             : #ifdef MEMORY_CONTEXT_CHECKING
    1206             :         Size        oldrequest = chunk->requested_size;
    1207             : 
    1208             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
    1209             :         /* We can only fill the extra space if we know the prior request */
    1210             :         if (size > oldrequest)
    1211             :             randomize_mem((char *) pointer + oldrequest,
    1212             :                           size - oldrequest);
    1213             : #endif
    1214             : 
    1215             :         chunk->requested_size = size;
    1216             : 
    1217             :         /*
    1218             :          * If this is an increase, mark any newly-available part UNDEFINED.
    1219             :          * Otherwise, mark the obsolete part NOACCESS.
    1220             :          */
    1221             :         if (size > oldrequest)
    1222             :             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
    1223             :                                         size - oldrequest);
    1224             :         else
    1225             :             VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
    1226             :                                        oldsize - size);
    1227             : 
    1228             :         /* set mark to catch clobber of "unused" space */
    1229             :         if (size < oldsize)
    1230             :             set_sentinel(pointer, size);
    1231             : #else                           /* !MEMORY_CONTEXT_CHECKING */
    1232             : 
    1233             :         /*
    1234             :          * We don't have the information to determine whether we're growing
    1235             :          * the old request or shrinking it, so we conservatively mark the
    1236             :          * entire new allocation DEFINED.
    1237             :          */
    1238             :         VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
    1239             :         VALGRIND_MAKE_MEM_DEFINED(pointer, size);
    1240             : #endif
    1241             : 
    1242             :         /* Disallow external access to private part of chunk header. */
    1243             :         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1244             : 
    1245     1764558 :         return pointer;
    1246             :     }
    1247             :     else
    1248             :     {
    1249             :         /*
    1250             :          * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
    1251             :          * allocate a new chunk and copy the data.  Since we know the existing
    1252             :          * data isn't huge, this won't involve any great memcpy expense, so
    1253             :          * it's not worth being smarter.  (At one time we tried to avoid
    1254             :          * memcpy when it was possible to enlarge the chunk in-place, but that
    1255             :          * turns out to misbehave unpleasantly for repeated cycles of
    1256             :          * palloc/repalloc/pfree: the eventually freed chunks go into the
    1257             :          * wrong freelist for the next initial palloc request, and so we leak
    1258             :          * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
    1259             :          */
    1260             :         AllocPointer newPointer;
    1261             : 
    1262             :         /* allocate new chunk */
    1263     7475884 :         newPointer = AllocSetAlloc((MemoryContext) set, size);
    1264             : 
    1265             :         /* leave immediately if request was not completed */
    1266     7475884 :         if (newPointer == NULL)
    1267             :         {
    1268             :             /* Disallow external access to private part of chunk header. */
    1269             :             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1270           0 :             return NULL;
    1271             :         }
    1272             : 
    1273             :         /*
    1274             :          * AllocSetAlloc() may have returned a region that is still NOACCESS.
    1275             :          * Change it to UNDEFINED for the moment; memcpy() will then transfer
    1276             :          * definedness from the old allocation to the new.  If we know the old
    1277             :          * allocation, copy just that much.  Otherwise, make the entire old
    1278             :          * chunk defined to avoid errors as we copy the currently-NOACCESS
    1279             :          * trailing bytes.
    1280             :          */
    1281             :         VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
    1282             : #ifdef MEMORY_CONTEXT_CHECKING
    1283             :         oldsize = chunk->requested_size;
    1284             : #else
    1285             :         VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
    1286             : #endif
    1287             : 
    1288             :         /* transfer existing data (certain to fit) */
    1289     7475884 :         memcpy(newPointer, pointer, oldsize);
    1290             : 
    1291             :         /* free old chunk */
    1292     7475884 :         AllocSetFree((MemoryContext) set, pointer);
    1293             : 
    1294     7475884 :         return newPointer;
    1295             :     }
    1296             : }
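
/*
 * Illustrative sketch, not part of aset.c: the three-way decision made by
 * AllocSetRealloc() above.  The chunk-limit value and the power-of-2
 * rounding helper below are assumptions for this example; the real
 * boundaries come from the context's allocChunkLimit, and oversized chunks
 * are MAXALIGN'd rather than rounded to a power of 2.
 */
#include <stdio.h>
#include <stddef.h>

#define TOY_CHUNK_LIMIT 8192    /* assumed small/large boundary */

/* Round a request up to the next power of 2 (minimum 8 bytes). */
static size_t
toy_round_up(size_t size)
{
    size_t      result = 8;

    while (result < size)
        result <<= 1;
    return result;
}

/* Report which realloc strategy an (old request, new request) pair takes. */
static const char *
toy_realloc_strategy(size_t old_request, size_t new_request)
{
    size_t      oldsize = old_request > TOY_CHUNK_LIMIT ?
        old_request : toy_round_up(old_request);

    if (oldsize > TOY_CHUNK_LIMIT)
        return "realloc() the dedicated single-chunk block";
    if (oldsize >= new_request)
        return "reuse the existing chunk (rounded size already fits)";
    return "allocate a new chunk and memcpy the old data";
}

int
main(void)
{
    printf("%s\n", toy_realloc_strategy(100, 120));     /* both fit in 128 */
    printf("%s\n", toy_realloc_strategy(100, 500));     /* must copy */
    printf("%s\n", toy_realloc_strategy(20000, 100));   /* oversized chunk */
    return 0;
}
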
    1297             : 
    1298             : /*
    1299             :  * AllocSetGetChunkSpace
    1300             :  *      Given a currently-allocated chunk, determine the total space
    1301             :  *      it occupies (including all memory-allocation overhead).
    1302             :  */
    1303             : static Size
    1304    74151204 : AllocSetGetChunkSpace(MemoryContext context, void *pointer)
    1305             : {
    1306    74151204 :     AllocChunk  chunk = AllocPointerGetChunk(pointer);
    1307             :     Size        result;
    1308             : 
    1309             :     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1310    74151204 :     result = chunk->size + ALLOC_CHUNKHDRSZ;
    1311             :     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1312    74151204 :     return result;
    1313             : }
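
/*
 * Illustrative sketch, not part of aset.c: what "chunk space" means for a
 * small request.  A 100-byte allocation is rounded up to a 128-byte chunk,
 * so the reported space is 128 bytes plus the chunk header.  The 16-byte
 * header used here is an assumption for a typical 64-bit build without
 * MEMORY_CONTEXT_CHECKING; the real value is ALLOC_CHUNKHDRSZ.
 */
#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    size_t      requested = 100;
    size_t      chunk_size = 8;         /* smallest chunk size assumed */
    size_t      assumed_header = 16;    /* stand-in for ALLOC_CHUNKHDRSZ */

    /* Round the request up to the next power of 2, as the allocator does. */
    while (chunk_size < requested)
        chunk_size <<= 1;
    printf("requested %zu, chunk %zu, space %zu\n",
           requested, chunk_size, chunk_size + assumed_header);
    return 0;
}
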
    1314             : 
    1315             : /*
    1316             :  * AllocSetIsEmpty
    1317             :  *      Is an allocset empty of any allocated space?
    1318             :  */
    1319             : static bool
    1320        6440 : AllocSetIsEmpty(MemoryContext context)
    1321             : {
    1322             :     /*
    1323             :      * For now, we say "empty" only if the context is new or just reset. We
    1324             :      * could examine the freelists to determine if all space has been freed,
    1325             :      * but it's not really worth the trouble for present uses of this
    1326             :      * functionality.
    1327             :      */
    1328        6440 :     if (context->isReset)
    1329        6420 :         return true;
    1330          20 :     return false;
    1331             : }
    1332             : 
    1333             : /*
    1334             :  * AllocSetStats
    1335             :  *      Compute stats about memory consumption of an allocset.
    1336             :  *
    1337             :  * printfunc: if not NULL, pass a human-readable stats string to this.
    1338             :  * passthru: pass this pointer through to printfunc.
    1339             :  * totals: if not NULL, add stats about this context into *totals.
    1340             :  * print_to_stderr: print stats to stderr if true, elog otherwise.
    1341             :  */
    1342             : static void
    1343        1788 : AllocSetStats(MemoryContext context,
    1344             :               MemoryStatsPrintFunc printfunc, void *passthru,
    1345             :               MemoryContextCounters *totals, bool print_to_stderr)
    1346             : {
    1347        1788 :     AllocSet    set = (AllocSet) context;
    1348        1788 :     Size        nblocks = 0;
    1349        1788 :     Size        freechunks = 0;
    1350             :     Size        totalspace;
    1351        1788 :     Size        freespace = 0;
    1352             :     AllocBlock  block;
    1353             :     int         fidx;
    1354             : 
    1355             :     /* Include context header in totalspace */
    1356        1788 :     totalspace = MAXALIGN(sizeof(AllocSetContext));
    1357             : 
    1358        4874 :     for (block = set->blocks; block != NULL; block = block->next)
    1359             :     {
    1360        3086 :         nblocks++;
    1361        3086 :         totalspace += block->endptr - ((char *) block);
    1362        3086 :         freespace += block->endptr - block->freeptr;
    1363             :     }
    1364       21456 :     for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
    1365             :     {
    1366             :         AllocChunk  chunk;
    1367             : 
    1368       21366 :         for (chunk = set->freelist[fidx]; chunk != NULL;
    1369        1698 :              chunk = (AllocChunk) chunk->aset)
    1370             :         {
    1371        1698 :             freechunks++;
    1372        1698 :             freespace += chunk->size + ALLOC_CHUNKHDRSZ;
    1373             :         }
    1374             :     }
    1375             : 
    1376        1788 :     if (printfunc)
    1377             :     {
    1378             :         char        stats_string[200];
    1379             : 
    1380         868 :         snprintf(stats_string, sizeof(stats_string),
    1381             :                  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
    1382             :                  totalspace, nblocks, freespace, freechunks,
    1383             :                  totalspace - freespace);
    1384         868 :         printfunc(context, passthru, stats_string, print_to_stderr);
    1385             :     }
    1386             : 
    1387        1788 :     if (totals)
    1388             :     {
    1389        1788 :         totals->nblocks += nblocks;
    1390        1788 :         totals->freechunks += freechunks;
    1391        1788 :         totals->totalspace += totalspace;
    1392        1788 :         totals->freespace += freespace;
    1393             :     }
    1394        1788 : }
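
/*
 * Illustrative sketch, not part of aset.c: the shape of the line handed to
 * printfunc above.  The numbers are entirely hypothetical; the layout
 * follows the snprintf format string in AllocSetStats().
 */
#include <stdio.h>

int
main(void)
{
    /* e.g. a context holding two 8 kB blocks with about 5 kB still free */
    puts("16384 total in 2 blocks; 5120 free (3 chunks); 11264 used");
    return 0;
}
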
    1395             : 
    1396             : 
    1397             : #ifdef MEMORY_CONTEXT_CHECKING
    1398             : 
    1399             : /*
    1400             :  * AllocSetCheck
    1401             :  *      Walk through chunks and check consistency of memory.
    1402             :  *
    1403             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
    1404             :  * find yourself in an infinite loop when trouble occurs, because this
    1405             :  * routine will be entered again when elog cleanup tries to release memory!
    1406             :  */
    1407             : static void
    1408             : AllocSetCheck(MemoryContext context)
    1409             : {
    1410             :     AllocSet    set = (AllocSet) context;
    1411             :     const char *name = set->header.name;
    1412             :     AllocBlock  prevblock;
    1413             :     AllocBlock  block;
    1414             :     Size        total_allocated = 0;
    1415             : 
    1416             :     for (prevblock = NULL, block = set->blocks;
    1417             :          block != NULL;
    1418             :          prevblock = block, block = block->next)
    1419             :     {
    1420             :         char       *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
    1421             :         long        blk_used = block->freeptr - bpoz;
    1422             :         long        blk_data = 0;
    1423             :         long        nchunks = 0;
    1424             : 
    1425             :         if (set->keeper == block)
    1426             :             total_allocated += block->endptr - ((char *) set);
    1427             :         else
    1428             :             total_allocated += block->endptr - ((char *) block);
    1429             : 
    1430             :         /*
     1431             :          * Empty block - only the keeper block is allowed to be empty
    1432             :          */
    1433             :         if (!blk_used)
    1434             :         {
    1435             :             if (set->keeper != block)
    1436             :                 elog(WARNING, "problem in alloc set %s: empty block %p",
    1437             :                      name, block);
    1438             :         }
    1439             : 
    1440             :         /*
    1441             :          * Check block header fields
    1442             :          */
    1443             :         if (block->aset != set ||
    1444             :             block->prev != prevblock ||
    1445             :             block->freeptr < bpoz ||
    1446             :             block->freeptr > block->endptr)
    1447             :             elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
    1448             :                  name, block);
    1449             : 
    1450             :         /*
    1451             :          * Chunk walker
    1452             :          */
    1453             :         while (bpoz < block->freeptr)
    1454             :         {
    1455             :             AllocChunk  chunk = (AllocChunk) bpoz;
    1456             :             Size        chsize,
    1457             :                         dsize;
    1458             : 
    1459             :             /* Allow access to private part of chunk header. */
    1460             :             VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1461             : 
    1462             :             chsize = chunk->size;    /* aligned chunk size */
    1463             :             dsize = chunk->requested_size;   /* real data */
    1464             : 
    1465             :             /*
    1466             :              * Check chunk size
    1467             :              */
    1468             :             if (dsize > chsize)
    1469             :                 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
    1470             :                      name, chunk, block);
    1471             :             if (chsize < (1 << ALLOC_MINBITS))
    1472             :                 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
    1473             :                      name, chsize, chunk, block);
    1474             : 
    1475             :             /* single-chunk block? */
    1476             :             if (chsize > set->allocChunkLimit &&
    1477             :                 chsize + ALLOC_CHUNKHDRSZ != blk_used)
    1478             :                 elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
    1479             :                      name, chunk, block);
    1480             : 
    1481             :             /*
    1482             :              * If chunk is allocated, check for correct aset pointer. (If it's
    1483             :              * free, the aset is the freelist pointer, which we can't check as
    1484             :              * easily...)  Note this is an incomplete test, since palloc(0)
    1485             :              * produces an allocated chunk with requested_size == 0.
    1486             :              */
    1487             :             if (dsize > 0 && chunk->aset != (void *) set)
    1488             :                 elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
    1489             :                      name, block, chunk);
    1490             : 
    1491             :             /*
    1492             :              * Check for overwrite of padding space in an allocated chunk.
    1493             :              */
    1494             :             if (chunk->aset == (void *) set && dsize < chsize &&
    1495             :                 !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
    1496             :                 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
    1497             :                      name, block, chunk);
    1498             : 
    1499             :             /*
    1500             :              * If chunk is allocated, disallow external access to private part
    1501             :              * of chunk header.
    1502             :              */
    1503             :             if (chunk->aset == (void *) set)
    1504             :                 VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
    1505             : 
    1506             :             blk_data += chsize;
    1507             :             nchunks++;
    1508             : 
    1509             :             bpoz += ALLOC_CHUNKHDRSZ + chsize;
    1510             :         }
    1511             : 
    1512             :         if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
    1513             :             elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
    1514             :                  name, block);
    1515             :     }
    1516             : 
    1517             :     Assert(total_allocated == context->mem_allocated);
    1518             : }
    1519             : 
    1520             : #endif                          /* MEMORY_CONTEXT_CHECKING */
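
/*
 * Illustrative sketch, not part of aset.c: the sentinel idea behind the
 * set_sentinel()/sentinel_ok() calls used above.  When the rounded chunk is
 * larger than the request, a marker byte is written just past the requested
 * size; a later check detects writes that ran past the end.  The marker
 * value and helper names here are assumptions for this example; the real
 * helpers live in utils/memdebug.h.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

#define TOY_SENTINEL 0x7E       /* assumed marker byte */

static void
toy_set_sentinel(void *base, size_t offset)
{
    ((unsigned char *) base)[offset] = TOY_SENTINEL;
}

static bool
toy_sentinel_ok(const void *base, size_t offset)
{
    return ((const unsigned char *) base)[offset] == TOY_SENTINEL;
}

int
main(void)
{
    unsigned char chunk[128];   /* pretend 100 bytes were requested from a 128-byte chunk */

    toy_set_sentinel(chunk, 100);
    memset(chunk, 0, 101);      /* oops: writes one byte past the 100-byte request */
    printf("sentinel intact: %s\n", toy_sentinel_ok(chunk, 100) ? "yes" : "no");
    return 0;
}
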

Generated by: LCOV version 1.14