LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - slab.c (source / functions)
                                               Hit      Total    Coverage
Test: PostgreSQL 15devel           Lines:       88        128      68.8 %
Date: 2021-12-05 01:09:12          Functions:    5          9      55.6 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * slab.c
       4             :  *    SLAB allocator definitions.
       5             :  *
       6             :  * SLAB is a MemoryContext implementation designed for cases where large
       7             :  * numbers of equally-sized objects are allocated (and freed).
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 2017-2021, PostgreSQL Global Development Group
      11             :  *
      12             :  * IDENTIFICATION
      13             :  *    src/backend/utils/mmgr/slab.c
      14             :  *
      15             :  *
      16             :  * NOTE:
      17             :  *  The constant allocation size allows significant simplification and various
      18             :  *  optimizations over more general purpose allocators. The blocks are carved
      19             :  *  into chunks of exactly the right size (plus alignment), not wasting any
      20             :  *  memory.
      21             :  *
      22             :  *  The information about free chunks is maintained both at the block level and
      23             :  *  global (context) level. This is possible as the chunk size (and thus also
      24             :  *  the number of chunks per block) is fixed.
      25             :  *
      26             :  *  On each block, free chunks are tracked in a simple linked list: the
      27             :  *  contents of each free chunk are replaced with the index of the next free
      28             :  *  chunk in the same block. Each block also keeps a counter of free chunks;
      29             :  *  combined with the block-level freelist, this makes it trivial to detect
      30             :  *  when a block has become entirely free so that it can be released.
      31             :  *
      32             :  *  At the context level, 'freelist' is an array of block lists indexed by the
      33             :  *  number of free chunks in each block: freelist[0] holds completely full
      34             :  *  blocks, and the last entry holds completely empty ones.
      35             :  *
      36             :  *  This also allows various optimizations - for example, when searching for
      37             :  *  a free chunk, the allocator reuses space from the fullest blocks first, in
      38             :  *  the hope that some of the less full blocks will become completely empty
      39             :  *  (and be returned to the OS).
      40             :  *
      41             :  *  For each block, we maintain a pointer to the first free chunk - this is quite
      42             :  *  cheap and allows us to skip all the preceding used chunks, eliminating
      43             :  *  a significant number of lookups in many common usage patterns. In the worst
      44             :  *  case this performs as if the pointer was not maintained.
      45             :  *
      46             :  *  We cache the freelist index for the blocks with the fewest free chunks
      47             :  *  (minFreeChunks), so that we don't have to search the freelist on every
      48             :  *  SlabAlloc() call, which is quite expensive.
      49             :  *
      50             :  *-------------------------------------------------------------------------
      51             :  */
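
The intra-block freelist described in the NOTE above can be modelled with a small self-contained sketch. This is an illustration only, not code from slab.c; the toy_* names and sizes are invented. Each free slot stores the index of the next free slot, so allocation pops the head of that list and freeing pushes onto it, with no extra memory spent on list nodes:

    #include <stdio.h>
    #include <string.h>

    #define TOY_CHUNKS      4
    #define TOY_CHUNK_SIZE  32              /* must be >= sizeof(int) */

    typedef struct toy_block
    {
        int  nfree;                         /* number of free chunks */
        int  first_free;                    /* index of the first free chunk */
        char chunks[TOY_CHUNKS][TOY_CHUNK_SIZE];
    } toy_block;

    static void
    toy_init(toy_block *b)
    {
        b->nfree = TOY_CHUNKS;
        b->first_free = 0;
        /* every free chunk stores the index of the next free chunk */
        for (int i = 0; i < TOY_CHUNKS; i++)
        {
            int next = i + 1;
            memcpy(b->chunks[i], &next, sizeof(next));
        }
    }

    static void *
    toy_alloc(toy_block *b)
    {
        if (b->nfree == 0)
            return NULL;

        int idx = b->first_free;
        memcpy(&b->first_free, b->chunks[idx], sizeof(int));   /* pop list head */
        b->nfree--;
        return b->chunks[idx];
    }

    static void
    toy_free(toy_block *b, void *ptr)
    {
        int idx = (int) (((char *) ptr - b->chunks[0]) / TOY_CHUNK_SIZE);

        memcpy(ptr, &b->first_free, sizeof(int));              /* push onto list */
        b->first_free = idx;
        b->nfree++;
    }

    int
    main(void)
    {
        toy_block b;
        toy_init(&b);

        void *p0 = toy_alloc(&b);           /* takes chunk 0 */
        void *p1 = toy_alloc(&b);           /* takes chunk 1 */
        toy_free(&b, p0);                   /* chunk 0 becomes the list head again */

        printf("nfree = %d, first_free = %d\n", b.nfree, b.first_free);  /* 3, 0 */
        (void) p1;
        return 0;
    }

slab.c applies the same idea per block, with SlabBlock.firstFreeChunk playing the role of first_free and the freed chunk payloads holding the next-free indexes.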
      52             : 
      53             : #include "postgres.h"
      54             : 
      55             : #include "lib/ilist.h"
      56             : #include "utils/memdebug.h"
      57             : #include "utils/memutils.h"
      58             : 
      59             : /*
      60             :  * SlabContext is a specialized implementation of MemoryContext.
      61             :  */
      62             : typedef struct SlabContext
      63             : {
      64             :     MemoryContextData header;   /* Standard memory-context fields */
      65             :     /* Allocation parameters for this context: */
      66             :     Size        chunkSize;      /* chunk size */
      67             :     Size        fullChunkSize;  /* chunk size including header and alignment */
      68             :     Size        blockSize;      /* block size */
      69             :     Size        headerSize;     /* allocated size of context header */
      70             :     int         chunksPerBlock; /* number of chunks per block */
      71             :     int         minFreeChunks;  /* min number of free chunks in any block */
      72             :     int         nblocks;        /* number of blocks allocated */
      73             : #ifdef MEMORY_CONTEXT_CHECKING
      74             :     bool       *freechunks;     /* bitmap of free chunks in a block */
      75             : #endif
      76             :     /* blocks with free space, grouped by number of free chunks: */
      77             :     dlist_head  freelist[FLEXIBLE_ARRAY_MEMBER];
      78             : } SlabContext;
      79             : 
      80             : /*
      81             :  * SlabBlock
      82             :  *      Structure of a single block in SLAB allocator.
      83             :  *
      84             :  * node: doubly-linked list of blocks in global freelist
      85             :  * nfree: number of free chunks in this block
      86             :  * firstFreeChunk: index of the first free chunk
      87             :  */
      88             : typedef struct SlabBlock
      89             : {
      90             :     dlist_node  node;           /* doubly-linked list */
      91             :     int         nfree;          /* number of free chunks */
      92             :     int         firstFreeChunk; /* index of the first free chunk in the block */
      93             : } SlabBlock;
      94             : 
      95             : /*
      96             :  * SlabChunk
      97             :  *      The prefix of each piece of memory in a SlabBlock
      98             :  *
      99             :  * Note: to meet the memory context APIs, the payload area of the chunk must
     100             :  * be maxaligned, and the "slab" link must be immediately adjacent to the
     101             :  * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
     102             :  * which MAXALIGN is more than twice sizeof(void *), this happens without any
     103             :  * special hacking in this struct declaration.  But there is a static
     104             :  * assertion below that the alignment is done correctly.
     105             :  */
     106             : typedef struct SlabChunk
     107             : {
     108             :     SlabBlock  *block;          /* block owning this chunk */
     109             :     SlabContext *slab;          /* owning context */
     110             :     /* there must not be any padding to reach a MAXALIGN boundary here! */
     111             : } SlabChunk;
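
The adjacency requirement described in the comment above can be seen in a minimal sketch (illustrative only; toy_context and toy_chunk are invented names): generic code such as GetMemoryChunkContext() recovers the owning context by reading the pointer-sized word immediately in front of the payload, which only works if the context link is the last field of the chunk header:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct toy_context
    {
        const char *name;
    } toy_context;

    typedef struct toy_chunk
    {
        void        *block;         /* owning block, unused in this sketch */
        toy_context *context;       /* must sit immediately before the payload */
        /* payload follows */
    } toy_chunk;

    _Static_assert(sizeof(toy_chunk) == 2 * sizeof(void *),
                   "unexpected padding in toy_chunk");

    /* Recover the owning context from a bare payload pointer. */
    static toy_context *
    toy_get_context(void *payload)
    {
        return *(toy_context **) ((char *) payload - sizeof(void *));
    }

    int
    main(void)
    {
        toy_context ctx = {"toy slab"};
        toy_chunk  *chunk = malloc(sizeof(toy_chunk) + 64);
        void       *payload;

        if (chunk == NULL)
            return 1;
        payload = (char *) chunk + sizeof(toy_chunk);

        chunk->block = NULL;
        chunk->context = &ctx;

        assert(toy_get_context(payload) == &ctx);
        printf("payload owned by \"%s\"\n", toy_get_context(payload)->name);

        free(chunk);
        return 0;
    }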
     112             : 
     113             : 
     114             : #define SlabPointerGetChunk(ptr)    \
     115             :     ((SlabChunk *)(((char *)(ptr)) - sizeof(SlabChunk)))
     116             : #define SlabChunkGetPointer(chk)    \
     117             :     ((void *)(((char *)(chk)) + sizeof(SlabChunk)))
     118             : #define SlabBlockGetChunk(slab, block, idx) \
     119             :     ((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)    \
     120             :                     + (idx * slab->fullChunkSize)))
     121             : #define SlabBlockStart(block)   \
     122             :     ((char *) block + sizeof(SlabBlock))
     123             : #define SlabChunkIndex(slab, block, chunk)  \
     124             :     (((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)
     125             : 
     126             : /*
     127             :  * These functions implement the MemoryContext API for Slab contexts.
     128             :  */
     129             : static void *SlabAlloc(MemoryContext context, Size size);
     130             : static void SlabFree(MemoryContext context, void *pointer);
     131             : static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
     132             : static void SlabReset(MemoryContext context);
     133             : static void SlabDelete(MemoryContext context);
     134             : static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
     135             : static bool SlabIsEmpty(MemoryContext context);
     136             : static void SlabStats(MemoryContext context,
     137             :                       MemoryStatsPrintFunc printfunc, void *passthru,
     138             :                       MemoryContextCounters *totals,
     139             :                       bool print_to_stderr);
     140             : #ifdef MEMORY_CONTEXT_CHECKING
     141             : static void SlabCheck(MemoryContext context);
     142             : #endif
     143             : 
     144             : /*
     145             :  * This is the virtual function table for Slab contexts.
     146             :  */
     147             : static const MemoryContextMethods SlabMethods = {
     148             :     SlabAlloc,
     149             :     SlabFree,
     150             :     SlabRealloc,
     151             :     SlabReset,
     152             :     SlabDelete,
     153             :     SlabGetChunkSpace,
     154             :     SlabIsEmpty,
     155             :     SlabStats
     156             : #ifdef MEMORY_CONTEXT_CHECKING
     157             :     ,SlabCheck
     158             : #endif
     159             : };
     160             : 
     161             : 
     162             : /*
     163             :  * SlabContextCreate
     164             :  *      Create a new Slab context.
     165             :  *
     166             :  * parent: parent context, or NULL if top-level context
     167             :  * name: name of context (must be statically allocated)
     168             :  * blockSize: allocation block size
     169             :  * chunkSize: allocation chunk size
     170             :  *
     171             :  * The chunkSize may not exceed:
     172             :  *      MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
     173             :  */
     174             : MemoryContext
     175        2132 : SlabContextCreate(MemoryContext parent,
     176             :                   const char *name,
     177             :                   Size blockSize,
     178             :                   Size chunkSize)
     179             : {
     180             :     int         chunksPerBlock;
     181             :     Size        fullChunkSize;
     182             :     Size        freelistSize;
     183             :     Size        headerSize;
     184             :     SlabContext *slab;
     185             :     int         i;
     186             : 
     187             :     /* Assert we padded SlabChunk properly */
     188             :     StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
     189             :                      "sizeof(SlabChunk) is not maxaligned");
     190             :     StaticAssertStmt(offsetof(SlabChunk, slab) + sizeof(MemoryContext) ==
     191             :                      sizeof(SlabChunk),
     192             :                      "padding calculation in SlabChunk is wrong");
     193             : 
     194             :     /* Make sure the linked list node fits inside a freed chunk */
     195        2132 :     if (chunkSize < sizeof(int))
     196           0 :         chunkSize = sizeof(int);
     197             : 
     198             :     /* chunk, including SLAB header (both addresses nicely aligned) */
     199        2132 :     fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
     200             : 
     201             :     /* Make sure the block can store at least one chunk. */
     202        2132 :     if (blockSize < fullChunkSize + sizeof(SlabBlock))
     203           0 :         elog(ERROR, "block size %zu for slab is too small for %zu chunks",
     204             :              blockSize, chunkSize);
     205             : 
     206             :     /* Compute maximum number of chunks per block */
     207        2132 :     chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
     208             : 
     209             :     /* The freelist starts with 0, ends with chunksPerBlock. */
     210        2132 :     freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
     211             : 
     212             :     /*
     213             :      * Allocate the context header.  Unlike aset.c, we never try to combine
     214             :      * this with the first regular block; not worth the extra complication.
     215             :      */
     216             : 
     217             :     /* Size of the memory context header */
     218        2132 :     headerSize = offsetof(SlabContext, freelist) + freelistSize;
     219             : 
     220             : #ifdef MEMORY_CONTEXT_CHECKING
     221             : 
     222             :     /*
     223             :      * With memory checking, we need to allocate extra space for the bitmap of
     224             :      * free chunks. The bitmap is an array of bools, so we don't need to worry
     225             :      * about alignment.
     226             :      */
     227             :     headerSize += chunksPerBlock * sizeof(bool);
     228             : #endif
     229             : 
     230        2132 :     slab = (SlabContext *) malloc(headerSize);
     231        2132 :     if (slab == NULL)
     232             :     {
     233           0 :         MemoryContextStats(TopMemoryContext);
     234           0 :         ereport(ERROR,
     235             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     236             :                  errmsg("out of memory"),
     237             :                  errdetail("Failed while creating memory context \"%s\".",
     238             :                            name)));
     239             :     }
     240             : 
     241             :     /*
     242             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     243             :      * we'd leak the header if we ereport in this stretch.
     244             :      */
     245             : 
     246             :     /* Fill in SlabContext-specific header fields */
     247        2132 :     slab->chunkSize = chunkSize;
     248        2132 :     slab->fullChunkSize = fullChunkSize;
     249        2132 :     slab->blockSize = blockSize;
     250        2132 :     slab->headerSize = headerSize;
     251        2132 :     slab->chunksPerBlock = chunksPerBlock;
     252        2132 :     slab->minFreeChunks = 0;
     253        2132 :     slab->nblocks = 0;
     254             : 
     255             :     /* initialize the freelist slots */
     256      122590 :     for (i = 0; i < (slab->chunksPerBlock + 1); i++)
     257      120458 :         dlist_init(&slab->freelist[i]);
     258             : 
     259             : #ifdef MEMORY_CONTEXT_CHECKING
     260             :     /* set the freechunks pointer right after the freelists array */
     261             :     slab->freechunks
     262             :         = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
     263             : #endif
     264             : 
     265             :     /* Finally, do the type-independent part of context creation */
     266        2132 :     MemoryContextCreate((MemoryContext) slab,
     267             :                         T_SlabContext,
     268             :                         &SlabMethods,
     269             :                         parent,
     270             :                         name);
     271             : 
     272        2132 :     return (MemoryContext) slab;
     273             : }
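
As a usage sketch (MyEntry and new_entry are invented for illustration; SlabContextCreate, MemoryContextAlloc and SLAB_DEFAULT_BLOCK_SIZE are the regular APIs declared in utils/memutils.h), a caller typically creates one slab per fixed-size object type and always allocates exactly that size, since SlabAlloc() raises an error for any other request size:

    #include "postgres.h"
    #include "utils/memutils.h"

    typedef struct MyEntry
    {
        int     key;
        double  value;
    } MyEntry;

    static MemoryContext entry_context = NULL;

    static MyEntry *
    new_entry(MemoryContext parent)
    {
        if (entry_context == NULL)
            entry_context = SlabContextCreate(parent,
                                              "MyEntry slab",
                                              SLAB_DEFAULT_BLOCK_SIZE,
                                              sizeof(MyEntry));

        /* requests must be exactly sizeof(MyEntry); other sizes elog(ERROR) */
        return (MyEntry *) MemoryContextAlloc(entry_context, sizeof(MyEntry));
    }

Individual entries are released with pfree(), which returns the chunk to its block, and MemoryContextDelete(entry_context) releases everything at once.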
     274             : 
     275             : /*
     276             :  * SlabReset
      277             :  *      Frees all memory which is allocated in the given context.
     278             :  *
     279             :  * The code simply frees all the blocks in the context - we don't keep any
     280             :  * keeper blocks or anything like that.
     281             :  */
     282             : static void
     283        1896 : SlabReset(MemoryContext context)
     284             : {
     285             :     int         i;
     286        1896 :     SlabContext *slab = castNode(SlabContext, context);
     287             : 
     288             :     Assert(slab);
     289             : 
     290             : #ifdef MEMORY_CONTEXT_CHECKING
     291             :     /* Check for corruption and leaks before freeing */
     292             :     SlabCheck(context);
     293             : #endif
     294             : 
     295             :     /* walk over freelists and free the blocks */
     296      109020 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     297             :     {
     298             :         dlist_mutable_iter miter;
     299             : 
     300      107194 :         dlist_foreach_modify(miter, &slab->freelist[i])
     301             :         {
     302          70 :             SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);
     303             : 
     304          70 :             dlist_delete(miter.cur);
     305             : 
     306             : #ifdef CLOBBER_FREED_MEMORY
     307             :             wipe_mem(block, slab->blockSize);
     308             : #endif
     309          70 :             free(block);
     310          70 :             slab->nblocks--;
     311          70 :             context->mem_allocated -= slab->blockSize;
     312             :         }
     313             :     }
     314             : 
     315        1896 :     slab->minFreeChunks = 0;
     316             : 
     317             :     Assert(slab->nblocks == 0);
     318             :     Assert(context->mem_allocated == 0);
     319        1896 : }
     320             : 
     321             : /*
     322             :  * SlabDelete
     323             :  *      Free all memory which is allocated in the given context.
     324             :  */
     325             : static void
     326        1896 : SlabDelete(MemoryContext context)
     327             : {
     328             :     /* Reset to release all the SlabBlocks */
     329        1896 :     SlabReset(context);
     330             :     /* And free the context header */
     331        1896 :     free(context);
     332        1896 : }
     333             : 
     334             : /*
     335             :  * SlabAlloc
      336             :  *      Returns a pointer to allocated memory of the given size, or NULL if
      337             :  *      the request could not be completed; memory is added to the slab.
     338             :  */
     339             : static void *
     340     2998886 : SlabAlloc(MemoryContext context, Size size)
     341             : {
     342     2998886 :     SlabContext *slab = castNode(SlabContext, context);
     343             :     SlabBlock  *block;
     344             :     SlabChunk  *chunk;
     345             :     int         idx;
     346             : 
     347             :     Assert(slab);
     348             : 
     349             :     Assert((slab->minFreeChunks >= 0) &&
     350             :            (slab->minFreeChunks < slab->chunksPerBlock));
     351             : 
      352             :     /* make sure we only allow requests of the correct size */
     353     2998886 :     if (size != slab->chunkSize)
     354           0 :         elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
     355             :              size, slab->chunkSize);
     356             : 
     357             :     /*
     358             :      * If there are no free chunks in any existing block, create a new block
      359             :      * and add it to the last freelist bucket.
     360             :      *
     361             :      * slab->minFreeChunks == 0 means there are no blocks with free chunks,
     362             :      * thanks to how minFreeChunks is updated at the end of SlabAlloc().
     363             :      */
     364     2998886 :     if (slab->minFreeChunks == 0)
     365             :     {
     366       41112 :         block = (SlabBlock *) malloc(slab->blockSize);
     367             : 
     368       41112 :         if (block == NULL)
     369           0 :             return NULL;
     370             : 
     371       41112 :         block->nfree = slab->chunksPerBlock;
     372       41112 :         block->firstFreeChunk = 0;
     373             : 
     374             :         /*
     375             :          * Put all the chunks on a freelist. Walk the chunks and point each
     376             :          * one to the next one.
     377             :          */
     378     3332436 :         for (idx = 0; idx < slab->chunksPerBlock; idx++)
     379             :         {
     380     3291324 :             chunk = SlabBlockGetChunk(slab, block, idx);
     381     3291324 :             *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
     382             :         }
     383             : 
     384             :         /*
     385             :          * And add it to the last freelist with all chunks empty.
     386             :          *
     387             :          * We know there are no blocks in the freelist, otherwise we wouldn't
     388             :          * need a new block.
     389             :          */
     390             :         Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));
     391             : 
     392       41112 :         dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
     393             : 
     394       41112 :         slab->minFreeChunks = slab->chunksPerBlock;
     395       41112 :         slab->nblocks += 1;
     396       41112 :         context->mem_allocated += slab->blockSize;
     397             :     }
     398             : 
     399             :     /* grab the block from the freelist (even the new block is there) */
     400     2998886 :     block = dlist_head_element(SlabBlock, node,
     401             :                                &slab->freelist[slab->minFreeChunks]);
     402             : 
     403             :     /* make sure we actually got a valid block, with matching nfree */
     404             :     Assert(block != NULL);
     405             :     Assert(slab->minFreeChunks == block->nfree);
     406             :     Assert(block->nfree > 0);
     407             : 
      408             :     /* we know the index of the first free chunk in the block */
     409     2998886 :     idx = block->firstFreeChunk;
     410             : 
     411             :     /* make sure the chunk index is valid, and that it's marked as empty */
     412             :     Assert((idx >= 0) && (idx < slab->chunksPerBlock));
     413             : 
      414             :     /* compute the chunk location from the block start (after the block header) */
     415     2998886 :     chunk = SlabBlockGetChunk(slab, block, idx);
     416             : 
     417             :     /*
     418             :      * Update the block nfree count, and also the minFreeChunks as we've
     419             :      * decreased nfree for a block with the minimum number of free chunks
     420             :      * (because that's how we chose the block).
     421             :      */
     422     2998886 :     block->nfree--;
     423     2998886 :     slab->minFreeChunks = block->nfree;
     424             : 
     425             :     /*
     426             :      * Remove the chunk from the freelist head. The index of the next free
     427             :      * chunk is stored in the chunk itself.
     428             :      */
     429             :     VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     430     2998886 :     block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
     431             : 
     432             :     Assert(block->firstFreeChunk >= 0);
     433             :     Assert(block->firstFreeChunk <= slab->chunksPerBlock);
     434             : 
     435             :     Assert((block->nfree != 0 &&
     436             :             block->firstFreeChunk < slab->chunksPerBlock) ||
     437             :            (block->nfree == 0 &&
     438             :             block->firstFreeChunk == slab->chunksPerBlock));
     439             : 
     440             :     /* move the whole block to the right place in the freelist */
     441     2998886 :     dlist_delete(&block->node);
     442     2998886 :     dlist_push_head(&slab->freelist[block->nfree], &block->node);
     443             : 
     444             :     /*
     445             :      * And finally update minFreeChunks, i.e. the index to the block with the
     446             :      * lowest number of free chunks. We only need to do that when the block
     447             :      * got full (otherwise we know the current block is the right one). We'll
     448             :      * simply walk the freelist until we find a non-empty entry.
     449             :      */
     450     2998886 :     if (slab->minFreeChunks == 0)
     451             :     {
     452     2708558 :         for (idx = 1; idx <= slab->chunksPerBlock; idx++)
     453             :         {
     454     2678324 :             if (dlist_is_empty(&slab->freelist[idx]))
     455     2675954 :                 continue;
     456             : 
     457             :             /* found a non-empty freelist */
     458        2370 :             slab->minFreeChunks = idx;
     459        2370 :             break;
     460             :         }
     461             :     }
     462             : 
     463     2998886 :     if (slab->minFreeChunks == slab->chunksPerBlock)
     464           0 :         slab->minFreeChunks = 0;
     465             : 
     466             :     /* Prepare to initialize the chunk header. */
     467             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
     468             : 
     469     2998886 :     chunk->block = block;
     470     2998886 :     chunk->slab = slab;
     471             : 
     472             : #ifdef MEMORY_CONTEXT_CHECKING
      473             :     /* set the sentinel to catch clobber of "unused" space */
     474             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     475             :     {
     476             :         set_sentinel(SlabChunkGetPointer(chunk), size);
     477             :         VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
     478             :                                    sizeof(SlabChunk) + slab->chunkSize,
     479             :                                    slab->fullChunkSize -
     480             :                                    (slab->chunkSize + sizeof(SlabChunk)));
     481             :     }
     482             : #endif
     483             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     484             :     /* fill the allocated space with junk */
     485             :     randomize_mem((char *) SlabChunkGetPointer(chunk), size);
     486             : #endif
     487             : 
     488             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     489             : 
     490     2998886 :     return SlabChunkGetPointer(chunk);
     491             : }
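
A concrete trace of the bookkeeping above, using invented numbers: suppose chunksPerBlock is 8 and the fullest block with any free space has 3 free chunks, so minFreeChunks is 3 and that block sits at the head of freelist[3]. SlabAlloc() hands out that block's firstFreeChunk, decrements nfree to 2, moves the block to freelist[2], and sets minFreeChunks to 2. Had nfree dropped to 0 instead, the block would land in freelist[0] (completely full blocks) and the loop above would scan freelist[1] through freelist[8] for the next non-empty bucket to re-establish minFreeChunks.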
     492             : 
     493             : /*
     494             :  * SlabFree
     495             :  *      Frees allocated memory; memory is removed from the slab.
     496             :  */
     497             : static void
     498     2996948 : SlabFree(MemoryContext context, void *pointer)
     499             : {
     500             :     int         idx;
     501     2996948 :     SlabContext *slab = castNode(SlabContext, context);
     502     2996948 :     SlabChunk  *chunk = SlabPointerGetChunk(pointer);
     503     2996948 :     SlabBlock  *block = chunk->block;
     504             : 
     505             : #ifdef MEMORY_CONTEXT_CHECKING
     506             :     /* Test for someone scribbling on unused space in chunk */
     507             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     508             :         if (!sentinel_ok(pointer, slab->chunkSize))
     509             :             elog(WARNING, "detected write past chunk end in %s %p",
     510             :                  slab->header.name, chunk);
     511             : #endif
     512             : 
      513             :     /* compute the index of the chunk with respect to the block start */
     514     2996948 :     idx = SlabChunkIndex(slab, block, chunk);
     515             : 
     516             :     /* add chunk to freelist, and update block nfree count */
     517     2996948 :     *(int32 *) pointer = block->firstFreeChunk;
     518     2996948 :     block->firstFreeChunk = idx;
     519     2996948 :     block->nfree++;
     520             : 
     521             :     Assert(block->nfree > 0);
     522             :     Assert(block->nfree <= slab->chunksPerBlock);
     523             : 
     524             : #ifdef CLOBBER_FREED_MEMORY
     525             :     /* XXX don't wipe the int32 index, used for block-level freelist */
     526             :     wipe_mem((char *) pointer + sizeof(int32),
     527             :              slab->chunkSize - sizeof(int32));
     528             : #endif
     529             : 
     530             :     /* remove the block from a freelist */
     531     2996948 :     dlist_delete(&block->node);
     532             : 
     533             :     /*
     534             :      * See if we need to update the minFreeChunks field for the slab - we only
      535             :      * need to do that if the block had that number of free chunks
     536             :      * before we freed one. In that case, we check if there still are blocks
     537             :      * in the original freelist and we either keep the current value (if there
     538             :      * still are blocks) or increment it by one (the new block is still the
     539             :      * one with minimum free chunks).
     540             :      *
      541             :      * The one exception is when the block becomes completely free - in that
      542             :      * case we will free it, so we can't use it for minFreeChunks. It however
     543             :      * means there are no more blocks with free chunks.
     544             :      */
     545     2996948 :     if (slab->minFreeChunks == (block->nfree - 1))
     546             :     {
      547             :         /* Have we removed the last block from this freelist? */
     548      370412 :         if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
     549             :         {
     550             :             /* but if we made the block entirely free, we'll free it */
     551      337154 :             if (block->nfree == slab->chunksPerBlock)
     552       11002 :                 slab->minFreeChunks = 0;
     553             :             else
     554      326152 :                 slab->minFreeChunks++;
     555             :         }
     556             :     }
     557             : 
     558             :     /* If the block is now completely empty, free it. */
     559     2996948 :     if (block->nfree == slab->chunksPerBlock)
     560             :     {
     561       41012 :         free(block);
     562       41012 :         slab->nblocks--;
     563       41012 :         context->mem_allocated -= slab->blockSize;
     564             :     }
     565             :     else
     566     2955936 :         dlist_push_head(&slab->freelist[block->nfree], &block->node);
     567             : 
     568             :     Assert(slab->nblocks >= 0);
     569             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     570     2996948 : }
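
Continuing the invented example (chunksPerBlock = 8): freeing a chunk from a block that had 2 free chunks moves the block from freelist[2] to freelist[3]; if it was the only block in freelist[2] and minFreeChunks was 2, minFreeChunks is bumped to 3. Only when nfree reaches 8, i.e. every chunk in the block is free, is the block itself free()'d and mem_allocated reduced by blockSize.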
     571             : 
     572             : /*
     573             :  * SlabRealloc
     574             :  *      Change the allocated size of a chunk.
     575             :  *
     576             :  * As Slab is designed for allocating equally-sized chunks of memory, it can't
     577             :  * do an actual chunk size change.  We try to be gentle and allow calls with
     578             :  * exactly the same size, as in that case we can simply return the same
     579             :  * chunk.  When the size differs, we throw an error.
     580             :  *
     581             :  * We could also allow requests with size < chunkSize.  That however seems
     582             :  * rather pointless - Slab is meant for chunks of constant size, and moreover
     583             :  * realloc is usually used to enlarge the chunk.
     584             :  */
     585             : static void *
     586           0 : SlabRealloc(MemoryContext context, void *pointer, Size size)
     587             : {
     588           0 :     SlabContext *slab = castNode(SlabContext, context);
     589             : 
     590             :     Assert(slab);
     591             : 
     592             :     /* can't do actual realloc with slab, but let's try to be gentle */
     593           0 :     if (size == slab->chunkSize)
     594           0 :         return pointer;
     595             : 
     596           0 :     elog(ERROR, "slab allocator does not support realloc()");
     597             :     return NULL;                /* keep compiler quiet */
     598             : }
     599             : 
     600             : /*
     601             :  * SlabGetChunkSpace
     602             :  *      Given a currently-allocated chunk, determine the total space
     603             :  *      it occupies (including all memory-allocation overhead).
     604             :  */
     605             : static Size
     606           0 : SlabGetChunkSpace(MemoryContext context, void *pointer)
     607             : {
     608           0 :     SlabContext *slab = castNode(SlabContext, context);
     609             : 
     610             :     Assert(slab);
     611             : 
     612           0 :     return slab->fullChunkSize;
     613             : }
     614             : 
     615             : /*
     616             :  * SlabIsEmpty
      617             :  *      Is a Slab empty of any allocated space?
     618             :  */
     619             : static bool
     620           0 : SlabIsEmpty(MemoryContext context)
     621             : {
     622           0 :     SlabContext *slab = castNode(SlabContext, context);
     623             : 
     624             :     Assert(slab);
     625             : 
     626           0 :     return (slab->nblocks == 0);
     627             : }
     628             : 
     629             : /*
     630             :  * SlabStats
     631             :  *      Compute stats about memory consumption of a Slab context.
     632             :  *
     633             :  * printfunc: if not NULL, pass a human-readable stats string to this.
     634             :  * passthru: pass this pointer through to printfunc.
     635             :  * totals: if not NULL, add stats about this context into *totals.
     636             :  * print_to_stderr: print stats to stderr if true, elog otherwise.
     637             :  */
     638             : static void
     639           0 : SlabStats(MemoryContext context,
     640             :           MemoryStatsPrintFunc printfunc, void *passthru,
     641             :           MemoryContextCounters *totals,
     642             :           bool print_to_stderr)
     643             : {
     644           0 :     SlabContext *slab = castNode(SlabContext, context);
     645           0 :     Size        nblocks = 0;
     646           0 :     Size        freechunks = 0;
     647             :     Size        totalspace;
     648           0 :     Size        freespace = 0;
     649             :     int         i;
     650             : 
     651             :     /* Include context header in totalspace */
     652           0 :     totalspace = slab->headerSize;
     653             : 
     654           0 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     655             :     {
     656             :         dlist_iter  iter;
     657             : 
     658           0 :         dlist_foreach(iter, &slab->freelist[i])
     659             :         {
     660           0 :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     661             : 
     662           0 :             nblocks++;
     663           0 :             totalspace += slab->blockSize;
     664           0 :             freespace += slab->fullChunkSize * block->nfree;
     665           0 :             freechunks += block->nfree;
     666             :         }
     667             :     }
     668             : 
     669           0 :     if (printfunc)
     670             :     {
     671             :         char        stats_string[200];
     672             : 
     673           0 :         snprintf(stats_string, sizeof(stats_string),
     674             :                  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
     675             :                  totalspace, nblocks, freespace, freechunks,
     676             :                  totalspace - freespace);
     677           0 :         printfunc(context, passthru, stats_string, print_to_stderr);
     678             :     }
     679             : 
     680           0 :     if (totals)
     681             :     {
     682           0 :         totals->nblocks += nblocks;
     683           0 :         totals->freechunks += freechunks;
     684           0 :         totals->totalspace += totalspace;
     685           0 :         totals->freespace += freespace;
     686             :     }
     687           0 : }
     688             : 
     689             : 
     690             : #ifdef MEMORY_CONTEXT_CHECKING
     691             : 
     692             : /*
     693             :  * SlabCheck
     694             :  *      Walk through chunks and check consistency of memory.
     695             :  *
     696             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
     697             :  * find yourself in an infinite loop when trouble occurs, because this
     698             :  * routine will be entered again when elog cleanup tries to release memory!
     699             :  */
     700             : static void
     701             : SlabCheck(MemoryContext context)
     702             : {
     703             :     int         i;
     704             :     SlabContext *slab = castNode(SlabContext, context);
     705             :     const char *name = slab->header.name;
     706             : 
     707             :     Assert(slab);
     708             :     Assert(slab->chunksPerBlock > 0);
     709             : 
     710             :     /* walk all the freelists */
     711             :     for (i = 0; i <= slab->chunksPerBlock; i++)
     712             :     {
     713             :         int         j,
     714             :                     nfree;
     715             :         dlist_iter  iter;
     716             : 
     717             :         /* walk all blocks on this freelist */
     718             :         dlist_foreach(iter, &slab->freelist[i])
     719             :         {
     720             :             int         idx;
     721             :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     722             : 
     723             :             /*
     724             :              * Make sure the number of free chunks (in the block header)
     725             :              * matches position in the freelist.
     726             :              */
     727             :             if (block->nfree != i)
     728             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
     729             :                      name, block->nfree, block, i);
     730             : 
     731             :             /* reset the bitmap of free chunks for this block */
     732             :             memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
     733             :             idx = block->firstFreeChunk;
     734             : 
     735             :             /*
     736             :              * Now walk through the chunks, count the free ones and also
     737             :              * perform some additional checks for the used ones. As the chunk
     738             :              * freelist is stored within the chunks themselves, we have to
     739             :              * walk through the chunks and construct our own bitmap.
     740             :              */
     741             : 
     742             :             nfree = 0;
     743             :             while (idx < slab->chunksPerBlock)
     744             :             {
     745             :                 SlabChunk  *chunk;
     746             : 
     747             :                 /* count the chunk as free, add it to the bitmap */
     748             :                 nfree++;
     749             :                 slab->freechunks[idx] = true;
     750             : 
     751             :                 /* read index of the next free chunk */
     752             :                 chunk = SlabBlockGetChunk(slab, block, idx);
     753             :                 VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     754             :                 idx = *(int32 *) SlabChunkGetPointer(chunk);
     755             :             }
     756             : 
     757             :             for (j = 0; j < slab->chunksPerBlock; j++)
     758             :             {
      759             :                 /* a chunk not marked free in the bitmap is in use - check it */
     760             :                 if (!slab->freechunks[j])
     761             :                 {
     762             :                     SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);
     763             : 
     764             :                     /* chunks have both block and slab pointers, so check both */
     765             :                     if (chunk->block != block)
     766             :                         elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
     767             :                              name, block, chunk);
     768             : 
     769             :                     if (chunk->slab != slab)
     770             :                         elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
     771             :                              name, block, chunk);
     772             : 
      773             :                     /* there might be a sentinel (thanks to alignment) */
     774             :                     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     775             :                         if (!sentinel_ok(chunk, slab->chunkSize))
     776             :                             elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
     777             :                                  name, block, chunk);
     778             :                 }
     779             :             }
     780             : 
     781             :             /*
     782             :              * Make sure we got the expected number of free chunks (as tracked
     783             :              * in the block header).
     784             :              */
     785             :             if (nfree != block->nfree)
     786             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
     787             :                      name, block->nfree, block, nfree);
     788             :         }
     789             :     }
     790             : 
     791             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     792             : }
     793             : 
     794             : #endif                          /* MEMORY_CONTEXT_CHECKING */

Generated by: LCOV version 1.14