LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - slab.c
Test:     PostgreSQL 13beta1              Date: 2020-06-03 10:06:28
Coverage: Lines: 88 / 128 (68.8 %)        Functions: 5 / 9 (55.6 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * slab.c
       4             :  *    SLAB allocator definitions.
       5             :  *
       6             :  * SLAB is a MemoryContext implementation designed for cases where large
       7             :  * numbers of equally-sized objects are allocated (and freed).
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 2017-2020, PostgreSQL Global Development Group
      11             :  *
      12             :  * IDENTIFICATION
      13             :  *    src/backend/utils/mmgr/slab.c
      14             :  *
      15             :  *
      16             :  * NOTE:
      17             :  *  The constant allocation size allows significant simplification and various
      18             :  *  optimizations over more general purpose allocators. The blocks are carved
      19             :  *  into chunks of exactly the right size (plus alignment), not wasting any
      20             :  *  memory.
      21             :  *
      22             :  *  The information about free chunks is maintained both at the block level and
      23             :  *  global (context) level. This is possible as the chunk size (and thus also
      24             :  *  the number of chunks per block) is fixed.
      25             :  *
       26             :  *  On each block, free chunks are tracked in a simple linked list: the
       27             :  *  contents of each free chunk are replaced with the index of the next
       28             :  *  free chunk in the same block. Each block also contains a counter of
       29             :  *  free chunks, which, combined with the block-level freelist, makes it
       30             :  *  trivial to eventually free the whole block.
      31             :  *
       32             :  *  At the context level, we use 'freelist', an array of block lists indexed
       33             :  *  by the number of free chunks, so that freelist[0] holds completely full
       34             :  *  blocks and freelist[chunksPerBlock] holds completely empty blocks.
      35             :  *
       36             :  *  This also allows various optimizations - for example, when searching for
       37             :  *  a free chunk, the allocator reuses space from the fullest blocks first,
       38             :  *  in the hope that some of the less-full blocks will become completely
       39             :  *  empty (and be returned to the OS).
      40             :  *
       41             :  *  For each block, we maintain a pointer to the first free chunk - this is quite
      42             :  *  cheap and allows us to skip all the preceding used chunks, eliminating
      43             :  *  a significant number of lookups in many common usage patterns. In the worst
      44             :  *  case this performs as if the pointer was not maintained.
      45             :  *
      46             :  *  We cache the freelist index for the blocks with the fewest free chunks
      47             :  *  (minFreeChunks), so that we don't have to search the freelist on every
      48             :  *  SlabAlloc() call, which is quite expensive.
      49             :  *
      50             :  *-------------------------------------------------------------------------
      51             :  */
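/*
 * A minimal usage sketch of this allocator, assuming only the standard
 * backend memory-context API declared in utils/memutils.h and utils/palloc.h;
 * the struct, the context name, and the 8 kB block size are arbitrary example
 * values, not anything taken from this file.
 */
#include "postgres.h"

#include "utils/memutils.h"

typedef struct MyNode
{
    int         a;
    int         b;
} MyNode;                       /* the fixed-size object type to allocate */

static void
slab_usage_sketch(void)
{
    /* every chunk in this context is exactly sizeof(MyNode) bytes */
    MemoryContext slabcxt = SlabContextCreate(CurrentMemoryContext,
                                              "example slab",
                                              8 * 1024,
                                              sizeof(MyNode));
    MyNode     *node = (MyNode *) MemoryContextAlloc(slabcxt, sizeof(MyNode));

    node->a = 1;
    node->b = 2;

    pfree(node);                    /* returns the chunk to its block */
    MemoryContextDelete(slabcxt);   /* releases all remaining blocks */
}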
      52             : 
      53             : #include "postgres.h"
      54             : 
      55             : #include "lib/ilist.h"
      56             : #include "utils/memdebug.h"
      57             : #include "utils/memutils.h"
      58             : 
      59             : /*
      60             :  * SlabContext is a specialized implementation of MemoryContext.
      61             :  */
      62             : typedef struct SlabContext
      63             : {
      64             :     MemoryContextData header;   /* Standard memory-context fields */
      65             :     /* Allocation parameters for this context: */
      66             :     Size        chunkSize;      /* chunk size */
      67             :     Size        fullChunkSize;  /* chunk size including header and alignment */
      68             :     Size        blockSize;      /* block size */
      69             :     Size        headerSize;     /* allocated size of context header */
      70             :     int         chunksPerBlock; /* number of chunks per block */
      71             :     int         minFreeChunks;  /* min number of free chunks in any block */
      72             :     int         nblocks;        /* number of blocks allocated */
      73             : #ifdef MEMORY_CONTEXT_CHECKING
      74             :     bool       *freechunks;     /* bitmap of free chunks in a block */
      75             : #endif
      76             :     /* blocks with free space, grouped by number of free chunks: */
      77             :     dlist_head  freelist[FLEXIBLE_ARRAY_MEMBER];
      78             : } SlabContext;
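/*
 * A sketch of how the variable-length header above is sized; this mirrors the
 * computation performed in SlabContextCreate() below.  freelist[i] is the
 * doubly-linked list of blocks that currently have exactly i free chunks,
 * for i = 0 .. chunksPerBlock.
 */
static Size
slab_header_size_sketch(int chunksPerBlock)
{
    /* one dlist_head per possible free-chunk count, including 0 and chunksPerBlock */
    Size        headerSize = offsetof(SlabContext, freelist) +
        sizeof(dlist_head) * (chunksPerBlock + 1);

#ifdef MEMORY_CONTEXT_CHECKING
    /* plus one bool per chunk, for the free-chunk bitmap used by SlabCheck() */
    headerSize += chunksPerBlock * sizeof(bool);
#endif

    return headerSize;
}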
      79             : 
      80             : /*
      81             :  * SlabBlock
      82             :  *      Structure of a single block in SLAB allocator.
      83             :  *
      84             :  * node: doubly-linked list of blocks in global freelist
      85             :  * nfree: number of free chunks in this block
      86             :  * firstFreeChunk: index of the first free chunk
      87             :  */
      88             : typedef struct SlabBlock
      89             : {
      90             :     dlist_node  node;           /* doubly-linked list */
      91             :     int         nfree;          /* number of free chunks */
      92             :     int         firstFreeChunk; /* index of the first free chunk in the block */
      93             : } SlabBlock;
      94             : 
      95             : /*
      96             :  * SlabChunk
      97             :  *      The prefix of each piece of memory in a SlabBlock
      98             :  *
      99             :  * Note: to meet the memory context APIs, the payload area of the chunk must
     100             :  * be maxaligned, and the "slab" link must be immediately adjacent to the
     101             :  * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
     102             :  * which MAXALIGN is more than twice sizeof(void *), this happens without any
     103             :  * special hacking in this struct declaration.  But there is a static
     104             :  * assertion below that the alignment is done correctly.
     105             :  */
     106             : typedef struct SlabChunk
     107             : {
     108             :     SlabBlock  *block;          /* block owning this chunk */
     109             :     SlabContext *slab;          /* owning context */
     110             :     /* there must not be any padding to reach a MAXALIGN boundary here! */
     111             : } SlabChunk;
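/*
 * A simplified sketch of why the "slab" field must be directly adjacent to
 * the payload: pfree() and repalloc() find the owning context by reading the
 * MemoryContext pointer stored immediately before the user pointer, in the
 * style of GetMemoryChunkContext() in utils/memutils.h (the real function
 * also performs alignment and validity checks).
 */
static MemoryContext
chunk_owning_context_sketch(void *pointer)
{
    /* for a slab chunk, this reads SlabChunk.slab */
    return *(MemoryContext *) (((char *) pointer) - sizeof(void *));
}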
     112             : 
     113             : 
     114             : #define SlabPointerGetChunk(ptr)    \
     115             :     ((SlabChunk *)(((char *)(ptr)) - sizeof(SlabChunk)))
     116             : #define SlabChunkGetPointer(chk)    \
     117             :     ((void *)(((char *)(chk)) + sizeof(SlabChunk)))
     118             : #define SlabBlockGetChunk(slab, block, idx) \
     119             :     ((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)    \
     120             :                     + (idx * slab->fullChunkSize)))
     121             : #define SlabBlockStart(block)   \
     122             :     ((char *) block + sizeof(SlabBlock))
     123             : #define SlabChunkIndex(slab, block, chunk)  \
     124             :     (((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)
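/*
 * A sketch of how the macros above and the per-block freelist fit together:
 * the payload of every free chunk stores the int32 index of the next free
 * chunk, block->firstFreeChunk is the head of that list, and an index equal
 * to chunksPerBlock terminates it (see the assertions in SlabAlloc below).
 * This hypothetical helper counts a block's free chunks by walking the list;
 * under Valgrind the payload would first have to be marked defined, as
 * SlabAlloc() and SlabCheck() do.
 */
static int
slab_count_free_chunks_sketch(SlabContext *slab, SlabBlock *block)
{
    int         nfree = 0;
    int         idx = block->firstFreeChunk;

    while (idx < slab->chunksPerBlock)
    {
        SlabChunk  *chunk = SlabBlockGetChunk(slab, block, idx);

        /* the index recomputed from the chunk's address must match */
        Assert(SlabChunkIndex(slab, block, chunk) == idx);

        nfree++;
        /* follow the link stored in the free chunk's payload */
        idx = *(int32 *) SlabChunkGetPointer(chunk);
    }

    Assert(nfree == block->nfree);
    return nfree;
}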
     125             : 
     126             : /*
     127             :  * These functions implement the MemoryContext API for Slab contexts.
     128             :  */
     129             : static void *SlabAlloc(MemoryContext context, Size size);
     130             : static void SlabFree(MemoryContext context, void *pointer);
     131             : static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
     132             : static void SlabReset(MemoryContext context);
     133             : static void SlabDelete(MemoryContext context);
     134             : static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
     135             : static bool SlabIsEmpty(MemoryContext context);
     136             : static void SlabStats(MemoryContext context,
     137             :                       MemoryStatsPrintFunc printfunc, void *passthru,
     138             :                       MemoryContextCounters *totals);
     139             : #ifdef MEMORY_CONTEXT_CHECKING
     140             : static void SlabCheck(MemoryContext context);
     141             : #endif
     142             : 
     143             : /*
     144             :  * This is the virtual function table for Slab contexts.
     145             :  */
     146             : static const MemoryContextMethods SlabMethods = {
     147             :     SlabAlloc,
     148             :     SlabFree,
     149             :     SlabRealloc,
     150             :     SlabReset,
     151             :     SlabDelete,
     152             :     SlabGetChunkSpace,
     153             :     SlabIsEmpty,
     154             :     SlabStats
     155             : #ifdef MEMORY_CONTEXT_CHECKING
     156             :     ,SlabCheck
     157             : #endif
     158             : };
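/*
 * The table above is how the generic memory-context code reaches this file:
 * mcxt.c dispatches palloc/pfree-style calls through context->methods.  A
 * simplified sketch of that dispatch (the real MemoryContextAlloc in mcxt.c
 * also checks the request size and handles allocation failure):
 */
static void *
context_alloc_dispatch_sketch(MemoryContext context, Size size)
{
    /* for a SlabContext this ends up in SlabAlloc() */
    return context->methods->alloc(context, size);
}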
     159             : 
     160             : 
     161             : /*
     162             :  * SlabContextCreate
     163             :  *      Create a new Slab context.
     164             :  *
     165             :  * parent: parent context, or NULL if top-level context
     166             :  * name: name of context (must be statically allocated)
     167             :  * blockSize: allocation block size
     168             :  * chunkSize: allocation chunk size
     169             :  *
     170             :  * The chunkSize may not exceed:
     171             :  *      MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
     172             :  */
     173             : MemoryContext
     174        1192 : SlabContextCreate(MemoryContext parent,
     175             :                   const char *name,
     176             :                   Size blockSize,
     177             :                   Size chunkSize)
     178             : {
     179             :     int         chunksPerBlock;
     180             :     Size        fullChunkSize;
     181             :     Size        freelistSize;
     182             :     Size        headerSize;
     183             :     SlabContext *slab;
     184             :     int         i;
     185             : 
     186             :     /* Assert we padded SlabChunk properly */
     187             :     StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
     188             :                      "sizeof(SlabChunk) is not maxaligned");
     189             :     StaticAssertStmt(offsetof(SlabChunk, slab) + sizeof(MemoryContext) ==
     190             :                      sizeof(SlabChunk),
     191             :                      "padding calculation in SlabChunk is wrong");
     192             : 
     193             :     /* Make sure the linked list node fits inside a freed chunk */
     194        1192 :     if (chunkSize < sizeof(int))
     195           0 :         chunkSize = sizeof(int);
     196             : 
     197             :     /* chunk, including SLAB header (both addresses nicely aligned) */
     198        1192 :     fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
     199             : 
     200             :     /* Make sure the block can store at least one chunk. */
     201        1192 :     if (blockSize < fullChunkSize + sizeof(SlabBlock))
     202           0 :         elog(ERROR, "block size %zu for slab is too small for %zu chunks",
     203             :              blockSize, chunkSize);
     204             : 
     205             :     /* Compute maximum number of chunks per block */
     206        1192 :     chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
     207             : 
     208             :     /* The freelist starts with 0, ends with chunksPerBlock. */
     209        1192 :     freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
     210             : 
     211             :     /*
     212             :      * Allocate the context header.  Unlike aset.c, we never try to combine
     213             :      * this with the first regular block; not worth the extra complication.
     214             :      */
     215             : 
     216             :     /* Size of the memory context header */
     217        1192 :     headerSize = offsetof(SlabContext, freelist) + freelistSize;
     218             : 
     219             : #ifdef MEMORY_CONTEXT_CHECKING
     220             : 
     221             :     /*
     222             :      * With memory checking, we need to allocate extra space for the bitmap of
     223             :      * free chunks. The bitmap is an array of bools, so we don't need to worry
     224             :      * about alignment.
     225             :      */
     226             :     headerSize += chunksPerBlock * sizeof(bool);
     227             : #endif
     228             : 
     229        1192 :     slab = (SlabContext *) malloc(headerSize);
     230        1192 :     if (slab == NULL)
     231             :     {
     232           0 :         MemoryContextStats(TopMemoryContext);
     233           0 :         ereport(ERROR,
     234             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     235             :                  errmsg("out of memory"),
     236             :                  errdetail("Failed while creating memory context \"%s\".",
     237             :                            name)));
     238             :     }
     239             : 
     240             :     /*
     241             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     242             :      * we'd leak the header if we ereport in this stretch.
     243             :      */
     244             : 
     245             :     /* Fill in SlabContext-specific header fields */
     246        1192 :     slab->chunkSize = chunkSize;
     247        1192 :     slab->fullChunkSize = fullChunkSize;
     248        1192 :     slab->blockSize = blockSize;
     249        1192 :     slab->headerSize = headerSize;
     250        1192 :     slab->chunksPerBlock = chunksPerBlock;
     251        1192 :     slab->minFreeChunks = 0;
     252        1192 :     slab->nblocks = 0;
     253             : 
     254             :     /* initialize the freelist slots */
     255       72116 :     for (i = 0; i < (slab->chunksPerBlock + 1); i++)
     256       70924 :         dlist_init(&slab->freelist[i]);
     257             : 
     258             : #ifdef MEMORY_CONTEXT_CHECKING
     259             :     /* set the freechunks pointer right after the freelists array */
     260             :     slab->freechunks
     261             :         = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
     262             : #endif
     263             : 
     264             :     /* Finally, do the type-independent part of context creation */
     265        1192 :     MemoryContextCreate((MemoryContext) slab,
     266             :                         T_SlabContext,
     267             :                         &SlabMethods,
     268             :                         parent,
     269             :                         name);
     270             : 
     271        1192 :     return (MemoryContext) slab;
     272             : }
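/*
 * A worked example of the sizing arithmetic above, for a hypothetical
 * SlabContextCreate(parent, "example", 8192, 32) call on a typical 64-bit
 * platform (8-byte pointers, MAXALIGN of 8); the concrete numbers in the
 * comments are assumptions about such a platform, not constants from this
 * file.
 */
static void
slab_sizing_example(void)
{
    Size        chunkSize = 32;
    Size        blockSize = 8192;

    /* 16-byte SlabChunk header + MAXALIGN(32) payload = 48 bytes per chunk */
    Size        fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);

    /* (8192 - 24) / 48, i.e. 170 chunks fit into each 8 kB block */
    int         chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;

    /* 171 freelist heads (0 .. 170 free chunks), 16 bytes each = 2736 bytes */
    Size        freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);

    elog(DEBUG1, "fullChunkSize=%zu chunksPerBlock=%d freelistSize=%zu",
         fullChunkSize, chunksPerBlock, freelistSize);
}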
     273             : 
     274             : /*
     275             :  * SlabReset
      276             :  *      Frees all memory which is allocated in the given context.
     277             :  *
     278             :  * The code simply frees all the blocks in the context - we don't keep any
     279             :  * keeper blocks or anything like that.
     280             :  */
     281             : static void
     282        1108 : SlabReset(MemoryContext context)
     283             : {
     284             :     int         i;
     285        1108 :     SlabContext *slab = castNode(SlabContext, context);
     286             : 
     287             :     Assert(slab);
     288             : 
     289             : #ifdef MEMORY_CONTEXT_CHECKING
     290             :     /* Check for corruption and leaks before freeing */
     291             :     SlabCheck(context);
     292             : #endif
     293             : 
     294             :     /* walk over freelists and free the blocks */
     295       67034 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     296             :     {
     297             :         dlist_mutable_iter miter;
     298             : 
     299       65946 :         dlist_foreach_modify(miter, &slab->freelist[i])
     300             :         {
     301          20 :             SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);
     302             : 
     303          20 :             dlist_delete(miter.cur);
     304             : 
     305             : #ifdef CLOBBER_FREED_MEMORY
     306             :             wipe_mem(block, slab->blockSize);
     307             : #endif
     308          20 :             free(block);
     309          20 :             slab->nblocks--;
     310          20 :             context->mem_allocated -= slab->blockSize;
     311             :         }
     312             :     }
     313             : 
     314        1108 :     slab->minFreeChunks = 0;
     315             : 
     316             :     Assert(slab->nblocks == 0);
     317             :     Assert(context->mem_allocated == 0);
     318        1108 : }
     319             : 
     320             : /*
     321             :  * SlabDelete
     322             :  *      Free all memory which is allocated in the given context.
     323             :  */
     324             : static void
     325        1108 : SlabDelete(MemoryContext context)
     326             : {
     327             :     /* Reset to release all the SlabBlocks */
     328        1108 :     SlabReset(context);
     329             :     /* And free the context header */
     330        1108 :     free(context);
     331        1108 : }
     332             : 
     333             : /*
     334             :  * SlabAlloc
      335             :  *      Returns a pointer to allocated memory of the given size, or NULL
      336             :  *      if the request could not be completed; memory is added to the slab.
     337             :  */
     338             : static void *
     339     2122620 : SlabAlloc(MemoryContext context, Size size)
     340             : {
     341     2122620 :     SlabContext *slab = castNode(SlabContext, context);
     342             :     SlabBlock  *block;
     343             :     SlabChunk  *chunk;
     344             :     int         idx;
     345             : 
     346             :     Assert(slab);
     347             : 
     348             :     Assert((slab->minFreeChunks >= 0) &&
     349             :            (slab->minFreeChunks < slab->chunksPerBlock));
     350             : 
     351             :     /* make sure we only allow correct request size */
     352     2122620 :     if (size != slab->chunkSize)
     353           0 :         elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
     354             :              size, slab->chunkSize);
     355             : 
     356             :     /*
     357             :      * If there are no free chunks in any existing block, create a new block
      358             :      * and put it in the last freelist bucket.
     359             :      *
     360             :      * slab->minFreeChunks == 0 means there are no blocks with free chunks,
     361             :      * thanks to how minFreeChunks is updated at the end of SlabAlloc().
     362             :      */
     363     2122620 :     if (slab->minFreeChunks == 0)
     364             :     {
     365       30320 :         block = (SlabBlock *) malloc(slab->blockSize);
     366             : 
     367       30320 :         if (block == NULL)
     368           0 :             return NULL;
     369             : 
     370       30320 :         block->nfree = slab->chunksPerBlock;
     371       30320 :         block->firstFreeChunk = 0;
     372             : 
     373             :         /*
     374             :          * Put all the chunks on a freelist. Walk the chunks and point each
     375             :          * one to the next one.
     376             :          */
     377     2467600 :         for (idx = 0; idx < slab->chunksPerBlock; idx++)
     378             :         {
     379     2437280 :             chunk = SlabBlockGetChunk(slab, block, idx);
     380     2437280 :             *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
     381             :         }
     382             : 
     383             :         /*
     384             :          * And add it to the last freelist with all chunks empty.
     385             :          *
     386             :          * We know there are no blocks in the freelist, otherwise we wouldn't
     387             :          * need a new block.
     388             :          */
     389             :         Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));
     390             : 
     391       30320 :         dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
     392             : 
     393       30320 :         slab->minFreeChunks = slab->chunksPerBlock;
     394       30320 :         slab->nblocks += 1;
     395       30320 :         context->mem_allocated += slab->blockSize;
     396             :     }
     397             : 
     398             :     /* grab the block from the freelist (even the new block is there) */
     399     2122620 :     block = dlist_head_element(SlabBlock, node,
     400             :                                &slab->freelist[slab->minFreeChunks]);
     401             : 
     402             :     /* make sure we actually got a valid block, with matching nfree */
     403             :     Assert(block != NULL);
     404             :     Assert(slab->minFreeChunks == block->nfree);
     405             :     Assert(block->nfree > 0);
     406             : 
     407             :     /* we know index of the first free chunk in the block */
     408     2122620 :     idx = block->firstFreeChunk;
     409             : 
     410             :     /* make sure the chunk index is valid, and that it's marked as empty */
     411             :     Assert((idx >= 0) && (idx < slab->chunksPerBlock));
     412             : 
      413             :     /* compute the chunk location from the block start (after the block header) */
     414     2122620 :     chunk = SlabBlockGetChunk(slab, block, idx);
     415             : 
     416             :     /*
     417             :      * Update the block nfree count, and also the minFreeChunks as we've
     418             :      * decreased nfree for a block with the minimum number of free chunks
     419             :      * (because that's how we chose the block).
     420             :      */
     421     2122620 :     block->nfree--;
     422     2122620 :     slab->minFreeChunks = block->nfree;
     423             : 
     424             :     /*
     425             :      * Remove the chunk from the freelist head. The index of the next free
     426             :      * chunk is stored in the chunk itself.
     427             :      */
     428             :     VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     429     2122620 :     block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
     430             : 
     431             :     Assert(block->firstFreeChunk >= 0);
     432             :     Assert(block->firstFreeChunk <= slab->chunksPerBlock);
     433             : 
     434             :     Assert((block->nfree != 0 &&
     435             :             block->firstFreeChunk < slab->chunksPerBlock) ||
     436             :            (block->nfree == 0 &&
     437             :             block->firstFreeChunk == slab->chunksPerBlock));
     438             : 
     439             :     /* move the whole block to the right place in the freelist */
     440     2122620 :     dlist_delete(&block->node);
     441     2122620 :     dlist_push_head(&slab->freelist[block->nfree], &block->node);
     442             : 
     443             :     /*
      444             :      * And finally update minFreeChunks, i.e. the lowest number of free chunks
      445             :      * in any block that still has free chunks. We only need to do that when
      446             :      * the block became full (otherwise we know the current block is still the
      447             :      * right one). We simply walk the freelist until we find a non-empty entry.
     448             :      */
     449     2122620 :     if (slab->minFreeChunks == 0)
     450             :     {
     451     1955148 :         for (idx = 1; idx <= slab->chunksPerBlock; idx++)
     452             :         {
     453     1932986 :             if (dlist_is_empty(&slab->freelist[idx]))
     454     1931692 :                 continue;
     455             : 
     456             :             /* found a non-empty freelist */
     457        1294 :             slab->minFreeChunks = idx;
     458        1294 :             break;
     459             :         }
     460             :     }
     461             : 
     462     2122620 :     if (slab->minFreeChunks == slab->chunksPerBlock)
     463           0 :         slab->minFreeChunks = 0;
     464             : 
     465             :     /* Prepare to initialize the chunk header. */
     466             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
     467             : 
     468     2122620 :     chunk->block = block;
     469     2122620 :     chunk->slab = slab;
     470             : 
     471             : #ifdef MEMORY_CONTEXT_CHECKING
     472             :     /* slab mark to catch clobber of "unused" space */
     473             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     474             :     {
     475             :         set_sentinel(SlabChunkGetPointer(chunk), size);
     476             :         VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
     477             :                                    sizeof(SlabChunk) + slab->chunkSize,
     478             :                                    slab->fullChunkSize -
     479             :                                    (slab->chunkSize + sizeof(SlabChunk)));
     480             :     }
     481             : #endif
     482             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     483             :     /* fill the allocated space with junk */
     484             :     randomize_mem((char *) SlabChunkGetPointer(chunk), size);
     485             : #endif
     486             : 
     487             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     488             : 
     489     2122620 :     return SlabChunkGetPointer(chunk);
     490             : }
     491             : 
     492             : /*
     493             :  * SlabFree
     494             :  *      Frees allocated memory; memory is removed from the slab.
     495             :  */
     496             : static void
     497     2122592 : SlabFree(MemoryContext context, void *pointer)
     498             : {
     499             :     int         idx;
     500     2122592 :     SlabContext *slab = castNode(SlabContext, context);
     501     2122592 :     SlabChunk  *chunk = SlabPointerGetChunk(pointer);
     502     2122592 :     SlabBlock  *block = chunk->block;
     503             : 
     504             : #ifdef MEMORY_CONTEXT_CHECKING
     505             :     /* Test for someone scribbling on unused space in chunk */
     506             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     507             :         if (!sentinel_ok(pointer, slab->chunkSize))
     508             :             elog(WARNING, "detected write past chunk end in %s %p",
     509             :                  slab->header.name, chunk);
     510             : #endif
     511             : 
     512             :     /* compute index of the chunk with respect to block start */
     513     2122592 :     idx = SlabChunkIndex(slab, block, chunk);
     514             : 
     515             :     /* add chunk to freelist, and update block nfree count */
     516     2122592 :     *(int32 *) pointer = block->firstFreeChunk;
     517     2122592 :     block->firstFreeChunk = idx;
     518     2122592 :     block->nfree++;
     519             : 
     520             :     Assert(block->nfree > 0);
     521             :     Assert(block->nfree <= slab->chunksPerBlock);
     522             : 
     523             : #ifdef CLOBBER_FREED_MEMORY
     524             :     /* XXX don't wipe the int32 index, used for block-level freelist */
     525             :     wipe_mem((char *) pointer + sizeof(int32),
     526             :              slab->chunkSize - sizeof(int32));
     527             : #endif
     528             : 
     529             :     /* remove the block from a freelist */
     530     2122592 :     dlist_delete(&block->node);
     531             : 
     532             :     /*
      533             :      * See if we need to update the minFreeChunks field for the slab - we only
      534             :      * need to do that if the block had exactly minFreeChunks free chunks
      535             :      * before we freed one. In that case we check whether there still are
      536             :      * blocks on the original freelist, and either keep the current value (if
      537             :      * there are) or increment it by one (this block is now the one with the
      538             :      * minimum number of free chunks).
      539             :      *
      540             :      * The one exception is when the block becomes completely free - in that
      541             :      * case we will free it, so we can't use it for minFreeChunks. It does,
      542             :      * however, mean there are no more blocks with free chunks.
     543             :      */
     544     2122592 :     if (slab->minFreeChunks == (block->nfree - 1))
     545             :     {
     546             :         /* Have we removed the last chunk from the freelist? */
     547      230946 :         if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
     548             :         {
     549             :             /* but if we made the block entirely free, we'll free it */
     550      207966 :             if (block->nfree == slab->chunksPerBlock)
     551        8354 :                 slab->minFreeChunks = 0;
     552             :             else
     553      199612 :                 slab->minFreeChunks++;
     554             :         }
     555             :     }
     556             : 
     557             :     /* If the block is now completely empty, free it. */
     558     2122592 :     if (block->nfree == slab->chunksPerBlock)
     559             :     {
     560       30300 :         free(block);
     561       30300 :         slab->nblocks--;
     562       30300 :         context->mem_allocated -= slab->blockSize;
     563             :     }
     564             :     else
     565     2092292 :         dlist_push_head(&slab->freelist[block->nfree], &block->node);
     566             : 
     567             :     Assert(slab->nblocks >= 0);
     568             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     569     2122592 : }
     570             : 
     571             : /*
     572             :  * SlabRealloc
     573             :  *      Change the allocated size of a chunk.
     574             :  *
     575             :  * As Slab is designed for allocating equally-sized chunks of memory, it can't
     576             :  * do an actual chunk size change.  We try to be gentle and allow calls with
     577             :  * exactly the same size, as in that case we can simply return the same
     578             :  * chunk.  When the size differs, we throw an error.
     579             :  *
     580             :  * We could also allow requests with size < chunkSize.  That however seems
     581             :  * rather pointless - Slab is meant for chunks of constant size, and moreover
     582             :  * realloc is usually used to enlarge the chunk.
     583             :  */
     584             : static void *
     585           0 : SlabRealloc(MemoryContext context, void *pointer, Size size)
     586             : {
     587           0 :     SlabContext *slab = castNode(SlabContext, context);
     588             : 
     589             :     Assert(slab);
     590             : 
     591             :     /* can't do actual realloc with slab, but let's try to be gentle */
     592           0 :     if (size == slab->chunkSize)
     593           0 :         return pointer;
     594             : 
     595           0 :     elog(ERROR, "slab allocator does not support realloc()");
     596             :     return NULL;                /* keep compiler quiet */
     597             : }
     598             : 
     599             : /*
     600             :  * SlabGetChunkSpace
     601             :  *      Given a currently-allocated chunk, determine the total space
     602             :  *      it occupies (including all memory-allocation overhead).
     603             :  */
     604             : static Size
     605           0 : SlabGetChunkSpace(MemoryContext context, void *pointer)
     606             : {
     607           0 :     SlabContext *slab = castNode(SlabContext, context);
     608             : 
     609             :     Assert(slab);
     610             : 
     611           0 :     return slab->fullChunkSize;
     612             : }
     613             : 
     614             : /*
     615             :  * SlabIsEmpty
      616             :  *      Is a Slab context empty of any allocated space?
     617             :  */
     618             : static bool
     619           0 : SlabIsEmpty(MemoryContext context)
     620             : {
     621           0 :     SlabContext *slab = castNode(SlabContext, context);
     622             : 
     623             :     Assert(slab);
     624             : 
     625           0 :     return (slab->nblocks == 0);
     626             : }
     627             : 
     628             : /*
     629             :  * SlabStats
     630             :  *      Compute stats about memory consumption of a Slab context.
     631             :  *
     632             :  * printfunc: if not NULL, pass a human-readable stats string to this.
     633             :  * passthru: pass this pointer through to printfunc.
     634             :  * totals: if not NULL, add stats about this context into *totals.
     635             :  */
     636             : static void
     637           0 : SlabStats(MemoryContext context,
     638             :           MemoryStatsPrintFunc printfunc, void *passthru,
     639             :           MemoryContextCounters *totals)
     640             : {
     641           0 :     SlabContext *slab = castNode(SlabContext, context);
     642           0 :     Size        nblocks = 0;
     643           0 :     Size        freechunks = 0;
     644             :     Size        totalspace;
     645           0 :     Size        freespace = 0;
     646             :     int         i;
     647             : 
     648             :     /* Include context header in totalspace */
     649           0 :     totalspace = slab->headerSize;
     650             : 
     651           0 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     652             :     {
     653             :         dlist_iter  iter;
     654             : 
     655           0 :         dlist_foreach(iter, &slab->freelist[i])
     656             :         {
     657           0 :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     658             : 
     659           0 :             nblocks++;
     660           0 :             totalspace += slab->blockSize;
     661           0 :             freespace += slab->fullChunkSize * block->nfree;
     662           0 :             freechunks += block->nfree;
     663             :         }
     664             :     }
     665             : 
     666           0 :     if (printfunc)
     667             :     {
     668             :         char        stats_string[200];
     669             : 
     670           0 :         snprintf(stats_string, sizeof(stats_string),
     671             :                  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
     672             :                  totalspace, nblocks, freespace, freechunks,
     673             :                  totalspace - freespace);
     674           0 :         printfunc(context, passthru, stats_string);
     675             :     }
     676             : 
     677           0 :     if (totals)
     678             :     {
     679           0 :         totals->nblocks += nblocks;
     680           0 :         totals->freechunks += freechunks;
     681           0 :         totals->totalspace += totalspace;
     682           0 :         totals->freespace += freespace;
     683             :     }
     684           0 : }
     685             : 
     686             : 
     687             : #ifdef MEMORY_CONTEXT_CHECKING
     688             : 
     689             : /*
     690             :  * SlabCheck
     691             :  *      Walk through chunks and check consistency of memory.
     692             :  *
     693             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
     694             :  * find yourself in an infinite loop when trouble occurs, because this
     695             :  * routine will be entered again when elog cleanup tries to release memory!
     696             :  */
     697             : static void
     698             : SlabCheck(MemoryContext context)
     699             : {
     700             :     int         i;
     701             :     SlabContext *slab = castNode(SlabContext, context);
     702             :     const char *name = slab->header.name;
     703             : 
     704             :     Assert(slab);
     705             :     Assert(slab->chunksPerBlock > 0);
     706             : 
     707             :     /* walk all the freelists */
     708             :     for (i = 0; i <= slab->chunksPerBlock; i++)
     709             :     {
     710             :         int         j,
     711             :                     nfree;
     712             :         dlist_iter  iter;
     713             : 
     714             :         /* walk all blocks on this freelist */
     715             :         dlist_foreach(iter, &slab->freelist[i])
     716             :         {
     717             :             int         idx;
     718             :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     719             : 
     720             :             /*
     721             :              * Make sure the number of free chunks (in the block header)
     722             :              * matches position in the freelist.
      723             :              * matches the block's position in the freelist.
     724             :             if (block->nfree != i)
     725             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
     726             :                      name, block->nfree, block, i);
     727             : 
     728             :             /* reset the bitmap of free chunks for this block */
     729             :             memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
     730             :             idx = block->firstFreeChunk;
     731             : 
     732             :             /*
     733             :              * Now walk through the chunks, count the free ones and also
     734             :              * perform some additional checks for the used ones. As the chunk
     735             :              * freelist is stored within the chunks themselves, we have to
     736             :              * walk through the chunks and construct our own bitmap.
     737             :              */
     738             : 
     739             :             nfree = 0;
     740             :             while (idx < slab->chunksPerBlock)
     741             :             {
     742             :                 SlabChunk  *chunk;
     743             : 
     744             :                 /* count the chunk as free, add it to the bitmap */
     745             :                 nfree++;
     746             :                 slab->freechunks[idx] = true;
     747             : 
     748             :                 /* read index of the next free chunk */
     749             :                 chunk = SlabBlockGetChunk(slab, block, idx);
     750             :                 VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     751             :                 idx = *(int32 *) SlabChunkGetPointer(chunk);
     752             :             }
     753             : 
     754             :             for (j = 0; j < slab->chunksPerBlock; j++)
     755             :             {
      756             :                 /* a false entry in the bitmap means the chunk is in use */
     757             :                 if (!slab->freechunks[j])
     758             :                 {
     759             :                     SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);
     760             : 
     761             :                     /* chunks have both block and slab pointers, so check both */
     762             :                     if (chunk->block != block)
     763             :                         elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
     764             :                              name, block, chunk);
     765             : 
     766             :                     if (chunk->slab != slab)
     767             :                         elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
     768             :                              name, block, chunk);
     769             : 
     770             :                     /* there might be sentinel (thanks to alignment) */
     771             :                     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     772             :                         if (!sentinel_ok(chunk, slab->chunkSize))
     773             :                             elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
     774             :                                  name, block, chunk);
     775             :                 }
     776             :             }
     777             : 
     778             :             /*
     779             :              * Make sure we got the expected number of free chunks (as tracked
     780             :              * in the block header).
     781             :              */
     782             :             if (nfree != block->nfree)
     783             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
     784             :                      name, block->nfree, block, nfree);
     785             :         }
     786             :     }
     787             : 
     788             :     Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
     789             : }
     790             : 
     791             : #endif                          /* MEMORY_CONTEXT_CHECKING */

Generated by: LCOV version 1.13