LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - slab.c (source / functions) Hit Total Coverage
Test: PostgreSQL 12beta2 Lines: 85 125 68.0 %
Date: 2019-06-18 07:06:57 Functions: 5 9 55.6 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * slab.c
       4             :  *    SLAB allocator definitions.
       5             :  *
       6             :  * SLAB is a MemoryContext implementation designed for cases where large
       7             :  * numbers of equally-sized objects are allocated (and freed).
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 2017-2019, PostgreSQL Global Development Group
      11             :  *
      12             :  * IDENTIFICATION
      13             :  *    src/backend/utils/mmgr/slab.c
      14             :  *
      15             :  *
      16             :  * NOTE:
      17             :  *  The constant allocation size allows significant simplification and various
      18             :  *  optimizations over more general purpose allocators. The blocks are carved
      19             :  *  into chunks of exactly the right size (plus alignment), not wasting any
      20             :  *  memory.
      21             :  *
      22             :  *  The information about free chunks is maintained both at the block level and
      23             :  *  global (context) level. This is possible as the chunk size (and thus also
      24             :  *  the number of chunks per block) is fixed.
      25             :  *
      26             :  *  On each block, free chunks are tracked in a simple linked list. Contents
      27             :  *  of free chunks is replaced with an index of the next free chunk, forming
      28             :  *  a very simple linked list. Each block also contains a counter of free
      29             :  *  chunks. Combined with the local block-level freelist, it makes it trivial
      30             :  *  to eventually free the whole block.
      31             :  *
      32             :  *  At the context level, we use 'freelist' to track blocks ordered by number
      33             :  *  of free chunks, starting with blocks having a single allocated chunk, and
      34             :  *  with completely full blocks on the tail.
      35             :  *
      36             :  *  This also allows various optimizations - for example when searching for
      37             :  *  free chunk, the allocator reuses space from the fullest blocks first, in
      38             :  *  the hope that some of the less full blocks will get completely empty (and
      39             :  *  returned back to the OS).
      40             :  *
      41             :  *  For each block, we maintain pointer to the first free chunk - this is quite
      42             :  *  cheap and allows us to skip all the preceding used chunks, eliminating
      43             :  *  a significant number of lookups in many common usage patters. In the worst
      44             :  *  case this performs as if the pointer was not maintained.
      45             :  *
      46             :  *  We cache the freelist index for the blocks with the fewest free chunks
      47             :  *  (minFreeChunks), so that we don't have to search the freelist on every
      48             :  *  SlabAlloc() call, which is quite expensive.
      49             :  *
      50             :  *-------------------------------------------------------------------------
      51             :  */
      52             : 
      53             : #include "postgres.h"
      54             : 
      55             : #include "utils/memdebug.h"
      56             : #include "utils/memutils.h"
      57             : #include "lib/ilist.h"
      58             : 
      59             : 
      60             : /*
      61             :  * SlabContext is a specialized implementation of MemoryContext.
      62             :  */
      63             : typedef struct SlabContext
      64             : {
      65             :     MemoryContextData header;   /* Standard memory-context fields */
      66             :     /* Allocation parameters for this context: */
      67             :     Size        chunkSize;      /* chunk size */
      68             :     Size        fullChunkSize;  /* chunk size including header and alignment */
      69             :     Size        blockSize;      /* block size */
      70             :     Size        headerSize;     /* allocated size of context header */
      71             :     int         chunksPerBlock; /* number of chunks per block */
      72             :     int         minFreeChunks;  /* min number of free chunks in any block */
      73             :     int         nblocks;        /* number of blocks allocated */
      74             :     /* blocks with free space, grouped by number of free chunks: */
      75             :     dlist_head  freelist[FLEXIBLE_ARRAY_MEMBER];
      76             : } SlabContext;
      77             : 
      78             : /*
      79             :  * SlabBlock
      80             :  *      Structure of a single block in SLAB allocator.
      81             :  *
      82             :  * node: doubly-linked list of blocks in global freelist
      83             :  * nfree: number of free chunks in this block
      84             :  * firstFreeChunk: index of the first free chunk
      85             :  */
      86             : typedef struct SlabBlock
      87             : {
      88             :     dlist_node  node;           /* doubly-linked list */
      89             :     int         nfree;          /* number of free chunks */
      90             :     int         firstFreeChunk; /* index of the first free chunk in the block */
      91             : } SlabBlock;
      92             : 
      93             : /*
      94             :  * SlabChunk
      95             :  *      The prefix of each piece of memory in a SlabBlock
      96             :  *
      97             :  * Note: to meet the memory context APIs, the payload area of the chunk must
      98             :  * be maxaligned, and the "slab" link must be immediately adjacent to the
      99             :  * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
     100             :  * which MAXALIGN is more than twice sizeof(void *), this happens without any
     101             :  * special hacking in this struct declaration.  But there is a static
     102             :  * assertion below that the alignment is done correctly.
     103             :  */
     104             : typedef struct SlabChunk
     105             : {
     106             :     SlabBlock  *block;          /* block owning this chunk */
     107             :     SlabContext *slab;          /* owning context */
     108             :     /* there must not be any padding to reach a MAXALIGN boundary here! */
     109             : } SlabChunk;
     110             : 
     111             : 
     112             : #define SlabPointerGetChunk(ptr)    \
     113             :     ((SlabChunk *)(((char *)(ptr)) - sizeof(SlabChunk)))
     114             : #define SlabChunkGetPointer(chk)    \
     115             :     ((void *)(((char *)(chk)) + sizeof(SlabChunk)))
     116             : #define SlabBlockGetChunk(slab, block, idx) \
     117             :     ((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)    \
     118             :                     + (idx * slab->fullChunkSize)))
     119             : #define SlabBlockStart(block)   \
     120             :     ((char *) block + sizeof(SlabBlock))
     121             : #define SlabChunkIndex(slab, block, chunk)  \
     122             :     (((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)
     123             : 
     124             : /*
     125             :  * These functions implement the MemoryContext API for Slab contexts.
     126             :  */
     127             : static void *SlabAlloc(MemoryContext context, Size size);
     128             : static void SlabFree(MemoryContext context, void *pointer);
     129             : static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
     130             : static void SlabReset(MemoryContext context);
     131             : static void SlabDelete(MemoryContext context);
     132             : static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
     133             : static bool SlabIsEmpty(MemoryContext context);
     134             : static void SlabStats(MemoryContext context,
     135             :                       MemoryStatsPrintFunc printfunc, void *passthru,
     136             :                       MemoryContextCounters *totals);
     137             : #ifdef MEMORY_CONTEXT_CHECKING
     138             : static void SlabCheck(MemoryContext context);
     139             : #endif
     140             : 
     141             : /*
     142             :  * This is the virtual function table for Slab contexts.
     143             :  */
     144             : static const MemoryContextMethods SlabMethods = {
     145             :     SlabAlloc,
     146             :     SlabFree,
     147             :     SlabRealloc,
     148             :     SlabReset,
     149             :     SlabDelete,
     150             :     SlabGetChunkSpace,
     151             :     SlabIsEmpty,
     152             :     SlabStats
     153             : #ifdef MEMORY_CONTEXT_CHECKING
     154             :     ,SlabCheck
     155             : #endif
     156             : };
     157             : 
     158             : /* ----------
     159             :  * Debug macros
     160             :  * ----------
     161             :  */
     162             : #ifdef HAVE_ALLOCINFO
     163             : #define SlabFreeInfo(_cxt, _chunk) \
     164             :             fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
     165             :                 (_cxt)->header.name, (_chunk), (_chunk)->header.size)
     166             : #define SlabAllocInfo(_cxt, _chunk) \
     167             :             fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
     168             :                 (_cxt)->header.name, (_chunk), (_chunk)->header.size)
     169             : #else
     170             : #define SlabFreeInfo(_cxt, _chunk)
     171             : #define SlabAllocInfo(_cxt, _chunk)
     172             : #endif
     173             : 
     174             : 
     175             : /*
     176             :  * SlabContextCreate
     177             :  *      Create a new Slab context.
     178             :  *
     179             :  * parent: parent context, or NULL if top-level context
     180             :  * name: name of context (must be statically allocated)
     181             :  * blockSize: allocation block size
     182             :  * chunkSize: allocation chunk size
     183             :  *
     184             :  * The chunkSize may not exceed:
     185             :  *      MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - SLAB_CHUNKHDRSZ
     186             :  */
     187             : MemoryContext
     188        1092 : SlabContextCreate(MemoryContext parent,
     189             :                   const char *name,
     190             :                   Size blockSize,
     191             :                   Size chunkSize)
     192             : {
     193             :     int         chunksPerBlock;
     194             :     Size        fullChunkSize;
     195             :     Size        freelistSize;
     196             :     Size        headerSize;
     197             :     SlabContext *slab;
     198             :     int         i;
     199             : 
     200             :     /* Assert we padded SlabChunk properly */
     201             :     StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
     202             :                      "sizeof(SlabChunk) is not maxaligned");
     203             :     StaticAssertStmt(offsetof(SlabChunk, slab) + sizeof(MemoryContext) ==
     204             :                      sizeof(SlabChunk),
     205             :                      "padding calculation in SlabChunk is wrong");
     206             : 
     207             :     /* Make sure the linked list node fits inside a freed chunk */
     208        1092 :     if (chunkSize < sizeof(int))
     209           0 :         chunkSize = sizeof(int);
     210             : 
     211             :     /* chunk, including SLAB header (both addresses nicely aligned) */
     212        1092 :     fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
     213             : 
     214             :     /* Make sure the block can store at least one chunk. */
     215        1092 :     if (blockSize < fullChunkSize + sizeof(SlabBlock))
     216           0 :         elog(ERROR, "block size %zu for slab is too small for %zu chunks",
     217             :              blockSize, chunkSize);
     218             : 
     219             :     /* Compute maximum number of chunks per block */
     220        1092 :     chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
     221             : 
     222             :     /* The freelist starts with 0, ends with chunksPerBlock. */
     223        1092 :     freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
     224             : 
     225             :     /*
     226             :      * Allocate the context header.  Unlike aset.c, we never try to combine
     227             :      * this with the first regular block; not worth the extra complication.
     228             :      */
     229             : 
     230             :     /* Size of the memory context header */
     231        1092 :     headerSize = offsetof(SlabContext, freelist) + freelistSize;
     232             : 
     233        1092 :     slab = (SlabContext *) malloc(headerSize);
     234        1092 :     if (slab == NULL)
     235             :     {
     236           0 :         MemoryContextStats(TopMemoryContext);
     237           0 :         ereport(ERROR,
     238             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
     239             :                  errmsg("out of memory"),
     240             :                  errdetail("Failed while creating memory context \"%s\".",
     241             :                            name)));
     242             :     }
     243             : 
     244             :     /*
     245             :      * Avoid writing code that can fail between here and MemoryContextCreate;
     246             :      * we'd leak the header if we ereport in this stretch.
     247             :      */
     248             : 
     249             :     /* Fill in SlabContext-specific header fields */
     250        1092 :     slab->chunkSize = chunkSize;
     251        1092 :     slab->fullChunkSize = fullChunkSize;
     252        1092 :     slab->blockSize = blockSize;
     253        1092 :     slab->headerSize = headerSize;
     254        1092 :     slab->chunksPerBlock = chunksPerBlock;
     255        1092 :     slab->minFreeChunks = 0;
     256        1092 :     slab->nblocks = 0;
     257             : 
     258             :     /* initialize the freelist slots */
     259       75348 :     for (i = 0; i < (slab->chunksPerBlock + 1); i++)
     260       74256 :         dlist_init(&slab->freelist[i]);
     261             : 
     262             :     /* Finally, do the type-independent part of context creation */
     263        1092 :     MemoryContextCreate((MemoryContext) slab,
     264             :                         T_SlabContext,
     265             :                         &SlabMethods,
     266             :                         parent,
     267             :                         name);
     268             : 
     269        1092 :     return (MemoryContext) slab;
     270             : }
     271             : 
     272             : /*
     273             :  * SlabReset
     274             :  *      Frees all memory which is allocated in the given set.
     275             :  *
     276             :  * The code simply frees all the blocks in the context - we don't keep any
     277             :  * keeper blocks or anything like that.
     278             :  */
     279             : static void
     280        1000 : SlabReset(MemoryContext context)
     281             : {
     282             :     int         i;
     283        1000 :     SlabContext *slab = castNode(SlabContext, context);
     284             : 
     285             :     Assert(slab);
     286             : 
     287             : #ifdef MEMORY_CONTEXT_CHECKING
     288             :     /* Check for corruption and leaks before freeing */
     289             :     SlabCheck(context);
     290             : #endif
     291             : 
     292             :     /* walk over freelists and free the blocks */
     293       69000 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     294             :     {
     295             :         dlist_mutable_iter miter;
     296             : 
     297       68012 :         dlist_foreach_modify(miter, &slab->freelist[i])
     298             :         {
     299          12 :             SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);
     300             : 
     301          12 :             dlist_delete(miter.cur);
     302             : 
     303             : #ifdef CLOBBER_FREED_MEMORY
     304             :             wipe_mem(block, slab->blockSize);
     305             : #endif
     306          12 :             free(block);
     307          12 :             slab->nblocks--;
     308             :         }
     309             :     }
     310             : 
     311        1000 :     slab->minFreeChunks = 0;
     312             : 
     313             :     Assert(slab->nblocks == 0);
     314        1000 : }
     315             : 
     316             : /*
     317             :  * SlabDelete
     318             :  *      Free all memory which is allocated in the given context.
     319             :  */
     320             : static void
     321        1000 : SlabDelete(MemoryContext context)
     322             : {
     323             :     /* Reset to release all the SlabBlocks */
     324        1000 :     SlabReset(context);
     325             :     /* And free the context header */
     326        1000 :     free(context);
     327        1000 : }
     328             : 
     329             : /*
     330             :  * SlabAlloc
     331             :  *      Returns pointer to allocated memory of given size or NULL if
     332             :  *      request could not be completed; memory is added to the slab.
     333             :  */
     334             : static void *
     335     2118294 : SlabAlloc(MemoryContext context, Size size)
     336             : {
     337     2118294 :     SlabContext *slab = castNode(SlabContext, context);
     338             :     SlabBlock  *block;
     339             :     SlabChunk  *chunk;
     340             :     int         idx;
     341             : 
     342             :     Assert(slab);
     343             : 
     344             :     Assert((slab->minFreeChunks >= 0) &&
     345             :            (slab->minFreeChunks < slab->chunksPerBlock));
     346             : 
     347             :     /* make sure we only allow correct request size */
     348     2118294 :     if (size != slab->chunkSize)
     349           0 :         elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
     350             :              size, slab->chunkSize);
     351             : 
     352             :     /*
     353             :      * If there are no free chunks in any existing block, create a new block
     354             :      * and put it to the last freelist bucket.
     355             :      *
     356             :      * slab->minFreeChunks == 0 means there are no blocks with free chunks,
     357             :      * thanks to how minFreeChunks is updated at the end of SlabAlloc().
     358             :      */
     359     2118294 :     if (slab->minFreeChunks == 0)
     360             :     {
     361       25058 :         block = (SlabBlock *) malloc(slab->blockSize);
     362             : 
     363       25058 :         if (block == NULL)
     364           0 :             return NULL;
     365             : 
     366       25058 :         block->nfree = slab->chunksPerBlock;
     367       25058 :         block->firstFreeChunk = 0;
     368             : 
     369             :         /*
     370             :          * Put all the chunks on a freelist. Walk the chunks and point each
     371             :          * one to the next one.
     372             :          */
     373     2411294 :         for (idx = 0; idx < slab->chunksPerBlock; idx++)
     374             :         {
     375     2386236 :             chunk = SlabBlockGetChunk(slab, block, idx);
     376     2386236 :             *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
     377             :         }
     378             : 
     379             :         /*
     380             :          * And add it to the last freelist with all chunks empty.
     381             :          *
     382             :          * We know there are no blocks in the freelist, otherwise we wouldn't
     383             :          * need a new block.
     384             :          */
     385             :         Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));
     386             : 
     387       25058 :         dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
     388             : 
     389       25058 :         slab->minFreeChunks = slab->chunksPerBlock;
     390       25058 :         slab->nblocks += 1;
     391             :     }
     392             : 
     393             :     /* grab the block from the freelist (even the new block is there) */
     394     2118294 :     block = dlist_head_element(SlabBlock, node,
     395             :                                &slab->freelist[slab->minFreeChunks]);
     396             : 
     397             :     /* make sure we actually got a valid block, with matching nfree */
     398             :     Assert(block != NULL);
     399             :     Assert(slab->minFreeChunks == block->nfree);
     400             :     Assert(block->nfree > 0);
     401             : 
     402             :     /* we know index of the first free chunk in the block */
     403     2118294 :     idx = block->firstFreeChunk;
     404             : 
     405             :     /* make sure the chunk index is valid, and that it's marked as empty */
     406             :     Assert((idx >= 0) && (idx < slab->chunksPerBlock));
     407             : 
     408             :     /* compute the chunk location block start (after the block header) */
     409     2118294 :     chunk = SlabBlockGetChunk(slab, block, idx);
     410             : 
     411             :     /*
     412             :      * Update the block nfree count, and also the minFreeChunks as we've
     413             :      * decreased nfree for a block with the minimum number of free chunks
     414             :      * (because that's how we chose the block).
     415             :      */
     416     2118294 :     block->nfree--;
     417     2118294 :     slab->minFreeChunks = block->nfree;
     418             : 
     419             :     /*
     420             :      * Remove the chunk from the freelist head. The index of the next free
     421             :      * chunk is stored in the chunk itself.
     422             :      */
     423             :     VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     424     2118294 :     block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
     425             : 
     426             :     Assert(block->firstFreeChunk >= 0);
     427             :     Assert(block->firstFreeChunk <= slab->chunksPerBlock);
     428             : 
     429             :     Assert((block->nfree != 0 &&
     430             :             block->firstFreeChunk < slab->chunksPerBlock) ||
     431             :            (block->nfree == 0 &&
     432             :             block->firstFreeChunk == slab->chunksPerBlock));
     433             : 
     434             :     /* move the whole block to the right place in the freelist */
     435     2118294 :     dlist_delete(&block->node);
     436     2118294 :     dlist_push_head(&slab->freelist[block->nfree], &block->node);
     437             : 
     438             :     /*
     439             :      * And finally update minFreeChunks, i.e. the index to the block with the
     440             :      * lowest number of free chunks. We only need to do that when the block
     441             :      * got full (otherwise we know the current block is the right one). We'll
     442             :      * simply walk the freelist until we find a non-empty entry.
     443             :      */
     444     2118294 :     if (slab->minFreeChunks == 0)
     445             :     {
     446     4148984 :         for (idx = 1; idx <= slab->chunksPerBlock; idx++)
     447             :         {
     448     2054396 :             if (dlist_is_empty(&slab->freelist[idx]))
     449     2054270 :                 continue;
     450             : 
     451             :             /* found a non-empty freelist */
     452         126 :             slab->minFreeChunks = idx;
     453         126 :             break;
     454             :         }
     455             :     }
     456             : 
     457     2118294 :     if (slab->minFreeChunks == slab->chunksPerBlock)
     458           0 :         slab->minFreeChunks = 0;
     459             : 
     460             :     /* Prepare to initialize the chunk header. */
     461             :     VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
     462             : 
     463     2118294 :     chunk->block = block;
     464     2118294 :     chunk->slab = slab;
     465             : 
     466             : #ifdef MEMORY_CONTEXT_CHECKING
     467             :     /* slab mark to catch clobber of "unused" space */
     468             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     469             :     {
     470             :         set_sentinel(SlabChunkGetPointer(chunk), size);
     471             :         VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
     472             :                                    sizeof(SlabChunk) + slab->chunkSize,
     473             :                                    slab->fullChunkSize -
     474             :                                    (slab->chunkSize + sizeof(SlabChunk)));
     475             :     }
     476             : #endif
     477             : #ifdef RANDOMIZE_ALLOCATED_MEMORY
     478             :     /* fill the allocated space with junk */
     479             :     randomize_mem((char *) SlabChunkGetPointer(chunk), size);
     480             : #endif
     481             : 
     482             :     SlabAllocInfo(slab, chunk);
     483     2118294 :     return SlabChunkGetPointer(chunk);
     484             : }
     485             : 
     486             : /*
     487             :  * SlabFree
     488             :  *      Frees allocated memory; memory is removed from the slab.
     489             :  */
     490             : static void
     491     2118274 : SlabFree(MemoryContext context, void *pointer)
     492             : {
     493             :     int         idx;
     494     2118274 :     SlabContext *slab = castNode(SlabContext, context);
     495     2118274 :     SlabChunk  *chunk = SlabPointerGetChunk(pointer);
     496     2118274 :     SlabBlock  *block = chunk->block;
     497             : 
     498             :     SlabFreeInfo(slab, chunk);
     499             : 
     500             : #ifdef MEMORY_CONTEXT_CHECKING
     501             :     /* Test for someone scribbling on unused space in chunk */
     502             :     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     503             :         if (!sentinel_ok(pointer, slab->chunkSize))
     504             :             elog(WARNING, "detected write past chunk end in %s %p",
     505             :                  slab->header.name, chunk);
     506             : #endif
     507             : 
     508             :     /* compute index of the chunk with respect to block start */
     509     2118274 :     idx = SlabChunkIndex(slab, block, chunk);
     510             : 
     511             :     /* add chunk to freelist, and update block nfree count */
     512     2118274 :     *(int32 *) pointer = block->firstFreeChunk;
     513     2118274 :     block->firstFreeChunk = idx;
     514     2118274 :     block->nfree++;
     515             : 
     516             :     Assert(block->nfree > 0);
     517             :     Assert(block->nfree <= slab->chunksPerBlock);
     518             : 
     519             : #ifdef CLOBBER_FREED_MEMORY
     520             :     /* XXX don't wipe the int32 index, used for block-level freelist */
     521             :     wipe_mem((char *) pointer + sizeof(int32),
     522             :              slab->chunkSize - sizeof(int32));
     523             : #endif
     524             : 
     525             :     /* remove the block from a freelist */
     526     2118274 :     dlist_delete(&block->node);
     527             : 
     528             :     /*
     529             :      * See if we need to update the minFreeChunks field for the slab - we only
     530             :      * need to do that if there the block had that number of free chunks
     531             :      * before we freed one. In that case, we check if there still are blocks
     532             :      * in the original freelist and we either keep the current value (if there
     533             :      * still are blocks) or increment it by one (the new block is still the
     534             :      * one with minimum free chunks).
     535             :      *
     536             :      * The one exception is when the block will get completely free - in that
     537             :      * case we will free it, se we can't use it for minFreeChunks. It however
     538             :      * means there are no more blocks with free chunks.
     539             :      */
     540     2118274 :     if (slab->minFreeChunks == (block->nfree - 1))
     541             :     {
     542             :         /* Have we removed the last chunk from the freelist? */
     543       86326 :         if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
     544             :         {
     545             :             /* but if we made the block entirely free, we'll free it */
     546       65714 :             if (block->nfree == slab->chunksPerBlock)
     547        4968 :                 slab->minFreeChunks = 0;
     548             :             else
     549       60746 :                 slab->minFreeChunks++;
     550             :         }
     551             :     }
     552             : 
     553             :     /* If the block is now completely empty, free it. */
     554     2118274 :     if (block->nfree == slab->chunksPerBlock)
     555             :     {
     556       25046 :         free(block);
     557       25046 :         slab->nblocks--;
     558             :     }
     559             :     else
     560     2093228 :         dlist_push_head(&slab->freelist[block->nfree], &block->node);
     561             : 
     562             :     Assert(slab->nblocks >= 0);
     563     2118274 : }
     564             : 
     565             : /*
     566             :  * SlabRealloc
     567             :  *      Change the allocated size of a chunk.
     568             :  *
     569             :  * As Slab is designed for allocating equally-sized chunks of memory, it can't
     570             :  * do an actual chunk size change.  We try to be gentle and allow calls with
     571             :  * exactly the same size, as in that case we can simply return the same
     572             :  * chunk.  When the size differs, we throw an error.
     573             :  *
     574             :  * We could also allow requests with size < chunkSize.  That however seems
     575             :  * rather pointless - Slab is meant for chunks of constant size, and moreover
     576             :  * realloc is usually used to enlarge the chunk.
     577             :  */
     578             : static void *
     579           0 : SlabRealloc(MemoryContext context, void *pointer, Size size)
     580             : {
     581           0 :     SlabContext *slab = castNode(SlabContext, context);
     582             : 
     583             :     Assert(slab);
     584             : 
     585             :     /* can't do actual realloc with slab, but let's try to be gentle */
     586           0 :     if (size == slab->chunkSize)
     587           0 :         return pointer;
     588             : 
     589           0 :     elog(ERROR, "slab allocator does not support realloc()");
     590             :     return NULL;                /* keep compiler quiet */
     591             : }
     592             : 
     593             : /*
     594             :  * SlabGetChunkSpace
     595             :  *      Given a currently-allocated chunk, determine the total space
     596             :  *      it occupies (including all memory-allocation overhead).
     597             :  */
     598             : static Size
     599           0 : SlabGetChunkSpace(MemoryContext context, void *pointer)
     600             : {
     601           0 :     SlabContext *slab = castNode(SlabContext, context);
     602             : 
     603             :     Assert(slab);
     604             : 
     605           0 :     return slab->fullChunkSize;
     606             : }
     607             : 
     608             : /*
     609             :  * SlabIsEmpty
     610             :  *      Is an Slab empty of any allocated space?
     611             :  */
     612             : static bool
     613           0 : SlabIsEmpty(MemoryContext context)
     614             : {
     615           0 :     SlabContext *slab = castNode(SlabContext, context);
     616             : 
     617             :     Assert(slab);
     618             : 
     619           0 :     return (slab->nblocks == 0);
     620             : }
     621             : 
     622             : /*
     623             :  * SlabStats
     624             :  *      Compute stats about memory consumption of a Slab context.
     625             :  *
     626             :  * printfunc: if not NULL, pass a human-readable stats string to this.
     627             :  * passthru: pass this pointer through to printfunc.
     628             :  * totals: if not NULL, add stats about this context into *totals.
     629             :  */
     630             : static void
     631           0 : SlabStats(MemoryContext context,
     632             :           MemoryStatsPrintFunc printfunc, void *passthru,
     633             :           MemoryContextCounters *totals)
     634             : {
     635           0 :     SlabContext *slab = castNode(SlabContext, context);
     636           0 :     Size        nblocks = 0;
     637           0 :     Size        freechunks = 0;
     638             :     Size        totalspace;
     639           0 :     Size        freespace = 0;
     640             :     int         i;
     641             : 
     642             :     /* Include context header in totalspace */
     643           0 :     totalspace = slab->headerSize;
     644             : 
     645           0 :     for (i = 0; i <= slab->chunksPerBlock; i++)
     646             :     {
     647             :         dlist_iter  iter;
     648             : 
     649           0 :         dlist_foreach(iter, &slab->freelist[i])
     650             :         {
     651           0 :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     652             : 
     653           0 :             nblocks++;
     654           0 :             totalspace += slab->blockSize;
     655           0 :             freespace += slab->fullChunkSize * block->nfree;
     656           0 :             freechunks += block->nfree;
     657             :         }
     658             :     }
     659             : 
     660           0 :     if (printfunc)
     661             :     {
     662             :         char        stats_string[200];
     663             : 
     664           0 :         snprintf(stats_string, sizeof(stats_string),
     665             :                  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
     666             :                  totalspace, nblocks, freespace, freechunks,
     667             :                  totalspace - freespace);
     668           0 :         printfunc(context, passthru, stats_string);
     669             :     }
     670             : 
     671           0 :     if (totals)
     672             :     {
     673           0 :         totals->nblocks += nblocks;
     674           0 :         totals->freechunks += freechunks;
     675           0 :         totals->totalspace += totalspace;
     676           0 :         totals->freespace += freespace;
     677             :     }
     678           0 : }
     679             : 
     680             : 
     681             : #ifdef MEMORY_CONTEXT_CHECKING
     682             : 
     683             : /*
     684             :  * SlabCheck
     685             :  *      Walk through chunks and check consistency of memory.
     686             :  *
     687             :  * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
     688             :  * find yourself in an infinite loop when trouble occurs, because this
     689             :  * routine will be entered again when elog cleanup tries to release memory!
     690             :  */
     691             : static void
     692             : SlabCheck(MemoryContext context)
     693             : {
     694             :     int         i;
     695             :     SlabContext *slab = castNode(SlabContext, context);
     696             :     const char *name = slab->header.name;
     697             :     char       *freechunks;
     698             : 
     699             :     Assert(slab);
     700             :     Assert(slab->chunksPerBlock > 0);
     701             : 
     702             :     /* bitmap of free chunks on a block */
     703             :     freechunks = palloc(slab->chunksPerBlock * sizeof(bool));
     704             : 
     705             :     /* walk all the freelists */
     706             :     for (i = 0; i <= slab->chunksPerBlock; i++)
     707             :     {
     708             :         int         j,
     709             :                     nfree;
     710             :         dlist_iter  iter;
     711             : 
     712             :         /* walk all blocks on this freelist */
     713             :         dlist_foreach(iter, &slab->freelist[i])
     714             :         {
     715             :             int         idx;
     716             :             SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
     717             : 
     718             :             /*
     719             :              * Make sure the number of free chunks (in the block header)
     720             :              * matches position in the freelist.
     721             :              */
     722             :             if (block->nfree != i)
     723             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
     724             :                      name, block->nfree, block, i);
     725             : 
     726             :             /* reset the bitmap of free chunks for this block */
     727             :             memset(freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
     728             :             idx = block->firstFreeChunk;
     729             : 
     730             :             /*
     731             :              * Now walk through the chunks, count the free ones and also
     732             :              * perform some additional checks for the used ones. As the chunk
     733             :              * freelist is stored within the chunks themselves, we have to
     734             :              * walk through the chunks and construct our own bitmap.
     735             :              */
     736             : 
     737             :             nfree = 0;
     738             :             while (idx < slab->chunksPerBlock)
     739             :             {
     740             :                 SlabChunk  *chunk;
     741             : 
     742             :                 /* count the chunk as free, add it to the bitmap */
     743             :                 nfree++;
     744             :                 freechunks[idx] = true;
     745             : 
     746             :                 /* read index of the next free chunk */
     747             :                 chunk = SlabBlockGetChunk(slab, block, idx);
     748             :                 VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
     749             :                 idx = *(int32 *) SlabChunkGetPointer(chunk);
     750             :             }
     751             : 
     752             :             for (j = 0; j < slab->chunksPerBlock; j++)
     753             :             {
     754             :                 /* non-zero bit in the bitmap means chunk the chunk is used */
     755             :                 if (!freechunks[j])
     756             :                 {
     757             :                     SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);
     758             : 
     759             :                     /* chunks have both block and slab pointers, so check both */
     760             :                     if (chunk->block != block)
     761             :                         elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
     762             :                              name, block, chunk);
     763             : 
     764             :                     if (chunk->slab != slab)
     765             :                         elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
     766             :                              name, block, chunk);
     767             : 
     768             :                     /* there might be sentinel (thanks to alignment) */
     769             :                     if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
     770             :                         if (!sentinel_ok(chunk, slab->chunkSize))
     771             :                             elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
     772             :                                  name, block, chunk);
     773             :                 }
     774             :             }
     775             : 
     776             :             /*
     777             :              * Make sure we got the expected number of free chunks (as tracked
     778             :              * in the block header).
     779             :              */
     780             :             if (nfree != block->nfree)
     781             :                 elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
     782             :                      name, block->nfree, block, nfree);
     783             :         }
     784             :     }
     785             : }
     786             : 
     787             : #endif                          /* MEMORY_CONTEXT_CHECKING */

Generated by: LCOV version 1.13