LCOV - code coverage report
Current view: top level - src/backend/storage/buffer - buf_init.c (source / functions)
Test: PostgreSQL 13beta1            Lines:     33 / 33   (100.0 %)
Date: 2020-05-31 23:07:13           Functions:  2 /  2   (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * buf_init.c
       4             :  *    buffer manager initialization routines
       5             :  *
       6             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/buffer/buf_init.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : #include "postgres.h"
      16             : 
      17             : #include "storage/buf_internals.h"
      18             : #include "storage/bufmgr.h"
      19             : 
      20             : BufferDescPadded *BufferDescriptors;
      21             : char       *BufferBlocks;
      22             : LWLockMinimallyPadded *BufferIOLWLockArray = NULL;
      23             : WritebackContext BackendWritebackContext;
      24             : CkptSortItem *CkptBufferIds;
      25             : 
      26             : 
      27             : /*
      28             :  * Data Structures:
      29             :  *      buffers live in a freelist and a lookup data structure.
      30             :  *
      31             :  *
      32             :  * Buffer Lookup:
      33             :  *      One important note: the buffer has to be
      34             :  *      available for lookup BEFORE an IO begins.  Otherwise
      35             :  *      a second process trying to read the buffer will
      36             :  *      allocate its own copy and the buffer pool will
      37             :  *      become inconsistent.
      38             :  *
      39             :  * Buffer Replacement:
      40             :  *      see freelist.c.  A buffer cannot be replaced while in
      41             :  *      use, either by the data manager or during IO.
      42             :  *
      43             :  *
      44             :  * Synchronization/Locking:
      45             :  *
      46             :  * IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
      47             :  *      It must be set when an IO is initiated and cleared at
      48             :  *      the end of the IO.  It is there to make sure that one
      49             :  *      process doesn't start to use a buffer while another is
      50             :  *      faulting it in.  see WaitIO and related routines.
      51             :  *
      52             :  * refcount --  Counts the number of processes holding pins on a buffer.
      53             :  *      A buffer is pinned during IO and immediately after a BufferAlloc().
      54             :  *      Pins must be released before end of transaction.  For efficiency the
      55             :  *      shared refcount isn't increased if an individual backend pins a buffer
      56             :  *      multiple times. Check the PrivateRefCount infrastructure in bufmgr.c.
      57             :  */
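[Editor's note] The refcount comment above is the key idea behind buffer pinning: each backend keeps a private pin count per buffer and only touches the shared counter when its private count crosses zero. Below is a minimal, single-backend sketch of that idea using C11 atomics. It is an illustration only: pin_buffer(), unpin_buffer(), and the arrays are invented here, not PostgreSQL's PrivateRefCount code in bufmgr.c, which additionally packs the shared refcount into the buffer's atomic state word.

    /* Sketch: shared refcount touched only on first local pin / last local unpin. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define N_BUFFERS 4

    static atomic_uint shared_refcount[N_BUFFERS];  /* shared across backends */
    static unsigned    local_pins[N_BUFFERS];       /* private to this backend */

    static void
    pin_buffer(int buf_id)
    {
        if (local_pins[buf_id]++ == 0)              /* first local pin? */
            atomic_fetch_add(&shared_refcount[buf_id], 1);
    }

    static void
    unpin_buffer(int buf_id)
    {
        if (--local_pins[buf_id] == 0)              /* last local pin released? */
            atomic_fetch_sub(&shared_refcount[buf_id], 1);
    }

    int
    main(void)
    {
        pin_buffer(0);
        pin_buffer(0);                              /* second pin: shared count unchanged */
        printf("shared refcount = %u\n", atomic_load(&shared_refcount[0]));  /* prints 1 */
        unpin_buffer(0);
        unpin_buffer(0);
        printf("shared refcount = %u\n", atomic_load(&shared_refcount[0]));  /* prints 0 */
        return 0;
    }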
      58             : 
      59             : 
      60             : /*
      61             :  * Initialize shared buffer pool
      62             :  *
      63             :  * This is called once during shared-memory initialization (either in the
      64             :  * postmaster, or in a standalone backend).
      65             :  */
      66             : void
      67        2170 : InitBufferPool(void)
      68             : {
      69             :     bool        foundBufs,
      70             :                 foundDescs,
      71             :                 foundIOLocks,
      72             :                 foundBufCkpt;
      73             : 
      74             :     /* Align descriptors to a cacheline boundary. */
      75        2170 :     BufferDescriptors = (BufferDescPadded *)
      76        2170 :         ShmemInitStruct("Buffer Descriptors",
      77             :                         NBuffers * sizeof(BufferDescPadded),
      78             :                         &foundDescs);
      79             : 
      80        2170 :     BufferBlocks = (char *)
      81        2170 :         ShmemInitStruct("Buffer Blocks",
      82             :                         NBuffers * (Size) BLCKSZ, &foundBufs);
      83             : 
      84             :     /* Align lwlocks to cacheline boundary */
      85        2170 :     BufferIOLWLockArray = (LWLockMinimallyPadded *)
      86        2170 :         ShmemInitStruct("Buffer IO Locks",
      87             :                         NBuffers * (Size) sizeof(LWLockMinimallyPadded),
      88             :                         &foundIOLocks);
      89             : 
      90             :     /*
      91             :      * The array used to sort to-be-checkpointed buffer ids is located in
      92             :      * shared memory, to avoid having to allocate significant amounts of
      93             :      * memory at runtime. As that'd be in the middle of a checkpoint, or when
      94             :      * the checkpointer is restarted, memory allocation failures would be
      95             :      * painful.
      96             :      */
      97        2170 :     CkptBufferIds = (CkptSortItem *)
      98        2170 :         ShmemInitStruct("Checkpoint BufferIds",
      99             :                         NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
     100             : 
     101        2170 :     if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
     102             :     {
     103             :         /* should find all of these, or none of them */
     104             :         Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
     105             :         /* note: this path is only taken in EXEC_BACKEND case */
     106             :     }
     107             :     else
     108             :     {
     109             :         int         i;
     110             : 
     111             :         /*
     112             :          * Initialize all the buffer headers.
     113             :          */
     114    24162858 :         for (i = 0; i < NBuffers; i++)
     115             :         {
     116    24160688 :             BufferDesc *buf = GetBufferDescriptor(i);
     117             : 
     118    24160688 :             CLEAR_BUFFERTAG(buf->tag);
     119             : 
     120    24160688 :             pg_atomic_init_u32(&buf->state, 0);
     121    24160688 :             buf->wait_backend_pid = 0;
     122             : 
     123    24160688 :             buf->buf_id = i;
     124             : 
     125             :             /*
     126             :              * Initially link all the buffers together as unused. Subsequent
     127             :              * management of this list is done by freelist.c.
     128             :              */
     129    24160688 :             buf->freeNext = i + 1;
     130             : 
     131    24160688 :             LWLockInitialize(BufferDescriptorGetContentLock(buf),
     132             :                              LWTRANCHE_BUFFER_CONTENT);
     133             : 
     134    24160688 :             LWLockInitialize(BufferDescriptorGetIOLock(buf),
     135             :                              LWTRANCHE_BUFFER_IO);
     136             :         }
     137             : 
     138             :         /* Correct last entry of linked list */
     139        2170 :         GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
     140             :     }
     141             : 
     142             :     /* Init other shared buffer-management stuff */
     143        2170 :     StrategyInitialize(!foundDescs);
     144             : 
     145             :     /* Initialize per-backend file flush context */
     146        2170 :     WritebackContextInit(&BackendWritebackContext,
     147             :                          &backend_flush_after);
     148        2170 : }
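[Editor's note] InitBufferPool threads every descriptor onto the free list purely by index: each buffer's freeNext points at its successor, and the last entry is then overwritten with FREENEXT_END_OF_LIST so the chain terminates. A minimal standalone sketch of that index-linked list follows; MiniBufferDesc and FREENEXT_END are illustrative stand-ins, and the real list is subsequently managed by freelist.c.

    #include <stdio.h>

    #define N_BUFFERS     8
    #define FREENEXT_END  (-1)      /* sentinel, analogous to FREENEXT_END_OF_LIST */

    typedef struct
    {
        int buf_id;
        int freeNext;               /* index of next free buffer, or FREENEXT_END */
    } MiniBufferDesc;

    static MiniBufferDesc descriptors[N_BUFFERS];

    int
    main(void)
    {
        /* Link every buffer to its successor, as InitBufferPool does. */
        for (int i = 0; i < N_BUFFERS; i++)
        {
            descriptors[i].buf_id = i;
            descriptors[i].freeNext = i + 1;
        }
        /* Correct the last entry so the chain terminates. */
        descriptors[N_BUFFERS - 1].freeNext = FREENEXT_END;

        /* Walk the free list from its head (buffer 0). */
        for (int i = 0; i != FREENEXT_END; i = descriptors[i].freeNext)
            printf("free buffer %d\n", i);
        return 0;
    }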
     149             : 
     150             : /*
     151             :  * BufferShmemSize
     152             :  *
     153             :  * compute the size of shared memory for the buffer pool including
     154             :  * data pages, buffer descriptors, hash tables, etc.
     155             :  */
     156             : Size
     157        2174 : BufferShmemSize(void)
     158             : {
     159        2174 :     Size        size = 0;
     160             : 
     161             :     /* size of buffer descriptors */
     162        2174 :     size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
     163             :     /* to allow aligning buffer descriptors */
     164        2174 :     size = add_size(size, PG_CACHE_LINE_SIZE);
     165             : 
     166             :     /* size of data pages */
     167        2174 :     size = add_size(size, mul_size(NBuffers, BLCKSZ));
     168             : 
     169             :     /* size of stuff controlled by freelist.c */
     170        2174 :     size = add_size(size, StrategyShmemSize());
     171             : 
     172             :     /*
     173             :      * It would be nice to include the I/O locks in the BufferDesc, but that
     174             :      * would increase the size of a BufferDesc to more than one cache line,
     175             :      * and benchmarking has shown that keeping every BufferDesc aligned on a
     176             :      * cache line boundary is important for performance.  So, instead, the
     177             :      * array of I/O locks is allocated in a separate tranche.  Because those
     178             :      * locks are not highly contended, we lay out the array with minimal
     179             :      * padding.
     180             :      */
     181        2174 :     size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
     182             :     /* to allow aligning the above */
     183        2174 :     size = add_size(size, PG_CACHE_LINE_SIZE);
     184             : 
     185             :     /* size of checkpoint sort array in bufmgr.c */
     186        2174 :     size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
     187             : 
     188        2174 :     return size;
     189             : }

Generated by: LCOV version 1.13