LCOV - code coverage report
Current view: top level - src/backend/storage/buffer - buf_init.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13devel Lines: 35 35 100.0 %
Date: 2019-11-13 22:07:24 Functions: 2 2 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * buf_init.c
       4             :  *    buffer manager initialization routines
       5             :  *
       6             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/buffer/buf_init.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : #include "postgres.h"
      16             : 
      17             : #include "storage/buf_internals.h"
      18             : #include "storage/bufmgr.h"
      19             : 
/*
 * Shared-memory state owned by the buffer manager.  The pointers are set
 * to shared-memory allocations in InitBufferPool().
 */
BufferDescPadded *BufferDescriptors;	/* per-buffer headers, cacheline-padded */
char       *BufferBlocks;		/* the buffer data pages themselves */
LWLockMinimallyPadded *BufferIOLWLockArray = NULL;	/* per-buffer I/O locks */
WritebackContext BackendWritebackContext;	/* per-backend file-flush context */
CkptSortItem *CkptBufferIds;	/* workspace for sorting to-be-checkpointed buffers */
      25             : 
      26             : 
      27             : /*
      28             :  * Data Structures:
      29             :  *      buffers live in a freelist and a lookup data structure.
      30             :  *
      31             :  *
      32             :  * Buffer Lookup:
      33             :  *      Two important notes.  First, the buffer has to be
      34             :  *      available for lookup BEFORE an IO begins.  Otherwise
      35             :  *      a second process trying to read the buffer will
      36             :  *      allocate its own copy and the buffer pool will
      37             :  *      become inconsistent.
      38             :  *
      39             :  * Buffer Replacement:
      40             :  *      see freelist.c.  A buffer cannot be replaced while in
      41             :  *      use either by data manager or during IO.
      42             :  *
      43             :  *
      44             :  * Synchronization/Locking:
      45             :  *
      46             :  * IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
      47             :  *      It must be set when an IO is initiated and cleared at
      48             :  *      the end of the IO.  It is there to make sure that one
      49             :  *      process doesn't start to use a buffer while another is
      50             :  *      faulting it in.  see WaitIO and related routines.
      51             :  *
      52             :  * refcount --  Counts the number of processes holding pins on a buffer.
      53             :  *      A buffer is pinned during IO and immediately after a BufferAlloc().
      54             :  *      Pins must be released before end of transaction.  For efficiency the
      55             :  *      shared refcount isn't increased if an individual backend pins a buffer
      56             :  *      multiple times. Check the PrivateRefCount infrastructure in bufmgr.c.
      57             :  */
      58             : 
      59             : 
      60             : /*
      61             :  * Initialize shared buffer pool
      62             :  *
      63             :  * This is called once during shared-memory initialization (either in the
      64             :  * postmaster, or in a standalone backend).
      65             :  */
      66             : void
      67        1890 : InitBufferPool(void)
      68             : {
      69             :     bool        foundBufs,
      70             :                 foundDescs,
      71             :                 foundIOLocks,
      72             :                 foundBufCkpt;
      73             : 
      74             :     /* Align descriptors to a cacheline boundary. */
      75        1890 :     BufferDescriptors = (BufferDescPadded *)
      76        1890 :         ShmemInitStruct("Buffer Descriptors",
      77             :                         NBuffers * sizeof(BufferDescPadded),
      78             :                         &foundDescs);
      79             : 
      80        1890 :     BufferBlocks = (char *)
      81        1890 :         ShmemInitStruct("Buffer Blocks",
      82             :                         NBuffers * (Size) BLCKSZ, &foundBufs);
      83             : 
      84             :     /* Align lwlocks to cacheline boundary */
      85        1890 :     BufferIOLWLockArray = (LWLockMinimallyPadded *)
      86        1890 :         ShmemInitStruct("Buffer IO Locks",
      87             :                         NBuffers * (Size) sizeof(LWLockMinimallyPadded),
      88             :                         &foundIOLocks);
      89             : 
      90        1890 :     LWLockRegisterTranche(LWTRANCHE_BUFFER_IO_IN_PROGRESS, "buffer_io");
      91        1890 :     LWLockRegisterTranche(LWTRANCHE_BUFFER_CONTENT, "buffer_content");
      92             : 
      93             :     /*
      94             :      * The array used to sort to-be-checkpointed buffer ids is located in
      95             :      * shared memory, to avoid having to allocate significant amounts of
      96             :      * memory at runtime. As that'd be in the middle of a checkpoint, or when
      97             :      * the checkpointer is restarted, memory allocation failures would be
      98             :      * painful.
      99             :      */
     100        1890 :     CkptBufferIds = (CkptSortItem *)
     101        1890 :         ShmemInitStruct("Checkpoint BufferIds",
     102             :                         NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
     103             : 
     104        1890 :     if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
     105             :     {
     106             :         /* should find all of these, or none of them */
     107             :         Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
     108             :         /* note: this path is only taken in EXEC_BACKEND case */
     109             :     }
     110             :     else
     111             :     {
     112             :         int         i;
     113             : 
     114             :         /*
     115             :          * Initialize all the buffer headers.
     116             :          */
     117    21039218 :         for (i = 0; i < NBuffers; i++)
     118             :         {
     119    21037328 :             BufferDesc *buf = GetBufferDescriptor(i);
     120             : 
     121    21037328 :             CLEAR_BUFFERTAG(buf->tag);
     122             : 
     123    21037328 :             pg_atomic_init_u32(&buf->state, 0);
     124    21037328 :             buf->wait_backend_pid = 0;
     125             : 
     126    21037328 :             buf->buf_id = i;
     127             : 
     128             :             /*
     129             :              * Initially link all the buffers together as unused. Subsequent
     130             :              * management of this list is done by freelist.c.
     131             :              */
     132    21037328 :             buf->freeNext = i + 1;
     133             : 
     134    21037328 :             LWLockInitialize(BufferDescriptorGetContentLock(buf),
     135             :                              LWTRANCHE_BUFFER_CONTENT);
     136             : 
     137    21037328 :             LWLockInitialize(BufferDescriptorGetIOLock(buf),
     138             :                              LWTRANCHE_BUFFER_IO_IN_PROGRESS);
     139             :         }
     140             : 
     141             :         /* Correct last entry of linked list */
     142        1890 :         GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
     143             :     }
     144             : 
     145             :     /* Init other shared buffer-management stuff */
     146        1890 :     StrategyInitialize(!foundDescs);
     147             : 
     148             :     /* Initialize per-backend file flush context */
     149        1890 :     WritebackContextInit(&BackendWritebackContext,
     150             :                          &backend_flush_after);
     151        1890 : }
     152             : 
     153             : /*
     154             :  * BufferShmemSize
     155             :  *
     156             :  * compute the size of shared memory for the buffer pool including
     157             :  * data pages, buffer descriptors, hash tables, etc.
     158             :  */
     159             : Size
     160        1894 : BufferShmemSize(void)
     161             : {
     162        1894 :     Size        size = 0;
     163             : 
     164             :     /* size of buffer descriptors */
     165        1894 :     size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
     166             :     /* to allow aligning buffer descriptors */
     167        1894 :     size = add_size(size, PG_CACHE_LINE_SIZE);
     168             : 
     169             :     /* size of data pages */
     170        1894 :     size = add_size(size, mul_size(NBuffers, BLCKSZ));
     171             : 
     172             :     /* size of stuff controlled by freelist.c */
     173        1894 :     size = add_size(size, StrategyShmemSize());
     174             : 
     175             :     /*
     176             :      * It would be nice to include the I/O locks in the BufferDesc, but that
     177             :      * would increase the size of a BufferDesc to more than one cache line,
     178             :      * and benchmarking has shown that keeping every BufferDesc aligned on a
     179             :      * cache line boundary is important for performance.  So, instead, the
     180             :      * array of I/O locks is allocated in a separate tranche.  Because those
     181             :      * locks are not highly contended, we lay out the array with minimal
     182             :      * padding.
     183             :      */
     184        1894 :     size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
     185             :     /* to allow aligning the above */
     186        1894 :     size = add_size(size, PG_CACHE_LINE_SIZE);
     187             : 
     188             :     /* size of checkpoint sort array in bufmgr.c */
     189        1894 :     size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
     190             : 
     191        1894 :     return size;
     192             : }

Generated by: LCOV version 1.13