LCOV - code coverage report
Current view: top level - src/backend/storage/buffer - bufmgr.c
Test: PostgreSQL 16beta1
Date: 2023-06-02 18:12:27

                  Hit   Total   Coverage
Lines:           1282    1390     92.2 %
Functions:         78      80     97.5 %

Legend: Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * bufmgr.c
       4             :  *    buffer manager interface routines
       5             :  *
       6             :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/buffer/bufmgr.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /*
      16             :  * Principal entry points:
      17             :  *
      18             :  * ReadBuffer() -- find or create a buffer holding the requested page,
      19             :  *      and pin it so that no one can destroy it while this process
      20             :  *      is using it.
      21             :  *
      22             :  * ReleaseBuffer() -- unpin a buffer
      23             :  *
      24             :  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
      25             :  *      The disk write is delayed until buffer replacement or checkpoint.
      26             :  *
      27             :  * See also these files:
      28             :  *      freelist.c -- chooses victim for buffer replacement
      29             :  *      buf_table.c -- manages the buffer lookup table
      30             :  */
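                      : /*
                      :  * For illustration only (not part of bufmgr.c): a minimal sketch of the
                      :  * caller-side pattern the entry points above imply when modifying a page.
                      :  * LockBuffer() and BufferGetPage() are the usual companions, declared in
                      :  * bufmgr.h.
                      :  *
                      :  *      Buffer      buf = ReadBuffer(rel, blkno);
                      :  *
                      :  *      LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
                      :  *      ... modify the page via BufferGetPage(buf) ...
                      :  *      MarkBufferDirty(buf);
                      :  *      LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                      :  *      ReleaseBuffer(buf);
                      :  */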
      31             : #include "postgres.h"
      32             : 
      33             : #include <sys/file.h>
      34             : #include <unistd.h>
      35             : 
      36             : #include "access/tableam.h"
      37             : #include "access/xloginsert.h"
      38             : #include "access/xlogutils.h"
      39             : #include "catalog/catalog.h"
      40             : #include "catalog/storage.h"
      41             : #include "catalog/storage_xlog.h"
      42             : #include "executor/instrument.h"
      43             : #include "lib/binaryheap.h"
      44             : #include "miscadmin.h"
      45             : #include "pg_trace.h"
      46             : #include "pgstat.h"
      47             : #include "postmaster/bgwriter.h"
      48             : #include "storage/buf_internals.h"
      49             : #include "storage/bufmgr.h"
      50             : #include "storage/ipc.h"
      51             : #include "storage/lmgr.h"
      52             : #include "storage/proc.h"
      53             : #include "storage/smgr.h"
      54             : #include "storage/standby.h"
      55             : #include "utils/memdebug.h"
      56             : #include "utils/ps_status.h"
      57             : #include "utils/rel.h"
      58             : #include "utils/resowner_private.h"
      59             : #include "utils/timestamp.h"
      60             : 
      61             : 
      62             : /* Note: these two macros only work on shared buffers, not local ones! */
      63             : #define BufHdrGetBlock(bufHdr)  ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
      64             : #define BufferGetLSN(bufHdr)    (PageGetLSN(BufHdrGetBlock(bufHdr)))
      65             : 
      66             : /* Note: this macro only works on local buffers, not shared ones! */
      67             : #define LocalBufHdrGetBlock(bufHdr) \
      68             :     LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
      69             : 
      70             : /* Bits in SyncOneBuffer's return value */
      71             : #define BUF_WRITTEN             0x01
      72             : #define BUF_REUSABLE            0x02
      73             : 
      74             : #define RELS_BSEARCH_THRESHOLD      20
      75             : 
      76             : /*
       77             :  * This is the size (in number of blocks) above which we scan the
       78             :  * entire buffer pool to remove the buffers for all the pages of the
       79             :  * relation being dropped. For relations below this threshold, we find
       80             :  * the buffers by doing lookups in the BufMapping table.
      81             :  */
      82             : #define BUF_DROP_FULL_SCAN_THRESHOLD        (uint64) (NBuffers / 32)
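                      : /*
                      :  * For example (illustrative numbers): with shared_buffers = 128MB and 8kB
                      :  * pages, NBuffers is 16384 and the threshold is 16384 / 32 = 512 blocks
                      :  * (4MB); dropping a relation larger than that scans the whole pool.
                      :  */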
      83             : 
      84             : typedef struct PrivateRefCountEntry
      85             : {
      86             :     Buffer      buffer;
      87             :     int32       refcount;
      88             : } PrivateRefCountEntry;
      89             : 
      90             : /* 64 bytes, about the size of a cache line on common systems */
      91             : #define REFCOUNT_ARRAY_ENTRIES 8
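                      : /*
                      :  * (Each PrivateRefCountEntry is 8 bytes -- a 4-byte Buffer plus a 4-byte
                      :  * int32 refcount -- so 8 entries span the 64 bytes noted above.)
                      :  */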
      92             : 
      93             : /*
      94             :  * Status of buffers to checkpoint for a particular tablespace, used
      95             :  * internally in BufferSync.
      96             :  */
      97             : typedef struct CkptTsStatus
      98             : {
      99             :     /* oid of the tablespace */
     100             :     Oid         tsId;
     101             : 
     102             :     /*
     103             :      * Checkpoint progress for this tablespace. To make progress comparable
     104             :      * between tablespaces the progress is, for each tablespace, measured as a
     105             :      * number between 0 and the total number of to-be-checkpointed pages. Each
     106             :      * page checkpointed in this tablespace increments this space's progress
     107             :      * by progress_slice.
     108             :      */
     109             :     float8      progress;
     110             :     float8      progress_slice;
     111             : 
      112             :     /* number of to-be-checkpointed pages in this tablespace */
     113             :     int         num_to_scan;
     114             :     /* already processed pages in this tablespace */
     115             :     int         num_scanned;
     116             : 
     117             :     /* current offset in CkptBufferIds for this tablespace */
     118             :     int         index;
     119             : } CkptTsStatus;
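                      : /*
                      :  * Worked example of the progress accounting above (illustrative numbers):
                      :  * with 1000 to-be-checkpointed pages total, split 600/400 between two
                      :  * tablespaces, progress_slice is 1000/600 for the first and 1000/400 for
                      :  * the second; each tablespace's progress reaches 1000 exactly when its
                      :  * last page is written, which makes their progress directly comparable.
                      :  */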
     120             : 
     121             : /*
     122             :  * Type for array used to sort SMgrRelations
     123             :  *
      124             :  * FlushRelationsAllBuffers shares the same comparator function with
      125             :  * DropRelationsAllBuffers. A pointer to this struct must therefore be
      126             :  * usable as a pointer to RelFileLocator.
     127             :  */
     128             : typedef struct SMgrSortArray
     129             : {
     130             :     RelFileLocator rlocator;    /* This must be the first member */
     131             :     SMgrRelation srel;
     132             : } SMgrSortArray;
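                      : /*
                      :  * Illustrative (hypothetical) use of the layout guarantee above: because
                      :  * rlocator is the first member, one comparator can sort both arrays of
                      :  * SMgrSortArray and arrays of RelFileLocator.
                      :  *
                      :  *      SMgrSortArray srels[NREL];
                      :  *      ...
                      :  *      qsort(srels, NREL, sizeof(SMgrSortArray), rlocator_comparator);
                      :  */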
     133             : 
     134             : /* GUC variables */
     135             : bool        zero_damaged_pages = false;
     136             : int         bgwriter_lru_maxpages = 100;
     137             : double      bgwriter_lru_multiplier = 2.0;
     138             : bool        track_io_timing = false;
     139             : 
     140             : /*
     141             :  * How many buffers PrefetchBuffer callers should try to stay ahead of their
     142             :  * ReadBuffer calls by.  Zero means "never prefetch".  This value is only used
     143             :  * for buffers not belonging to tablespaces that have their
     144             :  * effective_io_concurrency parameter set.
     145             :  */
     146             : int         effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
     147             : 
     148             : /*
     149             :  * Like effective_io_concurrency, but used by maintenance code paths that might
     150             :  * benefit from a higher setting because they work on behalf of many sessions.
     151             :  * Overridden by the tablespace setting of the same name.
     152             :  */
     153             : int         maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
     154             : 
     155             : /*
      156             :  * GUC variables controlling when to trigger kernel writeback for buffers
      157             :  * written; OS-dependent defaults are set via the GUC mechanism.
     158             :  */
     159             : int         checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
     160             : int         bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
     161             : int         backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
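                      : /*
                      :  * These can be set like any GUC, e.g. in postgresql.conf (illustrative
                      :  * values; the actual defaults are platform-dependent):
                      :  *
                      :  *      checkpoint_flush_after = 256kB
                      :  *      bgwriter_flush_after = 512kB
                      :  *      backend_flush_after = 0            # disabled
                      :  */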
     162             : 
     163             : /* local state for LockBufferForCleanup */
     164             : static BufferDesc *PinCountWaitBuf = NULL;
     165             : 
     166             : /*
     167             :  * Backend-Private refcount management:
     168             :  *
     169             :  * Each buffer also has a private refcount that keeps track of the number of
     170             :  * times the buffer is pinned in the current process.  This is so that the
     171             :  * shared refcount needs to be modified only once if a buffer is pinned more
     172             :  * than once by an individual backend.  It's also used to check that no buffers
     173             :  * are still pinned at the end of transactions and when exiting.
     174             :  *
     175             :  *
      176             :  * To avoid requiring an array with NBuffers entries (as we once did) to keep
      177             :  * track of these pins, we use a small sequentially searched array
      178             :  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
      179             :  * keep track of backend-local pins.
     180             :  *
      181             :  * As long as no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once,
      182             :  * all refcounts are tracked in the array; after that, new array entries
      183             :  * displace old ones into the hash table. That way a frequently used entry
      184             :  * can't get "stuck" in the hashtable while infrequent ones clog the array.
     185             :  *
     186             :  * Note that in most scenarios the number of pinned buffers will not exceed
     187             :  * REFCOUNT_ARRAY_ENTRIES.
     188             :  *
     189             :  *
      190             :  * To enter a buffer into the refcount tracking mechanism, first reserve a
      191             :  * free entry using ReservePrivateRefCountEntry() and then later, if
      192             :  * necessary, fill it with NewPrivateRefCountEntry(). That split lets us
      193             :  * avoid memory allocations in NewPrivateRefCountEntry(), which matters
      194             :  * because in some scenarios it's called with a spinlock held.
     195             :  */
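                      : /*
                      :  * Illustrative sketch of the reserve-then-fill protocol described above
                      :  * (hypothetical caller; the real callers are the pinning routines below):
                      :  *
                      :  *      ReservePrivateRefCountEntry();  -- may search/evict; no spinlock held
                      :  *      buf_state = LockBufHdr(buf);
                      :  *      ...
                      :  *      ref = NewPrivateRefCountEntry(b);   -- no allocation, cannot fail
                      :  *      ref->refcount++;
                      :  *      ...
                      :  *      UnlockBufHdr(buf, buf_state);
                      :  */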
     196             : static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
     197             : static HTAB *PrivateRefCountHash = NULL;
     198             : static int32 PrivateRefCountOverflowed = 0;
     199             : static uint32 PrivateRefCountClock = 0;
     200             : static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
     201             : 
     202             : static void ReservePrivateRefCountEntry(void);
     203             : static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
     204             : static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
     205             : static inline int32 GetPrivateRefCount(Buffer buffer);
     206             : static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
     207             : 
     208             : /*
     209             :  * Ensure that the PrivateRefCountArray has sufficient space to store one more
     210             :  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
     211             :  * a new entry - but it's perfectly fine to not use a reserved entry.
     212             :  */
     213             : static void
     214   133436172 : ReservePrivateRefCountEntry(void)
     215             : {
     216             :     /* Already reserved (or freed), nothing to do */
     217   133436172 :     if (ReservedRefCountEntry != NULL)
     218   127521630 :         return;
     219             : 
     220             :     /*
      221             :      * First search for a free entry in the array; that'll be sufficient in the
     222             :      * majority of cases.
     223             :      */
     224             :     {
     225             :         int         i;
     226             : 
     227    12633854 :         for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
     228             :         {
     229             :             PrivateRefCountEntry *res;
     230             : 
     231    12543582 :             res = &PrivateRefCountArray[i];
     232             : 
     233    12543582 :             if (res->buffer == InvalidBuffer)
     234             :             {
     235     5824270 :                 ReservedRefCountEntry = res;
     236     5824270 :                 return;
     237             :             }
     238             :         }
     239             :     }
     240             : 
     241             :     /*
     242             :      * No luck. All array entries are full. Move one array entry into the hash
     243             :      * table.
     244             :      */
     245             :     {
     246             :         /*
     247             :          * Move entry from the current clock position in the array into the
     248             :          * hashtable. Use that slot.
     249             :          */
     250             :         PrivateRefCountEntry *hashent;
     251             :         bool        found;
     252             : 
     253             :         /* select victim slot */
     254       90272 :         ReservedRefCountEntry =
     255       90272 :             &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
     256             : 
     257             :         /* Better be used, otherwise we shouldn't get here. */
     258             :         Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
     259             : 
     260             :         /* enter victim array entry into hashtable */
     261       90272 :         hashent = hash_search(PrivateRefCountHash,
     262       90272 :                               &(ReservedRefCountEntry->buffer),
     263             :                               HASH_ENTER,
     264             :                               &found);
     265             :         Assert(!found);
     266       90272 :         hashent->refcount = ReservedRefCountEntry->refcount;
     267             : 
     268             :         /* clear the now free array slot */
     269       90272 :         ReservedRefCountEntry->buffer = InvalidBuffer;
     270       90272 :         ReservedRefCountEntry->refcount = 0;
     271             : 
     272       90272 :         PrivateRefCountOverflowed++;
     273             :     }
     274             : }
     275             : 
     276             : /*
     277             :  * Fill a previously reserved refcount entry.
     278             :  */
     279             : static PrivateRefCountEntry *
     280   131249200 : NewPrivateRefCountEntry(Buffer buffer)
     281             : {
     282             :     PrivateRefCountEntry *res;
     283             : 
     284             :     /* only allowed to be called when a reservation has been made */
     285             :     Assert(ReservedRefCountEntry != NULL);
     286             : 
     287             :     /* use up the reserved entry */
     288   131249200 :     res = ReservedRefCountEntry;
     289   131249200 :     ReservedRefCountEntry = NULL;
     290             : 
     291             :     /* and fill it */
     292   131249200 :     res->buffer = buffer;
     293   131249200 :     res->refcount = 0;
     294             : 
     295   131249200 :     return res;
     296             : }
     297             : 
     298             : /*
     299             :  * Return the PrivateRefCount entry for the passed buffer.
     300             :  *
      301             :  * Returns NULL if the buffer doesn't have a refcount entry. Otherwise, if
      302             :  * do_move is true and the entry resides in the hashtable, the entry is
      303             :  * moved to the array to optimize it for frequent access.
     304             :  */
     305             : static PrivateRefCountEntry *
     306   313264318 : GetPrivateRefCountEntry(Buffer buffer, bool do_move)
     307             : {
     308             :     PrivateRefCountEntry *res;
     309             :     int         i;
     310             : 
     311             :     Assert(BufferIsValid(buffer));
     312             :     Assert(!BufferIsLocal(buffer));
     313             : 
     314             :     /*
      315             :      * First search for references in the array; that'll be sufficient in the
     316             :      * majority of cases.
     317             :      */
     318  1488909810 :     for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
     319             :     {
     320  1362171448 :         res = &PrivateRefCountArray[i];
     321             : 
     322  1362171448 :         if (res->buffer == buffer)
     323   186525956 :             return res;
     324             :     }
     325             : 
     326             :     /*
     327             :      * By here we know that the buffer, if already pinned, isn't residing in
     328             :      * the array.
     329             :      *
     330             :      * Only look up the buffer in the hashtable if we've previously overflowed
     331             :      * into it.
     332             :      */
     333   126738362 :     if (PrivateRefCountOverflowed == 0)
     334   126394280 :         return NULL;
     335             : 
     336      344082 :     res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
     337             : 
     338      344082 :     if (res == NULL)
     339      253526 :         return NULL;
     340       90556 :     else if (!do_move)
     341             :     {
     342             :         /* caller doesn't want us to move the hash entry into the array */
     343       90088 :         return res;
     344             :     }
     345             :     else
     346             :     {
     347             :         /* move buffer from hashtable into the free array slot */
     348             :         bool        found;
     349             :         PrivateRefCountEntry *free;
     350             : 
     351             :         /* Ensure there's a free array slot */
     352         468 :         ReservePrivateRefCountEntry();
     353             : 
     354             :         /* Use up the reserved slot */
     355             :         Assert(ReservedRefCountEntry != NULL);
     356         468 :         free = ReservedRefCountEntry;
     357         468 :         ReservedRefCountEntry = NULL;
     358             :         Assert(free->buffer == InvalidBuffer);
     359             : 
     360             :         /* and fill it */
     361         468 :         free->buffer = buffer;
     362         468 :         free->refcount = res->refcount;
     363             : 
     364             :         /* delete from hashtable */
     365         468 :         hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
     366             :         Assert(found);
     367             :         Assert(PrivateRefCountOverflowed > 0);
     368         468 :         PrivateRefCountOverflowed--;
     369             : 
     370         468 :         return free;
     371             :     }
     372             : }
     373             : 
     374             : /*
     375             :  * Returns how many times the passed buffer is pinned by this backend.
     376             :  *
     377             :  * Only works for shared memory buffers!
     378             :  */
     379             : static inline int32
     380     5077644 : GetPrivateRefCount(Buffer buffer)
     381             : {
     382             :     PrivateRefCountEntry *ref;
     383             : 
     384             :     Assert(BufferIsValid(buffer));
     385             :     Assert(!BufferIsLocal(buffer));
     386             : 
     387             :     /*
     388             :      * Not moving the entry - that's ok for the current users, but we might
     389             :      * want to change this one day.
     390             :      */
     391     5077644 :     ref = GetPrivateRefCountEntry(buffer, false);
     392             : 
     393     5077644 :     if (ref == NULL)
     394      825224 :         return 0;
     395     4252420 :     return ref->refcount;
     396             : }
     397             : 
     398             : /*
     399             :  * Release resources used to track the reference count of a buffer which we no
     400             :  * longer have pinned and don't want to pin again immediately.
     401             :  */
     402             : static void
     403   131249200 : ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
     404             : {
     405             :     Assert(ref->refcount == 0);
     406             : 
     407   131249200 :     if (ref >= &PrivateRefCountArray[0] &&
     408             :         ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
     409             :     {
     410   131159396 :         ref->buffer = InvalidBuffer;
     411             : 
     412             :         /*
     413             :          * Mark the just used entry as reserved - in many scenarios that
     414             :          * allows us to avoid ever having to search the array/hash for free
     415             :          * entries.
     416             :          */
     417   131159396 :         ReservedRefCountEntry = ref;
     418             :     }
     419             :     else
     420             :     {
     421             :         bool        found;
     422       89804 :         Buffer      buffer = ref->buffer;
     423             : 
     424       89804 :         hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
     425             :         Assert(found);
     426             :         Assert(PrivateRefCountOverflowed > 0);
     427       89804 :         PrivateRefCountOverflowed--;
     428             :     }
     429   131249200 : }
     430             : 
     431             : /*
     432             :  * BufferIsPinned
     433             :  *      True iff the buffer is pinned (also checks for valid buffer number).
     434             :  *
     435             :  *      NOTE: what we check here is that *this* backend holds a pin on
     436             :  *      the buffer.  We do not care whether some other backend does.
     437             :  */
     438             : #define BufferIsPinned(bufnum) \
     439             : ( \
     440             :     !BufferIsValid(bufnum) ? \
     441             :         false \
     442             :     : \
     443             :         BufferIsLocal(bufnum) ? \
     444             :             (LocalRefCount[-(bufnum) - 1] > 0) \
     445             :         : \
      446             :             (GetPrivateRefCount(bufnum) > 0) \
     447             : )
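                      : /*
                      :  * Note that, like most function-style macros, BufferIsPinned() evaluates
                      :  * its argument more than once, so arguments with side effects are unsafe.
                      :  */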
     448             : 
     449             : 
     450             : static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence,
     451             :                                 ForkNumber forkNum, BlockNumber blockNum,
     452             :                                 ReadBufferMode mode, BufferAccessStrategy strategy,
     453             :                                 bool *hit);
     454             : static BlockNumber ExtendBufferedRelCommon(ExtendBufferedWhat eb,
     455             :                                            ForkNumber fork,
     456             :                                            BufferAccessStrategy strategy,
     457             :                                            uint32 flags,
     458             :                                            uint32 extend_by,
     459             :                                            BlockNumber extend_upto,
     460             :                                            Buffer *buffers,
     461             :                                            uint32 *extended_by);
     462             : static BlockNumber ExtendBufferedRelShared(ExtendBufferedWhat eb,
     463             :                                            ForkNumber fork,
     464             :                                            BufferAccessStrategy strategy,
     465             :                                            uint32 flags,
     466             :                                            uint32 extend_by,
     467             :                                            BlockNumber extend_upto,
     468             :                                            Buffer *buffers,
     469             :                                            uint32 *extended_by);
     470             : static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
     471             : static void PinBuffer_Locked(BufferDesc *buf);
     472             : static void UnpinBuffer(BufferDesc *buf);
     473             : static void BufferSync(int flags);
     474             : static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
     475             : static int  SyncOneBuffer(int buf_id, bool skip_recently_used,
     476             :                           WritebackContext *wb_context);
     477             : static void WaitIO(BufferDesc *buf);
     478             : static bool StartBufferIO(BufferDesc *buf, bool forInput);
     479             : static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
     480             :                               uint32 set_flag_bits);
     481             : static void shared_buffer_write_error_callback(void *arg);
     482             : static void local_buffer_write_error_callback(void *arg);
     483             : static BufferDesc *BufferAlloc(SMgrRelation smgr,
     484             :                                char relpersistence,
     485             :                                ForkNumber forkNum,
     486             :                                BlockNumber blockNum,
     487             :                                BufferAccessStrategy strategy,
     488             :                                bool *foundPtr, IOContext io_context);
     489             : static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
     490             : static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
     491             :                         IOObject io_object, IOContext io_context);
     492             : static void FindAndDropRelationBuffers(RelFileLocator rlocator,
     493             :                                        ForkNumber forkNum,
     494             :                                        BlockNumber nForkBlock,
     495             :                                        BlockNumber firstDelBlock);
     496             : static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
     497             :                                            RelFileLocator dstlocator,
     498             :                                            ForkNumber forkNum, bool permanent);
     499             : static void AtProcExit_Buffers(int code, Datum arg);
     500             : static void CheckForBufferLeaks(void);
     501             : static int  rlocator_comparator(const void *p1, const void *p2);
     502             : static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
     503             : static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
     504             : static int  ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
     505             : 
     506             : 
     507             : /*
     508             :  * Implementation of PrefetchBuffer() for shared buffers.
     509             :  */
     510             : PrefetchBufferResult
     511     1711996 : PrefetchSharedBuffer(SMgrRelation smgr_reln,
     512             :                      ForkNumber forkNum,
     513             :                      BlockNumber blockNum)
     514             : {
     515     1711996 :     PrefetchBufferResult result = {InvalidBuffer, false};
     516             :     BufferTag   newTag;         /* identity of requested block */
     517             :     uint32      newHash;        /* hash value for newTag */
     518             :     LWLock     *newPartitionLock;   /* buffer partition lock for it */
     519             :     int         buf_id;
     520             : 
     521             :     Assert(BlockNumberIsValid(blockNum));
     522             : 
     523             :     /* create a tag so we can lookup the buffer */
     524     1711996 :     InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
     525             :                   forkNum, blockNum);
     526             : 
     527             :     /* determine its hash code and partition lock ID */
     528     1711996 :     newHash = BufTableHashCode(&newTag);
     529     1711996 :     newPartitionLock = BufMappingPartitionLock(newHash);
     530             : 
     531             :     /* see if the block is in the buffer pool already */
     532     1711996 :     LWLockAcquire(newPartitionLock, LW_SHARED);
     533     1711996 :     buf_id = BufTableLookup(&newTag, newHash);
     534     1711996 :     LWLockRelease(newPartitionLock);
     535             : 
     536             :     /* If not in buffers, initiate prefetch */
     537     1711996 :     if (buf_id < 0)
     538             :     {
     539             : #ifdef USE_PREFETCH
     540             :         /*
     541             :          * Try to initiate an asynchronous read.  This returns false in
     542             :          * recovery if the relation file doesn't exist.
     543             :          */
     544      828522 :         if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
     545      414040 :             smgrprefetch(smgr_reln, forkNum, blockNum))
     546             :         {
     547      414040 :             result.initiated_io = true;
     548             :         }
     549             : #endif                          /* USE_PREFETCH */
     550             :     }
     551             :     else
     552             :     {
     553             :         /*
     554             :          * Report the buffer it was in at that time.  The caller may be able
     555             :          * to avoid a buffer table lookup, but it's not pinned and it must be
     556             :          * rechecked!
     557             :          */
     558     1297514 :         result.recent_buffer = buf_id + 1;
     559             :     }
     560             : 
     561             :     /*
     562             :      * If the block *is* in buffers, we do nothing.  This is not really ideal:
     563             :      * the block might be just about to be evicted, which would be stupid
     564             :      * since we know we are going to need it soon.  But the only easy answer
     565             :      * is to bump the usage_count, which does not seem like a great solution:
     566             :      * when the caller does ultimately touch the block, usage_count would get
     567             :      * bumped again, resulting in too much favoritism for blocks that are
     568             :      * involved in a prefetch sequence. A real fix would involve some
     569             :      * additional per-buffer state, and it's not clear that there's enough of
     570             :      * a problem to justify that.
     571             :      */
     572             : 
     573     1711996 :     return result;
     574             : }
     575             : 
     576             : /*
     577             :  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
     578             :  *
     579             :  * This is named by analogy to ReadBuffer but doesn't actually allocate a
     580             :  * buffer.  Instead it tries to ensure that a future ReadBuffer for the given
     581             :  * block will not be delayed by the I/O.  Prefetching is optional.
     582             :  *
     583             :  * There are three possible outcomes:
     584             :  *
     585             :  * 1.  If the block is already cached, the result includes a valid buffer that
     586             :  * could be used by the caller to avoid the need for a later buffer lookup, but
     587             :  * it's not pinned, so the caller must recheck it.
     588             :  *
     589             :  * 2.  If the kernel has been asked to initiate I/O, the initiated_io member is
     590             :  * true.  Currently there is no way to know if the data was already cached by
     591             :  * the kernel and therefore didn't really initiate I/O, and no way to know when
     592             :  * the I/O completes other than using synchronous ReadBuffer().
     593             :  *
      594             :  * 3.  Otherwise, the buffer wasn't already cached by PostgreSQL, and one of
      595             :  * the following holds: USE_PREFETCH is not defined (this build lacks a
      596             :  * kernel prefetch facility), direct I/O is enabled, or the underlying
      597             :  * relation file wasn't found and we are in recovery.  (If the relation file
      598             :  * wasn't found and we are not in recovery, an error is raised.)
     599             :  */
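                      : /*
                      :  * Illustrative caller pattern (hypothetical code, not from this file):
                      :  *
                      :  *      PrefetchBufferResult p = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
                      :  *      ... useful work overlapping any kernel readahead ...
                      :  *      buf = ReadBuffer(rel, blkno);
                      :  *
                      :  * A caller holding p.recent_buffer may instead try ReadRecentBuffer()
                      :  * first to skip the buffer mapping lookup; see below.
                      :  */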
     600             : PrefetchBufferResult
     601      826088 : PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
     602             : {
     603             :     Assert(RelationIsValid(reln));
     604             :     Assert(BlockNumberIsValid(blockNum));
     605             : 
     606      826088 :     if (RelationUsesLocalBuffers(reln))
     607             :     {
     608             :         /* see comments in ReadBufferExtended */
     609       12488 :         if (RELATION_IS_OTHER_TEMP(reln))
     610           0 :             ereport(ERROR,
     611             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     612             :                      errmsg("cannot access temporary tables of other sessions")));
     613             : 
     614             :         /* pass it off to localbuf.c */
     615       12488 :         return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
     616             :     }
     617             :     else
     618             :     {
     619             :         /* pass it to the shared buffer version */
     620      813600 :         return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
     621             :     }
     622             : }
     623             : 
     624             : /*
     625             :  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
     626             :  *
     627             :  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
      628             :  * successful.  Returns true if the buffer is valid and still has the expected
     629             :  * tag.  In that case, the buffer is pinned and the usage count is bumped.
     630             :  */
     631             : bool
     632      825240 : ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
     633             :                  Buffer recent_buffer)
     634             : {
     635             :     BufferDesc *bufHdr;
     636             :     BufferTag   tag;
     637             :     uint32      buf_state;
     638             :     bool        have_private_ref;
     639             : 
     640             :     Assert(BufferIsValid(recent_buffer));
     641             : 
     642      825240 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
     643      825240 :     ReservePrivateRefCountEntry();
     644      825240 :     InitBufferTag(&tag, &rlocator, forkNum, blockNum);
     645             : 
     646      825240 :     if (BufferIsLocal(recent_buffer))
     647             :     {
     648           0 :         int         b = -recent_buffer - 1;
     649             : 
     650           0 :         bufHdr = GetLocalBufferDescriptor(b);
     651           0 :         buf_state = pg_atomic_read_u32(&bufHdr->state);
     652             : 
     653             :         /* Is it still valid and holding the right tag? */
     654           0 :         if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
     655             :         {
     656           0 :             PinLocalBuffer(bufHdr, true);
     657             : 
     658           0 :             pgBufferUsage.local_blks_hit++;
     659             : 
     660           0 :             return true;
     661             :         }
     662             :     }
     663             :     else
     664             :     {
     665      825240 :         bufHdr = GetBufferDescriptor(recent_buffer - 1);
     666      825240 :         have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
     667             : 
     668             :         /*
     669             :          * Do we already have this buffer pinned with a private reference?  If
     670             :          * so, it must be valid and it is safe to check the tag without
     671             :          * locking.  If not, we have to lock the header first and then check.
     672             :          */
     673      825240 :         if (have_private_ref)
     674          16 :             buf_state = pg_atomic_read_u32(&bufHdr->state);
     675             :         else
     676      825224 :             buf_state = LockBufHdr(bufHdr);
     677             : 
     678      825240 :         if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
     679             :         {
     680             :             /*
     681             :              * It's now safe to pin the buffer.  We can't pin first and ask
     682             :              * questions later, because it might confuse code paths like
     683             :              * InvalidateBuffer() if we pinned a random non-matching buffer.
     684             :              */
     685      822742 :             if (have_private_ref)
     686           0 :                 PinBuffer(bufHdr, NULL);    /* bump pin count */
     687             :             else
     688      822742 :                 PinBuffer_Locked(bufHdr);   /* pin for first time */
     689             : 
     690      822742 :             pgBufferUsage.shared_blks_hit++;
     691             : 
     692      822742 :             return true;
     693             :         }
     694             : 
     695             :         /* If we locked the header above, now unlock. */
     696        2498 :         if (!have_private_ref)
     697        2482 :             UnlockBufHdr(bufHdr, buf_state);
     698             :     }
     699             : 
     700        2498 :     return false;
     701             : }
     702             : 
     703             : /*
     704             :  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
     705             :  *      fork with RBM_NORMAL mode and default strategy.
     706             :  */
     707             : Buffer
     708    95051700 : ReadBuffer(Relation reln, BlockNumber blockNum)
     709             : {
     710    95051700 :     return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
     711             : }
     712             : 
     713             : /*
     714             :  * ReadBufferExtended -- returns a buffer containing the requested
     715             :  *      block of the requested relation.  If the blknum
     716             :  *      requested is P_NEW, extend the relation file and
     717             :  *      allocate a new block.  (Caller is responsible for
     718             :  *      ensuring that only one backend tries to extend a
     719             :  *      relation at the same time!)
     720             :  *
     721             :  * Returns: the buffer number for the buffer containing
     722             :  *      the block read.  The returned buffer has been pinned.
     723             :  *      Does not return on error --- elog's instead.
     724             :  *
      725             :  * We assume that reln has already been opened when this function is called.
     726             :  *
     727             :  * In RBM_NORMAL mode, the page is read from disk, and the page header is
     728             :  * validated.  An error is thrown if the page header is not valid.  (But
     729             :  * note that an all-zero page is considered "valid"; see
     730             :  * PageIsVerifiedExtended().)
     731             :  *
     732             :  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
     733             :  * valid, the page is zeroed instead of throwing an error. This is intended
     734             :  * for non-critical data, where the caller is prepared to repair errors.
     735             :  *
     736             :  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
     737             :  * filled with zeros instead of reading it from disk.  Useful when the caller
     738             :  * is going to fill the page from scratch, since this saves I/O and avoids
     739             :  * unnecessary failure if the page-on-disk has corrupt page headers.
     740             :  * The page is returned locked to ensure that the caller has a chance to
     741             :  * initialize the page before it's made visible to others.
     742             :  * Caution: do not use this mode to read a page that is beyond the relation's
     743             :  * current physical EOF; that is likely to cause problems in md.c when
     744             :  * the page is modified and written out. P_NEW is OK, though.
     745             :  *
     746             :  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
     747             :  * a cleanup-strength lock on the page.
     748             :  *
     749             :  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
     750             :  *
     751             :  * If strategy is not NULL, a nondefault buffer access strategy is used.
     752             :  * See buffer/README for details.
     753             :  */
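                      : /*
                      :  * Illustrative (hypothetical) use with a nondefault strategy, as a bulk
                      :  * sequential read might do:
                      :  *
                      :  *      BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
                      :  *      ...
                      :  *      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
                      :  *                               strategy);
                      :  */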
     754             : Buffer
     755   129301748 : ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
     756             :                    ReadBufferMode mode, BufferAccessStrategy strategy)
     757             : {
     758             :     bool        hit;
     759             :     Buffer      buf;
     760             : 
     761             :     /*
     762             :      * Reject attempts to read non-local temporary relations; we would be
     763             :      * likely to get wrong data since we have no visibility into the owning
     764             :      * session's local buffers.
     765             :      */
     766   129301748 :     if (RELATION_IS_OTHER_TEMP(reln))
     767           0 :         ereport(ERROR,
     768             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     769             :                  errmsg("cannot access temporary tables of other sessions")));
     770             : 
     771             :     /*
     772             :      * Read the buffer, and update pgstat counters to reflect a cache hit or
     773             :      * miss.
     774             :      */
     775   129301748 :     pgstat_count_buffer_read(reln);
     776   129301748 :     buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
     777             :                             forkNum, blockNum, mode, strategy, &hit);
     778   129301722 :     if (hit)
     779   127069174 :         pgstat_count_buffer_hit(reln);
     780   129301722 :     return buf;
     781             : }
     782             : 
     783             : 
     784             : /*
     785             :  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
     786             :  *      a relcache entry for the relation.
     787             :  *
     788             :  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
     789             :  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
     790             :  * cannot be used for temporary relations (and making that work might be
     791             :  * difficult, unless we only want to read temporary relations for our own
     792             :  * BackendId).
     793             :  */
     794             : Buffer
     795     5645926 : ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
     796             :                           BlockNumber blockNum, ReadBufferMode mode,
     797             :                           BufferAccessStrategy strategy, bool permanent)
     798             : {
     799             :     bool        hit;
     800             : 
     801     5645926 :     SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
     802             : 
     803     5645926 :     return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
     804             :                              RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
     805             :                              mode, strategy, &hit);
     806             : }
     807             : 
     808             : /*
     809             :  * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
     810             :  */
     811             : Buffer
     812      119188 : ExtendBufferedRel(ExtendBufferedWhat eb,
     813             :                   ForkNumber forkNum,
     814             :                   BufferAccessStrategy strategy,
     815             :                   uint32 flags)
     816             : {
     817             :     Buffer      buf;
     818      119188 :     uint32      extend_by = 1;
     819             : 
     820      119188 :     ExtendBufferedRelBy(eb, forkNum, strategy, flags, extend_by,
     821             :                         &buf, &extend_by);
     822             : 
     823      119188 :     return buf;
     824             : }
     825             : 
     826             : /*
     827             :  * Extend relation by multiple blocks.
     828             :  *
     829             :  * Tries to extend the relation by extend_by blocks. Depending on the
     830             :  * availability of resources the relation may end up being extended by a
     831             :  * smaller number of pages (unless an error is thrown, always by at least one
      832             :  * page). *extended_by is updated to the number of pages the relation has
      833             :  * actually been extended by.
     834             :  *
     835             :  * buffers needs to be an array that is at least extend_by long. Upon
      836             :  * completion, the first *extended_by array elements will point to a pinned
     837             :  * buffer.
     838             :  *
     839             :  * If EB_LOCK_FIRST is part of flags, the first returned buffer is
     840             :  * locked. This is useful for callers that want a buffer that is guaranteed to
     841             :  * be empty.
     842             :  */
     843             : BlockNumber
     844      540500 : ExtendBufferedRelBy(ExtendBufferedWhat eb,
     845             :                     ForkNumber fork,
     846             :                     BufferAccessStrategy strategy,
     847             :                     uint32 flags,
     848             :                     uint32 extend_by,
     849             :                     Buffer *buffers,
     850             :                     uint32 *extended_by)
     851             : {
     852             :     Assert((eb.rel != NULL) != (eb.smgr != NULL));
     853             :     Assert(eb.smgr == NULL || eb.relpersistence != 0);
     854             :     Assert(extend_by > 0);
     855             : 
     856      540500 :     if (eb.smgr == NULL)
     857             :     {
     858      540126 :         eb.smgr = RelationGetSmgr(eb.rel);
     859      540126 :         eb.relpersistence = eb.rel->rd_rel->relpersistence;
     860             :     }
     861             : 
     862      540500 :     return ExtendBufferedRelCommon(eb, fork, strategy, flags,
     863             :                                    extend_by, InvalidBlockNumber,
     864             :                                    buffers, extended_by);
     865             : }
     866             : 
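                      : /*
                      :  * Illustrative (hypothetical) multi-block extension using the above:
                      :  *
                      :  *      Buffer      bufs[16];
                      :  *      uint32      extended_by;
                      :  *
                      :  *      ExtendBufferedRelBy(EB_REL(rel), MAIN_FORKNUM, NULL, 0,
                      :  *                          lengthof(bufs), bufs, &extended_by);
                      :  *      -- only the first extended_by buffers are valid and pinned
                      :  */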
     867             : /*
      868             :  * Extend the relation so it is at least extend_to blocks large, and return
      869             :  * the buffer for block (extend_to - 1).
      870             :  *
      871             :  * This is useful for callers that want to write a specific page, regardless
      872             :  * of the current size of the relation (e.g. the visibility map and crash
      873             :  * recovery).
     874             :  */
     875             : Buffer
     876      127668 : ExtendBufferedRelTo(ExtendBufferedWhat eb,
     877             :                     ForkNumber fork,
     878             :                     BufferAccessStrategy strategy,
     879             :                     uint32 flags,
     880             :                     BlockNumber extend_to,
     881             :                     ReadBufferMode mode)
     882             : {
     883             :     BlockNumber current_size;
     884      127668 :     uint32      extended_by = 0;
     885      127668 :     Buffer      buffer = InvalidBuffer;
     886             :     Buffer      buffers[64];
     887             : 
     888             :     Assert((eb.rel != NULL) != (eb.smgr != NULL));
     889             :     Assert(eb.smgr == NULL || eb.relpersistence != 0);
     890             :     Assert(extend_to != InvalidBlockNumber && extend_to > 0);
     891             : 
     892      127668 :     if (eb.smgr == NULL)
     893             :     {
     894       55416 :         eb.smgr = RelationGetSmgr(eb.rel);
     895       55416 :         eb.relpersistence = eb.rel->rd_rel->relpersistence;
     896             :     }
     897             : 
     898             :     /*
     899             :      * If desired, create the file if it doesn't exist.  If
      900             :      * smgr_cached_nblocks[fork] is positive then it must exist; no need for
     901             :      * an smgrexists call.
     902             :      */
     903      127668 :     if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
     904       55416 :         (eb.smgr->smgr_cached_nblocks[fork] == 0 ||
     905          32 :          eb.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
     906       55384 :         !smgrexists(eb.smgr, fork))
     907             :     {
     908       55372 :         LockRelationForExtension(eb.rel, ExclusiveLock);
     909             : 
     910             :         /* could have been closed while waiting for lock */
     911       55372 :         if (eb.rel)
     912       55372 :             eb.smgr = RelationGetSmgr(eb.rel);
     913             : 
     914             :         /* recheck, fork might have been created concurrently */
     915       55372 :         if (!smgrexists(eb.smgr, fork))
     916       55368 :             smgrcreate(eb.smgr, fork, flags & EB_PERFORMING_RECOVERY);
     917             : 
     918       55372 :         UnlockRelationForExtension(eb.rel, ExclusiveLock);
     919             :     }
     920             : 
     921             :     /*
     922             :      * If requested, invalidate size cache, so that smgrnblocks asks the
     923             :      * kernel.
     924             :      */
     925      127668 :     if (flags & EB_CLEAR_SIZE_CACHE)
     926       55416 :         eb.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
     927             : 
     928             :     /*
     929             :      * Estimate how many pages we'll need to extend by. This avoids acquiring
     930             :      * unnecessarily many victim buffers.
     931             :      */
     932      127668 :     current_size = smgrnblocks(eb.smgr, fork);
     933             : 
     934             :     /*
     935             :      * Since no-one else can be looking at the page contents yet, there is no
     936             :      * difference between an exclusive lock and a cleanup-strength lock. Note
     937             :      * that we pass the original mode to ReadBuffer_common() below, when
      938             :      * falling back to reading the buffer due to a concurrent relation extension.
     939             :      */
     940      127668 :     if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
     941       71608 :         flags |= EB_LOCK_TARGET;
     942             : 
     943      259676 :     while (current_size < extend_to)
     944             :     {
     945      132008 :         uint32      num_pages = lengthof(buffers);
     946             :         BlockNumber first_block;
     947             : 
     948      132008 :         if ((uint64) current_size + num_pages > extend_to)
     949      131876 :             num_pages = extend_to - current_size;
     950             : 
     951      132008 :         first_block = ExtendBufferedRelCommon(eb, fork, strategy, flags,
     952             :                                               num_pages, extend_to,
     953             :                                               buffers, &extended_by);
     954             : 
     955      132008 :         current_size = first_block + extended_by;
     956             :         Assert(num_pages != 0 || current_size >= extend_to);
     957             : 
     958      319964 :         for (int i = 0; i < extended_by; i++)
     959             :         {
     960      187956 :             if (first_block + i != extend_to - 1)
     961       60300 :                 ReleaseBuffer(buffers[i]);
     962             :             else
     963      127656 :                 buffer = buffers[i];
     964             :         }
     965             :     }
     966             : 
     967             :     /*
     968             :      * It's possible that another backend concurrently extended the relation.
     969             :      * In that case read the buffer.
     970             :      *
     971             :      * XXX: Should we control this via a flag?
     972             :      */
     973      127668 :     if (buffer == InvalidBuffer)
     974             :     {
     975             :         bool        hit;
     976             : 
     977             :         Assert(extended_by == 0);
     978          12 :         buffer = ReadBuffer_common(eb.smgr, eb.relpersistence,
     979             :                                    fork, extend_to - 1, mode, strategy,
     980             :                                    &hit);
     981             :     }
     982             : 
     983      127668 :     return buffer;
     984             : }
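                      : 
                      : /*
                      :  * Illustrative sketch of a typical ExtendBufferedRelTo() caller, modeled
                      :  * on how the visibility map grows its fork to cover a given heap block.
                      :  * This is not code from this file; "rel" and "vm_nblocks" are assumed
                      :  * names for the example:
                      :  *
                      :  *     Buffer      buf;
                      :  *
                      :  *     buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                      :  *                               EB_CREATE_FORK_IF_NEEDED |
                      :  *                               EB_CLEAR_SIZE_CACHE,
                      :  *                               vm_nblocks, RBM_ZERO_ON_ERROR);
                      :  *     ReleaseBuffer(buf);
                      :  *
                      :  * If another backend already extended the relation past vm_nblocks, the
                      :  * fallback path above simply reads the target block instead.
                      :  */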
     985             : 
     986             : /*
     987             :  * ReadBuffer_common -- common logic for all ReadBuffer variants
     988             :  *
     989             :  * *hit is set to true if the request was satisfied from shared buffer cache.
     990             :  */
     991             : static Buffer
     992   134947686 : ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
     993             :                   BlockNumber blockNum, ReadBufferMode mode,
     994             :                   BufferAccessStrategy strategy, bool *hit)
     995             : {
     996             :     BufferDesc *bufHdr;
     997             :     Block       bufBlock;
     998             :     bool        found;
     999             :     IOContext   io_context;
    1000             :     IOObject    io_object;
    1001   134947686 :     bool        isLocalBuf = SmgrIsTemp(smgr);
    1002             : 
    1003   134947686 :     *hit = false;
    1004             : 
    1005             :     /*
     1006             :      * Backward compatibility path: most code should use ExtendBufferedRel()
    1007             :      * instead, as acquiring the extension lock inside ExtendBufferedRel()
    1008             :      * scales a lot better.
    1009             :      */
    1010   134947686 :     if (unlikely(blockNum == P_NEW))
    1011             :     {
    1012         374 :         uint32      flags = EB_SKIP_EXTENSION_LOCK;
    1013             : 
    1014             :         /*
    1015             :          * Since no-one else can be looking at the page contents yet, there is
    1016             :          * no difference between an exclusive lock and a cleanup-strength
    1017             :          * lock.
    1018             :          */
    1019         374 :         if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
    1020           0 :             flags |= EB_LOCK_FIRST;
    1021             : 
    1022         374 :         return ExtendBufferedRel(EB_SMGR(smgr, relpersistence),
    1023             :                                  forkNum, strategy, flags);
    1024             :     }
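                      : 
                      :     /*
                      :      * Illustrative sketch, not code from this file: the legacy path above
                      :      * serves callers written as
                      :      *
                      :      *     buf = ReadBuffer(rel, P_NEW);
                      :      *
                      :      * The preferred, better-scaling equivalent is
                      :      *
                      :      *     buf = ExtendBufferedRel(EB_REL(rel), MAIN_FORKNUM, NULL, 0);
                      :      *
                      :      * where passing EB_LOCK_FIRST instead of 0 additionally returns the
                      :      * new page exclusively locked.  ("rel" is an assumed name for the
                      :      * example.)
                      :      */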
    1025             : 
    1026             :     /* Make sure we will have room to remember the buffer pin */
    1027   134947312 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    1028             : 
    1029             :     TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
    1030             :                                        smgr->smgr_rlocator.locator.spcOid,
    1031             :                                        smgr->smgr_rlocator.locator.dbOid,
    1032             :                                        smgr->smgr_rlocator.locator.relNumber,
    1033             :                                        smgr->smgr_rlocator.backend);
    1034             : 
    1035   134947312 :     if (isLocalBuf)
    1036             :     {
    1037             :         /*
    1038             :          * We do not use a BufferAccessStrategy for I/O of temporary tables.
    1039             :          * However, in some cases, the "strategy" may not be NULL, so we can't
    1040             :          * rely on IOContextForStrategy() to set the right IOContext for us.
    1041             :          * This may happen in cases like CREATE TEMPORARY TABLE AS...
    1042             :          */
    1043     2092396 :         io_context = IOCONTEXT_NORMAL;
    1044     2092396 :         io_object = IOOBJECT_TEMP_RELATION;
    1045     2092396 :         bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
    1046     2092396 :         if (found)
    1047     2084818 :             pgBufferUsage.local_blks_hit++;
    1048        7578 :         else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
    1049             :                  mode == RBM_ZERO_ON_ERROR)
    1050        7578 :             pgBufferUsage.local_blks_read++;
    1051             :     }
    1052             :     else
    1053             :     {
    1054             :         /*
     1055             :          * look up the buffer.  IO_IN_PROGRESS is set if the requested block is
    1056             :          * not currently in memory.
    1057             :          */
    1058   132854916 :         io_context = IOContextForStrategy(strategy);
    1059   132854916 :         io_object = IOOBJECT_RELATION;
    1060   132854916 :         bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
    1061             :                              strategy, &found, io_context);
    1062   132854916 :         if (found)
    1063   129949872 :             pgBufferUsage.shared_blks_hit++;
    1064     2905044 :         else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
    1065             :                  mode == RBM_ZERO_ON_ERROR)
    1066     2527140 :             pgBufferUsage.shared_blks_read++;
    1067             :     }
    1068             : 
    1069             :     /* At this point we do NOT hold any locks. */
    1070             : 
    1071             :     /* if it was already in the buffer pool, we're done */
    1072   134947312 :     if (found)
    1073             :     {
    1074             :         /* Just need to update stats before we exit */
    1075   132034690 :         *hit = true;
    1076   132034690 :         VacuumPageHit++;
    1077   132034690 :         pgstat_count_io_op(io_object, io_context, IOOP_HIT);
    1078             : 
    1079   132034690 :         if (VacuumCostActive)
    1080      102808 :             VacuumCostBalance += VacuumCostPageHit;
    1081             : 
    1082             :         TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
    1083             :                                           smgr->smgr_rlocator.locator.spcOid,
    1084             :                                           smgr->smgr_rlocator.locator.dbOid,
    1085             :                                           smgr->smgr_rlocator.locator.relNumber,
    1086             :                                           smgr->smgr_rlocator.backend,
    1087             :                                           found);
    1088             : 
    1089             :         /*
    1090             :          * In RBM_ZERO_AND_LOCK mode the caller expects the page to be locked
    1091             :          * on return.
    1092             :          */
    1093   132034690 :         if (!isLocalBuf)
    1094             :         {
    1095   129949872 :             if (mode == RBM_ZERO_AND_LOCK)
    1096       60258 :                 LWLockAcquire(BufferDescriptorGetContentLock(bufHdr),
    1097             :                               LW_EXCLUSIVE);
    1098   129889614 :             else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
    1099          66 :                 LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
    1100             :         }
    1101             : 
    1102   132034690 :         return BufferDescriptorGetBuffer(bufHdr);
    1103             :     }
    1104             : 
    1105             :     /*
    1106             :      * if we have gotten to this point, we have allocated a buffer for the
    1107             :      * page but its contents are not yet valid.  IO_IN_PROGRESS is set for it,
    1108             :      * if it's a shared buffer.
    1109             :      */
    1110             :     Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID));    /* spinlock not needed */
    1111             : 
    1112     2912622 :     bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
    1113             : 
    1114             :     /*
    1115             :      * Read in the page, unless the caller intends to overwrite it and just
    1116             :      * wants us to allocate a buffer.
    1117             :      */
    1118     2912622 :     if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
    1119      377904 :         MemSet((char *) bufBlock, 0, BLCKSZ);
    1120             :     else
    1121             :     {
    1122     2534718 :         instr_time  io_start = pgstat_prepare_io_time();
    1123             : 
    1124     2534718 :         smgrread(smgr, forkNum, blockNum, bufBlock);
    1125             : 
    1126     2534692 :         pgstat_count_io_op_time(io_object, io_context,
    1127             :                                 IOOP_READ, io_start, 1);
    1128             : 
    1129             :         /* check for garbage data */
    1130     2534692 :         if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
    1131             :                                     PIV_LOG_WARNING | PIV_REPORT_STAT))
    1132             :         {
    1133           0 :             if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
    1134             :             {
    1135           0 :                 ereport(WARNING,
    1136             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    1137             :                          errmsg("invalid page in block %u of relation %s; zeroing out page",
    1138             :                                 blockNum,
    1139             :                                 relpath(smgr->smgr_rlocator, forkNum))));
    1140           0 :                 MemSet((char *) bufBlock, 0, BLCKSZ);
    1141             :             }
    1142             :             else
    1143           0 :                 ereport(ERROR,
    1144             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    1145             :                          errmsg("invalid page in block %u of relation %s",
    1146             :                                 blockNum,
    1147             :                                 relpath(smgr->smgr_rlocator, forkNum))));
    1148             :         }
    1149             :     }
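                      : 
                      :     /*
                      :      * Illustrative sketch, not code from this file: callers opt into the
                      :      * zeroing behavior above either per-read, via RBM_ZERO_ON_ERROR, e.g.
                      :      *
                      :      *     buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                      :      *                              RBM_ZERO_ON_ERROR, NULL);
                      :      *
                      :      * or globally via the zero_damaged_pages GUC.  ("rel" and "blkno" are
                      :      * assumed names for the example.)
                      :      */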
    1150             : 
    1151             :     /*
    1152             :      * In RBM_ZERO_AND_LOCK / RBM_ZERO_AND_CLEANUP_LOCK mode, grab the buffer
    1153             :      * content lock before marking the page as valid, to make sure that no
    1154             :      * other backend sees the zeroed page before the caller has had a chance
    1155             :      * to initialize it.
    1156             :      *
    1157             :      * Since no-one else can be looking at the page contents yet, there is no
    1158             :      * difference between an exclusive lock and a cleanup-strength lock. (Note
    1159             :      * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
    1160             :      * they assert that the buffer is already valid.)
    1161             :      */
    1162     2912596 :     if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
    1163      377904 :         !isLocalBuf)
    1164             :     {
    1165      377904 :         LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
    1166             :     }
    1167             : 
    1168     2912596 :     if (isLocalBuf)
    1169             :     {
    1170             :         /* Only need to adjust flags */
    1171        7578 :         uint32      buf_state = pg_atomic_read_u32(&bufHdr->state);
    1172             : 
    1173        7578 :         buf_state |= BM_VALID;
    1174        7578 :         pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
    1175             :     }
    1176             :     else
    1177             :     {
    1178             :         /* Set BM_VALID, terminate IO, and wake up any waiters */
    1179     2905018 :         TerminateBufferIO(bufHdr, false, BM_VALID);
    1180             :     }
    1181             : 
    1182     2912596 :     VacuumPageMiss++;
    1183     2912596 :     if (VacuumCostActive)
    1184        1032 :         VacuumCostBalance += VacuumCostPageMiss;
    1185             : 
    1186             :     TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
    1187             :                                       smgr->smgr_rlocator.locator.spcOid,
    1188             :                                       smgr->smgr_rlocator.locator.dbOid,
    1189             :                                       smgr->smgr_rlocator.locator.relNumber,
    1190             :                                       smgr->smgr_rlocator.backend,
    1191             :                                       found);
    1192             : 
    1193     2912596 :     return BufferDescriptorGetBuffer(bufHdr);
    1194             : }
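                      : 
                      : /*
                      :  * Illustrative sketch of the canonical caller pattern for
                      :  * RBM_ZERO_AND_LOCK, which relies on the behavior implemented above: the
                      :  * buffer comes back zeroed and exclusively locked, so the caller can
                      :  * initialize it before any other backend can see it.  Not code from this
                      :  * file; "rel" and "blkno" are assumed names for the example:
                      :  *
                      :  *     buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                      :  *                              RBM_ZERO_AND_LOCK, NULL);
                      :  *     page = BufferGetPage(buf);
                      :  *     PageInit(page, BufferGetPageSize(buf), 0);
                      :  *     ... fill in page contents, WAL-log if needed ...
                      :  *     MarkBufferDirty(buf);
                      :  *     UnlockReleaseBuffer(buf);
                      :  */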
    1195             : 
    1196             : /*
    1197             :  * BufferAlloc -- subroutine for ReadBuffer.  Handles lookup of a shared
    1198             :  *      buffer.  If no buffer exists already, selects a replacement
    1199             :  *      victim and evicts the old page, but does NOT read in new page.
    1200             :  *
    1201             :  * "strategy" can be a buffer replacement strategy object, or NULL for
    1202             :  * the default strategy.  The selected buffer's usage_count is advanced when
    1203             :  * using the default strategy, but otherwise possibly not (see PinBuffer).
    1204             :  *
    1205             :  * The returned buffer is pinned and is already marked as holding the
    1206             :  * desired page.  If it already did have the desired page, *foundPtr is
    1207             :  * set true.  Otherwise, *foundPtr is set false and the buffer is marked
    1208             :  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
    1209             :  *
    1210             :  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
    1211             :  * we keep it for simplicity in ReadBuffer.
    1212             :  *
     1213             :  * io_context is passed in by the caller, who has already determined the
     1214             :  * IOContext via IOContextForStrategy(); that way it need not be recomputed
     1215             :  * here when counting I/O for a victim buffer.
    1216             :  *
    1217             :  * No locks are held either at entry or exit.
    1218             :  */
    1219             : static BufferDesc *
    1220   132854916 : BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
    1221             :             BlockNumber blockNum,
    1222             :             BufferAccessStrategy strategy,
    1223             :             bool *foundPtr, IOContext io_context)
    1224             : {
    1225             :     BufferTag   newTag;         /* identity of requested block */
    1226             :     uint32      newHash;        /* hash value for newTag */
    1227             :     LWLock     *newPartitionLock;   /* buffer partition lock for it */
    1228             :     int         existing_buf_id;
    1229             :     Buffer      victim_buffer;
    1230             :     BufferDesc *victim_buf_hdr;
    1231             :     uint32      victim_buf_state;
    1232             : 
    1233             :     /* create a tag so we can lookup the buffer */
    1234   132854916 :     InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
    1235             : 
    1236             :     /* determine its hash code and partition lock ID */
    1237   132854916 :     newHash = BufTableHashCode(&newTag);
    1238   132854916 :     newPartitionLock = BufMappingPartitionLock(newHash);
    1239             : 
    1240             :     /* see if the block is in the buffer pool already */
    1241   132854916 :     LWLockAcquire(newPartitionLock, LW_SHARED);
    1242   132854916 :     existing_buf_id = BufTableLookup(&newTag, newHash);
    1243   132854916 :     if (existing_buf_id >= 0)
    1244             :     {
    1245             :         BufferDesc *buf;
    1246             :         bool        valid;
    1247             : 
    1248             :         /*
    1249             :          * Found it.  Now, pin the buffer so no one can steal it from the
    1250             :          * buffer pool, and check to see if the correct data has been loaded
    1251             :          * into the buffer.
    1252             :          */
    1253   129949670 :         buf = GetBufferDescriptor(existing_buf_id);
    1254             : 
    1255   129949670 :         valid = PinBuffer(buf, strategy);
    1256             : 
    1257             :         /* Can release the mapping lock as soon as we've pinned it */
    1258   129949670 :         LWLockRelease(newPartitionLock);
    1259             : 
    1260   129949670 :         *foundPtr = true;
    1261             : 
    1262   129949670 :         if (!valid)
    1263             :         {
    1264             :             /*
    1265             :              * We can only get here if (a) someone else is still reading in
    1266             :              * the page, or (b) a previous read attempt failed.  We have to
    1267             :              * wait for any active read attempt to finish, and then set up our
    1268             :              * own read attempt if the page is still not BM_VALID.
    1269             :              * StartBufferIO does it all.
    1270             :              */
    1271         236 :             if (StartBufferIO(buf, true))
    1272             :             {
    1273             :                 /*
    1274             :                  * If we get here, previous attempts to read the buffer must
    1275             :                  * have failed ... but we shall bravely try again.
    1276             :                  */
    1277          22 :                 *foundPtr = false;
    1278             :             }
    1279             :         }
    1280             : 
    1281   129949670 :         return buf;
    1282             :     }
    1283             : 
    1284             :     /*
    1285             :      * Didn't find it in the buffer pool.  We'll have to initialize a new
    1286             :      * buffer.  Remember to unlock the mapping lock while doing the work.
    1287             :      */
    1288     2905246 :     LWLockRelease(newPartitionLock);
    1289             : 
    1290             :     /*
    1291             :      * Acquire a victim buffer. Somebody else might try to do the same, we
    1292             :      * don't hold any conflicting locks. If so we'll have to undo our work
    1293             :      * later.
    1294             :      */
    1295     2905246 :     victim_buffer = GetVictimBuffer(strategy, io_context);
    1296     2905246 :     victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
    1297             : 
    1298             :     /*
    1299             :      * Try to make a hashtable entry for the buffer under its new tag. If
    1300             :      * somebody else inserted another buffer for the tag, we'll release the
    1301             :      * victim buffer we acquired and use the already inserted one.
    1302             :      */
    1303     2905246 :     LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
    1304     2905246 :     existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
    1305     2905246 :     if (existing_buf_id >= 0)
    1306             :     {
    1307             :         BufferDesc *existing_buf_hdr;
    1308             :         bool        valid;
    1309             : 
    1310             :         /*
    1311             :          * Got a collision. Someone has already done what we were about to do.
    1312             :          * We'll just handle this as if it were found in the buffer pool in
    1313             :          * the first place.  First, give up the buffer we were planning to
    1314             :          * use.
    1315             :          *
    1316             :          * We could do this after releasing the partition lock, but then we'd
    1317             :          * have to call ResourceOwnerEnlargeBuffers() &
    1318             :          * ReservePrivateRefCountEntry() before acquiring the lock, for the
    1319             :          * rare case of such a collision.
    1320             :          */
    1321         222 :         UnpinBuffer(victim_buf_hdr);
    1322             : 
    1323             :         /*
     1324             :          * The victim buffer we acquired previously is clean and unused; let
     1325             :          * it be found again quickly.
    1326             :          */
    1327         222 :         StrategyFreeBuffer(victim_buf_hdr);
    1328             : 
    1329             :         /* remaining code should match code at top of routine */
    1330             : 
    1331         222 :         existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
    1332             : 
    1333         222 :         valid = PinBuffer(existing_buf_hdr, strategy);
    1334             : 
    1335             :         /* Can release the mapping lock as soon as we've pinned it */
    1336         222 :         LWLockRelease(newPartitionLock);
    1337             : 
    1338         222 :         *foundPtr = true;
    1339             : 
    1340         222 :         if (!valid)
    1341             :         {
    1342             :             /*
    1343             :              * We can only get here if (a) someone else is still reading in
    1344             :              * the page, or (b) a previous read attempt failed.  We have to
    1345             :              * wait for any active read attempt to finish, and then set up our
    1346             :              * own read attempt if the page is still not BM_VALID.
    1347             :              * StartBufferIO does it all.
    1348             :              */
    1349          78 :             if (StartBufferIO(existing_buf_hdr, true))
    1350             :             {
    1351             :                 /*
    1352             :                  * If we get here, previous attempts to read the buffer must
    1353             :                  * have failed ... but we shall bravely try again.
    1354             :                  */
    1355           0 :                 *foundPtr = false;
    1356             :             }
    1357             :         }
    1358             : 
    1359         222 :         return existing_buf_hdr;
    1360             :     }
    1361             : 
    1362             :     /*
    1363             :      * Need to lock the buffer header too in order to change its tag.
    1364             :      */
    1365     2905024 :     victim_buf_state = LockBufHdr(victim_buf_hdr);
    1366             : 
    1367             :     /* some sanity checks while we hold the buffer header lock */
    1368             :     Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
    1369             :     Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
    1370             : 
    1371     2905024 :     victim_buf_hdr->tag = newTag;
    1372             : 
    1373             :     /*
    1374             :      * Make sure BM_PERMANENT is set for buffers that must be written at every
    1375             :      * checkpoint.  Unlogged buffers only need to be written at shutdown
    1376             :      * checkpoints, except for their "init" forks, which need to be treated
    1377             :      * just like permanent relations.
    1378             :      */
    1379     2905024 :     victim_buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
    1380     2905024 :     if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
    1381     2904974 :         victim_buf_state |= BM_PERMANENT;
    1382             : 
    1383     2905024 :     UnlockBufHdr(victim_buf_hdr, victim_buf_state);
    1384             : 
    1385     2905024 :     LWLockRelease(newPartitionLock);
    1386             : 
    1387             :     /*
    1388             :      * Buffer contents are currently invalid.  Try to obtain the right to
    1389             :      * start I/O.  If StartBufferIO returns false, then someone else managed
    1390             :      * to read it before we did, so there's nothing left for BufferAlloc() to
    1391             :      * do.
    1392             :      */
    1393     2905024 :     if (StartBufferIO(victim_buf_hdr, true))
    1394     2905022 :         *foundPtr = false;
    1395             :     else
    1396           2 :         *foundPtr = true;
    1397             : 
    1398     2905024 :     return victim_buf_hdr;
    1399             : }
    1400             : 
    1401             : /*
    1402             :  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
    1403             :  * freelist.
    1404             :  *
    1405             :  * The buffer header spinlock must be held at entry.  We drop it before
    1406             :  * returning.  (This is sane because the caller must have locked the
    1407             :  * buffer in order to be sure it should be dropped.)
    1408             :  *
    1409             :  * This is used only in contexts such as dropping a relation.  We assume
    1410             :  * that no other backend could possibly be interested in using the page,
    1411             :  * so the only reason the buffer might be pinned is if someone else is
    1412             :  * trying to write it out.  We have to let them finish before we can
    1413             :  * reclaim the buffer.
    1414             :  *
    1415             :  * The buffer could get reclaimed by someone else while we are waiting
    1416             :  * to acquire the necessary locks; if so, don't mess it up.
    1417             :  */
    1418             : static void
    1419      170830 : InvalidateBuffer(BufferDesc *buf)
    1420             : {
    1421             :     BufferTag   oldTag;
    1422             :     uint32      oldHash;        /* hash value for oldTag */
    1423             :     LWLock     *oldPartitionLock;   /* buffer partition lock for it */
    1424             :     uint32      oldFlags;
    1425             :     uint32      buf_state;
    1426             : 
    1427             :     /* Save the original buffer tag before dropping the spinlock */
    1428      170830 :     oldTag = buf->tag;
    1429             : 
    1430      170830 :     buf_state = pg_atomic_read_u32(&buf->state);
    1431             :     Assert(buf_state & BM_LOCKED);
    1432      170830 :     UnlockBufHdr(buf, buf_state);
    1433             : 
    1434             :     /*
    1435             :      * Need to compute the old tag's hashcode and partition lock ID. XXX is it
    1436             :      * worth storing the hashcode in BufferDesc so we need not recompute it
    1437             :      * here?  Probably not.
    1438             :      */
    1439      170830 :     oldHash = BufTableHashCode(&oldTag);
    1440      170830 :     oldPartitionLock = BufMappingPartitionLock(oldHash);
    1441             : 
    1442      170830 : retry:
    1443             : 
    1444             :     /*
    1445             :      * Acquire exclusive mapping lock in preparation for changing the buffer's
    1446             :      * association.
    1447             :      */
    1448      170830 :     LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
    1449             : 
    1450             :     /* Re-lock the buffer header */
    1451      170830 :     buf_state = LockBufHdr(buf);
    1452             : 
    1453             :     /* If it's changed while we were waiting for lock, do nothing */
    1454      170830 :     if (!BufferTagsEqual(&buf->tag, &oldTag))
    1455             :     {
    1456           0 :         UnlockBufHdr(buf, buf_state);
    1457           0 :         LWLockRelease(oldPartitionLock);
    1458           0 :         return;
    1459             :     }
    1460             : 
    1461             :     /*
    1462             :      * We assume the only reason for it to be pinned is that someone else is
    1463             :      * flushing the page out.  Wait for them to finish.  (This could be an
    1464             :      * infinite loop if the refcount is messed up... it would be nice to time
    1465             :      * out after awhile, but there seems no way to be sure how many loops may
     1466             :      * out after a while, but there seems no way to be sure how many loops may
    1467             :      * yet done StartBufferIO, WaitIO will fall through and we'll effectively
    1468             :      * be busy-looping here.)
    1469             :      */
    1470      170830 :     if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
    1471             :     {
    1472           0 :         UnlockBufHdr(buf, buf_state);
    1473           0 :         LWLockRelease(oldPartitionLock);
    1474             :         /* safety check: should definitely not be our *own* pin */
    1475           0 :         if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
    1476           0 :             elog(ERROR, "buffer is pinned in InvalidateBuffer");
    1477           0 :         WaitIO(buf);
    1478           0 :         goto retry;
    1479             :     }
    1480             : 
    1481             :     /*
    1482             :      * Clear out the buffer's tag and flags.  We must do this to ensure that
    1483             :      * linear scans of the buffer array don't think the buffer is valid.
    1484             :      */
    1485      170830 :     oldFlags = buf_state & BUF_FLAG_MASK;
    1486      170830 :     ClearBufferTag(&buf->tag);
    1487      170830 :     buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
    1488      170830 :     UnlockBufHdr(buf, buf_state);
    1489             : 
    1490             :     /*
    1491             :      * Remove the buffer from the lookup hashtable, if it was in there.
    1492             :      */
    1493      170830 :     if (oldFlags & BM_TAG_VALID)
    1494      170830 :         BufTableDelete(&oldTag, oldHash);
    1495             : 
    1496             :     /*
    1497             :      * Done with mapping lock.
    1498             :      */
    1499      170830 :     LWLockRelease(oldPartitionLock);
    1500             : 
    1501             :     /*
    1502             :      * Insert the buffer at the head of the list of free buffers.
    1503             :      */
    1504      170830 :     StrategyFreeBuffer(buf);
    1505             : }
    1506             : 
    1507             : /*
    1508             :  * Helper routine for GetVictimBuffer()
    1509             :  *
    1510             :  * Needs to be called on a buffer with a valid tag, pinned, but without the
    1511             :  * buffer header spinlock held.
    1512             :  *
    1513             :  * Returns true if the buffer can be reused, in which case the buffer is only
    1514             :  * pinned by this backend and marked as invalid, false otherwise.
    1515             :  */
    1516             : static bool
    1517     2023940 : InvalidateVictimBuffer(BufferDesc *buf_hdr)
    1518             : {
    1519             :     uint32      buf_state;
    1520             :     uint32      hash;
    1521             :     LWLock     *partition_lock;
    1522             :     BufferTag   tag;
    1523             : 
    1524             :     Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
    1525             : 
    1526             :     /* have buffer pinned, so it's safe to read tag without lock */
    1527     2023940 :     tag = buf_hdr->tag;
    1528             : 
    1529     2023940 :     hash = BufTableHashCode(&tag);
    1530     2023940 :     partition_lock = BufMappingPartitionLock(hash);
    1531             : 
    1532     2023940 :     LWLockAcquire(partition_lock, LW_EXCLUSIVE);
    1533             : 
    1534             :     /* lock the buffer header */
    1535     2023940 :     buf_state = LockBufHdr(buf_hdr);
    1536             : 
    1537             :     /*
     1538             :      * We have the buffer pinned, so nobody else should have been able to
     1539             :      * unset this concurrently.
    1540             :      */
    1541             :     Assert(buf_state & BM_TAG_VALID);
    1542             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    1543             :     Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
    1544             : 
    1545             :     /*
    1546             :      * If somebody else pinned the buffer since, or even worse, dirtied it,
    1547             :      * give up on this buffer: It's clearly in use.
    1548             :      */
    1549     2023940 :     if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
    1550             :     {
    1551             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    1552             : 
    1553         342 :         UnlockBufHdr(buf_hdr, buf_state);
    1554         342 :         LWLockRelease(partition_lock);
    1555             : 
    1556         342 :         return false;
    1557             :     }
    1558             : 
    1559             :     /*
    1560             :      * Clear out the buffer's tag and flags and usagecount.  This is not
    1561             :      * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
    1562             :      * doing anything with the buffer. But currently it's beneficial, as the
     1563             :      * cheaper pre-check for several linear scans of shared buffers uses the
    1564             :      * tag (see e.g. FlushDatabaseBuffers()).
    1565             :      */
    1566     2023598 :     ClearBufferTag(&buf_hdr->tag);
    1567     2023598 :     buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
    1568     2023598 :     UnlockBufHdr(buf_hdr, buf_state);
    1569             : 
    1570             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    1571             : 
    1572             :     /* finally delete buffer from the buffer mapping table */
    1573     2023598 :     BufTableDelete(&tag, hash);
    1574             : 
    1575     2023598 :     LWLockRelease(partition_lock);
    1576             : 
    1577             :     Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
    1578             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    1579             :     Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
    1580             : 
    1581     2023598 :     return true;
    1582             : }
    1583             : 
    1584             : static Buffer
    1585     3629952 : GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
    1586             : {
    1587             :     BufferDesc *buf_hdr;
    1588             :     Buffer      buf;
    1589             :     uint32      buf_state;
    1590             :     bool        from_ring;
    1591             : 
    1592             :     /*
    1593             :      * Ensure, while the spinlock's not yet held, that there's a free refcount
    1594             :      * entry.
    1595             :      */
    1596     3629952 :     ReservePrivateRefCountEntry();
    1597     3629952 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    1598             : 
    1599             :     /* we return here if a prospective victim buffer gets used concurrently */
    1600     3646072 : again:
    1601             : 
    1602             :     /*
    1603             :      * Select a victim buffer.  The buffer is returned with its header
    1604             :      * spinlock still held!
    1605             :      */
    1606     3646072 :     buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
    1607     3646072 :     buf = BufferDescriptorGetBuffer(buf_hdr);
    1608             : 
    1609             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
    1610             : 
    1611             :     /* Pin the buffer and then release the buffer spinlock */
    1612     3646072 :     PinBuffer_Locked(buf_hdr);
    1613             : 
    1614             :     /*
    1615             :      * We shouldn't have any other pins for this buffer.
    1616             :      */
    1617     3646072 :     CheckBufferIsPinnedOnce(buf);
    1618             : 
    1619             :     /*
    1620             :      * If the buffer was dirty, try to write it out.  There is a race
    1621             :      * condition here, in that someone might dirty it after we released the
    1622             :      * buffer header lock above, or even while we are writing it out (since
    1623             :      * our share-lock won't prevent hint-bit updates).  We will recheck the
    1624             :      * dirty bit after re-locking the buffer header.
    1625             :      */
    1626     3646072 :     if (buf_state & BM_DIRTY)
    1627             :     {
    1628             :         LWLock     *content_lock;
    1629             : 
    1630             :         Assert(buf_state & BM_TAG_VALID);
    1631             :         Assert(buf_state & BM_VALID);
    1632             : 
    1633             :         /*
    1634             :          * We need a share-lock on the buffer contents to write it out (else
    1635             :          * we might write invalid data, eg because someone else is compacting
    1636             :          * the page contents while we write).  We must use a conditional lock
    1637             :          * acquisition here to avoid deadlock.  Even though the buffer was not
    1638             :          * pinned (and therefore surely not locked) when StrategyGetBuffer
    1639             :          * returned it, someone else could have pinned and exclusive-locked it
    1640             :          * by the time we get here. If we try to get the lock unconditionally,
    1641             :          * we'd block waiting for them; if they later block waiting for us,
    1642             :          * deadlock ensues. (This has been observed to happen when two
    1643             :          * backends are both trying to split btree index pages, and the second
    1644             :          * one just happens to be trying to split the page the first one got
    1645             :          * from StrategyGetBuffer.)
    1646             :          */
    1647      455010 :         content_lock = BufferDescriptorGetContentLock(buf_hdr);
    1648      455010 :         if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
    1649             :         {
    1650             :             /*
    1651             :              * Someone else has locked the buffer, so give it up and loop back
    1652             :              * to get another one.
    1653             :              */
    1654           0 :             UnpinBuffer(buf_hdr);
    1655           0 :             goto again;
    1656             :         }
    1657             : 
    1658             :         /*
    1659             :          * If using a nondefault strategy, and writing the buffer would
    1660             :          * require a WAL flush, let the strategy decide whether to go ahead
    1661             :          * and write/reuse the buffer or to choose another victim.  We need a
    1662             :          * lock to inspect the page LSN, so this can't be done inside
    1663             :          * StrategyGetBuffer.
    1664             :          */
    1665      455010 :         if (strategy != NULL)
    1666             :         {
    1667             :             XLogRecPtr  lsn;
    1668             : 
    1669             :             /* Read the LSN while holding buffer header lock */
    1670      130872 :             buf_state = LockBufHdr(buf_hdr);
    1671      130872 :             lsn = BufferGetLSN(buf_hdr);
    1672      130872 :             UnlockBufHdr(buf_hdr, buf_state);
    1673             : 
    1674      130872 :             if (XLogNeedsFlush(lsn)
    1675       19924 :                 && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
    1676             :             {
    1677       15778 :                 LWLockRelease(content_lock);
    1678       15778 :                 UnpinBuffer(buf_hdr);
    1679       15778 :                 goto again;
    1680             :             }
    1681             :         }
    1682             : 
    1683             :         /* OK, do the I/O */
    1684      439232 :         FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
    1685      439232 :         LWLockRelease(content_lock);
    1686             : 
    1687      439232 :         ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
    1688             :                                       &buf_hdr->tag);
    1689             :     }
    1690             : 
    1691             : 
    1692     3630294 :     if (buf_state & BM_VALID)
    1693             :     {
    1694             :         /*
    1695             :          * When a BufferAccessStrategy is in use, blocks evicted from shared
    1696             :          * buffers are counted as IOOP_EVICT in the corresponding context
    1697             :          * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
    1698             :          * strategy in two cases: 1) while initially claiming buffers for the
     1699             :          * strategy ring, and 2) to replace an existing strategy ring buffer
    1700             :          * because it is pinned or in use and cannot be reused.
    1701             :          *
    1702             :          * Blocks evicted from buffers already in the strategy ring are
    1703             :          * counted as IOOP_REUSE in the corresponding strategy context.
    1704             :          *
    1705             :          * At this point, we can accurately count evictions and reuses,
    1706             :          * because we have successfully claimed the valid buffer. Previously,
    1707             :          * we may have been forced to release the buffer due to concurrent
    1708             :          * pinners or erroring out.
    1709             :          */
    1710     2023940 :         pgstat_count_io_op(IOOBJECT_RELATION, io_context,
    1711     2023940 :                            from_ring ? IOOP_REUSE : IOOP_EVICT);
    1712             :     }
    1713             : 
    1714             :     /*
    1715             :      * If the buffer has an entry in the buffer mapping table, delete it. This
    1716             :      * can fail because another backend could have pinned or dirtied the
    1717             :      * buffer.
    1718             :      */
    1719     3630294 :     if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
    1720             :     {
    1721         342 :         UnpinBuffer(buf_hdr);
    1722         342 :         goto again;
    1723             :     }
    1724             : 
    1725             :     /* a final set of sanity checks */
    1726             : #ifdef USE_ASSERT_CHECKING
    1727             :     buf_state = pg_atomic_read_u32(&buf_hdr->state);
    1728             : 
    1729             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
    1730             :     Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
    1731             : 
    1732             :     CheckBufferIsPinnedOnce(buf);
    1733             : #endif
    1734             : 
    1735     3629952 :     return buf;
    1736             : }
    1737             : 
    1738             : /*
    1739             :  * Limit the number of pins a batch operation may additionally acquire, to
    1740             :  * avoid running out of pinnable buffers.
    1741             :  *
    1742             :  * One additional pin is always allowed, as otherwise the operation likely
    1743             :  * cannot be performed at all.
    1744             :  *
    1745             :  * The number of allowed pins for a backend is computed based on
    1746             :  * shared_buffers and the maximum number of connections possible. That's very
    1747             :  * pessimistic, but outside of toy-sized shared_buffers it should allow
    1748             :  * sufficient pins.
    1749             :  */
    1750             : static void
    1751      651452 : LimitAdditionalPins(uint32 *additional_pins)
    1752             : {
    1753             :     uint32      max_backends;
    1754             :     int         max_proportional_pins;
    1755             : 
    1756      651452 :     if (*additional_pins <= 1)
    1757      614962 :         return;
    1758             : 
    1759       36490 :     max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
    1760       36490 :     max_proportional_pins = NBuffers / max_backends;
    1761             : 
    1762             :     /*
    1763             :      * Subtract the approximate number of buffers already pinned by this
    1764             :      * backend. We get the number of "overflowed" pins for free, but don't
    1765             :      * know the number of pins in PrivateRefCountArray. The cost of
    1766             :      * calculating that exactly doesn't seem worth it, so just assume the max.
    1767             :      */
    1768       36490 :     max_proportional_pins -= PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
    1769             : 
    1770       36490 :     if (max_proportional_pins < 0)
    1771        6432 :         max_proportional_pins = 1;
    1772             : 
    1773       36490 :     if (*additional_pins > max_proportional_pins)
    1774        6432 :         *additional_pins = max_proportional_pins;
    1775             : }
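                      : 
                      : /*
                      :  * Worked example with assumed toy numbers, for illustration only: with
                      :  * NBuffers = 16384 and MaxBackends + NUM_AUXILIARY_PROCS = 128, each
                      :  * backend gets 16384 / 128 = 128 proportional pins; subtracting the
                      :  * assumed-maximal REFCOUNT_ARRAY_ENTRIES (8, with no overflowed pins)
                      :  * leaves 120.  Under those assumptions,
                      :  *
                      :  *     uint32      n = 1000;
                      :  *
                      :  *     LimitAdditionalPins(&n);
                      :  *
                      :  * leaves n == 120, while a request of 1 is always returned unchanged.
                      :  */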
    1776             : 
    1777             : /*
     1778             :  * Logic shared between ExtendBufferedRelBy() and ExtendBufferedRelTo(), just
     1779             :  * to avoid duplicating the tracing and relpersistence-related logic.
    1780             :  */
    1781             : static BlockNumber
    1782      672508 : ExtendBufferedRelCommon(ExtendBufferedWhat eb,
    1783             :                         ForkNumber fork,
    1784             :                         BufferAccessStrategy strategy,
    1785             :                         uint32 flags,
    1786             :                         uint32 extend_by,
    1787             :                         BlockNumber extend_upto,
    1788             :                         Buffer *buffers,
    1789             :                         uint32 *extended_by)
    1790             : {
    1791             :     BlockNumber first_block;
    1792             : 
    1793             :     TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
    1794             :                                          eb.smgr->smgr_rlocator.locator.spcOid,
    1795             :                                          eb.smgr->smgr_rlocator.locator.dbOid,
    1796             :                                          eb.smgr->smgr_rlocator.locator.relNumber,
    1797             :                                          eb.smgr->smgr_rlocator.backend,
    1798             :                                          extend_by);
    1799             : 
    1800      672508 :     if (eb.relpersistence == RELPERSISTENCE_TEMP)
    1801       21056 :         first_block = ExtendBufferedRelLocal(eb, fork, flags,
    1802             :                                              extend_by, extend_upto,
    1803             :                                              buffers, &extend_by);
    1804             :     else
    1805      651452 :         first_block = ExtendBufferedRelShared(eb, fork, strategy, flags,
    1806             :                                               extend_by, extend_upto,
    1807             :                                               buffers, &extend_by);
    1808      672508 :     *extended_by = extend_by;
    1809             : 
    1810             :     TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
    1811             :                                         eb.smgr->smgr_rlocator.locator.spcOid,
    1812             :                                         eb.smgr->smgr_rlocator.locator.dbOid,
    1813             :                                         eb.smgr->smgr_rlocator.locator.relNumber,
    1814             :                                         eb.smgr->smgr_rlocator.backend,
    1815             :                                         *extended_by,
    1816             :                                         first_block);
    1817             : 
    1818      672508 :     return first_block;
    1819             : }
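                      : 
                      : /*
                      :  * Illustrative sketch, not code from this file: bulk extension as seen
                      :  * from a caller of ExtendBufferedRelBy(), e.g. when adding several pages
                      :  * at once.  The names and the batch size are assumed for the example:
                      :  *
                      :  *     Buffer      buffers[64];
                      :  *     uint32      extended_by = 0;
                      :  *     BlockNumber first_block;
                      :  *
                      :  *     first_block = ExtendBufferedRelBy(EB_REL(rel), MAIN_FORKNUM, NULL,
                      :  *                                       0, lengthof(buffers),
                      :  *                                       buffers, &extended_by);
                      :  *
                      :  * On return, buffers[0 .. extended_by - 1] are pinned and zero-filled;
                      :  * the caller initializes and releases them.  Note that extended_by may
                      :  * be less than requested, per LimitAdditionalPins().
                      :  */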
    1820             : 
    1821             : /*
    1822             :  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
    1823             :  * shared buffers.
    1824             :  */
    1825             : static BlockNumber
    1826      651452 : ExtendBufferedRelShared(ExtendBufferedWhat eb,
    1827             :                         ForkNumber fork,
    1828             :                         BufferAccessStrategy strategy,
    1829             :                         uint32 flags,
    1830             :                         uint32 extend_by,
    1831             :                         BlockNumber extend_upto,
    1832             :                         Buffer *buffers,
    1833             :                         uint32 *extended_by)
    1834             : {
    1835             :     BlockNumber first_block;
    1836      651452 :     IOContext   io_context = IOContextForStrategy(strategy);
    1837             :     instr_time  io_start;
    1838             : 
    1839      651452 :     LimitAdditionalPins(&extend_by);
    1840             : 
    1841             :     /*
    1842             :      * Acquire victim buffers for extension without holding extension lock.
    1843             :      * Writing out victim buffers is the most expensive part of extending the
    1844             :      * relation, particularly when doing so requires WAL flushes. Zeroing out
    1845             :      * the buffers is also quite expensive, so do that before holding the
    1846             :      * extension lock as well.
    1847             :      *
    1848             :      * These pages are pinned by us and not valid. While we hold the pin they
    1849             :      * can't be acquired as victim buffers by another backend.
    1850             :      */
    1851     1376158 :     for (uint32 i = 0; i < extend_by; i++)
    1852             :     {
    1853             :         Block       buf_block;
    1854             : 
    1855      724706 :         buffers[i] = GetVictimBuffer(strategy, io_context);
    1856      724706 :         buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
    1857             : 
    1858             :         /* new buffers are zero-filled */
    1859      724706 :         MemSet((char *) buf_block, 0, BLCKSZ);
    1860             :     }
    1861             : 
    1862             :     /* in case we need to pin an existing buffer below */
    1863      651452 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    1864             : 
    1865             :     /*
    1866             :      * Lock relation against concurrent extensions, unless requested not to.
    1867             :      *
    1868             :      * We use the same extension lock for all forks. That's unnecessarily
    1869             :      * restrictive, but currently extensions for forks don't happen often
    1870             :      * enough to make it worth locking more granularly.
    1871             :      *
    1872             :      * Note that another backend might have extended the relation by the time
    1873             :      * we get the lock.
    1874             :      */
    1875      651452 :     if (!(flags & EB_SKIP_EXTENSION_LOCK))
    1876             :     {
    1877      565472 :         LockRelationForExtension(eb.rel, ExclusiveLock);
    1878      565472 :         if (eb.rel)
    1879      565472 :             eb.smgr = RelationGetSmgr(eb.rel);
    1880             :     }
    1881             : 
    1882             :     /*
    1883             :      * If requested, invalidate size cache, so that smgrnblocks asks the
    1884             :      * kernel.
    1885             :      */
    1886      651452 :     if (flags & EB_CLEAR_SIZE_CACHE)
    1887       56526 :         eb.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
    1888             : 
    1889      651452 :     first_block = smgrnblocks(eb.smgr, fork);
    1890             : 
    1891             :     /*
    1892             :      * Now that we have the accurate relation size, check if the caller wants
     1893             :      * us to extend only up to a specific size. If there were concurrent
    1894             :      * extensions, we might have acquired too many buffers and need to release
    1895             :      * them.
    1896             :      */
    1897      651452 :     if (extend_upto != InvalidBlockNumber)
    1898             :     {
    1899      131754 :         uint32      orig_extend_by = extend_by;
    1900             : 
    1901      131754 :         if (first_block > extend_upto)
    1902           0 :             extend_by = 0;
    1903      131754 :         else if ((uint64) first_block + extend_by > extend_upto)
    1904          12 :             extend_by = extend_upto - first_block;
    1905             : 
    1906      131774 :         for (uint32 i = extend_by; i < orig_extend_by; i++)
    1907             :         {
    1908          20 :             BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
    1909             : 
    1910             :             /*
     1911             :              * The victim buffer we acquired previously is clean and
     1912             :              * unused; let it be found again quickly.
    1913             :              */
    1914          20 :             StrategyFreeBuffer(buf_hdr);
    1915          20 :             UnpinBuffer(buf_hdr);
    1916             :         }
    1917             : 
    1918      131754 :         if (extend_by == 0)
    1919             :         {
    1920          12 :             if (!(flags & EB_SKIP_EXTENSION_LOCK))
    1921          12 :                 UnlockRelationForExtension(eb.rel, ExclusiveLock);
    1922          12 :             *extended_by = extend_by;
    1923          12 :             return first_block;
    1924             :         }
    1925             :     }
    1926             : 
    1927             :     /* Fail if relation is already at maximum possible length */
    1928      651440 :     if ((uint64) first_block + extend_by >= MaxBlockNumber)
    1929           0 :         ereport(ERROR,
    1930             :                 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
    1931             :                  errmsg("cannot extend relation %s beyond %u blocks",
    1932             :                         relpath(eb.smgr->smgr_rlocator, fork),
    1933             :                         MaxBlockNumber)));
    1934             : 
    1935             :     /*
    1936             :      * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
    1937             :      *
    1938             :      * This needs to happen before we extend the relation, because as soon as
    1939             :      * we do, other backends can start to read in those pages.
    1940             :      */
    1941     1376126 :     for (int i = 0; i < extend_by; i++)
    1942             :     {
    1943      724686 :         Buffer      victim_buf = buffers[i];
    1944      724686 :         BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
    1945             :         BufferTag   tag;
    1946             :         uint32      hash;
    1947             :         LWLock     *partition_lock;
    1948             :         int         existing_id;
    1949             : 
    1950      724686 :         InitBufferTag(&tag, &eb.smgr->smgr_rlocator.locator, fork, first_block + i);
    1951      724686 :         hash = BufTableHashCode(&tag);
    1952      724686 :         partition_lock = BufMappingPartitionLock(hash);
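        /*
         * Illustrative note (not part of bufmgr.c): the buffer mapping table
         * is lock-partitioned, conceptually partition = hash %
         * NUM_BUFFER_PARTITIONS (128 in stock builds), so backends touching
         * unrelated pages rarely contend on the same mapping lock.
         */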
    1953             : 
    1954      724686 :         LWLockAcquire(partition_lock, LW_EXCLUSIVE);
    1955             : 
    1956      724686 :         existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
    1957             : 
    1958             :         /*
    1959             :          * We get here only in the corner case where we are trying to extend
    1960             :          * the relation but we found a pre-existing buffer. This can happen
    1961             :          * because a prior attempt at extending the relation failed, and
    1962             :          * because mdread doesn't complain about reads beyond EOF (when
    1963             :          * zero_damaged_pages is ON) and so a previous attempt to read a block
    1964             :          * beyond EOF could have left a "valid" zero-filled buffer.
    1965             :          * Unfortunately, we have also seen this case occurring because of
    1966             :          * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
    1967             :          * that doesn't account for a recent write. In that situation, the
    1968             :          * pre-existing buffer would contain valid data that we don't want to
    1969             :          * overwrite.  Since the legitimate cases should always have left a
    1970             :          * zero-filled buffer, complain if not PageIsNew.
    1971             :          */
    1972      724686 :         if (existing_id >= 0)
    1973             :         {
    1974           0 :             BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
    1975             :             Block       buf_block;
    1976             :             bool        valid;
    1977             : 
    1978             :             /*
    1979             :              * Pin the existing buffer before releasing the partition lock,
    1980             :              * preventing it from being evicted.
    1981             :              */
    1982           0 :             valid = PinBuffer(existing_hdr, strategy);
    1983             : 
    1984           0 :             LWLockRelease(partition_lock);
    1985             : 
    1986             :             /*
     1987             :              * The victim buffer we acquired previously is clean and unused;
     1988             :              * let it be found again quickly.
    1989             :              */
    1990           0 :             StrategyFreeBuffer(victim_buf_hdr);
    1991           0 :             UnpinBuffer(victim_buf_hdr);
    1992             : 
    1993           0 :             buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
    1994           0 :             buf_block = BufHdrGetBlock(existing_hdr);
    1995             : 
    1996           0 :             if (valid && !PageIsNew((Page) buf_block))
    1997           0 :                 ereport(ERROR,
    1998             :                         (errmsg("unexpected data beyond EOF in block %u of relation %s",
    1999             :                                 existing_hdr->tag.blockNum, relpath(eb.smgr->smgr_rlocator, fork)),
    2000             :                          errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
    2001             : 
    2002             :             /*
    2003             :              * We *must* do smgr[zero]extend before succeeding, else the page
    2004             :              * will not be reserved by the kernel, and the next P_NEW call
    2005             :              * will decide to return the same page.  Clear the BM_VALID bit,
    2006             :              * do StartBufferIO() and proceed.
    2007             :              *
    2008             :              * Loop to handle the very small possibility that someone re-sets
    2009             :              * BM_VALID between our clearing it and StartBufferIO inspecting
    2010             :              * it.
    2011             :              */
    2012             :             do
    2013             :             {
    2014           0 :                 uint32      buf_state = LockBufHdr(existing_hdr);
    2015             : 
    2016           0 :                 buf_state &= ~BM_VALID;
    2017           0 :                 UnlockBufHdr(existing_hdr, buf_state);
    2018           0 :             } while (!StartBufferIO(existing_hdr, true));
    2019             :         }
    2020             :         else
    2021             :         {
    2022             :             uint32      buf_state;
    2023             : 
    2024      724686 :             buf_state = LockBufHdr(victim_buf_hdr);
    2025             : 
    2026             :             /* some sanity checks while we hold the buffer header lock */
    2027             :             Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
    2028             :             Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
    2029             : 
    2030      724686 :             victim_buf_hdr->tag = tag;
    2031             : 
    2032      724686 :             buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
    2033      724686 :             if (eb.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
    2034      716580 :                 buf_state |= BM_PERMANENT;
    2035             : 
    2036      724686 :             UnlockBufHdr(victim_buf_hdr, buf_state);
    2037             : 
    2038      724686 :             LWLockRelease(partition_lock);
    2039             : 
     2040             :             /* XXX: could combine StartBufferIO's locked operations with the above */
    2041      724686 :             StartBufferIO(victim_buf_hdr, true);
    2042             :         }
    2043             :     }
    2044             : 
    2045      651440 :     io_start = pgstat_prepare_io_time();
    2046             : 
    2047             :     /*
    2048             :      * Note: if smgrzeroextend fails, we will end up with buffers that are
    2049             :      * allocated but not marked BM_VALID.  The next relation extension will
    2050             :      * still select the same block number (because the relation didn't get any
    2051             :      * longer on disk) and so future attempts to extend the relation will find
    2052             :      * the same buffers (if they have not been recycled) but come right back
    2053             :      * here to try smgrzeroextend again.
    2054             :      *
    2055             :      * We don't need to set checksum for all-zero pages.
    2056             :      */
    2057      651440 :     smgrzeroextend(eb.smgr, fork, first_block, extend_by, false);
    2058             : 
    2059             :     /*
    2060             :      * Release the file-extension lock; it's now OK for someone else to extend
    2061             :      * the relation some more.
    2062             :      *
    2063             :      * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
    2064             :      * take noticeable time.
    2065             :      */
    2066      651440 :     if (!(flags & EB_SKIP_EXTENSION_LOCK))
    2067      565460 :         UnlockRelationForExtension(eb.rel, ExclusiveLock);
    2068             : 
    2069      651440 :     pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
    2070             :                             io_start, extend_by);
    2071             : 
    2072             :     /* Set BM_VALID, terminate IO, and wake up any waiters */
    2073     1376126 :     for (int i = 0; i < extend_by; i++)
    2074             :     {
    2075      724686 :         Buffer      buf = buffers[i];
    2076      724686 :         BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
    2077      724686 :         bool        lock = false;
    2078             : 
    2079      724686 :         if (flags & EB_LOCK_FIRST && i == 0)
    2080      519324 :             lock = true;
    2081      205362 :         else if (flags & EB_LOCK_TARGET)
    2082             :         {
    2083             :             Assert(extend_upto != InvalidBlockNumber);
    2084       73352 :             if (first_block + i + 1 == extend_upto)
    2085       71608 :                 lock = true;
    2086             :         }
    2087             : 
    2088      724686 :         if (lock)
    2089      590932 :             LWLockAcquire(BufferDescriptorGetContentLock(buf_hdr), LW_EXCLUSIVE);
    2090             : 
    2091      724686 :         TerminateBufferIO(buf_hdr, false, BM_VALID);
    2092             :     }
    2093             : 
    2094      651440 :     pgBufferUsage.shared_blks_written += extend_by;
    2095             : 
    2096      651440 :     *extended_by = extend_by;
    2097             : 
    2098      651440 :     return first_block;
    2099             : }
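
A minimal standalone sketch (not PostgreSQL code) of the clamping rule applied above when extend_upto is given: after smgrnblocks() reports the true relation size, the number of blocks to add is reduced so the relation never grows past extend_upto. The helper name and plain C99 integer types are hypothetical stand-ins for BlockNumber arithmetic.

#include <stdint.h>

/* Returns how many blocks may still be added without passing 'upto'. */
static uint32_t
clamp_extend_by(uint32_t first_block, uint32_t extend_by, uint32_t upto)
{
    if (first_block > upto)
        return 0;                   /* concurrent extensions got there first */
    if ((uint64_t) first_block + extend_by > upto)
        return upto - first_block;  /* shrink to the remaining gap */
    return extend_by;               /* no clamping needed */
}

Any victim buffers acquired beyond the clamped count are handed back via StrategyFreeBuffer()/UnpinBuffer(), as in the loop above.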
    2100             : 
    2101             : /*
    2102             :  * MarkBufferDirty
    2103             :  *
    2104             :  *      Marks buffer contents as dirty (actual write happens later).
    2105             :  *
     2106             :  * Buffer must be pinned and exclusive-locked.  (If the caller does not hold
     2107             :  * the exclusive lock, somebody else could be in the process of writing the
     2108             :  * buffer, risking bad data being written to disk.)
    2109             :  */
    2110             : void
    2111    56431476 : MarkBufferDirty(Buffer buffer)
    2112             : {
    2113             :     BufferDesc *bufHdr;
    2114             :     uint32      buf_state;
    2115             :     uint32      old_buf_state;
    2116             : 
    2117    56431476 :     if (!BufferIsValid(buffer))
    2118           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    2119             : 
    2120    56431476 :     if (BufferIsLocal(buffer))
    2121             :     {
    2122     2073626 :         MarkLocalBufferDirty(buffer);
    2123     2073626 :         return;
    2124             :     }
    2125             : 
    2126    54357850 :     bufHdr = GetBufferDescriptor(buffer - 1);
    2127             : 
    2128             :     Assert(BufferIsPinned(buffer));
    2129             :     Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
    2130             :                                 LW_EXCLUSIVE));
    2131             : 
    2132    54357850 :     old_buf_state = pg_atomic_read_u32(&bufHdr->state);
    2133             :     for (;;)
    2134             :     {
    2135    54358114 :         if (old_buf_state & BM_LOCKED)
    2136          74 :             old_buf_state = WaitBufHdrUnlocked(bufHdr);
    2137             : 
    2138    54358114 :         buf_state = old_buf_state;
    2139             : 
    2140             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    2141    54358114 :         buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
    2142             : 
    2143    54358114 :         if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
    2144             :                                            buf_state))
    2145    54357850 :             break;
    2146             :     }
    2147             : 
    2148             :     /*
    2149             :      * If the buffer was not dirty already, do vacuum accounting.
    2150             :      */
    2151    54357850 :     if (!(old_buf_state & BM_DIRTY))
    2152             :     {
    2153     1435786 :         VacuumPageDirty++;
    2154     1435786 :         pgBufferUsage.shared_blks_dirtied++;
    2155     1435786 :         if (VacuumCostActive)
    2156        3912 :             VacuumCostBalance += VacuumCostPageDirty;
    2157             :     }
    2158             : }
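
The dirty-marking above follows a common lock-free pattern: read the state word once, then retry a compare-and-swap until no concurrent update intervenes. A minimal sketch of the same shape, with C11 atomics standing in for the pg_atomic_* wrappers and illustrative flag values rather than PostgreSQL's:

#include <stdatomic.h>
#include <stdint.h>

#define MY_DIRTY        (1u << 0)   /* illustrative, not PG's bit layout */
#define MY_JUST_DIRTIED (1u << 1)

static void
set_flags_cas(_Atomic uint32_t *state)
{
    uint32_t old = atomic_load(state);

    for (;;)
    {
        uint32_t desired = old | MY_DIRTY | MY_JUST_DIRTIED;

        /* On failure, 'old' is refreshed with the current value; retry. */
        if (atomic_compare_exchange_weak(state, &old, desired))
            break;
    }
}

The real code adds one wrinkle: if the word is spinlocked (BM_LOCKED), it first waits for the holder, since the holder may store the word with a plain write.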
    2159             : 
    2160             : /*
    2161             :  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
    2162             :  *
    2163             :  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
    2164             :  * compared to calling the two routines separately.  Now it's mainly just
    2165             :  * a convenience function.  However, if the passed buffer is valid and
    2166             :  * already contains the desired block, we just return it as-is; and that
    2167             :  * does save considerable work compared to a full release and reacquire.
    2168             :  *
    2169             :  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
    2170             :  * buffer actually needs to be released.  This case is the same as ReadBuffer,
    2171             :  * but can save some tests in the caller.
    2172             :  */
    2173             : Buffer
    2174    64335880 : ReleaseAndReadBuffer(Buffer buffer,
    2175             :                      Relation relation,
    2176             :                      BlockNumber blockNum)
    2177             : {
    2178    64335880 :     ForkNumber  forkNum = MAIN_FORKNUM;
    2179             :     BufferDesc *bufHdr;
    2180             : 
    2181    64335880 :     if (BufferIsValid(buffer))
    2182             :     {
    2183             :         Assert(BufferIsPinned(buffer));
    2184    42831968 :         if (BufferIsLocal(buffer))
    2185             :         {
    2186       11082 :             bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    2187       14970 :             if (bufHdr->tag.blockNum == blockNum &&
    2188        7776 :                 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
    2189        3888 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum)
    2190        3888 :                 return buffer;
    2191        7194 :             UnpinLocalBuffer(buffer);
    2192             :         }
    2193             :         else
    2194             :         {
    2195    42820886 :             bufHdr = GetBufferDescriptor(buffer - 1);
    2196             :             /* we have pin, so it's ok to examine tag without spinlock */
    2197    58512600 :             if (bufHdr->tag.blockNum == blockNum &&
    2198    31383428 :                 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
    2199    15691714 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum)
    2200    15691714 :                 return buffer;
    2201    27129172 :             UnpinBuffer(bufHdr);
    2202             :         }
    2203             :     }
    2204             : 
    2205    48640278 :     return ReadBuffer(relation, blockNum);
    2206             : }
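
A hypothetical usage sketch: a caller can keep handing its previous buffer back in, and the fast path fires whenever the requested block is the one already pinned (as often happens when an access method re-requests the same page). 'rel' and 'nblocks' are assumed to be in scope; the calls are the real bufmgr API.

Buffer      buf = InvalidBuffer;

for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
{
    buf = ReleaseAndReadBuffer(buf, rel, blkno);
    /* ... inspect the page under a suitable content lock ... */
}
if (BufferIsValid(buf))
    ReleaseBuffer(buf);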
    2207             : 
    2208             : /*
    2209             :  * PinBuffer -- make buffer unavailable for replacement.
    2210             :  *
    2211             :  * For the default access strategy, the buffer's usage_count is incremented
    2212             :  * when we first pin it; for other strategies we just make sure the usage_count
    2213             :  * isn't zero.  (The idea of the latter is that we don't want synchronized
    2214             :  * heap scans to inflate the count, but we need it to not be zero to discourage
    2215             :  * other backends from stealing buffers from our ring.  As long as we cycle
    2216             :  * through the ring faster than the global clock-sweep cycles, buffers in
    2217             :  * our ring won't be chosen as victims for replacement by other backends.)
    2218             :  *
    2219             :  * This should be applied only to shared buffers, never local ones.
    2220             :  *
    2221             :  * Since buffers are pinned/unpinned very frequently, pin buffers without
     2222             :  * taking the buffer header lock; instead update the state variable in a loop
     2223             :  * of CAS operations. Hopefully it usually takes just a single CAS.
    2224             :  *
    2225             :  * Note that ResourceOwnerEnlargeBuffers must have been done already.
    2226             :  *
    2227             :  * Returns true if buffer is BM_VALID, else false.  This provision allows
    2228             :  * some callers to avoid an extra spinlock cycle.
    2229             :  */
    2230             : static bool
    2231   129949892 : PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
    2232             : {
    2233   129949892 :     Buffer      b = BufferDescriptorGetBuffer(buf);
    2234             :     bool        result;
    2235             :     PrivateRefCountEntry *ref;
    2236             : 
    2237             :     Assert(!BufferIsLocal(b));
    2238             : 
    2239   129949892 :     ref = GetPrivateRefCountEntry(b, true);
    2240             : 
    2241   129949892 :     if (ref == NULL)
    2242             :     {
    2243             :         uint32      buf_state;
    2244             :         uint32      old_buf_state;
    2245             : 
    2246   125822582 :         ReservePrivateRefCountEntry();
    2247   125822582 :         ref = NewPrivateRefCountEntry(b);
    2248             : 
    2249   125822582 :         old_buf_state = pg_atomic_read_u32(&buf->state);
    2250             :         for (;;)
    2251             :         {
    2252   125848452 :             if (old_buf_state & BM_LOCKED)
    2253         306 :                 old_buf_state = WaitBufHdrUnlocked(buf);
    2254             : 
    2255   125848452 :             buf_state = old_buf_state;
    2256             : 
    2257             :             /* increase refcount */
    2258   125848452 :             buf_state += BUF_REFCOUNT_ONE;
    2259             : 
    2260   125848452 :             if (strategy == NULL)
    2261             :             {
    2262             :                 /* Default case: increase usagecount unless already max. */
    2263   124785028 :                 if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
    2264     6422160 :                     buf_state += BUF_USAGECOUNT_ONE;
    2265             :             }
    2266             :             else
    2267             :             {
    2268             :                 /*
     2269             :                  * Ring buffers shouldn't evict others from the pool.  Thus
     2270             :                  * we don't let the usagecount exceed 1.
    2271             :                  */
    2272     1063424 :                 if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
    2273       56958 :                     buf_state += BUF_USAGECOUNT_ONE;
    2274             :             }
    2275             : 
    2276   125848452 :             if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
    2277             :                                                buf_state))
    2278             :             {
    2279   125822582 :                 result = (buf_state & BM_VALID) != 0;
    2280             : 
    2281             :                 /*
    2282             :                  * Assume that we acquired a buffer pin for the purposes of
    2283             :                  * Valgrind buffer client checks (even in !result case) to
    2284             :                  * keep things simple.  Buffers that are unsafe to access are
    2285             :                  * not generally guaranteed to be marked undefined or
    2286             :                  * non-accessible in any case.
    2287             :                  */
    2288             :                 VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
    2289   125822582 :                 break;
    2290             :             }
    2291             :         }
    2292             :     }
    2293             :     else
    2294             :     {
    2295             :         /*
    2296             :          * If we previously pinned the buffer, it must surely be valid.
    2297             :          *
    2298             :          * Note: We deliberately avoid a Valgrind client request here.
    2299             :          * Individual access methods can optionally superimpose buffer page
    2300             :          * client requests on top of our client requests to enforce that
    2301             :          * buffers are only accessed while locked (and pinned).  It's possible
    2302             :          * that the buffer page is legitimately non-accessible here.  We
    2303             :          * cannot meddle with that.
    2304             :          */
    2305     4127310 :         result = true;
    2306             :     }
    2307             : 
    2308   129949892 :     ref->refcount++;
    2309             :     Assert(ref->refcount > 0);
    2310   129949892 :     ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
    2311   129949892 :     return result;
    2312             : }
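
A standalone restatement of the two usage-count policies above (the cap is an illustrative stand-in for BM_MAX_USAGE_COUNT, a compile-time constant):

#include <stdbool.h>
#include <stdint.h>

#define MAX_USAGE_COUNT 5           /* stand-in for BM_MAX_USAGE_COUNT */

/* Returns the usage count after one pin, given the pre-pin count. */
static uint32_t
next_usage_count(uint32_t current, bool have_strategy)
{
    if (!have_strategy)                 /* default: bump toward the cap */
        return current < MAX_USAGE_COUNT ? current + 1 : current;

    /* strategy (ring) pins only ensure the count is nonzero */
    return current == 0 ? 1 : current;
}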
    2313             : 
    2314             : /*
    2315             :  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
    2316             :  * The spinlock is released before return.
    2317             :  *
    2318             :  * As this function is called with the spinlock held, the caller has to
    2319             :  * previously call ReservePrivateRefCountEntry().
    2320             :  *
    2321             :  * Currently, no callers of this function want to modify the buffer's
    2322             :  * usage_count at all, so there's no need for a strategy parameter.
    2323             :  * Also we don't bother with a BM_VALID test (the caller could check that for
    2324             :  * itself).
    2325             :  *
    2326             :  * Also all callers only ever use this function when it's known that the
    2327             :  * buffer can't have a preexisting pin by this backend. That allows us to skip
    2328             :  * searching the private refcount array & hash, which is a boon, because the
    2329             :  * spinlock is still held.
    2330             :  *
    2331             :  * Note: use of this routine is frequently mandatory, not just an optimization
    2332             :  * to save a spin lock/unlock cycle, because we need to pin a buffer before
    2333             :  * its state can change under us.
    2334             :  */
    2335             : static void
    2336     5426618 : PinBuffer_Locked(BufferDesc *buf)
    2337             : {
    2338             :     Buffer      b;
    2339             :     PrivateRefCountEntry *ref;
    2340             :     uint32      buf_state;
    2341             : 
    2342             :     /*
     2343             :      * As explained, we don't expect any preexisting pins. That allows us to
     2344             :      * manipulate the PrivateRefCount entry after releasing the spinlock.
    2345             :      */
    2346             :     Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
    2347             : 
    2348             :     /*
    2349             :      * Buffer can't have a preexisting pin, so mark its page as defined to
    2350             :      * Valgrind (this is similar to the PinBuffer() case where the backend
    2351             :      * doesn't already have a buffer pin)
    2352             :      */
    2353             :     VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
    2354             : 
    2355             :     /*
    2356             :      * Since we hold the buffer spinlock, we can update the buffer state and
    2357             :      * release the lock in one operation.
    2358             :      */
    2359     5426618 :     buf_state = pg_atomic_read_u32(&buf->state);
    2360             :     Assert(buf_state & BM_LOCKED);
    2361     5426618 :     buf_state += BUF_REFCOUNT_ONE;
    2362     5426618 :     UnlockBufHdr(buf, buf_state);
    2363             : 
    2364     5426618 :     b = BufferDescriptorGetBuffer(buf);
    2365             : 
    2366     5426618 :     ref = NewPrivateRefCountEntry(b);
    2367     5426618 :     ref->refcount++;
    2368             : 
    2369     5426618 :     ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
    2370     5426618 : }
    2371             : 
    2372             : /*
    2373             :  * UnpinBuffer -- make buffer available for replacement.
    2374             :  *
    2375             :  * This should be applied only to shared buffers, never local ones.  This
    2376             :  * always adjusts CurrentResourceOwner.
    2377             :  */
    2378             : static void
    2379   156806646 : UnpinBuffer(BufferDesc *buf)
    2380             : {
    2381             :     PrivateRefCountEntry *ref;
    2382   156806646 :     Buffer      b = BufferDescriptorGetBuffer(buf);
    2383             : 
    2384             :     Assert(!BufferIsLocal(b));
    2385             : 
    2386             :     /* not moving as we're likely deleting it soon anyway */
    2387   156806646 :     ref = GetPrivateRefCountEntry(b, false);
    2388             :     Assert(ref != NULL);
    2389             : 
    2390   156806646 :     ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
    2391             : 
    2392             :     Assert(ref->refcount > 0);
    2393   156806646 :     ref->refcount--;
    2394   156806646 :     if (ref->refcount == 0)
    2395             :     {
    2396             :         uint32      buf_state;
    2397             :         uint32      old_buf_state;
    2398             : 
    2399             :         /*
    2400             :          * Mark buffer non-accessible to Valgrind.
    2401             :          *
    2402             :          * Note that the buffer may have already been marked non-accessible
    2403             :          * within access method code that enforces that buffers are only
    2404             :          * accessed while a buffer lock is held.
    2405             :          */
    2406             :         VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
    2407             : 
    2408             :         /* I'd better not still hold the buffer content lock */
    2409             :         Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
    2410             : 
    2411             :         /*
    2412             :          * Decrement the shared reference count.
    2413             :          *
     2414             :          * Since the buffer spinlock holder can update the status with just
     2415             :          * a plain write, an atomic decrement isn't safe here; use a CAS loop.
    2416             :          */
    2417   131249200 :         old_buf_state = pg_atomic_read_u32(&buf->state);
    2418             :         for (;;)
    2419             :         {
    2420   131273636 :             if (old_buf_state & BM_LOCKED)
    2421         178 :                 old_buf_state = WaitBufHdrUnlocked(buf);
    2422             : 
    2423   131273636 :             buf_state = old_buf_state;
    2424             : 
    2425   131273636 :             buf_state -= BUF_REFCOUNT_ONE;
    2426             : 
    2427   131273636 :             if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
    2428             :                                                buf_state))
    2429   131249200 :                 break;
    2430             :         }
    2431             : 
    2432             :         /* Support LockBufferForCleanup() */
    2433   131249200 :         if (buf_state & BM_PIN_COUNT_WAITER)
    2434             :         {
    2435             :             /*
    2436             :              * Acquire the buffer header lock, re-check that there's a waiter.
    2437             :              * Another backend could have unpinned this buffer, and already
    2438             :              * woken up the waiter.  There's no danger of the buffer being
    2439             :              * replaced after we unpinned it above, as it's pinned by the
    2440             :              * waiter.
    2441             :              */
    2442           4 :             buf_state = LockBufHdr(buf);
    2443             : 
    2444           4 :             if ((buf_state & BM_PIN_COUNT_WAITER) &&
    2445           4 :                 BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    2446           4 :             {
    2447             :                 /* we just released the last pin other than the waiter's */
    2448           4 :                 int         wait_backend_pgprocno = buf->wait_backend_pgprocno;
    2449             : 
    2450           4 :                 buf_state &= ~BM_PIN_COUNT_WAITER;
    2451           4 :                 UnlockBufHdr(buf, buf_state);
    2452           4 :                 ProcSendSignal(wait_backend_pgprocno);
    2453             :             }
    2454             :             else
    2455           0 :                 UnlockBufHdr(buf, buf_state);
    2456             :         }
    2457   131249200 :         ForgetPrivateRefCountEntry(ref);
    2458             :     }
    2459   156806646 : }
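
The BM_PIN_COUNT_WAITER handling above is a double-checked wakeup: the bit is first seen in the lock-free state word, then re-verified under the buffer header lock before signalling, because a concurrent unpinner may already have woken the waiter (the real code additionally re-checks that ours was the last remaining pin). A generic sketch of that shape, with illustrative names and a pthread mutex standing in for the header spinlock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#define WAITER_BIT (1u << 30)       /* illustrative, not PG's bit layout */

struct slot
{
    _Atomic uint32_t state;
    pthread_mutex_t lock;           /* stands in for the header spinlock */
    int         waiter_id;
};

static void
maybe_wake_waiter(struct slot *s)
{
    /* cheap lock-free check first; most unpins skip the lock entirely */
    if (!(atomic_load(&s->state) & WAITER_BIT))
        return;

    pthread_mutex_lock(&s->lock);
    /* re-check under the lock: someone else may have woken the waiter */
    if (atomic_load(&s->state) & WAITER_BIT)
    {
        atomic_fetch_and(&s->state, ~WAITER_BIT);
        /* ... signal s->waiter_id here, cf. ProcSendSignal() ... */
    }
    pthread_mutex_unlock(&s->lock);
}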
    2460             : 
    2461             : #define ST_SORT sort_checkpoint_bufferids
    2462             : #define ST_ELEMENT_TYPE CkptSortItem
    2463             : #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
    2464             : #define ST_SCOPE static
    2465             : #define ST_DEFINE
    2466             : #include <lib/sort_template.h>
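
The five ST_* macros above instantiate a type-specialized sort at compile time: including lib/sort_template.h expands to a static void sort_checkpoint_bufferids(CkptSortItem *, size_t) tailored to ckpt_buforder_comparator, and the header #undefs its parameters when done so it can be included again. A sketch of a second, hypothetical instantiation of the same pattern:

typedef struct { int key; } MyItem;     /* hypothetical element type */

static inline int
my_item_cmp(const MyItem *a, const MyItem *b)
{
    return (a->key < b->key) ? -1 : (a->key > b->key) ? 1 : 0;
}

#define ST_SORT sort_my_items
#define ST_ELEMENT_TYPE MyItem
#define ST_COMPARE(a, b) my_item_cmp(a, b)
#define ST_SCOPE static
#define ST_DEFINE
#include <lib/sort_template.h>

/* ... later: sort_my_items(items, nitems); */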
    2467             : 
    2468             : /*
    2469             :  * BufferSync -- Write out all dirty buffers in the pool.
    2470             :  *
    2471             :  * This is called at checkpoint time to write out all dirty shared buffers.
    2472             :  * The checkpoint request flags should be passed in.  If CHECKPOINT_IMMEDIATE
    2473             :  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
    2474             :  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
    2475             :  * unlogged buffers, which are otherwise skipped.  The remaining flags
    2476             :  * currently have no effect here.
    2477             :  */
    2478             : static void
    2479        4690 : BufferSync(int flags)
    2480             : {
    2481             :     uint32      buf_state;
    2482             :     int         buf_id;
    2483             :     int         num_to_scan;
    2484             :     int         num_spaces;
    2485             :     int         num_processed;
    2486             :     int         num_written;
    2487        4690 :     CkptTsStatus *per_ts_stat = NULL;
    2488             :     Oid         last_tsid;
    2489             :     binaryheap *ts_heap;
    2490             :     int         i;
    2491        4690 :     int         mask = BM_DIRTY;
    2492             :     WritebackContext wb_context;
    2493             : 
    2494             :     /* Make sure we can handle the pin inside SyncOneBuffer */
    2495        4690 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    2496             : 
    2497             :     /*
     2498             :      * Unless this is a shutdown checkpoint or we have been explicitly told
     2499             :      * to flush everything, we write only permanent, dirty buffers.  But at
     2500             :      * shutdown or end of recovery, we write all dirty buffers.
    2501             :      */
    2502        4690 :     if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
    2503             :                     CHECKPOINT_FLUSH_ALL))))
    2504        1544 :         mask |= BM_PERMANENT;
    2505             : 
    2506             :     /*
    2507             :      * Loop over all buffers, and mark the ones that need to be written with
    2508             :      * BM_CHECKPOINT_NEEDED.  Count them as we go (num_to_scan), so that we
    2509             :      * can estimate how much work needs to be done.
    2510             :      *
    2511             :      * This allows us to write only those pages that were dirty when the
    2512             :      * checkpoint began, and not those that get dirtied while it proceeds.
    2513             :      * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
    2514             :      * later in this function, or by normal backends or the bgwriter cleaning
    2515             :      * scan, the flag is cleared.  Any buffer dirtied after this point won't
    2516             :      * have the flag set.
    2517             :      *
    2518             :      * Note that if we fail to write some buffer, we may leave buffers with
    2519             :      * BM_CHECKPOINT_NEEDED still set.  This is OK since any such buffer would
    2520             :      * certainly need to be written for the next checkpoint attempt, too.
    2521             :      */
    2522        4690 :     num_to_scan = 0;
    2523    67644146 :     for (buf_id = 0; buf_id < NBuffers; buf_id++)
    2524             :     {
    2525    67639456 :         BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
    2526             : 
    2527             :         /*
    2528             :          * Header spinlock is enough to examine BM_DIRTY, see comment in
    2529             :          * SyncOneBuffer.
    2530             :          */
    2531    67639456 :         buf_state = LockBufHdr(bufHdr);
    2532             : 
    2533    67639456 :         if ((buf_state & mask) == mask)
    2534             :         {
    2535             :             CkptSortItem *item;
    2536             : 
    2537      935658 :             buf_state |= BM_CHECKPOINT_NEEDED;
    2538             : 
    2539      935658 :             item = &CkptBufferIds[num_to_scan++];
    2540      935658 :             item->buf_id = buf_id;
    2541      935658 :             item->tsId = bufHdr->tag.spcOid;
    2542      935658 :             item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
    2543      935658 :             item->forkNum = BufTagGetForkNum(&bufHdr->tag);
    2544      935658 :             item->blockNum = bufHdr->tag.blockNum;
    2545             :         }
    2546             : 
    2547    67639456 :         UnlockBufHdr(bufHdr, buf_state);
    2548             : 
    2549             :         /* Check for barrier events in case NBuffers is large. */
    2550    67639456 :         if (ProcSignalBarrierPending)
    2551           0 :             ProcessProcSignalBarrier();
    2552             :     }
    2553             : 
    2554        4690 :     if (num_to_scan == 0)
    2555        1516 :         return;                 /* nothing to do */
    2556             : 
    2557        3174 :     WritebackContextInit(&wb_context, &checkpoint_flush_after);
    2558             : 
    2559             :     TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
    2560             : 
    2561             :     /*
    2562             :      * Sort buffers that need to be written to reduce the likelihood of random
    2563             :      * IO. The sorting is also important for the implementation of balancing
    2564             :      * writes between tablespaces. Without balancing writes we'd potentially
     2565             :      * end up writing to the tablespaces one-by-one, possibly overloading the
    2566             :      * underlying system.
    2567             :      */
    2568        3174 :     sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
    2569             : 
    2570        3174 :     num_spaces = 0;
    2571             : 
    2572             :     /*
    2573             :      * Allocate progress status for each tablespace with buffers that need to
    2574             :      * be flushed. This requires the to-be-flushed array to be sorted.
    2575             :      */
    2576        3174 :     last_tsid = InvalidOid;
    2577      938832 :     for (i = 0; i < num_to_scan; i++)
    2578             :     {
    2579             :         CkptTsStatus *s;
    2580             :         Oid         cur_tsid;
    2581             : 
    2582      935658 :         cur_tsid = CkptBufferIds[i].tsId;
    2583             : 
    2584             :         /*
    2585             :          * Grow array of per-tablespace status structs, every time a new
    2586             :          * tablespace is found.
    2587             :          */
    2588      935658 :         if (last_tsid == InvalidOid || last_tsid != cur_tsid)
    2589        5322 :         {
    2590             :             Size        sz;
    2591             : 
    2592        5322 :             num_spaces++;
    2593             : 
    2594             :             /*
    2595             :              * Not worth adding grow-by-power-of-2 logic here - even with a
    2596             :              * few hundred tablespaces this should be fine.
    2597             :              */
    2598        5322 :             sz = sizeof(CkptTsStatus) * num_spaces;
    2599             : 
    2600        5322 :             if (per_ts_stat == NULL)
    2601        3174 :                 per_ts_stat = (CkptTsStatus *) palloc(sz);
    2602             :             else
    2603        2148 :                 per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
    2604             : 
    2605        5322 :             s = &per_ts_stat[num_spaces - 1];
    2606        5322 :             memset(s, 0, sizeof(*s));
    2607        5322 :             s->tsId = cur_tsid;
    2608             : 
    2609             :             /*
    2610             :              * The first buffer in this tablespace. As CkptBufferIds is sorted
     2611             :              * by tablespace, all (s->num_to_scan) buffers in this tablespace
    2612             :              * will follow afterwards.
    2613             :              */
    2614        5322 :             s->index = i;
    2615             : 
    2616             :             /*
    2617             :              * progress_slice will be determined once we know how many buffers
    2618             :              * are in each tablespace, i.e. after this loop.
    2619             :              */
    2620             : 
    2621        5322 :             last_tsid = cur_tsid;
    2622             :         }
    2623             :         else
    2624             :         {
    2625      930336 :             s = &per_ts_stat[num_spaces - 1];
    2626             :         }
    2627             : 
    2628      935658 :         s->num_to_scan++;
    2629             : 
    2630             :         /* Check for barrier events. */
    2631      935658 :         if (ProcSignalBarrierPending)
    2632           0 :             ProcessProcSignalBarrier();
    2633             :     }
    2634             : 
    2635             :     Assert(num_spaces > 0);
    2636             : 
    2637             :     /*
    2638             :      * Build a min-heap over the write-progress in the individual tablespaces,
    2639             :      * and compute how large a portion of the total progress a single
    2640             :      * processed buffer is.
    2641             :      */
    2642        3174 :     ts_heap = binaryheap_allocate(num_spaces,
    2643             :                                   ts_ckpt_progress_comparator,
    2644             :                                   NULL);
    2645             : 
    2646        8496 :     for (i = 0; i < num_spaces; i++)
    2647             :     {
    2648        5322 :         CkptTsStatus *ts_stat = &per_ts_stat[i];
    2649             : 
    2650        5322 :         ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
    2651             : 
    2652        5322 :         binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
    2653             :     }
    2654             : 
    2655        3174 :     binaryheap_build(ts_heap);
    2656             : 
    2657             :     /*
    2658             :      * Iterate through to-be-checkpointed buffers and write the ones (still)
    2659             :      * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
    2660             :      * tablespaces; otherwise the sorting would lead to only one tablespace
    2661             :      * receiving writes at a time, making inefficient use of the hardware.
    2662             :      */
    2663        3174 :     num_processed = 0;
    2664        3174 :     num_written = 0;
    2665      938832 :     while (!binaryheap_empty(ts_heap))
    2666             :     {
    2667      935658 :         BufferDesc *bufHdr = NULL;
    2668             :         CkptTsStatus *ts_stat = (CkptTsStatus *)
    2669      935658 :             DatumGetPointer(binaryheap_first(ts_heap));
    2670             : 
    2671      935658 :         buf_id = CkptBufferIds[ts_stat->index].buf_id;
    2672             :         Assert(buf_id != -1);
    2673             : 
    2674      935658 :         bufHdr = GetBufferDescriptor(buf_id);
    2675             : 
    2676      935658 :         num_processed++;
    2677             : 
    2678             :         /*
    2679             :          * We don't need to acquire the lock here, because we're only looking
    2680             :          * at a single bit. It's possible that someone else writes the buffer
    2681             :          * and clears the flag right after we check, but that doesn't matter
    2682             :          * since SyncOneBuffer will then do nothing.  However, there is a
    2683             :          * further race condition: it's conceivable that between the time we
    2684             :          * examine the bit here and the time SyncOneBuffer acquires the lock,
    2685             :          * someone else not only wrote the buffer but replaced it with another
    2686             :          * page and dirtied it.  In that improbable case, SyncOneBuffer will
    2687             :          * write the buffer though we didn't need to.  It doesn't seem worth
    2688             :          * guarding against this, though.
    2689             :          */
    2690      935658 :         if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
    2691             :         {
    2692      928752 :             if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
    2693             :             {
    2694             :                 TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
    2695      928752 :                 PendingCheckpointerStats.buf_written_checkpoints++;
    2696      928752 :                 num_written++;
    2697             :             }
    2698             :         }
    2699             : 
    2700             :         /*
     2701             :          * Measure progress independently of whether we actually had to flush
     2702             :          * the buffer; otherwise the writes become unbalanced.
    2703             :          */
    2704      935658 :         ts_stat->progress += ts_stat->progress_slice;
    2705      935658 :         ts_stat->num_scanned++;
    2706      935658 :         ts_stat->index++;
    2707             : 
    2708             :         /* Have all the buffers from the tablespace been processed? */
    2709      935658 :         if (ts_stat->num_scanned == ts_stat->num_to_scan)
    2710             :         {
    2711        5322 :             binaryheap_remove_first(ts_heap);
    2712             :         }
    2713             :         else
    2714             :         {
    2715             :             /* update heap with the new progress */
    2716      930336 :             binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
    2717             :         }
    2718             : 
    2719             :         /*
    2720             :          * Sleep to throttle our I/O rate.
    2721             :          *
    2722             :          * (This will check for barrier events even if it doesn't sleep.)
    2723             :          */
    2724      935658 :         CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
    2725             :     }
    2726             : 
    2727             :     /*
    2728             :      * Issue all pending flushes. Only checkpointer calls BufferSync(), so
    2729             :      * IOContext will always be IOCONTEXT_NORMAL.
    2730             :      */
    2731        3174 :     IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
    2732             : 
    2733        3174 :     pfree(per_ts_stat);
    2734        3174 :     per_ts_stat = NULL;
    2735        3174 :     binaryheap_free(ts_heap);
    2736             : 
    2737             :     /*
    2738             :      * Update checkpoint statistics. As noted above, this doesn't include
    2739             :      * buffers written by other backends or bgwriter scan.
    2740             :      */
    2741        3174 :     CheckpointStats.ckpt_bufs_written += num_written;
    2742             : 
    2743             :     TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
    2744             : }
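
The balancing above hinges on progress_slice = num_to_scan / per-tablespace num_to_scan: each write advances a small tablespace's progress faster, so repeatedly servicing the minimum-progress tablespace interleaves writes in proportion to size and finishes all tablespaces together. A standalone simulation with made-up counts (two tablespaces, 900 buffers total):

#include <stdio.h>

int
main(void)
{
    double      num_to_scan = 900.0;
    double      per_ts[2] = {600.0, 300.0};     /* buffers per tablespace */
    double      slice[2], progress[2] = {0.0, 0.0};
    int         written[2] = {0, 0};

    for (int i = 0; i < 2; i++)
        slice[i] = num_to_scan / per_ts[i];     /* 1.5 and 3.0 */

    /* stand-in for the min-heap: always pick the least-progress space */
    for (int n = 0; n < 900; n++)
    {
        int         t = (progress[0] <= progress[1]) ? 0 : 1;

        progress[t] += slice[t];
        written[t]++;
    }
    printf("%d %d\n", written[0], written[1]);  /* prints "600 300" */
    return 0;
}

Writes interleave roughly 2:1 and both tablespaces complete in the same pass, instead of one tablespace monopolizing the I/O until it is done.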
    2745             : 
    2746             : /*
    2747             :  * BgBufferSync -- Write out some dirty buffers in the pool.
    2748             :  *
    2749             :  * This is called periodically by the background writer process.
    2750             :  *
    2751             :  * Returns true if it's appropriate for the bgwriter process to go into
    2752             :  * low-power hibernation mode.  (This happens if the strategy clock sweep
    2753             :  * has been "lapped" and no buffer allocations have occurred recently,
    2754             :  * or if the bgwriter has been effectively disabled by setting
    2755             :  * bgwriter_lru_maxpages to 0.)
    2756             :  */
    2757             : bool
    2758       12596 : BgBufferSync(WritebackContext *wb_context)
    2759             : {
    2760             :     /* info obtained from freelist.c */
    2761             :     int         strategy_buf_id;
    2762             :     uint32      strategy_passes;
    2763             :     uint32      recent_alloc;
    2764             : 
    2765             :     /*
    2766             :      * Information saved between calls so we can determine the strategy
    2767             :      * point's advance rate and avoid scanning already-cleaned buffers.
    2768             :      */
    2769             :     static bool saved_info_valid = false;
    2770             :     static int  prev_strategy_buf_id;
    2771             :     static uint32 prev_strategy_passes;
    2772             :     static int  next_to_clean;
    2773             :     static uint32 next_passes;
    2774             : 
    2775             :     /* Moving averages of allocation rate and clean-buffer density */
    2776             :     static float smoothed_alloc = 0;
    2777             :     static float smoothed_density = 10.0;
    2778             : 
    2779             :     /* Potentially these could be tunables, but for now, not */
    2780       12596 :     float       smoothing_samples = 16;
    2781       12596 :     float       scan_whole_pool_milliseconds = 120000.0;
    2782             : 
    2783             :     /* Used to compute how far we scan ahead */
    2784             :     long        strategy_delta;
    2785             :     int         bufs_to_lap;
    2786             :     int         bufs_ahead;
    2787             :     float       scans_per_alloc;
    2788             :     int         reusable_buffers_est;
    2789             :     int         upcoming_alloc_est;
    2790             :     int         min_scan_buffers;
    2791             : 
    2792             :     /* Variables for the scanning loop proper */
    2793             :     int         num_to_scan;
    2794             :     int         num_written;
    2795             :     int         reusable_buffers;
    2796             : 
    2797             :     /* Variables for final smoothed_density update */
    2798             :     long        new_strategy_delta;
    2799             :     uint32      new_recent_alloc;
    2800             : 
    2801             :     /*
    2802             :      * Find out where the freelist clock sweep currently is, and how many
    2803             :      * buffer allocations have happened since our last call.
    2804             :      */
    2805       12596 :     strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
    2806             : 
    2807             :     /* Report buffer alloc counts to pgstat */
    2808       12596 :     PendingBgWriterStats.buf_alloc += recent_alloc;
    2809             : 
    2810             :     /*
     2811             :      * If we're not running the LRU scan, just stop after updating the
     2812             :      * statistics.  We mark the saved state invalid so that we can recover
     2813             :      * sanely if the LRU scan is turned back on later.
    2814             :      */
    2815       12596 :     if (bgwriter_lru_maxpages <= 0)
    2816             :     {
    2817           0 :         saved_info_valid = false;
    2818           0 :         return true;
    2819             :     }
    2820             : 
    2821             :     /*
    2822             :      * Compute strategy_delta = how many buffers have been scanned by the
    2823             :      * clock sweep since last time.  If first time through, assume none. Then
    2824             :      * see if we are still ahead of the clock sweep, and if so, how many
    2825             :      * buffers we could scan before we'd catch up with it and "lap" it. Note:
     2826             :      * the weird-looking coding of the xxx_passes comparisons is to avoid bogus
    2827             :      * behavior when the passes counts wrap around.
    2828             :      */
    2829       12596 :     if (saved_info_valid)
    2830             :     {
    2831       11902 :         int32       passes_delta = strategy_passes - prev_strategy_passes;
    2832             : 
    2833       11902 :         strategy_delta = strategy_buf_id - prev_strategy_buf_id;
    2834       11902 :         strategy_delta += (long) passes_delta * NBuffers;
    2835             : 
    2836             :         Assert(strategy_delta >= 0);
    2837             : 
    2838       11902 :         if ((int32) (next_passes - strategy_passes) > 0)
    2839             :         {
    2840             :             /* we're one pass ahead of the strategy point */
    2841        2512 :             bufs_to_lap = strategy_buf_id - next_to_clean;
    2842             : #ifdef BGW_DEBUG
    2843             :             elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
    2844             :                  next_passes, next_to_clean,
    2845             :                  strategy_passes, strategy_buf_id,
    2846             :                  strategy_delta, bufs_to_lap);
    2847             : #endif
    2848             :         }
    2849        9390 :         else if (next_passes == strategy_passes &&
    2850        6956 :                  next_to_clean >= strategy_buf_id)
    2851             :         {
    2852             :             /* on same pass, but ahead or at least not behind */
    2853        6750 :             bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
    2854             : #ifdef BGW_DEBUG
    2855             :             elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
    2856             :                  next_passes, next_to_clean,
    2857             :                  strategy_passes, strategy_buf_id,
    2858             :                  strategy_delta, bufs_to_lap);
    2859             : #endif
    2860             :         }
    2861             :         else
    2862             :         {
    2863             :             /*
    2864             :              * We're behind, so skip forward to the strategy point and start
    2865             :              * cleaning from there.
    2866             :              */
    2867             : #ifdef BGW_DEBUG
    2868             :             elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
    2869             :                  next_passes, next_to_clean,
    2870             :                  strategy_passes, strategy_buf_id,
    2871             :                  strategy_delta);
    2872             : #endif
    2873        2640 :             next_to_clean = strategy_buf_id;
    2874        2640 :             next_passes = strategy_passes;
    2875        2640 :             bufs_to_lap = NBuffers;
    2876             :         }
    2877             :     }
    2878             :     else
    2879             :     {
    2880             :         /*
    2881             :          * Initializing at startup or after LRU scanning had been off. Always
    2882             :          * start at the strategy point.
    2883             :          */
    2884             : #ifdef BGW_DEBUG
    2885             :         elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
    2886             :              strategy_passes, strategy_buf_id);
    2887             : #endif
    2888         694 :         strategy_delta = 0;
    2889         694 :         next_to_clean = strategy_buf_id;
    2890         694 :         next_passes = strategy_passes;
    2891         694 :         bufs_to_lap = NBuffers;
    2892             :     }
    2893             : 
    2894             :     /* Update saved info for next time */
    2895       12596 :     prev_strategy_buf_id = strategy_buf_id;
    2896       12596 :     prev_strategy_passes = strategy_passes;
    2897       12596 :     saved_info_valid = true;
    2898             : 
    2899             :     /*
     2900             :      * Compute how many buffers had to be scanned for each new allocation, i.e.,
    2901             :      * 1/density of reusable buffers, and track a moving average of that.
    2902             :      *
    2903             :      * If the strategy point didn't move, we don't update the density estimate
     2904             :      * If the strategy point didn't move, we don't update the density estimate.
    2905       12596 :     if (strategy_delta > 0 && recent_alloc > 0)
    2906             :     {
    2907        3308 :         scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
    2908        3308 :         smoothed_density += (scans_per_alloc - smoothed_density) /
    2909             :             smoothing_samples;
    2910             :     }
    2911             : 
    2912             :     /*
    2913             :      * Estimate how many reusable buffers there are between the current
    2914             :      * strategy point and where we've scanned ahead to, based on the smoothed
    2915             :      * density estimate.
    2916             :      */
    2917       12596 :     bufs_ahead = NBuffers - bufs_to_lap;
    2918       12596 :     reusable_buffers_est = (float) bufs_ahead / smoothed_density;
    2919             : 
    2920             :     /*
    2921             :      * Track a moving average of recent buffer allocations.  Here, rather than
    2922             :      * a true average we want a fast-attack, slow-decline behavior: we
    2923             :      * immediately follow any increase.
    2924             :      */
    2925       12596 :     if (smoothed_alloc <= (float) recent_alloc)
    2926        2796 :         smoothed_alloc = recent_alloc;
    2927             :     else
    2928        9800 :         smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
    2929             :             smoothing_samples;
    2930             : 
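    /*
     * Worked example of the fast-attack, slow-decline average (made-up
     * numbers, smoothing_samples = 16): if smoothed_alloc is 100 and
     * recent_alloc jumps to 400, we snap straight to 400.  If recent_alloc
     * then stays at 0, each call decays the average by a factor of 15/16,
     * leaving about 210 after ten calls rather than collapsing at once.
     */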
    2931             :     /* Scale the estimate by a GUC to allow more aggressive tuning. */
    2932       12596 :     upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
    2933             : 
    2934             :     /*
    2935             :      * If recent_alloc remains at zero for many cycles, smoothed_alloc will
    2936             :      * eventually underflow to zero, and the underflows produce annoying
    2937             :      * kernel warnings on some platforms.  Once upcoming_alloc_est has gone to
    2938             :      * zero, there's no point in tracking smaller and smaller values of
    2939             :      * smoothed_alloc, so just reset it to exactly zero to avoid this
    2940             :      * syndrome.  It will pop back up as soon as recent_alloc increases.
    2941             :      */
    2942       12596 :     if (upcoming_alloc_est == 0)
    2943         944 :         smoothed_alloc = 0;
    2944             : 
    2945             :     /*
    2946             :      * Even in cases where there's been little or no buffer allocation
    2947             :      * activity, we want to make a small amount of progress through the buffer
    2948             :      * cache so that as many reusable buffers as possible are clean after an
    2949             :      * idle period.
    2950             :      *
    2951             :      * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
    2952             :      * the BGW will be called during the scan_whole_pool time; slice the
    2953             :      * buffer pool into that many sections.
    2954             :      */
    2955       12596 :     min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
    2956             : 
    2957       12596 :     if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
    2958             :     {
    2959             : #ifdef BGW_DEBUG
    2960             :         elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
    2961             :              upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
    2962             : #endif
    2963        6114 :         upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
    2964             :     }
    2965             : 
    2966             :     /*
    2967             :      * Now write out dirty reusable buffers, working forward from the
    2968             :      * next_to_clean point, until we have lapped the strategy scan, or cleaned
    2969             :      * enough buffers to match our estimate of the next cycle's allocation
    2970             :      * requirements, or hit the bgwriter_lru_maxpages limit.
    2971             :      */
    2972             : 
    2973             :     /* Make sure we can handle the pin inside SyncOneBuffer */
    2974       12596 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    2975             : 
    2976       12596 :     num_to_scan = bufs_to_lap;
    2977       12596 :     num_written = 0;
    2978       12596 :     reusable_buffers = reusable_buffers_est;
    2979             : 
    2980             :     /* Execute the LRU scan */
    2981     2233794 :     while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
    2982             :     {
    2983     2221200 :         int         sync_state = SyncOneBuffer(next_to_clean, true,
    2984             :                                                wb_context);
    2985             : 
    2986     2221200 :         if (++next_to_clean >= NBuffers)
    2987             :         {
    2988        2978 :             next_to_clean = 0;
    2989        2978 :             next_passes++;
    2990             :         }
    2991     2221200 :         num_to_scan--;
    2992             : 
    2993     2221200 :         if (sync_state & BUF_WRITTEN)
    2994             :         {
    2995       22034 :             reusable_buffers++;
    2996       22034 :             if (++num_written >= bgwriter_lru_maxpages)
    2997             :             {
    2998           2 :                 PendingBgWriterStats.maxwritten_clean++;
    2999           2 :                 break;
    3000             :             }
    3001             :         }
    3002     2199166 :         else if (sync_state & BUF_REUSABLE)
    3003     1631518 :             reusable_buffers++;
    3004             :     }
    3005             : 
    3006       12596 :     PendingBgWriterStats.buf_written_clean += num_written;
    3007             : 
    3008             : #ifdef BGW_DEBUG
    3009             :     elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
    3010             :          recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
    3011             :          smoothed_density, reusable_buffers_est, upcoming_alloc_est,
    3012             :          bufs_to_lap - num_to_scan,
    3013             :          num_written,
    3014             :          reusable_buffers - reusable_buffers_est);
    3015             : #endif
    3016             : 
    3017             :     /*
    3018             :      * Consider the above scan as being like a new allocation scan.
    3019             :      * Characterize its density and update the smoothed one based on it. This
    3020             :      * effectively halves the moving average period in cases where both the
    3021             :      * strategy and the background writer are doing some useful scanning,
    3022             :      * which is helpful because a long memory isn't as desirable for the
    3023             :      * density estimates.
    3024             :      */
    3025       12596 :     new_strategy_delta = bufs_to_lap - num_to_scan;
    3026       12596 :     new_recent_alloc = reusable_buffers - reusable_buffers_est;
    3027       12596 :     if (new_strategy_delta > 0 && new_recent_alloc > 0)
    3028             :     {
    3029        9824 :         scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
    3030        9824 :         smoothed_density += (scans_per_alloc - smoothed_density) /
    3031             :             smoothing_samples;
    3032             : 
    3033             : #ifdef BGW_DEBUG
    3034             :         elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
    3035             :              new_recent_alloc, new_strategy_delta,
    3036             :              scans_per_alloc, smoothed_density);
    3037             : #endif
    3038             :     }
    3039             : 
    3040             :     /* Return true if OK to hibernate */
    3041       12596 :     return (bufs_to_lap == 0 && recent_alloc == 0);
    3042             : }
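
The pacing arithmetic above combines two smoothed quantities: a scan-density estimate and a fast-attack, slow-decline average of recent allocations. Below is a minimal standalone sketch of those recurrences with hypothetical names (it is not part of bufmgr.c; `samples` plays the role of smoothing_samples):

#include <stdint.h>

/* Exponential moving average with period "samples" (illustrative only). */
static float
ema_update(float smoothed, float sample, float samples)
{
    return smoothed + (sample - smoothed) / samples;
}

/* Fast-attack, slow-decline average: follow any increase immediately,
 * decay gradually otherwise, as the smoothed_alloc logic above does. */
static float
smooth_alloc(float smoothed, uint32_t recent_alloc, float samples)
{
    if (smoothed <= (float) recent_alloc)
        return (float) recent_alloc;
    return ema_update(smoothed, (float) recent_alloc, samples);
}

With samples = 16, an allocation burst lifts the estimate immediately, while an idle stretch takes on the order of 16 cycles to decay back toward zero.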
    3043             : 
    3044             : /*
    3045             :  * SyncOneBuffer -- process a single buffer during syncing.
    3046             :  *
    3047             :  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
    3048             :  * buffers marked recently used, as these are not replacement candidates.
    3049             :  *
    3050             :  * Returns a bitmask containing the following flag bits:
    3051             :  *  BUF_WRITTEN: we wrote the buffer.
    3052             :  *  BUF_REUSABLE: buffer is available for replacement, i.e., it has
    3053             :  *      pin count 0 and usage count 0.
    3054             :  *
    3055             :  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
    3056             :  * after locking it, but we don't care all that much.)
    3057             :  *
    3058             :  * Note: caller must have done ResourceOwnerEnlargeBuffers.
    3059             :  */
    3060             : static int
    3061     3149952 : SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
    3062             : {
    3063     3149952 :     BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
    3064     3149952 :     int         result = 0;
    3065             :     uint32      buf_state;
    3066             :     BufferTag   tag;
    3067             : 
    3068     3149952 :     ReservePrivateRefCountEntry();
    3069             : 
    3070             :     /*
    3071             :      * Check whether buffer needs writing.
    3072             :      *
    3073             :      * We can make this check without taking the buffer content lock so long
    3074             :      * as we mark pages dirty in access methods *before* logging changes with
    3075             :      * XLogInsert(): if someone marks the buffer dirty just after our check,
    3076             :      * we need not worry, because our checkpoint.redo points before the log
    3077             :      * record for the upcoming changes, so we needn't write such a dirty buffer.
    3078             :      */
    3079     3149952 :     buf_state = LockBufHdr(bufHdr);
    3080             : 
    3081     3149952 :     if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
    3082     3148992 :         BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
    3083             :     {
    3084     1654390 :         result |= BUF_REUSABLE;
    3085             :     }
    3086     1495562 :     else if (skip_recently_used)
    3087             :     {
    3088             :         /* Caller told us not to write recently-used buffers */
    3089      567648 :         UnlockBufHdr(bufHdr, buf_state);
    3090      567648 :         return result;
    3091             :     }
    3092             : 
    3093     2582304 :     if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
    3094             :     {
    3095             :         /* It's clean, so nothing to do */
    3096     1631518 :         UnlockBufHdr(bufHdr, buf_state);
    3097     1631518 :         return result;
    3098             :     }
    3099             : 
    3100             :     /*
    3101             :      * Pin it, share-lock it, write it.  (FlushBuffer will do nothing if the
    3102             :      * buffer is clean by the time we've locked it.)
    3103             :      */
    3104      950786 :     PinBuffer_Locked(bufHdr);
    3105      950786 :     LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
    3106             : 
    3107      950786 :     FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    3108             : 
    3109      950786 :     LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
    3110             : 
    3111      950786 :     tag = bufHdr->tag;
    3112             : 
    3113      950786 :     UnpinBuffer(bufHdr);
    3114             : 
    3115             :     /*
    3116             :      * SyncOneBuffer() is only called by checkpointer and bgwriter, so
    3117             :      * IOContext will always be IOCONTEXT_NORMAL.
    3118             :      */
    3119      950786 :     ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
    3120             : 
    3121      950786 :     return result | BUF_WRITTEN;
    3122             : }
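
A caller consumes SyncOneBuffer's bitmask the way the LRU loop above does: either flag bumps the reusable count, and BUF_WRITTEN additionally counts against the bgwriter_lru_maxpages budget. A hedged sketch of that accounting, with stand-in constants so as not to collide with the real BUF_WRITTEN/BUF_REUSABLE definitions:

/* Illustrative stand-ins for the real flag bits. */
#define EX_BUF_REUSABLE 0x01
#define EX_BUF_WRITTEN  0x02

/* Returns 1 when the per-round write budget is exhausted. */
static int
ex_account(int sync_state, int *reusable, int *written, int max_written)
{
    if (sync_state & (EX_BUF_WRITTEN | EX_BUF_REUSABLE))
        (*reusable)++;
    if ((sync_state & EX_BUF_WRITTEN) && ++(*written) >= max_written)
        return 1;               /* mirrors the maxwritten_clean break */
    return 0;
}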
    3123             : 
    3124             : /*
    3125             :  *      AtEOXact_Buffers - clean up at end of transaction.
    3126             :  *
    3127             :  *      As of PostgreSQL 8.0, buffer pins should get released by the
    3128             :  *      ResourceOwner mechanism.  This routine is just a debugging
    3129             :  *      cross-check that no pins remain.
    3130             :  */
    3131             : void
    3132      974324 : AtEOXact_Buffers(bool isCommit)
    3133             : {
    3134      974324 :     CheckForBufferLeaks();
    3135             : 
    3136      974324 :     AtEOXact_LocalBuffers(isCommit);
    3137             : 
    3138             :     Assert(PrivateRefCountOverflowed == 0);
    3139      974324 : }
    3140             : 
    3141             : /*
    3142             :  * Initialize access to shared buffer pool
    3143             :  *
    3144             :  * This is called during backend startup (whether standalone or under the
    3145             :  * postmaster).  It sets up for this backend's access to the already-existing
    3146             :  * buffer pool.
    3147             :  */
    3148             : void
    3149       27402 : InitBufferPoolAccess(void)
    3150             : {
    3151             :     HASHCTL     hash_ctl;
    3152             : 
    3153       27402 :     memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
    3154             : 
    3155       27402 :     hash_ctl.keysize = sizeof(int32);
    3156       27402 :     hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
    3157             : 
    3158       27402 :     PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
    3159             :                                       HASH_ELEM | HASH_BLOBS);
    3160             : 
    3161             :     /*
    3162             :      * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
    3163             :      * the corresponding phase of backend shutdown.
    3164             :      */
    3165             :     Assert(MyProc != NULL);
    3166       27402 :     on_shmem_exit(AtProcExit_Buffers, 0);
    3167       27402 : }
    3168             : 
    3169             : /*
    3170             :  * During backend exit, ensure that we released all shared-buffer locks and
    3171             :  * assert that we have no remaining pins.
    3172             :  */
    3173             : static void
    3174       27402 : AtProcExit_Buffers(int code, Datum arg)
    3175             : {
    3176       27402 :     UnlockBuffers();
    3177             : 
    3178       27402 :     CheckForBufferLeaks();
    3179             : 
    3180             :     /* localbuf.c needs a chance too */
    3181       27402 :     AtProcExit_LocalBuffers();
    3182       27402 : }
    3183             : 
    3184             : /*
    3185             :  *      CheckForBufferLeaks - ensure this backend holds no buffer pins
    3186             :  *
    3187             :  *      As of PostgreSQL 8.0, buffer pins should get released by the
    3188             :  *      ResourceOwner mechanism.  This routine is just a debugging
    3189             :  *      cross-check that no pins remain.
    3190             :  */
    3191             : static void
    3192     1001726 : CheckForBufferLeaks(void)
    3193             : {
    3194             : #ifdef USE_ASSERT_CHECKING
    3195             :     int         RefCountErrors = 0;
    3196             :     PrivateRefCountEntry *res;
    3197             :     int         i;
    3198             : 
    3199             :     /* check the array */
    3200             :     for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
    3201             :     {
    3202             :         res = &PrivateRefCountArray[i];
    3203             : 
    3204             :         if (res->buffer != InvalidBuffer)
    3205             :         {
    3206             :             PrintBufferLeakWarning(res->buffer);
    3207             :             RefCountErrors++;
    3208             :         }
    3209             :     }
    3210             : 
    3211             :     /* if necessary search the hash */
    3212             :     if (PrivateRefCountOverflowed)
    3213             :     {
    3214             :         HASH_SEQ_STATUS hstat;
    3215             : 
    3216             :         hash_seq_init(&hstat, PrivateRefCountHash);
    3217             :         while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
    3218             :         {
    3219             :             PrintBufferLeakWarning(res->buffer);
    3220             :             RefCountErrors++;
    3221             :         }
    3222             :     }
    3223             : 
    3224             :     Assert(RefCountErrors == 0);
    3225             : #endif
    3226     1001726 : }
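
The check mirrors the two-tier refcount bookkeeping: a small fixed array scanned directly, plus an overflow store consulted only when it was actually used. A generic sketch of that pattern under assumed types (none of these names exist in bufmgr.c):

#include <stdbool.h>
#include <stddef.h>

#define EX_ARRAY_ENTRIES 8      /* small fast path; the real array size differs */

typedef struct ExRefEntry
{
    int     buffer;             /* 0 means "empty slot" in this sketch */
    int     refcount;
} ExRefEntry;

/* Count entries still held; a caller treats a nonzero result as a leak. */
static int
ex_count_leaks(const ExRefEntry *array,
               const ExRefEntry *overflow, size_t n_overflow,
               bool overflowed)
{
    int     leaks = 0;

    for (int i = 0; i < EX_ARRAY_ENTRIES; i++)
        if (array[i].buffer != 0)
            leaks++;

    /* Search the slower overflow store only when it has been used,
     * just as the hash is searched only if PrivateRefCountOverflowed. */
    if (overflowed)
        for (size_t i = 0; i < n_overflow; i++)
            if (overflow[i].buffer != 0)
                leaks++;

    return leaks;
}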
    3227             : 
    3228             : /*
    3229             :  * Helper routine to issue warnings when a buffer is unexpectedly pinned
    3230             :  */
    3231             : void
    3232           0 : PrintBufferLeakWarning(Buffer buffer)
    3233             : {
    3234             :     BufferDesc *buf;
    3235             :     int32       loccount;
    3236             :     char       *path;
    3237             :     BackendId   backend;
    3238             :     uint32      buf_state;
    3239             : 
    3240             :     Assert(BufferIsValid(buffer));
    3241           0 :     if (BufferIsLocal(buffer))
    3242             :     {
    3243           0 :         buf = GetLocalBufferDescriptor(-buffer - 1);
    3244           0 :         loccount = LocalRefCount[-buffer - 1];
    3245           0 :         backend = MyBackendId;
    3246             :     }
    3247             :     else
    3248             :     {
    3249           0 :         buf = GetBufferDescriptor(buffer - 1);
    3250           0 :         loccount = GetPrivateRefCount(buffer);
    3251           0 :         backend = InvalidBackendId;
    3252             :     }
    3253             : 
    3254             :     /* theoretically we should lock the bufhdr here */
    3255           0 :     path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
    3256             :                           BufTagGetForkNum(&buf->tag));
    3257           0 :     buf_state = pg_atomic_read_u32(&buf->state);
    3258           0 :     elog(WARNING,
    3259             :          "buffer refcount leak: [%03d] "
    3260             :          "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
    3261             :          buffer, path,
    3262             :          buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
    3263             :          BUF_STATE_GET_REFCOUNT(buf_state), loccount);
    3264           0 :     pfree(path);
    3265           0 : }
    3266             : 
    3267             : /*
    3268             :  * CheckPointBuffers
    3269             :  *
    3270             :  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
    3271             :  *
    3272             :  * Note: temporary relations do not participate in checkpoints, so they don't
    3273             :  * need to be flushed.
    3274             :  */
    3275             : void
    3276        4690 : CheckPointBuffers(int flags)
    3277             : {
    3278        4690 :     BufferSync(flags);
    3279        4690 : }
    3280             : 
    3281             : /*
    3282             :  * BufferGetBlockNumber
    3283             :  *      Returns the block number associated with a buffer.
    3284             :  *
    3285             :  * Note:
    3286             :  *      Assumes that the buffer is valid and pinned, else the
    3287             :  *      value may be obsolete immediately...
    3288             :  */
    3289             : BlockNumber
    3290   126938448 : BufferGetBlockNumber(Buffer buffer)
    3291             : {
    3292             :     BufferDesc *bufHdr;
    3293             : 
    3294             :     Assert(BufferIsPinned(buffer));
    3295             : 
    3296   126938448 :     if (BufferIsLocal(buffer))
    3297     3207084 :         bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    3298             :     else
    3299   123731364 :         bufHdr = GetBufferDescriptor(buffer - 1);
    3300             : 
    3301             :     /* pinned, so OK to read tag without spinlock */
    3302   126938448 :     return bufHdr->tag.blockNum;
    3303             : }
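
Buffer numbers encode their pool: positive values index shared buffers (1-based), negative values index this backend's local buffers. A short standalone sketch of the decoding used here and in PrintBufferLeakWarning (helper names are hypothetical):

#include <stdbool.h>

/* Negative buffer numbers denote backend-local buffers. */
static bool
ex_buffer_is_local(int buffer)
{
    return buffer < 0;
}

/* Map a buffer number to a 0-based index into the corresponding
 * descriptor array: buffer 1 -> shared slot 0, buffer -1 -> local slot 0. */
static int
ex_descriptor_index(int buffer)
{
    return ex_buffer_is_local(buffer) ? -buffer - 1 : buffer - 1;
}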
    3304             : 
    3305             : /*
    3306             :  * BufferGetTag
    3307             :  *      Returns the relfilelocator, fork number and block number associated with
    3308             :  *      a buffer.
    3309             :  */
    3310             : void
    3311    44627588 : BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
    3312             :              BlockNumber *blknum)
    3313             : {
    3314             :     BufferDesc *bufHdr;
    3315             : 
    3316             :     /* Do the same checks as BufferGetBlockNumber. */
    3317             :     Assert(BufferIsPinned(buffer));
    3318             : 
    3319    44627588 :     if (BufferIsLocal(buffer))
    3320           0 :         bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    3321             :     else
    3322    44627588 :         bufHdr = GetBufferDescriptor(buffer - 1);
    3323             : 
    3324             :     /* pinned, so OK to read tag without spinlock */
    3325    44627588 :     *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
    3326    44627588 :     *forknum = BufTagGetForkNum(&bufHdr->tag);
    3327    44627588 :     *blknum = bufHdr->tag.blockNum;
    3328    44627588 : }
    3329             : 
    3330             : /*
    3331             :  * FlushBuffer
    3332             :  *      Physically write out a shared buffer.
    3333             :  *
    3334             :  * NOTE: this actually just passes the buffer contents to the kernel; the
    3335             :  * real write to disk won't happen until the kernel feels like it.  This
    3336             :  * is okay from our point of view since we can redo the changes from WAL.
    3337             :  * However, we will need to force the changes to disk via fsync before
    3338             :  * we can checkpoint WAL.
    3339             :  *
    3340             :  * The caller must hold a pin on the buffer and have share-locked the
    3341             :  * buffer contents.  (Note: a share-lock does not prevent updates of
    3342             :  * hint bits in the buffer, so the page could change while the write
    3343             :  * is in progress, but we assume that that will not invalidate the data
    3344             :  * written.)
    3345             :  *
    3346             :  * If the caller has an smgr reference for the buffer's relation, pass it
    3347             :  * as the second parameter.  If not, pass NULL.
    3348             :  */
    3349             : static void
    3350     1397086 : FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
    3351             :             IOContext io_context)
    3352             : {
    3353             :     XLogRecPtr  recptr;
    3354             :     ErrorContextCallback errcallback;
    3355             :     instr_time  io_start;
    3356             :     Block       bufBlock;
    3357             :     char       *bufToWrite;
    3358             :     uint32      buf_state;
    3359             : 
    3360             :     /*
    3361             :      * Try to start an I/O operation.  If StartBufferIO returns false, then
    3362             :      * someone else flushed the buffer before we could, so we need not do
    3363             :      * anything.
    3364             :      */
    3365     1397086 :     if (!StartBufferIO(buf, false))
    3366           0 :         return;
    3367             : 
    3368             :     /* Setup error traceback support for ereport() */
    3369     1397086 :     errcallback.callback = shared_buffer_write_error_callback;
    3370     1397086 :     errcallback.arg = (void *) buf;
    3371     1397086 :     errcallback.previous = error_context_stack;
    3372     1397086 :     error_context_stack = &errcallback;
    3373             : 
    3374             :     /* Find smgr relation for buffer */
    3375     1397086 :     if (reln == NULL)
    3376     1390086 :         reln = smgropen(BufTagGetRelFileLocator(&buf->tag), InvalidBackendId);
    3377             : 
    3378             :     TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
    3379             :                                         buf->tag.blockNum,
    3380             :                                         reln->smgr_rlocator.locator.spcOid,
    3381             :                                         reln->smgr_rlocator.locator.dbOid,
    3382             :                                         reln->smgr_rlocator.locator.relNumber);
    3383             : 
    3384     1397086 :     buf_state = LockBufHdr(buf);
    3385             : 
    3386             :     /*
    3387             :      * Run PageGetLSN while holding header lock, since we don't have the
    3388             :      * buffer locked exclusively in all cases.
    3389             :      */
    3390     1397086 :     recptr = BufferGetLSN(buf);
    3391             : 
    3392             :     /* To check if block content changes while flushing. - vadim 01/17/97 */
    3393     1397086 :     buf_state &= ~BM_JUST_DIRTIED;
    3394     1397086 :     UnlockBufHdr(buf, buf_state);
    3395             : 
    3396             :     /*
    3397             :      * Force XLOG flush up to buffer's LSN.  This implements the basic WAL
    3398             :      * rule that log updates must hit disk before any of the data-file changes
    3399             :      * they describe do.
    3400             :      *
    3401             :      * However, this rule does not apply to unlogged relations, which will be
    3402             :      * lost after a crash anyway.  Most unlogged relation pages do not bear
    3403             :      * LSNs since we never emit WAL records for them, and therefore flushing
    3404             :      * up through the buffer LSN would be useless, but harmless.  However,
    3405             :      * GiST indexes use LSNs internally to track page-splits, and therefore
    3406             :      * unlogged GiST pages bear "fake" LSNs generated by
    3407             :      * GetFakeLSNForUnloggedRel.  It is unlikely but possible that the fake
    3408             :      * LSN counter could advance past the WAL insertion point; and if it did
    3409             :      * happen, attempting to flush WAL through that location would fail, with
    3410             :      * disastrous system-wide consequences.  To make sure that can't happen,
    3411             :      * skip the flush if the buffer isn't permanent.
    3412             :      */
    3413     1397086 :     if (buf_state & BM_PERMANENT)
    3414     1393044 :         XLogFlush(recptr);
    3415             : 
    3416             :     /*
    3417             :      * Now it's safe to write buffer to disk. Note that no one else should
    3418             :      * have been able to write it while we were busy with log flushing because
    3419             :      * only one process at a time can set the BM_IO_IN_PROGRESS bit.
    3420             :      */
    3421     1397086 :     bufBlock = BufHdrGetBlock(buf);
    3422             : 
    3423             :     /*
    3424             :      * Update page checksum if desired.  Since we have only shared lock on the
    3425             :      * buffer, other processes might be updating hint bits in it, so we must
    3426             :      * copy the page to private storage if we do checksumming.
    3427             :      */
    3428     1397086 :     bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
    3429             : 
    3430     1397086 :     io_start = pgstat_prepare_io_time();
    3431             : 
    3432             :     /*
    3433             :      * bufToWrite is either the shared buffer or a copy, as appropriate.
    3434             :      */
    3435     1397086 :     smgrwrite(reln,
    3436     1397086 :               BufTagGetForkNum(&buf->tag),
    3437             :               buf->tag.blockNum,
    3438             :               bufToWrite,
    3439             :               false);
    3440             : 
    3441             :     /*
    3442             :      * When a strategy is in use, only flushes of dirty buffers already in the
    3443             :      * strategy ring are counted as strategy writes (IOCONTEXT
    3444             :      * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
    3445             :      * statistics tracking.
    3446             :      *
    3447             :      * If a shared buffer initially added to the ring must be flushed before
    3448             :      * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
    3449             :      *
    3450             :      * If a shared buffer which was added to the ring later because the
    3451             :      * current strategy buffer is pinned or in use or because all strategy
    3452             :      * buffers were dirty and rejected (for BAS_BULKREAD operations only)
    3453             :      * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
    3454             :      * (from_ring will be false).
    3455             :      *
    3456             :      * When a strategy is not in use, the write can only be a "regular" write
    3457             :      * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
    3458             :      * If a shared buffer was added to the ring later, because the current
    3459             :      * strategy buffer was pinned or in use, or because all strategy buffers
    3460             :      * were dirty and rejected (for BAS_BULKREAD operations only), then
    3461             :      * flushing it is counted as an IOCONTEXT_NORMAL IOOP_WRITE (from_ring
    3462             :      * will be false).
    3463             : 
    3464             :     /*
    3465             :      * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
    3466             :      * end the BM_IO_IN_PROGRESS state.
    3467             :      */
    3468     1397086 :     TerminateBufferIO(buf, true, 0);
    3469             : 
    3470             :     TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
    3471             :                                        buf->tag.blockNum,
    3472             :                                        reln->smgr_rlocator.locator.spcOid,
    3473             :                                        reln->smgr_rlocator.locator.dbOid,
    3474             :                                        reln->smgr_rlocator.locator.relNumber);
    3475             : 
    3476             :     /* Pop the error context stack */
    3477     1397086 :     error_context_stack = errcallback.previous;
    3478             : }
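
The ordering enforced above is the WAL rule in miniature: flush the log through the page's LSN, then hand the page to the kernel. A schematic sketch with hypothetical function-pointer parameters (the real calls are XLogFlush and smgrwrite):

#include <stdbool.h>

typedef unsigned long long ExLSN;   /* stand-in for XLogRecPtr */

/*
 * Write-ahead ordering: the log flush must complete before the data page
 * goes out, or a crash could leave a page on disk whose WAL never made it.
 * Pages of unlogged relations may skip the flush, as FlushBuffer does for
 * buffers lacking BM_PERMANENT.
 */
static void
ex_flush_page(ExLSN page_lsn, bool is_permanent,
              void (*flush_wal_through) (ExLSN),
              void (*write_page) (void))
{
    if (is_permanent)
        flush_wal_through(page_lsn);
    write_page();
}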
    3479             : 
    3480             : /*
    3481             :  * RelationGetNumberOfBlocksInFork
    3482             :  *      Determines the current number of pages in the specified relation fork.
    3483             :  *
    3484             :  * Note that the accuracy of the result will depend on the details of the
    3485             :  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
    3486             :  * it might not be.
    3487             :  */
    3488             : BlockNumber
    3489     4096710 : RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
    3490             : {
    3491     4096710 :     if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
    3492             :     {
    3493             :         /*
    3494             :          * Not every table AM uses BLCKSZ-wide fixed-size blocks, so
    3495             :          * tableam returns the size in bytes; but for the purpose of this
    3496             :          * routine, we want the number of blocks.  Therefore divide,
    3497             :          * rounding up.
    3498             :          */
    3499             :         uint64      szbytes;
    3500             : 
    3501     3189682 :         szbytes = table_relation_size(relation, forkNum);
    3502             : 
    3503     3189646 :         return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
    3504             :     }
    3505      907028 :     else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
    3506             :     {
    3507      907028 :         return smgrnblocks(RelationGetSmgr(relation), forkNum);
    3508             :     }
    3509             :     else
    3510             :         Assert(false);
    3511             : 
    3512           0 :     return 0;                   /* keep compiler quiet */
    3513             : }
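
The bytes-to-blocks conversion above is ordinary ceiling division: with BLCKSZ = 8192, a 16385-byte fork occupies (16385 + 8191) / 8192 = 3 blocks. A self-contained restatement:

#include <stdint.h>

/* Ceiling division of a byte size into fixed-size blocks (illustrative). */
static uint64_t
ex_blocks_for_bytes(uint64_t szbytes, uint64_t blcksz)
{
    return (szbytes + (blcksz - 1)) / blcksz;   /* 16385, 8192 -> 3 */
}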
    3514             : 
    3515             : /*
    3516             :  * BufferIsPermanent
    3517             :  *      Determines whether a buffer will potentially still be around after
    3518             :  *      a crash.  Caller must hold a buffer pin.
    3519             :  */
    3520             : bool
    3521    30759626 : BufferIsPermanent(Buffer buffer)
    3522             : {
    3523             :     BufferDesc *bufHdr;
    3524             : 
    3525             :     /* Local buffers are used only for temp relations. */
    3526    30759626 :     if (BufferIsLocal(buffer))
    3527     1144906 :         return false;
    3528             : 
    3529             :     /* Make sure we've got a real buffer, and that we hold a pin on it. */
    3530             :     Assert(BufferIsValid(buffer));
    3531             :     Assert(BufferIsPinned(buffer));
    3532             : 
    3533             :     /*
    3534             :      * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
    3535             :      * need not bother with the buffer header spinlock.  Even if someone else
    3536             :      * changes the buffer header state while we're doing this, the state is
    3537             :      * changed atomically, so we'll read the old value or the new value, but
    3538             :      * not random garbage.
    3539             :      */
    3540    29614720 :     bufHdr = GetBufferDescriptor(buffer - 1);
    3541    29614720 :     return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
    3542             : }
    3543             : 
    3544             : /*
    3545             :  * BufferGetLSNAtomic
    3546             :  *      Retrieves the LSN of the buffer atomically using a buffer header lock.
    3547             :  *      This is necessary for some callers who may not have an exclusive lock
    3548             :  *      on the buffer.
    3549             :  */
    3550             : XLogRecPtr
    3551    18742166 : BufferGetLSNAtomic(Buffer buffer)
    3552             : {
    3553    18742166 :     BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
    3554    18742166 :     char       *page = BufferGetPage(buffer);
    3555             :     XLogRecPtr  lsn;
    3556             :     uint32      buf_state;
    3557             : 
    3558             :     /*
    3559             :      * If we don't need locking for correctness, fastpath out.
    3560             :      */
    3561    18742166 :     if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
    3562    15396182 :         return PageGetLSN(page);
    3563             : 
    3564             :     /* Make sure we've got a real buffer, and that we hold a pin on it. */
    3565             :     Assert(BufferIsValid(buffer));
    3566             :     Assert(BufferIsPinned(buffer));
    3567             : 
    3568     3345984 :     buf_state = LockBufHdr(bufHdr);
    3569     3345984 :     lsn = PageGetLSN(page);
    3570     3345984 :     UnlockBufHdr(bufHdr, buf_state);
    3571             : 
    3572     3345984 :     return lsn;
    3573             : }
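
The header lock matters only when hint-bit updates can move the LSN underneath us (the XLogHintBitIsNeeded() case); bracketing the read makes it consistent. A generic sketch of a lock-bracketed wide read, using a pthread mutex purely for illustration (the real code uses the buffer-header spinlock):

#include <stdint.h>
#include <pthread.h>

/* Read a 64-bit value consistently when a plain load could race with
 * concurrent updates; illustrative only. */
static uint64_t
ex_locked_read_u64(pthread_mutex_t *lock, const uint64_t *value)
{
    uint64_t    v;

    pthread_mutex_lock(lock);
    v = *value;
    pthread_mutex_unlock(lock);
    return v;
}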
    3574             : 
    3575             : /* ---------------------------------------------------------------------
    3576             :  *      DropRelationBuffers
    3577             :  *
    3578             :  *      This function removes from the buffer pool all the pages of the
    3579             :  *      specified relation forks that have block numbers >= firstDelBlock.
    3580             :  *      (In particular, with firstDelBlock = 0, all pages are removed.)
    3581             :  *      Dirty pages are simply dropped, without bothering to write them
    3582             :  *      out first.  Therefore, this is NOT rollback-able, and so should be
    3583             :  *      used only with extreme caution!
    3584             :  *
    3585             :  *      Currently, this is called only from smgr.c when the underlying file
    3586             :  *      is about to be deleted or truncated (firstDelBlock is needed for
    3587             :  *      the truncation case).  The data in the affected pages would therefore
    3588             :  *      be deleted momentarily anyway, and there is no point in writing it.
    3589             :  *      It is the responsibility of higher-level code to ensure that the
    3590             :  *      deletion or truncation does not lose any data that could be needed
    3591             :  *      later.  It is also the responsibility of higher-level code to ensure
    3592             :  *      that no other process could be trying to load more pages of the
    3593             :  *      relation into buffers.
    3594             :  * --------------------------------------------------------------------
    3595             :  */
    3596             : void
    3597        1070 : DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
    3598             :                     int nforks, BlockNumber *firstDelBlock)
    3599             : {
    3600             :     int         i;
    3601             :     int         j;
    3602             :     RelFileLocatorBackend rlocator;
    3603             :     BlockNumber nForkBlock[MAX_FORKNUM];
    3604        1070 :     uint64      nBlocksToInvalidate = 0;
    3605             : 
    3606        1070 :     rlocator = smgr_reln->smgr_rlocator;
    3607             : 
    3608             :     /* If it's a local relation, it's localbuf.c's problem. */
    3609        1070 :     if (RelFileLocatorBackendIsTemp(rlocator))
    3610             :     {
    3611         658 :         if (rlocator.backend == MyBackendId)
    3612             :         {
    3613        1350 :             for (j = 0; j < nforks; j++)
    3614         692 :                 DropRelationLocalBuffers(rlocator.locator, forkNum[j],
    3615         692 :                                          firstDelBlock[j]);
    3616             :         }
    3617         730 :         return;
    3618             :     }
    3619             : 
    3620             :     /*
    3621             :      * To remove all the pages of the specified relation forks from the buffer
    3622             :      * pool, we would need to scan the entire buffer pool; but we can
    3623             :      * optimize this by looking the buffers up in the BufMapping table,
    3624             :      * provided we know the exact size of each fork of the relation.  The
    3625             :      * exact size is required to ensure that we don't leave behind any
    3626             :      * buffer for the relation being dropped, as otherwise the background
    3627             :      * writer or checkpointer could PANIC while flushing buffers for missing files.
    3628             :      *
    3629             :      * To know the exact size, we rely on the size we cached for each fork
    3630             :      * during recovery, which limits the optimization to recovery and to
    3631             :      * standbys; but we could easily extend it once we have a shared cache
    3632             :      * for relation sizes.
    3633             :      *
    3634             :      * In recovery, we cache the value returned by the first lseek(SEEK_END)
    3635             :      * and future writes keep the cached value up-to-date.  See
    3636             :      * smgrextend. It is possible that the value of the first lseek is smaller
    3637             :      * than the actual number of existing blocks in the file due to buggy
    3638             :      * Linux kernels that might not have accounted for the recent write. But
    3639             :      * that should be fine because there must not be any buffers after that
    3640             :      * file size.
    3641             :      */
    3642         564 :     for (i = 0; i < nforks; i++)
    3643             :     {
    3644             :         /* Get the number of blocks for a relation's fork */
    3645         480 :         nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
    3646             : 
    3647         480 :         if (nForkBlock[i] == InvalidBlockNumber)
    3648             :         {
    3649         328 :             nBlocksToInvalidate = InvalidBlockNumber;
    3650         328 :             break;
    3651             :         }
    3652             : 
    3653             :         /* calculate the number of blocks to be invalidated */
    3654         152 :         nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
    3655             :     }
    3656             : 
    3657             :     /*
    3658             :      * We apply the optimization iff the total number of blocks to invalidate
    3659             :      * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
    3660             :      */
    3661         412 :     if (BlockNumberIsValid(nBlocksToInvalidate) &&
    3662          84 :         nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
    3663             :     {
    3664         198 :         for (j = 0; j < nforks; j++)
    3665         126 :             FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
    3666         126 :                                        nForkBlock[j], firstDelBlock[j]);
    3667          72 :         return;
    3668             :     }
    3669             : 
    3670     4498004 :     for (i = 0; i < NBuffers; i++)
    3671             :     {
    3672     4497664 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    3673             :         uint32      buf_state;
    3674             : 
    3675             :         /*
    3676             :          * We can make this a tad faster by prechecking the buffer tag before
    3677             :          * we attempt to lock the buffer; this saves a lot of lock
    3678             :          * acquisitions in typical cases.  It should be safe because the
    3679             :          * caller must have AccessExclusiveLock on the relation, or some other
    3680             :          * reason to be certain that no one is loading new pages of the rel
    3681             :          * into the buffer pool.  (Otherwise we might well miss such pages
    3682             :          * entirely.)  Therefore, while the tag might be changing while we
    3683             :          * look at it, it can't be changing *to* a value we care about, only
    3684             :          * *away* from such a value.  So false negatives are impossible, and
    3685             :          * false positives are safe because we'll recheck after getting the
    3686             :          * buffer lock.
    3687             :          *
    3688             :          * We could check forkNum and blockNum as well as the rlocator, but
    3689             :          * the incremental win from doing so seems small.
    3690             :          */
    3691     4497664 :         if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
    3692     4493382 :             continue;
    3693             : 
    3694        4282 :         buf_state = LockBufHdr(bufHdr);
    3695             : 
    3696        9246 :         for (j = 0; j < nforks; j++)
    3697             :         {
    3698        6970 :             if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
    3699        6970 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
    3700        4168 :                 bufHdr->tag.blockNum >= firstDelBlock[j])
    3701             :             {
    3702        2006 :                 InvalidateBuffer(bufHdr);   /* releases spinlock */
    3703        2006 :                 break;
    3704             :             }
    3705             :         }
    3706        4282 :         if (j >= nforks)
    3707        2276 :             UnlockBufHdr(bufHdr, buf_state);
    3708             :     }
    3709             : }
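
The full-pool scan relies on a double-checked idiom: an unlocked tag precheck that may yield false positives but, given the caller's locking guarantees, no false negatives, followed by an authoritative recheck under the header spinlock. A generic sketch of the idiom with hypothetical types:

#include <stdbool.h>
#include <pthread.h>

typedef struct ExBuf
{
    pthread_mutex_t lock;       /* stands in for the buffer-header spinlock */
    int             owner;      /* stands in for the buffer tag */
} ExBuf;

/*
 * Invalidate buf if it belongs to victim.  The unlocked test may read a
 * stale owner, but never one changing *to* victim (the caller guarantees
 * that), so a miss is always correct and a hit is rechecked under the lock.
 */
static bool
ex_drop_if_owned(ExBuf *buf, int victim)
{
    bool        dropped = false;

    if (buf->owner != victim)   /* cheap unlocked precheck */
        return false;

    pthread_mutex_lock(&buf->lock);
    if (buf->owner == victim)   /* authoritative recheck */
    {
        buf->owner = -1;        /* "invalidate" */
        dropped = true;
    }
    pthread_mutex_unlock(&buf->lock);
    return dropped;
}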
    3710             : 
    3711             : /* ---------------------------------------------------------------------
    3712             :  *      DropRelationsAllBuffers
    3713             :  *
    3714             :  *      This function removes from the buffer pool all the pages of all
    3715             :  *      forks of the specified relations.  It's equivalent to calling
    3716             :  *      DropRelationBuffers once per fork per relation with firstDelBlock = 0.
    3717             :  * --------------------------------------------------------------------
    3718             :  */
    3719             : void
    3720       21336 : DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
    3721             : {
    3722             :     int         i;
    3723       21336 :     int         n = 0;
    3724             :     SMgrRelation *rels;
    3725             :     BlockNumber (*block)[MAX_FORKNUM + 1];
    3726       21336 :     uint64      nBlocksToInvalidate = 0;
    3727             :     RelFileLocator *locators;
    3728       21336 :     bool        cached = true;
    3729             :     bool        use_bsearch;
    3730             : 
    3731       21336 :     if (nlocators == 0)
    3732           0 :         return;
    3733             : 
    3734       21336 :     rels = palloc(sizeof(SMgrRelation) * nlocators);    /* non-local relations */
    3735             : 
    3736             :     /* If it's a local relation, it's localbuf.c's problem. */
    3737       95936 :     for (i = 0; i < nlocators; i++)
    3738             :     {
    3739       74600 :         if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
    3740             :         {
    3741        5612 :             if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
    3742        5612 :                 DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
    3743             :         }
    3744             :         else
    3745       68988 :             rels[n++] = smgr_reln[i];
    3746             :     }
    3747             : 
    3748             :     /*
    3749             :      * If there are no non-local relations, then we're done. Release the
    3750             :      * memory and return.
    3751             :      */
    3752       21336 :     if (n == 0)
    3753             :     {
    3754        1398 :         pfree(rels);
    3755        1398 :         return;
    3756             :     }
    3757             : 
    3758             :     /*
    3759             :      * This is used to remember the number of blocks for all the relation
    3760             :      * forks.
    3761             :      */
    3762             :     block = (BlockNumber (*)[MAX_FORKNUM + 1])
    3763       19938 :         palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
    3764             : 
    3765             :     /*
    3766             :      * We can avoid scanning the entire buffer pool if we know the exact size
    3767             :      * of each of the given relation forks. See DropRelationBuffers.
    3768             :      */
    3769       41992 :     for (i = 0; i < n && cached; i++)
    3770             :     {
    3771       36034 :         for (int j = 0; j <= MAX_FORKNUM; j++)
    3772             :         {
    3773             :             /* Get the number of blocks for a relation's fork. */
    3774       32558 :             block[i][j] = smgrnblocks_cached(rels[i], j);
    3775             : 
    3776             :             /* We need to only consider the relation forks that exists. */
    3777             :             /* We need only consider the relation forks that exist. */
    3778             :             {
    3779       28780 :                 if (!smgrexists(rels[i], j))
    3780       10202 :                     continue;
    3781       18578 :                 cached = false;
    3782       18578 :                 break;
    3783             :             }
    3784             : 
    3785             :             /* calculate the total number of blocks to be invalidated */
    3786        3778 :             nBlocksToInvalidate += block[i][j];
    3787             :         }
    3788             :     }
    3789             : 
    3790             :     /*
    3791             :      * We apply the optimization iff the total number of blocks to invalidate
    3792             :      * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
    3793             :      */
    3794       19938 :     if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
    3795             :     {
    3796        2224 :         for (i = 0; i < n; i++)
    3797             :         {
    3798        6130 :             for (int j = 0; j <= MAX_FORKNUM; j++)
    3799             :             {
    3800             :                 /* ignore relation forks that doesn't exist */
    3801             :                 /* ignore relation forks that don't exist */
    3802        3660 :                     continue;
    3803             : 
    3804             :                 /* drop all the buffers for a particular relation fork */
    3805        1244 :                 FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
    3806        1244 :                                            j, block[i][j], 0);
    3807             :             }
    3808             :         }
    3809             : 
    3810         998 :         pfree(block);
    3811         998 :         pfree(rels);
    3812         998 :         return;
    3813             :     }
    3814             : 
    3815       18940 :     pfree(block);
    3816       18940 :     locators = palloc(sizeof(RelFileLocator) * n);  /* non-local relations */
    3817       86702 :     for (i = 0; i < n; i++)
    3818       67762 :         locators[i] = rels[i]->smgr_rlocator.locator;
    3819             : 
    3820             :     /*
    3821             :      * For a low number of relations to drop, just use a simple walk-through
    3822             :      * to save the bsearch overhead.  The threshold to use is more a guess
    3823             :      * than an exactly determined value, as it depends on many factors (CPU
    3824             :      * and RAM speeds, amount of shared buffers, etc.).
    3825             :      */
    3826       18940 :     use_bsearch = n > RELS_BSEARCH_THRESHOLD;
    3827             : 
    3828             :     /* sort the list of rlocators if necessary */
    3829       18940 :     if (use_bsearch)
    3830         328 :         pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
    3831             : 
    3832   204960508 :     for (i = 0; i < NBuffers; i++)
    3833             :     {
    3834   204941568 :         RelFileLocator *rlocator = NULL;
    3835   204941568 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    3836             :         uint32      buf_state;
    3837             : 
    3838             :         /*
    3839             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    3840             :          * saves some cycles.
    3841             :          */
    3842             : 
    3843   204941568 :         if (!use_bsearch)
    3844             :         {
    3845             :             int         j;
    3846             : 
    3847   829000662 :             for (j = 0; j < n; j++)
    3848             :             {
    3849   627632608 :                 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
    3850             :                 {
    3851      150282 :                     rlocator = &locators[j];
    3852      150282 :                     break;
    3853             :                 }
    3854             :             }
    3855             :         }
    3856             :         else
    3857             :         {
    3858             :             RelFileLocator locator;
    3859             : 
    3860     3423232 :             locator = BufTagGetRelFileLocator(&bufHdr->tag);
    3861     3423232 :             rlocator = bsearch((const void *) &(locator),
    3862             :                                locators, n, sizeof(RelFileLocator),
    3863             :                                rlocator_comparator);
    3864             :         }
    3865             : 
    3866             :         /* buffer doesn't belong to any of the given relfilelocators; skip it */
    3867   204941568 :         if (rlocator == NULL)
    3868   204787744 :             continue;
    3869             : 
    3870      153824 :         buf_state = LockBufHdr(bufHdr);
    3871      153824 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
    3872      153824 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    3873             :         else
    3874           0 :             UnlockBufHdr(bufHdr, buf_state);
    3875             :     }
    3876             : 
    3877       18940 :     pfree(locators);
    3878       18940 :     pfree(rels);
    3879             : }
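
Choosing between the linear walk and bsearch is a constant-factor tradeoff: sorting plus binary search only pays off past some list size. Below is a sketch of the same decision over plain int keys; the threshold of 8 is an assumption for illustration, not the actual RELS_BSEARCH_THRESHOLD:

#include <stdbool.h>
#include <stdlib.h>

#define EX_BSEARCH_THRESHOLD 8  /* illustrative guess */

static int
ex_cmp_int(const void *a, const void *b)
{
    int     ia = *(const int *) a;
    int     ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* Membership probe; keys must already be sorted when use_bsearch is true,
 * mirroring the one-time pg_qsort before the NBuffers loop above. */
static bool
ex_member(const int *keys, int n, bool use_bsearch, int key)
{
    if (!use_bsearch)
    {
        for (int i = 0; i < n; i++)
            if (keys[i] == key)
                return true;
        return false;
    }
    return bsearch(&key, keys, n, sizeof(int), ex_cmp_int) != NULL;
}

A caller would compute use_bsearch = (n > EX_BSEARCH_THRESHOLD) once and, when true, sort the key array before probing each buffer.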
    3880             : 
    3881             : /* ---------------------------------------------------------------------
    3882             :  *      FindAndDropRelationBuffers
    3883             :  *
    3884             :  *      This function performs a lookup in the BufMapping table and removes
    3885             :  *      from the buffer pool all pages of the specified relation fork that
    3886             :  *      have block numbers >= firstDelBlock.  (In particular, with
    3887             :  *      firstDelBlock = 0, all pages are removed.)
    3888             :  * --------------------------------------------------------------------
    3889             :  */
    3890             : static void
    3891        1370 : FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
    3892             :                            BlockNumber nForkBlock,
    3893             :                            BlockNumber firstDelBlock)
    3894             : {
    3895             :     BlockNumber curBlock;
    3896             : 
    3897        3372 :     for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
    3898             :     {
    3899             :         uint32      bufHash;    /* hash value for tag */
    3900             :         BufferTag   bufTag;     /* identity of requested block */
    3901             :         LWLock     *bufPartitionLock;   /* buffer partition lock for it */
    3902             :         int         buf_id;
    3903             :         BufferDesc *bufHdr;
    3904             :         uint32      buf_state;
    3905             : 
    3906             :         /* create a tag so we can lookup the buffer */
    3907        2002 :         InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
    3908             : 
    3909             :         /* determine its hash code and partition lock ID */
    3910        2002 :         bufHash = BufTableHashCode(&bufTag);
    3911        2002 :         bufPartitionLock = BufMappingPartitionLock(bufHash);
    3912             : 
    3913             :         /* Check that it is in the buffer pool. If not, do nothing. */
    3914        2002 :         LWLockAcquire(bufPartitionLock, LW_SHARED);
    3915        2002 :         buf_id = BufTableLookup(&bufTag, bufHash);
    3916        2002 :         LWLockRelease(bufPartitionLock);
    3917             : 
    3918        2002 :         if (buf_id < 0)
    3919         152 :             continue;
    3920             : 
    3921        1850 :         bufHdr = GetBufferDescriptor(buf_id);
    3922             : 
    3923             :         /*
    3924             :          * We need to lock the buffer header and recheck if the buffer is
    3925             :          * still associated with the same block because the buffer could be
    3926             :          * evicted by some other backend loading blocks for a different
    3927             :          * relation after we release the lock on the BufMapping table.
    3928             :          */
    3929        1850 :         buf_state = LockBufHdr(bufHdr);
    3930             : 
    3931        3700 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
    3932        1850 :             BufTagGetForkNum(&bufHdr->tag) == forkNum &&
    3933        1850 :             bufHdr->tag.blockNum >= firstDelBlock)
    3934        1850 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    3935             :         else
    3936           0 :             UnlockBufHdr(bufHdr, buf_state);
    3937             :     }
    3938        1370 : }
    3939             : 
    3940             : /* ---------------------------------------------------------------------
    3941             :  *      DropDatabaseBuffers
    3942             :  *
    3943             :  *      This function removes all the buffers in the buffer cache for a
    3944             :  *      particular database.  Dirty pages are simply dropped, without
    3945             :  *      bothering to write them out first.  This is used when we destroy a
    3946             :  *      database, to avoid trying to flush data to disk when the directory
    3947             :  *      tree no longer exists.  The implementation is pretty similar to
    3948             :  *      DropRelationBuffers(), which destroys just one relation.
    3949             :  * --------------------------------------------------------------------
    3950             :  */
    3951             : void
    3952          70 : DropDatabaseBuffers(Oid dbid)
    3953             : {
    3954             :     int         i;
    3955             : 
    3956             :     /*
    3957             :      * We needn't consider local buffers, since by assumption the target
    3958             :      * database isn't our own.
    3959             :      */
    3960             : 
    3961      269126 :     for (i = 0; i < NBuffers; i++)
    3962             :     {
    3963      269056 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    3964             :         uint32      buf_state;
    3965             : 
    3966             :         /*
    3967             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    3968             :          * saves some cycles.
    3969             :          */
    3970      269056 :         if (bufHdr->tag.dbOid != dbid)
    3971      255906 :             continue;
    3972             : 
    3973       13150 :         buf_state = LockBufHdr(bufHdr);
    3974       13150 :         if (bufHdr->tag.dbOid == dbid)
    3975       13150 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    3976             :         else
    3977           0 :             UnlockBufHdr(bufHdr, buf_state);
    3978             :     }
    3979          70 : }
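                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): roughly how a caller
                     :  * destroying a database, such as dropdb(), is expected to use this
                     :  * function -- drop the cached pages first, then remove the directory
                     :  * tree.  remove_dbtablespaces() stands in for the file-level cleanup and
                     :  * is assumed for illustration.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : drop_database_storage(Oid db_id)
                     : {
                     :     /* Forget all cached pages of the doomed database... */
                     :     DropDatabaseBuffers(db_id);
                     : 
                     :     /* ...so nothing tries to flush into the vanishing directory tree */
                     :     remove_dbtablespaces(db_id);    /* assumed helper */
                     : }
                     : #endif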
    3980             : 
    3981             : /* -----------------------------------------------------------------
    3982             :  *      PrintBufferDescs
    3983             :  *
    3984             :  *      this function prints all the buffer descriptors, for debugging
    3985             :  *      use only.
    3986             :  * -----------------------------------------------------------------
    3987             :  */
    3988             : #ifdef NOT_USED
    3989             : void
    3990             : PrintBufferDescs(void)
    3991             : {
    3992             :     int         i;
    3993             : 
    3994             :     for (i = 0; i < NBuffers; ++i)
    3995             :     {
    3996             :         BufferDesc *buf = GetBufferDescriptor(i);
    3997             :         Buffer      b = BufferDescriptorGetBuffer(buf);
    3998             : 
    3999             :         /* theoretically we should lock the bufhdr here */
    4000             :         elog(LOG,
    4001             :              "[%02d] (freeNext=%d, rel=%s, "
    4002             :              "blockNum=%u, flags=0x%x, refcount=%u %d)",
    4003             :              i, buf->freeNext,
    4004             :              relpathbackend(BufTagGetRelFileLocator(&buf->tag),
    4005             :                             InvalidBackendId, BufTagGetForkNum(&buf->tag)),
    4006             :              buf->tag.blockNum, buf->flags,
    4007             :              buf->refcount, GetPrivateRefCount(b));
    4008             :     }
    4009             : }
    4010             : #endif
    4011             : 
    4012             : #ifdef NOT_USED
    4013             : void
    4014             : PrintPinnedBufs(void)
    4015             : {
    4016             :     int         i;
    4017             : 
    4018             :     for (i = 0; i < NBuffers; ++i)
    4019             :     {
    4020             :         BufferDesc *buf = GetBufferDescriptor(i);
    4021             :         Buffer      b = BufferDescriptorGetBuffer(buf);
    4022             : 
    4023             :         if (GetPrivateRefCount(b) > 0)
    4024             :         {
    4025             :             /* theoretically we should lock the bufhdr here */
    4026             :             elog(LOG,
    4027             :                  "[%02d] (freeNext=%d, rel=%s, "
    4028             :                  "blockNum=%u, flags=0x%x, refcount=%u %d)",
    4029             :                  i, buf->freeNext,
    4030             :                  relpathperm(BufTagGetRelFileLocator(&buf->tag),
    4031             :                              BufTagGetForkNum(&buf->tag)),
    4032             :                  buf->tag.blockNum, buf->flags,
    4033             :                  buf->refcount, GetPrivateRefCount(b));
    4034             :         }
    4035             :     }
    4036             : }
    4037             : #endif
    4038             : 
    4039             : /* ---------------------------------------------------------------------
    4040             :  *      FlushRelationBuffers
    4041             :  *
    4042             :  *      This function writes all dirty pages of a relation out to disk
    4043             :  *      (or more accurately, out to kernel disk buffers), ensuring that the
    4044             :  *      kernel has an up-to-date view of the relation.
    4045             :  *
    4046             :  *      Generally, the caller should be holding AccessExclusiveLock on the
    4047             :  *      target relation to ensure that no other backend is busy dirtying
    4048             :  *      more blocks of the relation; the effects can't be expected to last
    4049             :  *      after the lock is released.
    4050             :  *
    4051             :  *      XXX currently it sequentially searches the buffer pool, should be
    4052             :  *      changed to more clever ways of searching.  This routine is not
    4053             :  *      used in any performance-critical code paths, so it's not worth
    4054             :  *      adding additional overhead to normal paths to make it go faster.
    4055             :  * --------------------------------------------------------------------
    4056             :  */
    4057             : void
    4058         230 : FlushRelationBuffers(Relation rel)
    4059             : {
    4060             :     int         i;
    4061             :     BufferDesc *bufHdr;
    4062             : 
    4063         230 :     if (RelationUsesLocalBuffers(rel))
    4064             :     {
    4065        1818 :         for (i = 0; i < NLocBuffer; i++)
    4066             :         {
    4067             :             uint32      buf_state;
    4068             :             instr_time  io_start;
    4069             : 
    4070        1800 :             bufHdr = GetLocalBufferDescriptor(i);
    4071        1800 :             if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
    4072         600 :                 ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
    4073             :                  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    4074             :             {
    4075             :                 ErrorContextCallback errcallback;
    4076             :                 Page        localpage;
    4077             : 
    4078         594 :                 localpage = (char *) LocalBufHdrGetBlock(bufHdr);
    4079             : 
    4080             :                 /* Setup error traceback support for ereport() */
    4081         594 :                 errcallback.callback = local_buffer_write_error_callback;
    4082         594 :                 errcallback.arg = (void *) bufHdr;
    4083         594 :                 errcallback.previous = error_context_stack;
    4084         594 :                 error_context_stack = &errcallback;
    4085             : 
    4086         594 :                 PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
    4087             : 
    4088         594 :                 io_start = pgstat_prepare_io_time();
    4089             : 
    4090         594 :                 smgrwrite(RelationGetSmgr(rel),
    4091         594 :                           BufTagGetForkNum(&bufHdr->tag),
    4092             :                           bufHdr->tag.blockNum,
    4093             :                           localpage,
    4094             :                           false);
    4095             : 
    4096         594 :                 pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION,
    4097             :                                         IOCONTEXT_NORMAL, IOOP_WRITE,
    4098             :                                         io_start, 1);
    4099             : 
    4100         594 :                 buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
    4101         594 :                 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
    4102             : 
    4103         594 :                 pgBufferUsage.local_blks_written++;
    4104             : 
    4105             :                 /* Pop the error context stack */
    4106         594 :                 error_context_stack = errcallback.previous;
    4107             :             }
    4108             :         }
    4109             : 
    4110          18 :         return;
    4111             :     }
    4112             : 
    4113             :     /* Make sure we can handle the pin inside the loop */
    4114         212 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    4115             : 
    4116     2498260 :     for (i = 0; i < NBuffers; i++)
    4117             :     {
    4118             :         uint32      buf_state;
    4119             : 
    4120     2498048 :         bufHdr = GetBufferDescriptor(i);
    4121             : 
    4122             :         /*
    4123             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    4124             :          * saves some cycles.
    4125             :          */
    4126     2498048 :         if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
    4127     2497734 :             continue;
    4128             : 
    4129         314 :         ReservePrivateRefCountEntry();
    4130             : 
    4131         314 :         buf_state = LockBufHdr(bufHdr);
    4132         314 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
    4133         314 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    4134             :         {
    4135         266 :             PinBuffer_Locked(bufHdr);
    4136         266 :             LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
    4137         266 :             FlushBuffer(bufHdr, RelationGetSmgr(rel), IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4138         266 :             LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
    4139         266 :             UnpinBuffer(bufHdr);
    4140             :         }
    4141             :         else
    4142          48 :             UnlockBufHdr(bufHdr, buf_state);
    4143             :     }
    4144             : }
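                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): a typical caller holds
                     :  * AccessExclusiveLock on the relation, flushes its dirty pages, and then
                     :  * works on the underlying files directly.  copy_relation_files() is a
                     :  * hypothetical stand-in for the smgr-level work.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : flush_then_copy(Oid relid)
                     : {
                     :     Relation    rel = table_open(relid, AccessExclusiveLock);
                     : 
                     :     /* Push all dirty pages of the relation out to the kernel */
                     :     FlushRelationBuffers(rel);
                     : 
                     :     copy_relation_files(rel);   /* assumed helper */
                     : 
                     :     table_close(rel, NoLock);
                     : }
                     : #endif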
    4145             : 
    4146             : /* ---------------------------------------------------------------------
    4147             :  *      FlushRelationsAllBuffers
    4148             :  *
    4149             :  *      This function flushes out of the buffer pool all the pages of all
    4150             :  *      forks of the specified smgr relations.  It's equivalent to calling
    4151             :  *      FlushRelationBuffers once per relation.  The relations are assumed not
    4152             :  *      to use local buffers.
    4153             :  * --------------------------------------------------------------------
    4154             :  */
    4155             : void
    4156          18 : FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
    4157             : {
    4158             :     int         i;
    4159             :     SMgrSortArray *srels;
    4160             :     bool        use_bsearch;
    4161             : 
    4162          18 :     if (nrels == 0)
    4163           0 :         return;
    4164             : 
    4165             :     /* fill in the array for qsort */
    4166          18 :     srels = palloc(sizeof(SMgrSortArray) * nrels);
    4167             : 
    4168          36 :     for (i = 0; i < nrels; i++)
    4169             :     {
    4170             :         Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
    4171             : 
    4172          18 :         srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
    4173          18 :         srels[i].srel = smgrs[i];
    4174             :     }
    4175             : 
    4176             :     /*
    4177             :      * Avoid the bsearch overhead when only a few relations need syncing.  See
    4178             :      * DropRelationsAllBuffers for details.
    4179             :      */
    4180          18 :     use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
    4181             : 
    4182             :     /* sort the list of SMgrRelations if necessary */
    4183          18 :     if (use_bsearch)
    4184           0 :         pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
    4185             : 
    4186             :     /* Make sure we can handle the pin inside the loop */
    4187          18 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    4188             : 
    4189      294930 :     for (i = 0; i < NBuffers; i++)
    4190             :     {
    4191      294912 :         SMgrSortArray *srelent = NULL;
    4192      294912 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    4193             :         uint32      buf_state;
    4194             : 
    4195             :         /*
    4196             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    4197             :          * saves some cycles.
    4198             :          */
    4199             : 
    4200      294912 :         if (!use_bsearch)
    4201             :         {
    4202             :             int         j;
    4203             : 
    4204      582400 :             for (j = 0; j < nrels; j++)
    4205             :             {
    4206      294912 :                 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
    4207             :                 {
    4208        7424 :                     srelent = &srels[j];
    4209        7424 :                     break;
    4210             :                 }
    4211             :             }
    4212             :         }
    4213             :         else
    4214             :         {
    4215             :             RelFileLocator rlocator;
    4216             : 
    4217           0 :             rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
    4218           0 :             srelent = bsearch((const void *) &(rlocator),
    4219             :                               srels, nrels, sizeof(SMgrSortArray),
    4220             :                               rlocator_comparator);
    4221             :         }
    4222             : 
    4223             :         /* buffer doesn't belong to any of the given relfilelocators; skip it */
    4224      294912 :         if (srelent == NULL)
    4225      287488 :             continue;
    4226             : 
    4227        7424 :         ReservePrivateRefCountEntry();
    4228             : 
    4229        7424 :         buf_state = LockBufHdr(bufHdr);
    4230        7424 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
    4231        7424 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    4232             :         {
    4233        6734 :             PinBuffer_Locked(bufHdr);
    4234        6734 :             LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
    4235        6734 :             FlushBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4236        6734 :             LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
    4237        6734 :             UnpinBuffer(bufHdr);
    4238             :         }
    4239             :         else
    4240         690 :             UnlockBufHdr(bufHdr, buf_state);
    4241             :     }
    4242             : 
    4243          18 :     pfree(srels);
    4244             : }
    4245             : 
    4246             : /* ---------------------------------------------------------------------
    4247             :  *      RelationCopyStorageUsingBuffer
    4248             :  *
    4249             :  *      Copy fork's data using bufmgr.  Same as RelationCopyStorage but instead
    4250             :  *      of using smgrread and smgrextend this will copy using bufmgr APIs.
    4251             :  *
    4252             :  *      Refer to the comments atop CreateAndCopyRelationData() for details about
    4253             :  *      'permanent' parameter.
    4254             :  * --------------------------------------------------------------------
    4255             :  */
    4256             : static void
    4257      108958 : RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
    4258             :                                RelFileLocator dstlocator,
    4259             :                                ForkNumber forkNum, bool permanent)
    4260             : {
    4261             :     Buffer      srcBuf;
    4262             :     Buffer      dstBuf;
    4263             :     Page        srcPage;
    4264             :     Page        dstPage;
    4265             :     bool        use_wal;
    4266             :     BlockNumber nblocks;
    4267             :     BlockNumber blkno;
    4268             :     PGIOAlignedBlock buf;
    4269             :     BufferAccessStrategy bstrategy_src;
    4270             :     BufferAccessStrategy bstrategy_dst;
    4271             : 
    4272             :     /*
    4273             :      * In general, we want to write WAL whenever wal_level > 'minimal', but we
    4274             :      * can skip it when copying any fork of an unlogged relation other than
    4275             :      * the init fork.
    4276             :      */
    4277      108958 :     use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
    4278             : 
    4279             :     /* Get number of blocks in the source relation. */
    4280      108958 :     nblocks = smgrnblocks(smgropen(srclocator, InvalidBackendId),
    4281             :                           forkNum);
    4282             : 
    4283             :     /* Nothing to copy; just return. */
    4284      108958 :     if (nblocks == 0)
    4285       18764 :         return;
    4286             : 
    4287             :     /*
    4288             :      * Bulk-extend the destination relation to the same size as the source
    4289             :      * relation before starting the block-by-block copy.
    4290             :      */
    4291       90194 :     memset(buf.data, 0, BLCKSZ);
    4292       90194 :     smgrextend(smgropen(dstlocator, InvalidBackendId), forkNum, nblocks - 1,
    4293             :                buf.data, true);
    4294             : 
    4295             :     /* This is a bulk operation, so use buffer access strategies. */
    4296       90194 :     bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
    4297       90194 :     bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
    4298             : 
    4299             :     /* Iterate over each block of the source relation file. */
    4300      427876 :     for (blkno = 0; blkno < nblocks; blkno++)
    4301             :     {
    4302      337682 :         CHECK_FOR_INTERRUPTS();
    4303             : 
    4304             :         /* Read block from source relation. */
    4305      337682 :         srcBuf = ReadBufferWithoutRelcache(srclocator, forkNum, blkno,
    4306             :                                            RBM_NORMAL, bstrategy_src,
    4307             :                                            permanent);
    4308      337682 :         LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
    4309      337682 :         srcPage = BufferGetPage(srcBuf);
    4310             : 
    4311      337682 :         dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum, blkno,
    4312             :                                            RBM_ZERO_AND_LOCK, bstrategy_dst,
    4313             :                                            permanent);
    4314      337682 :         dstPage = BufferGetPage(dstBuf);
    4315             : 
    4316      337682 :         START_CRIT_SECTION();
    4317             : 
    4318             :         /* Copy page data from the source to the destination. */
    4319      337682 :         memcpy(dstPage, srcPage, BLCKSZ);
    4320      337682 :         MarkBufferDirty(dstBuf);
    4321             : 
    4322             :         /* WAL-log the copied page. */
    4323      337682 :         if (use_wal)
    4324      203758 :             log_newpage_buffer(dstBuf, true);
    4325             : 
    4326      337682 :         END_CRIT_SECTION();
    4327             : 
    4328      337682 :         UnlockReleaseBuffer(dstBuf);
    4329      337682 :         UnlockReleaseBuffer(srcBuf);
    4330             :     }
    4331             : 
    4332       90194 :     FreeAccessStrategy(bstrategy_src);
    4333       90194 :     FreeAccessStrategy(bstrategy_dst);
    4334             : }
    4335             : 
    4336             : /* ---------------------------------------------------------------------
    4337             :  *      CreateAndCopyRelationData
    4338             :  *
    4339             :  *      Create destination relation storage and copy all forks from the
    4340             :  *      source relation to the destination.
    4341             :  *
    4342             :  *      Pass permanent as true for permanent relations and false for
    4343             :  *      unlogged relations.  Currently this API is not supported for
    4344             :  *      temporary relations.
    4345             :  * --------------------------------------------------------------------
    4346             :  */
    4347             : void
    4348       81722 : CreateAndCopyRelationData(RelFileLocator src_rlocator,
    4349             :                           RelFileLocator dst_rlocator, bool permanent)
    4350             : {
    4351             :     RelFileLocatorBackend rlocator;
    4352             :     char        relpersistence;
    4353             : 
    4354             :     /* Set the relpersistence. */
    4355       81722 :     relpersistence = permanent ?
    4356             :         RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
    4357             : 
    4358             :     /*
    4359             :      * Create and copy all forks of the relation.  During CREATE DATABASE we
    4360             :      * have a separate cleanup mechanism that deletes the complete database
    4361             :      * directory.  Therefore, each individual relation doesn't need to be
    4362             :      * registered for cleanup.
    4363             :      */
    4364       81722 :     RelationCreateStorage(dst_rlocator, relpersistence, false);
    4365             : 
    4366             :     /* Copy the main fork. */
    4367       81722 :     RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
    4368             :                                    permanent);
    4369             : 
    4370             :     /* copy those extra forks that exist */
    4371      326888 :     for (ForkNumber forkNum = MAIN_FORKNUM + 1;
    4372      245166 :          forkNum <= MAX_FORKNUM; forkNum++)
    4373             :     {
    4374      245166 :         if (smgrexists(smgropen(src_rlocator, InvalidBackendId), forkNum))
    4375             :         {
    4376       27236 :             smgrcreate(smgropen(dst_rlocator, InvalidBackendId), forkNum, false);
    4377             : 
    4378             :             /*
    4379             :              * WAL log creation if the relation is persistent, or this is the
    4380             :              * init fork of an unlogged relation.
    4381             :              */
    4382       27236 :             if (permanent || forkNum == INIT_FORKNUM)
    4383       27236 :                 log_smgrcreate(&dst_rlocator, forkNum);
    4384             : 
    4385             :             /* Copy a fork's data, block by block. */
    4386       27236 :             RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
    4387             :                                            permanent);
    4388             :         }
    4389             :     }
    4390             : 
    4391             :     /* Close the source and destination smgrs, if they exist. */
    4392       81722 :     rlocator.backend = InvalidBackendId;
    4393             : 
    4394       81722 :     rlocator.locator = src_rlocator;
    4395       81722 :     smgrcloserellocator(rlocator);
    4396             : 
    4397       81722 :     rlocator.locator = dst_rlocator;
    4398       81722 :     smgrcloserellocator(rlocator);
    4399       81722 : }
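                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): roughly how the
                     :  * WAL-logged CREATE DATABASE strategy copies one relation -- the
                     :  * destination locator keeps the source's relation number but points at
                     :  * the target database and tablespace.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : copy_one_relation(RelFileLocator src, Oid dst_tsid, Oid dst_dboid,
                     :                   bool permanent)
                     : {
                     :     RelFileLocator dst = src;
                     : 
                     :     dst.spcOid = dst_tsid;
                     :     dst.dbOid = dst_dboid;
                     : 
                     :     CreateAndCopyRelationData(src, dst, permanent);
                     : }
                     : #endif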
    4400             : 
    4401             : /* ---------------------------------------------------------------------
    4402             :  *      FlushDatabaseBuffers
    4403             :  *
    4404             :  *      This function writes all dirty pages of a database out to disk
    4405             :  *      (or more accurately, out to kernel disk buffers), ensuring that the
    4406             :  *      kernel has an up-to-date view of the database.
    4407             :  *
    4408             :  *      Generally, the caller should be holding an appropriate lock to ensure
    4409             :  *      no other backend is active in the target database; otherwise more
    4410             :  *      pages could get dirtied.
    4411             :  *
    4412             :  *      Note we don't worry about flushing any pages of temporary relations.
    4413             :  *      It's assumed these wouldn't be interesting.
    4414             :  * --------------------------------------------------------------------
    4415             :  */
    4416             : void
    4417           6 : FlushDatabaseBuffers(Oid dbid)
    4418             : {
    4419             :     int         i;
    4420             :     BufferDesc *bufHdr;
    4421             : 
    4422             :     /* Make sure we can handle the pin inside the loop */
    4423           6 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    4424             : 
    4425         774 :     for (i = 0; i < NBuffers; i++)
    4426             :     {
    4427             :         uint32      buf_state;
    4428             : 
    4429         768 :         bufHdr = GetBufferDescriptor(i);
    4430             : 
    4431             :         /*
    4432             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    4433             :          * saves some cycles.
    4434             :          */
    4435         768 :         if (bufHdr->tag.dbOid != dbid)
    4436         528 :             continue;
    4437             : 
    4438         240 :         ReservePrivateRefCountEntry();
    4439             : 
    4440         240 :         buf_state = LockBufHdr(bufHdr);
    4441         240 :         if (bufHdr->tag.dbOid == dbid &&
    4442         240 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    4443             :         {
    4444          18 :             PinBuffer_Locked(bufHdr);
    4445          18 :             LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
    4446          18 :             FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4447          18 :             LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
    4448          18 :             UnpinBuffer(bufHdr);
    4449             :         }
    4450             :         else
    4451         222 :             UnlockBufHdr(bufHdr, buf_state);
    4452             :     }
    4453           6 : }
    4454             : 
    4455             : /*
    4456             :  * Flush a buffer that is already pinned and content-locked (in either share
    4457             :  * or exclusive mode) out to the OS.
    4458             :  */
    4459             : void
    4460          50 : FlushOneBuffer(Buffer buffer)
    4461             : {
    4462             :     BufferDesc *bufHdr;
    4463             : 
    4464             :     /* currently not needed, but no fundamental reason not to support */
    4465             :     Assert(!BufferIsLocal(buffer));
    4466             : 
    4467             :     Assert(BufferIsPinned(buffer));
    4468             : 
    4469          50 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4470             : 
    4471             :     Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
    4472             : 
    4473          50 :     FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4474          50 : }
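                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): FlushOneBuffer()
                     :  * asserts that the caller already holds both a pin and the content lock.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : flush_pinned_block(Relation rel, BlockNumber blkno)
                     : {
                     :     Buffer      buf = ReadBuffer(rel, blkno);
                     : 
                     :     LockBuffer(buf, BUFFER_LOCK_SHARE);
                     :     FlushOneBuffer(buf);        /* push the page out to the kernel */
                     :     UnlockReleaseBuffer(buf);
                     : }
                     : #endif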
    4475             : 
    4476             : /*
    4477             :  * ReleaseBuffer -- release the pin on a buffer
    4478             :  */
    4479             : void
    4480   131500978 : ReleaseBuffer(Buffer buffer)
    4481             : {
    4482   131500978 :     if (!BufferIsValid(buffer))
    4483           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    4484             : 
    4485   131500978 :     if (BufferIsLocal(buffer))
    4486     2797670 :         UnpinLocalBuffer(buffer);
    4487             :     else
    4488   128703308 :         UnpinBuffer(GetBufferDescriptor(buffer - 1));
    4489   131500978 : }
    4490             : 
    4491             : /*
    4492             :  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
    4493             :  *
    4494             :  * This is just a shorthand for a common combination.
    4495             :  */
    4496             : void
    4497    39078596 : UnlockReleaseBuffer(Buffer buffer)
    4498             : {
    4499    39078596 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4500    39078596 :     ReleaseBuffer(buffer);
    4501    39078596 : }
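                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): the canonical
                     :  * read-only access pattern that ends with UnlockReleaseBuffer().
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : examine_block(Relation rel, BlockNumber blkno)
                     : {
                     :     Buffer      buf = ReadBuffer(rel, blkno);
                     :     Page        page;
                     : 
                     :     LockBuffer(buf, BUFFER_LOCK_SHARE);
                     :     page = BufferGetPage(buf);
                     : 
                     :     /* ... inspect the page while the share lock is held ... */
                     :     (void) page;
                     : 
                     :     UnlockReleaseBuffer(buf);   /* drop the content lock, then the pin */
                     : }
                     : #endif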
    4502             : 
    4503             : /*
    4504             :  * IncrBufferRefCount
    4505             :  *      Increment the pin count on a buffer that we have *already* pinned
    4506             :  *      at least once.
    4507             :  *
    4508             :  *      This function cannot be used on a buffer we do not have pinned,
    4509             :  *      because it doesn't change the shared buffer state.
    4510             :  */
    4511             : void
    4512    22121016 : IncrBufferRefCount(Buffer buffer)
    4513             : {
    4514             :     Assert(BufferIsPinned(buffer));
    4515    22121016 :     ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
    4516    22121016 :     if (BufferIsLocal(buffer))
    4517      690880 :         LocalRefCount[-buffer - 1]++;
    4518             :     else
    4519             :     {
    4520             :         PrivateRefCountEntry *ref;
    4521             : 
    4522    21430136 :         ref = GetPrivateRefCountEntry(buffer, true);
    4523             :         Assert(ref != NULL);
    4524    21430136 :         ref->refcount++;
    4525             :     }
    4526    22121016 :     ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
    4527    22121016 : }
    4528             : 
    4529             : /*
    4530             :  * MarkBufferDirtyHint
    4531             :  *
    4532             :  *  Mark a buffer dirty for non-critical changes.
    4533             :  *
    4534             :  * This is essentially the same as MarkBufferDirty, except:
    4535             :  *
    4536             :  * 1. The caller does not write WAL; so if checksums are enabled, we may need
    4537             :  *    to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
    4538             :  * 2. The caller might have only share-lock instead of exclusive-lock on the
    4539             :  *    buffer's content lock.
    4540             :  * 3. This function does not guarantee that the buffer is always marked dirty
    4541             :  *    (due to a race condition), so it cannot be used for important changes.
    4542             :  */
    4543             : void
    4544    31963140 : MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
    4545             : {
    4546             :     BufferDesc *bufHdr;
    4547    31963140 :     Page        page = BufferGetPage(buffer);
    4548             : 
    4549    31963140 :     if (!BufferIsValid(buffer))
    4550           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    4551             : 
    4552    31963140 :     if (BufferIsLocal(buffer))
    4553             :     {
    4554     1156930 :         MarkLocalBufferDirty(buffer);
    4555     1156930 :         return;
    4556             :     }
    4557             : 
    4558    30806210 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4559             : 
    4560             :     Assert(GetPrivateRefCount(buffer) > 0);
    4561             :     /* here, either share or exclusive lock is OK */
    4562             :     Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
    4563             : 
    4564             :     /*
    4565             :      * This routine might get called many times on the same page, if we are
    4566             :      * making the first scan after commit of an xact that added/deleted many
    4567             :      * tuples. So, be as quick as we can if the buffer is already dirty.  We
    4568             :      * do this by not acquiring the spinlock if it looks like the status bits
    4569             :      * are already set.  Since we make this test unlocked, there's a chance we
    4570             :      * might fail to notice that the flags have just been cleared, and fail
    4571             :      * to reset them, due to memory-ordering issues.  But since this function
    4572             :      * is only intended to be used in cases where failing to write out the
    4573             :      * data would be harmless anyway, it doesn't really matter.
    4574             :      */
    4575    30806210 :     if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
    4576             :         (BM_DIRTY | BM_JUST_DIRTIED))
    4577             :     {
    4578     3420200 :         XLogRecPtr  lsn = InvalidXLogRecPtr;
    4579     3420200 :         bool        dirtied = false;
    4580     3420200 :         bool        delayChkptFlags = false;
    4581             :         uint32      buf_state;
    4582             : 
    4583             :         /*
    4584             :          * If we need to protect hint bit updates from torn writes, WAL-log a
    4585             :          * full page image of the page. This full page image is only necessary
    4586             :          * if the hint bit update is the first change to the page since the
    4587             :          * last checkpoint.
    4588             :          *
    4589             :          * We don't check full_page_writes here because that logic is included
    4590             :          * when we call XLogInsert() since the value changes dynamically.
    4591             :          */
    4592     6741590 :         if (XLogHintBitIsNeeded() &&
    4593     3321390 :             (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
    4594             :         {
    4595             :             /*
    4596             :              * If we must not write WAL, due to a relfilelocator-specific
    4597             :              * condition or being in recovery, don't dirty the page.  We can
    4598             :              * still set the hint, we just don't dirty the page as a result,
    4599             :              * so the hint is lost when we evict the page or shut down.
    4600             :              *
    4601             :              * See src/backend/storage/page/README for longer discussion.
    4602             :              */
    4603     3415764 :             if (RecoveryInProgress() ||
    4604       94380 :                 RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
    4605     3227004 :                 return;
    4606             : 
    4607             :             /*
    4608             :              * If the block is already dirty because we either made a change
    4609             :              * or set a hint already, then we don't need to write a full page
    4610             :              * image.  Note that aggressive cleaning of blocks dirtied by hint
    4611             :              * bit setting would increase the call rate. Bulk setting of hint
    4612             :              * bits would reduce the call rate...
    4613             :              *
    4614             :              * We must issue the WAL record before we mark the buffer dirty.
    4615             :              * Otherwise we might write the page before we write the WAL. That
    4616             :              * causes a race condition, since a checkpoint might occur between
    4617             :              * writing the WAL record and marking the buffer dirty. We solve
    4618             :              * that with a kluge, but one that is already in use during
    4619             :              * transaction commit to prevent race conditions. Basically, we
    4620             :              * simply prevent the checkpoint WAL record from being written
    4621             :              * until we have marked the buffer dirty. We don't start the
    4622             :              * checkpoint flush until we have marked dirty, so our checkpoint
    4623             :              * must flush the change to disk successfully or the checkpoint
    4624             :              * never gets written, in which case crash recovery will fix it.
    4625             :              *
    4626             :              * It's possible we may enter here without an xid, so it is
    4627             :              * essential that CreateCheckPoint waits for virtual transactions
    4628             :              * rather than full transactionids.
    4629             :              */
    4630             :             Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
    4631       94380 :             MyProc->delayChkptFlags |= DELAY_CHKPT_START;
    4632       94380 :             delayChkptFlags = true;
    4633       94380 :             lsn = XLogSaveBufferForHint(buffer, buffer_std);
    4634             :         }
    4635             : 
    4636      193196 :         buf_state = LockBufHdr(bufHdr);
    4637             : 
    4638             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    4639             : 
    4640      193196 :         if (!(buf_state & BM_DIRTY))
    4641             :         {
    4642      193166 :             dirtied = true;     /* Means "will be dirtied by this action" */
    4643             : 
    4644             :             /*
    4645             :              * Set the page LSN if we wrote a backup block. We aren't supposed
    4646             :              * to set this when only holding a share lock but as long as we
    4647             :              * serialise it somehow we're OK. We choose to set LSN while
    4648             :              * holding the buffer header lock, which causes any reader of an
    4649             :              * LSN who holds only a share lock to also obtain a buffer header
    4650             :              * lock before using PageGetLSN(), which is enforced in
    4651             :              * BufferGetLSNAtomic().
    4652             :              *
    4653             :              * If checksums are enabled, you might think we should reset the
    4654             :              * checksum here. That will happen when the page is written
    4655             :              * sometime later in this checkpoint cycle.
    4656             :              */
    4657      193166 :             if (!XLogRecPtrIsInvalid(lsn))
    4658       10142 :                 PageSetLSN(page, lsn);
    4659             :         }
    4660             : 
    4661      193196 :         buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
    4662      193196 :         UnlockBufHdr(bufHdr, buf_state);
    4663             : 
    4664      193196 :         if (delayChkptFlags)
    4665       94380 :             MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
    4666             : 
    4667      193196 :         if (dirtied)
    4668             :         {
    4669      193166 :             VacuumPageDirty++;
    4670      193166 :             pgBufferUsage.shared_blks_dirtied++;
    4671      193166 :             if (VacuumCostActive)
    4672         990 :                 VacuumCostBalance += VacuumCostPageDirty;
    4673             :         }
    4674             :     }
    4675             : }
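                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): the hint-bit pattern
                     :  * this function supports -- a share-locked reader updates non-critical
                     :  * page state and dirties the buffer on a best-effort basis.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : set_page_hint(Buffer buffer)
                     : {
                     :     Page        page = BufferGetPage(buffer);
                     : 
                     :     /* caller holds at least a share lock on the buffer's content */
                     : 
                     :     /* ... set a hint bit somewhere on the page ... */
                     :     (void) page;
                     : 
                     :     /* 'true': standard page layout, so any FPI can omit the hole */
                     :     MarkBufferDirtyHint(buffer, true);
                     : }
                     : #endif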
    4676             : 
    4677             : /*
    4678             :  * Release buffer content locks for shared buffers.
    4679             :  *
    4680             :  * Used to clean up after errors.
    4681             :  *
    4682             :  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
    4683             :  * of releasing buffer content locks per se; the only thing we need to deal
    4684             :  * with here is clearing any PIN_COUNT request that was in progress.
    4685             :  */
    4686             : void
    4687       76636 : UnlockBuffers(void)
    4688             : {
    4689       76636 :     BufferDesc *buf = PinCountWaitBuf;
    4690             : 
    4691       76636 :     if (buf)
    4692             :     {
    4693             :         uint32      buf_state;
    4694             : 
    4695           0 :         buf_state = LockBufHdr(buf);
    4696             : 
    4697             :         /*
    4698             :          * Don't complain if flag bit not set; it could have been reset but we
    4699             :          * got a cancel/die interrupt before getting the signal.
    4700             :          */
    4701           0 :         if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
    4702           0 :             buf->wait_backend_pgprocno == MyProc->pgprocno)
    4703           0 :             buf_state &= ~BM_PIN_COUNT_WAITER;
    4704             : 
    4705           0 :         UnlockBufHdr(buf, buf_state);
    4706             : 
    4707           0 :         PinCountWaitBuf = NULL;
    4708             :     }
    4709       76636 : }
    4710             : 
    4711             : /*
    4712             :  * Acquire or release the content_lock for the buffer.
    4713             :  */
    4714             : void
    4715   379571696 : LockBuffer(Buffer buffer, int mode)
    4716             : {
    4717             :     BufferDesc *buf;
    4718             : 
    4719             :     Assert(BufferIsPinned(buffer));
    4720   379571696 :     if (BufferIsLocal(buffer))
    4721    18824078 :         return;                 /* local buffers need no lock */
    4722             : 
    4723   360747618 :     buf = GetBufferDescriptor(buffer - 1);
    4724             : 
    4725   360747618 :     if (mode == BUFFER_LOCK_UNLOCK)
    4726   182033538 :         LWLockRelease(BufferDescriptorGetContentLock(buf));
    4727   178714080 :     else if (mode == BUFFER_LOCK_SHARE)
    4728   124536588 :         LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
    4729    54177492 :     else if (mode == BUFFER_LOCK_EXCLUSIVE)
    4730    54177492 :         LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
    4731             :     else
    4732           0 :         elog(ERROR, "unrecognized buffer lock mode: %d", mode);
    4733             : }
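                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): the usual WAL-logged
                     :  * modification protocol built on LockBuffer() -- take the exclusive
                     :  * content lock, mutate and WAL-log inside a critical section, unlock.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : modify_block(Buffer buf)
                     : {
                     :     LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
                     : 
                     :     START_CRIT_SECTION();
                     : 
                     :     /* ... apply the change to BufferGetPage(buf) ... */
                     :     MarkBufferDirty(buf);
                     :     /* ... XLogInsert() the change and PageSetLSN() the page ... */
                     : 
                     :     END_CRIT_SECTION();
                     : 
                     :     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                     : }
                     : #endif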
    4734             : 
    4735             : /*
    4736             :  * Acquire the content_lock for the buffer, but only if we don't have to wait.
    4737             :  *
    4738             :  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
    4739             :  */
    4740             : bool
    4741     2420750 : ConditionalLockBuffer(Buffer buffer)
    4742             : {
    4743             :     BufferDesc *buf;
    4744             : 
    4745             :     Assert(BufferIsPinned(buffer));
    4746     2420750 :     if (BufferIsLocal(buffer))
    4747      129378 :         return true;            /* act as though we got it */
    4748             : 
    4749     2291372 :     buf = GetBufferDescriptor(buffer - 1);
    4750             : 
    4751     2291372 :     return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
    4752             :                                     LW_EXCLUSIVE);
    4753             : }
    4754             : 
    4755             : /*
    4756             :  * Verify that this backend is pinning the buffer exactly once.
    4757             :  *
    4758             :  * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
    4759             :  * holds a pin on the buffer.  We do not care whether some other backend does.
    4760             :  */
    4761             : void
    4762     3720742 : CheckBufferIsPinnedOnce(Buffer buffer)
    4763             : {
    4764     3720742 :     if (BufferIsLocal(buffer))
    4765             :     {
    4766          32 :         if (LocalRefCount[-buffer - 1] != 1)
    4767           0 :             elog(ERROR, "incorrect local pin count: %d",
    4768             :                  LocalRefCount[-buffer - 1]);
    4769             :     }
    4770             :     else
    4771             :     {
    4772     3720710 :         if (GetPrivateRefCount(buffer) != 1)
    4773           0 :             elog(ERROR, "incorrect local pin count: %d",
    4774             :                  GetPrivateRefCount(buffer));
    4775             :     }
    4776     3720742 : }
    4777             : 
    4778             : /*
    4779             :  * LockBufferForCleanup - lock a buffer in preparation for deleting items
    4780             :  *
    4781             :  * Items may be deleted from a disk page only when the caller (a) holds an
    4782             :  * exclusive lock on the buffer and (b) has observed that no other backend
    4783             :  * holds a pin on the buffer.  If there is a pin, then the other backend
    4784             :  * might have a pointer into the buffer (for example, a heapscan reference
    4785             :  * to an item --- see README for more details).  It's OK if a pin is added
    4786             :  * after the cleanup starts, however; the newly-arrived backend will be
    4787             :  * unable to look at the page until we release the exclusive lock.
    4788             :  *
    4789             :  * To implement this protocol, a would-be deleter must pin the buffer and
    4790             :  * then call LockBufferForCleanup().  LockBufferForCleanup() is similar to
    4791             :  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
    4792             :  * it has successfully observed pin count = 1.
    4793             :  */
    4794             : void
    4795       74670 : LockBufferForCleanup(Buffer buffer)
    4796             : {
    4797             :     BufferDesc *bufHdr;
    4798       74670 :     TimestampTz waitStart = 0;
    4799       74670 :     bool        waiting = false;
    4800       74670 :     bool        logged_recovery_conflict = false;
    4801             : 
    4802             :     Assert(BufferIsPinned(buffer));
    4803             :     Assert(PinCountWaitBuf == NULL);
    4804             : 
    4805       74670 :     CheckBufferIsPinnedOnce(buffer);
    4806             : 
    4807             :     /* Nobody else to wait for */
    4808       74670 :     if (BufferIsLocal(buffer))
    4809          32 :         return;
    4810             : 
    4811       74638 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4812             : 
    4813             :     for (;;)
    4814          22 :     {
    4815             :         uint32      buf_state;
    4816             : 
    4817             :         /* Try to acquire lock */
    4818       74660 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    4819       74660 :         buf_state = LockBufHdr(bufHdr);
    4820             : 
    4821             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    4822       74660 :         if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    4823             :         {
    4824             :             /* Successfully acquired exclusive lock with pincount 1 */
    4825       74638 :             UnlockBufHdr(bufHdr, buf_state);
    4826             : 
    4827             :             /*
    4828             :              * Emit the log message if recovery conflict on buffer pin was
    4829             :              * resolved but the startup process waited longer than
    4830             :              * deadlock_timeout for it.
    4831             :              */
    4832       74638 :             if (logged_recovery_conflict)
    4833           4 :                 LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
    4834             :                                     waitStart, GetCurrentTimestamp(),
    4835             :                                     NULL, false);
    4836             : 
    4837       74638 :             if (waiting)
    4838             :             {
    4839             :                 /* reset ps display to remove the suffix if we added one */
    4840           4 :                 set_ps_display_remove_suffix();
    4841           4 :                 waiting = false;
    4842             :             }
    4843       74638 :             return;
    4844             :         }
    4845             :         /* Failed, so mark myself as waiting for pincount 1 */
    4846          22 :         if (buf_state & BM_PIN_COUNT_WAITER)
    4847             :         {
    4848           0 :             UnlockBufHdr(bufHdr, buf_state);
    4849           0 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4850           0 :             elog(ERROR, "multiple backends attempting to wait for pincount 1");
    4851             :         }
    4852          22 :         bufHdr->wait_backend_pgprocno = MyProc->pgprocno;
    4853          22 :         PinCountWaitBuf = bufHdr;
    4854          22 :         buf_state |= BM_PIN_COUNT_WAITER;
    4855          22 :         UnlockBufHdr(bufHdr, buf_state);
    4856          22 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4857             : 
    4858             :         /* Wait to be signaled by UnpinBuffer() */
    4859          22 :         if (InHotStandby)
    4860             :         {
    4861          22 :             if (!waiting)
    4862             :             {
    4863             :                 /* adjust the process title to indicate that it's waiting */
    4864           4 :                 set_ps_display_suffix("waiting");
    4865           4 :                 waiting = true;
    4866             :             }
    4867             : 
    4868             :             /*
    4869             :              * Emit the log message if the startup process is waiting longer
    4870             :              * than deadlock_timeout for recovery conflict on buffer pin.
    4871             :              *
    4872             :              * Skip this the first time through, because the startup process
    4873             :              * has not started waiting yet in that case.  (The wait start
    4874             :              * timestamp is set after this logic.)
    4875             :              */
    4876          22 :             if (waitStart != 0 && !logged_recovery_conflict)
    4877             :             {
    4878           8 :                 TimestampTz now = GetCurrentTimestamp();
    4879             : 
    4880           8 :                 if (TimestampDifferenceExceeds(waitStart, now,
    4881             :                                                DeadlockTimeout))
    4882             :                 {
    4883           4 :                     LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
    4884             :                                         waitStart, now, NULL, true);
    4885           4 :                     logged_recovery_conflict = true;
    4886             :                 }
    4887             :             }
    4888             : 
    4889             :             /*
    4890             :              * Set the wait start timestamp if logging is enabled and first
    4891             :              * time through.
    4892             :              */
    4893          22 :             if (log_recovery_conflict_waits && waitStart == 0)
    4894           4 :                 waitStart = GetCurrentTimestamp();
    4895             : 
    4896             :             /* Publish the bufid that Startup process waits on */
    4897          22 :             SetStartupBufferPinWaitBufId(buffer - 1);
    4898             :             /* Set alarm and then wait to be signaled by UnpinBuffer() */
    4899          22 :             ResolveRecoveryConflictWithBufferPin();
    4900             :             /* Reset the published bufid */
    4901          22 :             SetStartupBufferPinWaitBufId(-1);
    4902             :         }
    4903             :         else
    4904           0 :             ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
    4905             : 
    4906             :         /*
    4907             :          * Remove flag marking us as waiter. Normally this will not be set
    4908             :          * anymore, but ProcWaitForSignal() can return for other signals as
    4909             :          * well.  We take care to only reset the flag if we're the waiter, as
    4910             :          * theoretically another backend could have started waiting. That's
    4911             :          * impossible with the current usages due to table-level locking, but
    4912             :          * better to be safe.
    4913             :          */
    4914          22 :         buf_state = LockBufHdr(bufHdr);
    4915          22 :         if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
    4916          18 :             bufHdr->wait_backend_pgprocno == MyProc->pgprocno)
    4917          18 :             buf_state &= ~BM_PIN_COUNT_WAITER;
    4918          22 :         UnlockBufHdr(bufHdr, buf_state);
    4919             : 
    4920          22 :         PinCountWaitBuf = NULL;
    4921             :         /* Loop back and try again */
    4922             :     }
    4923             : }
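                     : 
                     : /*
                     :  * Illustrative sketch only (not part of bufmgr.c): the would-be deleter
                     :  * protocol described above -- pin first, then wait until we hold the
                     :  * exclusive lock with pin count 1.
                     :  */
                     : #ifdef NOT_USED
                     : static void
                     : cleanup_block(Relation rel, BlockNumber blkno)
                     : {
                     :     Buffer      buf = ReadBuffer(rel, blkno);
                     : 
                     :     LockBufferForCleanup(buf);  /* may block until we are the sole pinner */
                     : 
                     :     /* ... safe to delete items from the page here ... */
                     : 
                     :     UnlockReleaseBuffer(buf);
                     : }
                     : #endif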
    4924             : 
    4925             : /*
    4926             :  * Check called from the RecoveryConflictInterrupt handler when the Startup
    4927             :  * process requests cancellation of all pin holders that are blocking it.
    4928             :  */
    4929             : bool
    4930           8 : HoldingBufferPinThatDelaysRecovery(void)
    4931             : {
    4932           8 :     int         bufid = GetStartupBufferPinWaitBufId();
    4933             : 
    4934             :     /*
    4935             :      * If we get woken slowly then it's possible that the Startup process was
    4936             :      * already woken by other backends before we got here. Also possible that
    4937             :  * already woken by other backends before we got here. It's also possible
    4938             :  * that we get here via multiple interrupts or interrupts at inappropriate
    4939             :      */
    4940           8 :     if (bufid < 0)
    4941           4 :         return false;
    4942             : 
    4943           4 :     if (GetPrivateRefCount(bufid + 1) > 0)
    4944           4 :         return true;
    4945             : 
    4946           0 :     return false;
    4947             : }
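
A minimal sketch of the caller side, modeled loosely on the recovery-conflict
interrupt handling in tcop/postgres.c (not a verbatim copy of that code):

    /* In the backend that holds the offending pin: */
    if (HoldingBufferPinThatDelaysRecovery())
        ereport(ERROR,
                (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
                 errmsg("canceling statement due to conflict with recovery")));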
    4948             : 
    4949             : /*
    4950             :  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
    4951             :  *
    4952             :  * We won't loop, but just check once to see if the pin count is OK.  If
    4953             :  * not, return false with no lock held.
    4954             :  */
    4955             : bool
    4956      529940 : ConditionalLockBufferForCleanup(Buffer buffer)
    4957             : {
    4958             :     BufferDesc *bufHdr;
    4959             :     uint32      buf_state,
    4960             :                 refcount;
    4961             : 
    4962             :     Assert(BufferIsValid(buffer));
    4963             : 
    4964      529940 :     if (BufferIsLocal(buffer))
    4965             :     {
    4966        1564 :         refcount = LocalRefCount[-buffer - 1];
    4967             :         /* There should be exactly one pin */
    4968             :         Assert(refcount > 0);
    4969        1564 :         if (refcount != 1)
    4970          42 :             return false;
    4971             :         /* Nobody else to wait for */
    4972        1522 :         return true;
    4973             :     }
    4974             : 
    4975             :     /* There should be exactly one local pin */
    4976      528376 :     refcount = GetPrivateRefCount(buffer);
    4977             :     Assert(refcount);
    4978      528376 :     if (refcount != 1)
    4979         394 :         return false;
    4980             : 
    4981             :     /* Try to acquire lock */
    4982      527982 :     if (!ConditionalLockBuffer(buffer))
    4983          14 :         return false;
    4984             : 
    4985      527968 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4986      527968 :     buf_state = LockBufHdr(bufHdr);
    4987      527968 :     refcount = BUF_STATE_GET_REFCOUNT(buf_state);
    4988             : 
    4989             :     Assert(refcount > 0);
    4990      527968 :     if (refcount == 1)
    4991             :     {
    4992             :         /* Successfully acquired exclusive lock with pincount 1 */
    4993      527956 :         UnlockBufHdr(bufHdr, buf_state);
    4994      527956 :         return true;
    4995             :     }
    4996             : 
    4997             :     /* Failed, so release the lock */
    4998          12 :     UnlockBufHdr(bufHdr, buf_state);
    4999          12 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5000          12 :     return false;
    5001             : }
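
A usage sketch of the opportunistic-cleanup pattern this function supports,
in the style of lazy vacuum; prune_page() is a hypothetical helper, not a
bufmgr API:

    static void
    cleanup_page_if_possible(Buffer buf)
    {
        if (ConditionalLockBufferForCleanup(buf))
        {
            /* exclusive lock held and we are the only pinner */
            prune_page(buf);        /* hypothetical cleanup work */
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        }
        /* otherwise another backend holds a pin; skip the page for now */
    }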
    5002             : 
    5003             : /*
    5004             :  * IsBufferCleanupOK - as above, but we already have the lock
    5005             :  *
    5006             :  * Check whether it's OK to perform cleanup on a buffer we've already
    5007             :  * locked.  If we observe that the pin count is 1, our exclusive lock
    5008             :  * happens to be a cleanup lock, and we can proceed with anything that
    5009             :  * would have been allowable had we sought a cleanup lock originally.
    5010             :  */
    5011             : bool
    5012        3314 : IsBufferCleanupOK(Buffer buffer)
    5013             : {
    5014             :     BufferDesc *bufHdr;
    5015             :     uint32      buf_state;
    5016             : 
    5017             :     Assert(BufferIsValid(buffer));
    5018             : 
    5019        3314 :     if (BufferIsLocal(buffer))
    5020             :     {
    5021             :         /* There should be exactly one pin */
    5022           0 :         if (LocalRefCount[-buffer - 1] != 1)
    5023           0 :             return false;
    5024             :         /* Nobody else to wait for */
    5025           0 :         return true;
    5026             :     }
    5027             : 
    5028             :     /* There should be exactly one local pin */
    5029        3314 :     if (GetPrivateRefCount(buffer) != 1)
    5030           0 :         return false;
    5031             : 
    5032        3314 :     bufHdr = GetBufferDescriptor(buffer - 1);
    5033             : 
    5034             :     /* caller must hold exclusive lock on buffer */
    5035             :     Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
    5036             :                                 LW_EXCLUSIVE));
    5037             : 
    5038        3314 :     buf_state = LockBufHdr(bufHdr);
    5039             : 
    5040             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    5041        3314 :     if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    5042             :     {
    5043             :         /* pincount is OK. */
    5044        3314 :         UnlockBufHdr(bufHdr, buf_state);
    5045        3314 :         return true;
    5046             :     }
    5047             : 
    5048           0 :     UnlockBufHdr(bufHdr, buf_state);
    5049           0 :     return false;
    5050             : }
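
A sketch of the intended call pattern, assuming the caller already holds a
pin and takes the content lock for other reasons:

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    if (IsBufferCleanupOK(buf))
    {
        /* pin count is 1, so this lock is effectively a cleanup lock */
    }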
    5051             : 
    5052             : 
    5053             : /*
    5054             :  *  Functions for buffer I/O handling
    5055             :  *
    5056             :  *  Note: We assume that nested buffer I/O never occurs.
    5057             :  *  i.e., at most one BM_IO_IN_PROGRESS bit is set per process.
    5058             :  *
    5059             :  *  Also note that these are used only for shared buffers, not local ones.
    5060             :  */
    5061             : 
    5062             : /*
    5063             :  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
    5064             :  */
    5065             : static void
    5066         286 : WaitIO(BufferDesc *buf)
    5067             : {
    5068         286 :     ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
    5069             : 
    5070         286 :     ConditionVariablePrepareToSleep(cv);
    5071             :     for (;;)
    5072         276 :     {
    5073             :         uint32      buf_state;
    5074             : 
    5075             :         /*
    5076             :          * It may not be necessary to acquire the spinlock to check the flag
    5077             :          * here, but since this test is essential for correctness, we'd better
    5078             :          * play it safe.
    5079             :          */
    5080         562 :         buf_state = LockBufHdr(buf);
    5081         562 :         UnlockBufHdr(buf, buf_state);
    5082             : 
    5083         562 :         if (!(buf_state & BM_IO_IN_PROGRESS))
    5084         286 :             break;
    5085         276 :         ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
    5086             :     }
    5087         286 :     ConditionVariableCancelSleep();
    5088         286 : }
    5089             : 
    5090             : /*
    5091             :  * StartBufferIO: begin I/O on this buffer
    5092             :  *  (Assumptions)
    5093             :  *  My process is executing no IO
    5094             :  *  The buffer is Pinned
    5095             :  *
    5096             :  * In some scenarios there are race conditions in which multiple backends
    5097             :  * could attempt the same I/O operation concurrently.  If someone else
    5098             :  * has already started I/O on this buffer then we will block on the
    5099             :  * I/O condition variable until that I/O is done.
    5100             :  *
    5101             :  * Input operations are only attempted on buffers that are not BM_VALID,
    5102             :  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
    5103             :  * so we can always tell if the work is already done.
    5104             :  *
    5105             :  * Returns true if we successfully marked the buffer as I/O busy,
    5106             :  * false if someone else already did the work.
    5107             :  */
    5108             : static bool
    5109     5027110 : StartBufferIO(BufferDesc *buf, bool forInput)
    5110             : {
    5111             :     uint32      buf_state;
    5112             : 
    5113     5027110 :     ResourceOwnerEnlargeBufferIOs(CurrentResourceOwner);
    5114             : 
    5115             :     for (;;)
    5116             :     {
    5117     5027396 :         buf_state = LockBufHdr(buf);
    5118             : 
    5119     5027396 :         if (!(buf_state & BM_IO_IN_PROGRESS))
    5120     5027110 :             break;
    5121         286 :         UnlockBufHdr(buf, buf_state);
    5122         286 :         WaitIO(buf);
    5123             :     }
    5124             : 
    5125             :     /* Once we get here, there is definitely no I/O active on this buffer */
    5126             : 
    5127     5027110 :     if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
    5128             :     {
    5129             :         /* someone else already did the I/O */
    5130         294 :         UnlockBufHdr(buf, buf_state);
    5131         294 :         return false;
    5132             :     }
    5133             : 
    5134     5026816 :     buf_state |= BM_IO_IN_PROGRESS;
    5135     5026816 :     UnlockBufHdr(buf, buf_state);
    5136             : 
    5137     5026816 :     ResourceOwnerRememberBufferIO(CurrentResourceOwner,
    5138             :                                   BufferDescriptorGetBuffer(buf));
    5139             : 
    5140     5026816 :     return true;
    5141             : }
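
A sketch of the input-side protocol built on StartBufferIO() and
TerminateBufferIO(); reln, forkNum, blockNum and bufBlock are illustrative
variables, and the real read path adds error handling:

    if (StartBufferIO(bufHdr, true))
    {
        /* we own the I/O: perform the read, then publish the result */
        smgrread(reln, forkNum, blockNum, bufBlock);
        TerminateBufferIO(bufHdr, false, BM_VALID);
    }
    /* else another backend completed the read; the buffer is BM_VALID */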
    5142             : 
    5143             : /*
    5144             :  * TerminateBufferIO: release a buffer we were doing I/O on
    5145             :  *  (Assumptions)
    5146             :  *  My process is executing IO for the buffer
    5147             :  *  BM_IO_IN_PROGRESS bit is set for the buffer
    5148             :  *  The buffer is Pinned
    5149             :  *
    5150             :  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
    5151             :  * buffer's BM_DIRTY flag.  This is appropriate when terminating a
    5152             :  * successful write.  The check on BM_JUST_DIRTIED is necessary to avoid
    5153             :  * marking the buffer clean if it was re-dirtied while we were writing.
    5154             :  *
    5155             :  * set_flag_bits gets ORed into the buffer's flags.  It must include
    5156             :  * BM_IO_ERROR in a failure case.  For successful completion it could
    5157             :  * be 0, or BM_VALID if we just finished reading in the page.
    5158             :  */
    5159             : static void
    5160     5026816 : TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
    5161             : {
    5162             :     uint32      buf_state;
    5163             : 
    5164     5026816 :     buf_state = LockBufHdr(buf);
    5165             : 
    5166             :     Assert(buf_state & BM_IO_IN_PROGRESS);
    5167             : 
    5168     5026816 :     buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
    5169     5026816 :     if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
    5170     1397056 :         buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
    5171             : 
    5172     5026816 :     buf_state |= set_flag_bits;
    5173     5026816 :     UnlockBufHdr(buf, buf_state);
    5174             : 
    5175     5026816 :     ResourceOwnerForgetBufferIO(CurrentResourceOwner,
    5176             :                                 BufferDescriptorGetBuffer(buf));
    5177             : 
    5178     5026816 :     ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
    5179     5026816 : }
    5180             : 
    5181             : /*
    5182             :  * AbortBufferIO: Clean up active buffer I/O after an error.
    5183             :  *
    5184             :  *  All LWLocks we might have held have been released,
    5185             :  *  but we haven't yet released buffer pins, so the buffer is still pinned.
    5186             :  *
    5187             :  *  If I/O was in progress, we always set BM_IO_ERROR, even though it's
    5188             :  *  possible the error condition wasn't related to the I/O.
    5189             :  */
    5190             : void
    5191          26 : AbortBufferIO(Buffer buffer)
    5192             : {
    5193          26 :     BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
    5194             :     uint32      buf_state;
    5195             : 
    5196          26 :     buf_state = LockBufHdr(buf_hdr);
    5197             :     Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
    5198             : 
    5199          26 :     if (!(buf_state & BM_VALID))
    5200             :     {
    5201             :         Assert(!(buf_state & BM_DIRTY));
    5202          26 :         UnlockBufHdr(buf_hdr, buf_state);
    5203             :     }
    5204             :     else
    5205             :     {
    5206             :         Assert(buf_state & BM_DIRTY);
    5207           0 :         UnlockBufHdr(buf_hdr, buf_state);
    5208             : 
    5209             :         /* Issue notice if this is not the first failure... */
    5210           0 :         if (buf_state & BM_IO_ERROR)
    5211             :         {
    5212             :             /* Buffer is pinned, so we can read tag without spinlock */
    5213             :             char       *path;
    5214             : 
    5215           0 :             path = relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
    5216             :                                BufTagGetForkNum(&buf_hdr->tag));
    5217           0 :             ereport(WARNING,
    5218             :                     (errcode(ERRCODE_IO_ERROR),
    5219             :                      errmsg("could not write block %u of %s",
    5220             :                             buf_hdr->tag.blockNum, path),
    5221             :                      errdetail("Multiple failures --- write error might be permanent.")));
    5222           0 :             pfree(path);
    5223             :         }
    5224             :     }
    5225             : 
    5226          26 :     TerminateBufferIO(buf_hdr, false, BM_IO_ERROR);
    5227          26 : }
    5228             : 
    5229             : /*
    5230             :  * Error context callback for errors occurring during shared buffer writes.
    5231             :  */
    5232             : static void
    5233          84 : shared_buffer_write_error_callback(void *arg)
    5234             : {
    5235          84 :     BufferDesc *bufHdr = (BufferDesc *) arg;
    5236             : 
    5237             :     /* Buffer is pinned, so we can read the tag without locking the spinlock */
    5238          84 :     if (bufHdr != NULL)
    5239             :     {
    5240          84 :         char       *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
    5241             :                                        BufTagGetForkNum(&bufHdr->tag));
    5242             : 
    5243          84 :         errcontext("writing block %u of relation %s",
    5244             :                    bufHdr->tag.blockNum, path);
    5245          84 :         pfree(path);
    5246             :     }
    5247          84 : }
    5248             : 
    5249             : /*
    5250             :  * Error context callback for errors occurring during local buffer writes.
    5251             :  */
    5252             : static void
    5253           0 : local_buffer_write_error_callback(void *arg)
    5254             : {
    5255           0 :     BufferDesc *bufHdr = (BufferDesc *) arg;
    5256             : 
    5257           0 :     if (bufHdr != NULL)
    5258             :     {
    5259           0 :         char       *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
    5260             :                                           MyBackendId,
    5261             :                                           BufTagGetForkNum(&bufHdr->tag));
    5262             : 
    5263           0 :         errcontext("writing block %u of relation %s",
    5264             :                    bufHdr->tag.blockNum, path);
    5265           0 :         pfree(path);
    5266             :     }
    5267           0 : }
    5268             : 
    5269             : /*
    5270             :  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
    5271             :  */
    5272             : static int
    5273    22241376 : rlocator_comparator(const void *p1, const void *p2)
    5274             : {
    5275    22241376 :     RelFileLocator n1 = *(const RelFileLocator *) p1;
    5276    22241376 :     RelFileLocator n2 = *(const RelFileLocator *) p2;
    5277             : 
    5278    22241376 :     if (n1.relNumber < n2.relNumber)
    5279    18456162 :         return -1;
    5280     3785214 :     else if (n1.relNumber > n2.relNumber)
    5281      730162 :         return 1;
    5282             : 
    5283     3055052 :     if (n1.dbOid < n2.dbOid)
    5284       75564 :         return -1;
    5285     2979488 :     else if (n1.dbOid > n2.dbOid)
    5286       89084 :         return 1;
    5287             : 
    5288     2890404 :     if (n1.spcOid < n2.spcOid)
    5289           0 :         return -1;
    5290     2890404 :     else if (n1.spcOid > n2.spcOid)
    5291           0 :         return 1;
    5292             :     else
    5293     2890404 :         return 0;
    5294             : }
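
Because the comparator is qsort/bsearch-compatible, callers sort an array of
locators once and then probe it repeatedly; locators, nlocators and target
are illustrative names:

    qsort(locators, nlocators, sizeof(RelFileLocator), rlocator_comparator);

    if (bsearch(&target, locators, nlocators,
                sizeof(RelFileLocator), rlocator_comparator) != NULL)
    {
        /* target matches one of the sorted locators */
    }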
    5295             : 
    5296             : /*
    5297             :  * Lock buffer header - set BM_LOCKED in buffer state.
    5298             :  */
    5299             : uint32
    5300   101669242 : LockBufHdr(BufferDesc *desc)
    5301             : {
    5302             :     SpinDelayStatus delayStatus;
    5303             :     uint32      old_buf_state;
    5304             : 
    5305             :     Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
    5306             : 
    5307   101669242 :     init_local_spin_delay(&delayStatus);
    5308             : 
    5309             :     while (true)
    5310             :     {
    5311             :         /* set BM_LOCKED flag */
    5312   101677580 :         old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
    5313             :         /* if it wasn't set before we're OK */
    5314   101677580 :         if (!(old_buf_state & BM_LOCKED))
    5315   101669242 :             break;
    5316        8338 :         perform_spin_delay(&delayStatus);
    5317             :     }
    5318   101669242 :     finish_spin_delay(&delayStatus);
    5319   101669242 :     return old_buf_state | BM_LOCKED;
    5320             : }
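
The canonical update pattern around this lock, as used throughout this file
(the flag chosen here is only an example):

    buf_state = LockBufHdr(bufHdr);     /* returns state with BM_LOCKED set */
    buf_state |= BM_JUST_DIRTIED;       /* modify the local copy */
    UnlockBufHdr(bufHdr, buf_state);    /* store it back, clearing BM_LOCKED */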
    5321             : 
    5322             : /*
    5323             :  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
    5324             :  * state at that point.
    5325             :  *
    5326             :  * Obviously the buffer could become locked again by the time the value is
    5327             :  * returned, so this is primarily useful in CAS-style loops.
    5328             :  */
    5329             : static uint32
    5330         558 : WaitBufHdrUnlocked(BufferDesc *buf)
    5331             : {
    5332             :     SpinDelayStatus delayStatus;
    5333             :     uint32      buf_state;
    5334             : 
    5335         558 :     init_local_spin_delay(&delayStatus);
    5336             : 
    5337         558 :     buf_state = pg_atomic_read_u32(&buf->state);
    5338             : 
    5339        2196 :     while (buf_state & BM_LOCKED)
    5340             :     {
    5341        1638 :         perform_spin_delay(&delayStatus);
    5342        1638 :         buf_state = pg_atomic_read_u32(&buf->state);
    5343             :     }
    5344             : 
    5345         558 :     finish_spin_delay(&delayStatus);
    5346             : 
    5347         558 :     return buf_state;
    5348             : }
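
A sketch of such a CAS-style loop, modeled on the pinning fast path: read the
state without taking the header lock, wait out BM_LOCKED when necessary, and
retry the compare-and-swap until it succeeds:

    uint32      old_buf_state;
    uint32      buf_state;

    old_buf_state = pg_atomic_read_u32(&buf->state);
    for (;;)
    {
        if (old_buf_state & BM_LOCKED)
            old_buf_state = WaitBufHdrUnlocked(buf);

        buf_state = old_buf_state + BUF_REFCOUNT_ONE;   /* e.g. take a pin */

        if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
                                           buf_state))
            break;      /* on failure old_buf_state is refreshed; retry */
    }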
    5349             : 
    5350             : /*
    5351             :  * BufferTag comparator.
    5352             :  */
    5353             : static inline int
    5354     4306436 : buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
    5355             : {
    5356             :     int         ret;
    5357             :     RelFileLocator rlocatora;
    5358             :     RelFileLocator rlocatorb;
    5359             : 
    5360     4306436 :     rlocatora = BufTagGetRelFileLocator(ba);
    5361     4306436 :     rlocatorb = BufTagGetRelFileLocator(bb);
    5362             : 
    5363     4306436 :     ret = rlocator_comparator(&rlocatora, &rlocatorb);
    5364             : 
    5365     4306436 :     if (ret != 0)
    5366     1419574 :         return ret;
    5367             : 
    5368     2886862 :     if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
    5369      218490 :         return -1;
    5370     2668372 :     if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
    5371      126312 :         return 1;
    5372             : 
    5373     2542060 :     if (ba->blockNum < bb->blockNum)
    5374     1676390 :         return -1;
    5375      865670 :     if (ba->blockNum > bb->blockNum)
    5376      864554 :         return 1;
    5377             : 
    5378        1116 :     return 0;
    5379             : }
    5380             : 
    5381             : /*
    5382             :  * Comparator determining the writeout order in a checkpoint.
    5383             :  *
    5384             :  * It is important that tablespaces are compared first; the logic that
    5385             :  * balances writes between tablespaces relies on it.
    5386             :  */
    5387             : static inline int
    5388     9408156 : ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
    5389             : {
    5390             :     /* compare tablespace */
    5391     9408156 :     if (a->tsId < b->tsId)
    5392       61580 :         return -1;
    5393     9346576 :     else if (a->tsId > b->tsId)
    5394       92214 :         return 1;
    5395             :     /* compare relation */
    5396     9254362 :     if (a->relNumber < b->relNumber)
    5397     2553280 :         return -1;
    5398     6701082 :     else if (a->relNumber > b->relNumber)
    5399     2464248 :         return 1;
    5400             :     /* compare fork */
    5401     4236834 :     else if (a->forkNum < b->forkNum)
    5402      246774 :         return -1;
    5403     3990060 :     else if (a->forkNum > b->forkNum)
    5404      257090 :         return 1;
    5405             :     /* compare block number */
    5406     3732970 :     else if (a->blockNum < b->blockNum)
    5407     1860566 :         return -1;
    5408     1872404 :     else if (a->blockNum > b->blockNum)
    5409     1808508 :         return 1;
    5410             :     /* equal page IDs are unlikely, but not impossible */
    5411       63896 :     return 0;
    5412             : }
    5413             : 
    5414             : /*
    5415             :  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
    5416             :  * progress.
    5417             :  */
    5418             : static int
    5419      835198 : ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
    5420             : {
    5421      835198 :     CkptTsStatus *sa = (CkptTsStatus *) a;
    5422      835198 :     CkptTsStatus *sb = (CkptTsStatus *) b;
    5423             : 
    5424             :     /* we want a min-heap, so return 1 when a < b */
    5425      835198 :     if (sa->progress < sb->progress)
    5426      780160 :         return 1;
    5427       55038 :     else if (sa->progress == sb->progress)
    5428        2860 :         return 0;
    5429             :     else
    5430       52178 :         return -1;
    5431             : }
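
A sketch of how the checkpointer drives this comparator, modeled on
BufferSync(): per-tablespace progress entries live in a binaryheap, and after
each write the front entry is advanced and re-sifted:

    CkptTsStatus *per_ts_stat;
    binaryheap *ts_heap = binaryheap_allocate(num_spaces,
                                              ts_ckpt_progress_comparator,
                                              NULL);

    /* binaryheap_add_unordered() once per tablespace, then ... */
    binaryheap_build(ts_heap);

    /* after writing one buffer of the least-advanced tablespace: */
    per_ts_stat = (CkptTsStatus *) DatumGetPointer(binaryheap_first(ts_heap));
    per_ts_stat->progress += per_ts_stat->progress_slice;
    binaryheap_replace_first(ts_heap, PointerGetDatum(per_ts_stat));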
    5432             : 
    5433             : /*
    5434             :  * Initialize a writeback context, discarding potential previous state.
    5435             :  *
    5436             :  * *max_pending is a pointer instead of an immediate value, so the coalesce
    5437             :  * limits can easily be changed by the GUC mechanism, and so calling code does
    5438             :  * not have to check the current configuration. A value of 0 means that no
    5439             :  * writeback control will be performed.
    5440             :  */
    5441             : void
    5442        7504 : WritebackContextInit(WritebackContext *context, int *max_pending)
    5443             : {
    5444             :     Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
    5445             : 
    5446        7504 :     context->max_pending = max_pending;
    5447        7504 :     context->nr_pending = 0;
    5448        7504 : }
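
A sketch of the whole writeback-context life cycle for one flushing pass;
checkpoint_flush_after is the GUC variable a checkpoint would pass in, and
bufHdr stands for whichever buffer was just written:

    WritebackContext wb_context;

    WritebackContextInit(&wb_context, &checkpoint_flush_after);

    /* after each buffer write: */
    ScheduleBufferTagForWriteback(&wb_context, IOCONTEXT_NORMAL, &bufHdr->tag);

    /* once the pass is complete, flush whatever is still queued: */
    IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);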
    5449             : 
    5450             : /*
    5451             :  * Add buffer to list of pending writeback requests.
    5452             :  */
    5453             : void
    5454     1390018 : ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
    5455             :                               BufferTag *tag)
    5456             : {
    5457             :     PendingWriteback *pending;
    5458             : 
    5459     1390018 :     if (io_direct_flags & IO_DIRECT_DATA)
    5460        1048 :         return;
    5461             : 
    5462             :     /*
    5463             :      * Add buffer to the pending writeback array, unless writeback control is
    5464             :      * disabled.
    5465             :      */
    5466     1388970 :     if (*wb_context->max_pending > 0)
    5467             :     {
    5468             :         Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
    5469             : 
    5470      950684 :         pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
    5471             : 
    5472      950684 :         pending->tag = *tag;
    5473             :     }
    5474             : 
    5475             :     /*
    5476             :      * Perform pending flushes if the writeback limit is exceeded. This
    5477             :      * includes the case where previously an item has been added, but control
    5478             :      * is now disabled.
    5479             :      */
    5480     1388970 :     if (wb_context->nr_pending >= *wb_context->max_pending)
    5481      466358 :         IssuePendingWritebacks(wb_context, io_context);
    5482             : }
    5483             : 
    5484             : #define ST_SORT sort_pending_writebacks
    5485             : #define ST_ELEMENT_TYPE PendingWriteback
    5486             : #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
    5487             : #define ST_SCOPE static
    5488             : #define ST_DEFINE
    5489             : #include <lib/sort_template.h>
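
The include above expands the template into
static void sort_pending_writebacks(PendingWriteback *, size_t), which
IssuePendingWritebacks() calls below.  The same pattern yields the checkpoint
sorter from ckpt_buforder_comparator; an illustrative instantiation (from
memory, not copied from the file):

    #define ST_SORT sort_checkpoint_bufferids
    #define ST_ELEMENT_TYPE CkptSortItem
    #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
    #define ST_SCOPE static
    #define ST_DEFINE
    #include <lib/sort_template.h>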
    5490             : 
    5491             : /*
    5492             :  * Issue all pending writeback requests, previously scheduled with
    5493             :  * ScheduleBufferTagForWriteback, to the OS.
    5494             :  *
    5495             :  * Because this is only used to improve the OS's I/O scheduling, we try never
    5496             :  * to error out - it's just a hint.
    5497             :  */
    5498             : void
    5499      469532 : IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
    5500             : {
    5501             :     instr_time  io_start;
    5502             :     int         i;
    5503             : 
    5504      469532 :     if (wb_context->nr_pending == 0)
    5505      438302 :         return;
    5506             : 
    5507             :     /*
    5508             :  * Executing the writes in-order can make them a lot faster, and allows us to
    5509             :  * merge writeback requests for consecutive blocks into larger writebacks.
    5510             :      */
    5511       31230 :     sort_pending_writebacks(wb_context->pending_writebacks,
    5512       31230 :                             wb_context->nr_pending);
    5513             : 
    5514       31230 :     io_start = pgstat_prepare_io_time();
    5515             : 
    5516             :     /*
    5517             :      * Coalesce neighbouring writes, but nothing else. For that we iterate
    5518             :  * through the now-sorted array of pending flushes, and look forward to
    5519             :      * find all neighbouring (or identical) writes.
    5520             :      */
    5521      293084 :     for (i = 0; i < wb_context->nr_pending; i++)
    5522             :     {
    5523             :         PendingWriteback *cur;
    5524             :         PendingWriteback *next;
    5525             :         SMgrRelation reln;
    5526             :         int         ahead;
    5527             :         BufferTag   tag;
    5528             :         RelFileLocator currlocator;
    5529      261854 :         Size        nblocks = 1;
    5530             : 
    5531      261854 :         cur = &wb_context->pending_writebacks[i];
    5532      261854 :         tag = cur->tag;
    5533      261854 :         currlocator = BufTagGetRelFileLocator(&tag);
    5534             : 
    5535             :         /*
    5536             :          * Peek ahead, into following writeback requests, to see if they can
    5537             :          * be combined with the current one.
    5538             :          */
    5539      946728 :         for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
    5540             :         {
    5541             : 
    5542      915498 :             next = &wb_context->pending_writebacks[i + ahead + 1];
    5543             : 
    5544             :             /* different file, stop */
    5545      915498 :             if (!RelFileLocatorEquals(currlocator,
    5546      767822 :                                       BufTagGetRelFileLocator(&next->tag)) ||
    5547      767822 :                 BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
    5548             :                 break;
    5549             : 
    5550             :             /* ok, block queued twice, skip */
    5551      696494 :             if (cur->tag.blockNum == next->tag.blockNum)
    5552         970 :                 continue;
    5553             : 
    5554             :             /* only merge consecutive writes */
    5555      695524 :             if (cur->tag.blockNum + 1 != next->tag.blockNum)
    5556       11620 :                 break;
    5557             : 
    5558      683904 :             nblocks++;
    5559      683904 :             cur = next;
    5560             :         }
    5561             : 
    5562      261854 :         i += ahead;
    5563             : 
    5564             :         /* and finally tell the kernel to write the data to storage */
    5565      261854 :         reln = smgropen(currlocator, InvalidBackendId);
    5566      261854 :         smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
    5567             :     }
    5568             : 
    5569             :     /*
    5570             :      * Assume that writeback requests are only issued for buffers containing
    5571             :      * blocks of permanent relations.
    5572             :      */
    5573       31230 :     pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
    5574       31230 :                             IOOP_WRITEBACK, io_start, wb_context->nr_pending);
    5575             : 
    5576       31230 :     wb_context->nr_pending = 0;
    5577             : }
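
A concrete trace of the coalescing loop above: if the sorted queue holds
blocks 7, 8, 8 and 9 of a single relation fork, the duplicate 8 is skipped,
the consecutive run is merged, and a single request reaches the kernel:

    smgrwriteback(reln, forknum, 7, 3);     /* covers blocks 7 through 9 */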
    5578             : 
    5579             : 
    5580             : /*
    5581             :  * Implement slower/larger portions of TestForOldSnapshot
    5582             :  *
    5583             :  * Smaller/faster portions are put inline, but the entire set of logic is too
    5584             :  * big for that.
    5585             :  */
    5586             : void
    5587        1302 : TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
    5588             : {
    5589        1302 :     if (RelationAllowsEarlyPruning(relation)
    5590        1302 :         && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
    5591           6 :         ereport(ERROR,
    5592             :                 (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
    5593             :                  errmsg("snapshot too old")));
    5594        1296 : }
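
For context, the fast path mentioned above is a static inline wrapper in
bufmgr.h.  A rough sketch from memory, omitting some of the real conditions
(the authoritative version also checks the page LSN):

    static inline void
    TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
    {
        if (old_snapshot_threshold >= 0 &&
            snapshot != NULL &&
            snapshot->snapshot_type == SNAPSHOT_MVCC)
            TestForOldSnapshot_impl(snapshot, relation);
    }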

Generated by: LCOV version 1.14