LCOV - code coverage report
Current view: top level - src/backend/storage/buffer - bufmgr.c (source / functions) Hit Total Coverage
Test: PostgreSQL 19devel Lines: 1721 1917 89.8 %
Date: 2025-12-26 03:17:37 Functions: 116 123 94.3 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * bufmgr.c
       4             :  *    buffer manager interface routines
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/buffer/bufmgr.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /*
      16             :  * Principal entry points:
      17             :  *
      18             :  * ReadBuffer() -- find or create a buffer holding the requested page,
      19             :  *      and pin it so that no one can destroy it while this process
      20             :  *      is using it.
      21             :  *
      22             :  * StartReadBuffer() -- as above, with separate wait step
      23             :  * StartReadBuffers() -- multiple block version
      24             :  * WaitReadBuffers() -- second step of above
      25             :  *
      26             :  * ReleaseBuffer() -- unpin a buffer
      27             :  *
      28             :  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
      29             :  *      The disk write is delayed until buffer replacement or checkpoint.
      30             :  *
      31             :  * See also these files:
      32             :  *      freelist.c -- chooses victim for buffer replacement
      33             :  *      buf_table.c -- manages the buffer lookup table
      34             :  */
      35             : #include "postgres.h"
      36             : 
      37             : #include <sys/file.h>
      38             : #include <unistd.h>
      39             : 
      40             : #include "access/tableam.h"
      41             : #include "access/xloginsert.h"
      42             : #include "access/xlogutils.h"
      43             : #ifdef USE_ASSERT_CHECKING
      44             : #include "catalog/pg_tablespace_d.h"
      45             : #endif
      46             : #include "catalog/storage.h"
      47             : #include "catalog/storage_xlog.h"
      48             : #include "executor/instrument.h"
      49             : #include "lib/binaryheap.h"
      50             : #include "miscadmin.h"
      51             : #include "pg_trace.h"
      52             : #include "pgstat.h"
      53             : #include "postmaster/bgwriter.h"
      54             : #include "storage/aio.h"
      55             : #include "storage/buf_internals.h"
      56             : #include "storage/bufmgr.h"
      57             : #include "storage/fd.h"
      58             : #include "storage/ipc.h"
      59             : #include "storage/lmgr.h"
      60             : #include "storage/proc.h"
      61             : #include "storage/read_stream.h"
      62             : #include "storage/smgr.h"
      63             : #include "storage/standby.h"
      64             : #include "utils/memdebug.h"
      65             : #include "utils/ps_status.h"
      66             : #include "utils/rel.h"
      67             : #include "utils/resowner.h"
      68             : #include "utils/timestamp.h"
      69             : 
      70             : 
      71             : /* Note: these two macros only work on shared buffers, not local ones! */
      72             : #define BufHdrGetBlock(bufHdr)  ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
      73             : #define BufferGetLSN(bufHdr)    (PageGetLSN(BufHdrGetBlock(bufHdr)))
      74             : 
      75             : /* Note: this macro only works on local buffers, not shared ones! */
      76             : #define LocalBufHdrGetBlock(bufHdr) \
      77             :     LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
      78             : 
      79             : /* Bits in SyncOneBuffer's return value */
      80             : #define BUF_WRITTEN             0x01
      81             : #define BUF_REUSABLE            0x02
      82             : 
      83             : #define RELS_BSEARCH_THRESHOLD      20
      84             : 
      85             : /*
      86             :  * This is the size (in the number of blocks) above which we scan the
      87             :  * entire buffer pool to remove the buffers for all the pages of relation
      88             :  * being dropped. For the relations with size below this threshold, we find
      89             :  * the buffers by doing lookups in BufMapping table.
      90             :  */
      91             : #define BUF_DROP_FULL_SCAN_THRESHOLD        (uint64) (NBuffers / 32)
      92             : 
      93             : /*
      94             :  * This is separated out from PrivateRefCountEntry to allow for copying all
      95             :  * the data members via struct assignment.
      96             :  */
      97             : typedef struct PrivateRefCountData
      98             : {
      99             :     /*
     100             :      * How many times has the buffer been pinned by this backend.
     101             :      */
     102             :     int32       refcount;
     103             : } PrivateRefCountData;
     104             : 
     105             : typedef struct PrivateRefCountEntry
     106             : {
     107             :     /*
     108             :      * Note that this needs to be same as the entry's corresponding
     109             :      * PrivateRefCountArrayKeys[i], if the entry is stored in the array. We
     110             :      * store it in both places as this is used for the hashtable key and
     111             :      * because it is more convenient (passing around a PrivateRefCountEntry
     112             :      * suffices to identify the buffer) and faster (checking the keys array is
     113             :      * faster when checking many entries, checking the entry is faster if just
     114             :      * checking a single entry).
     115             :      */
     116             :     Buffer      buffer;
     117             : 
     118             :     PrivateRefCountData data;
     119             : } PrivateRefCountEntry;
     120             : 
     121             : /* 64 bytes, about the size of a cache line on common systems */
     122             : #define REFCOUNT_ARRAY_ENTRIES 8
     123             : 
     124             : /*
     125             :  * Status of buffers to checkpoint for a particular tablespace, used
     126             :  * internally in BufferSync.
     127             :  */
     128             : typedef struct CkptTsStatus
     129             : {
     130             :     /* oid of the tablespace */
     131             :     Oid         tsId;
     132             : 
     133             :     /*
     134             :      * Checkpoint progress for this tablespace. To make progress comparable
     135             :      * between tablespaces the progress is, for each tablespace, measured as a
     136             :      * number between 0 and the total number of to-be-checkpointed pages. Each
     137             :      * page checkpointed in this tablespace increments this space's progress
     138             :      * by progress_slice.
     139             :      */
     140             :     float8      progress;
     141             :     float8      progress_slice;
     142             : 
     143             :     /* number of to-be checkpointed pages in this tablespace */
     144             :     int         num_to_scan;
     145             :     /* already processed pages in this tablespace */
     146             :     int         num_scanned;
     147             : 
     148             :     /* current offset in CkptBufferIds for this tablespace */
     149             :     int         index;
     150             : } CkptTsStatus;
     151             : 
     152             : /*
     153             :  * Type for array used to sort SMgrRelations
     154             :  *
     155             :  * FlushRelationsAllBuffers shares the same comparator function with
     156             :  * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
     157             :  * compatible.
     158             :  */
     159             : typedef struct SMgrSortArray
     160             : {
     161             :     RelFileLocator rlocator;    /* This must be the first member */
     162             :     SMgrRelation srel;
     163             : } SMgrSortArray;
     164             : 
     165             : /* GUC variables */
     166             : bool        zero_damaged_pages = false;
     167             : int         bgwriter_lru_maxpages = 100;
     168             : double      bgwriter_lru_multiplier = 2.0;
     169             : bool        track_io_timing = false;
     170             : 
     171             : /*
     172             :  * How many buffers PrefetchBuffer callers should try to stay ahead of their
     173             :  * ReadBuffer calls by.  Zero means "never prefetch".  This value is only used
     174             :  * for buffers not belonging to tablespaces that have their
     175             :  * effective_io_concurrency parameter set.
     176             :  */
     177             : int         effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
     178             : 
     179             : /*
     180             :  * Like effective_io_concurrency, but used by maintenance code paths that might
     181             :  * benefit from a higher setting because they work on behalf of many sessions.
     182             :  * Overridden by the tablespace setting of the same name.
     183             :  */
     184             : int         maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
     185             : 
     186             : /*
     187             :  * Limit on how many blocks should be handled in single I/O operations.
     188             :  * StartReadBuffers() callers should respect it, as should other operations
     189             :  * that call smgr APIs directly.  It is computed as the minimum of underlying
     190             :  * GUCs io_combine_limit_guc and io_max_combine_limit.
     191             :  */
     192             : int         io_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
     193             : int         io_combine_limit_guc = DEFAULT_IO_COMBINE_LIMIT;
     194             : int         io_max_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
     195             : 
     196             : /*
     197             :  * GUC variables about triggering kernel writeback for buffers written; OS
     198             :  * dependent defaults are set via the GUC mechanism.
     199             :  */
     200             : int         checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
     201             : int         bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
     202             : int         backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
     203             : 
     204             : /* local state for LockBufferForCleanup */
     205             : static BufferDesc *PinCountWaitBuf = NULL;
     206             : 
     207             : /*
     208             :  * Backend-Private refcount management:
     209             :  *
     210             :  * Each buffer also has a private refcount that keeps track of the number of
     211             :  * times the buffer is pinned in the current process.  This is so that the
     212             :  * shared refcount needs to be modified only once if a buffer is pinned more
     213             :  * than once by an individual backend.  It's also used to check that no buffers
     214             :  * are still pinned at the end of transactions and when exiting.
     215             :  *
     216             :  *
     217             :  * To avoid - as we used to - requiring an array with NBuffers entries to keep
     218             :  * track of local buffers, we use a small sequentially searched array
     219             :  * (PrivateRefCountArrayKeys, with the corresponding data stored in
     220             :  * PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
     221             :  * keep track of backend local pins.
     222             :  *
     223             :  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
     224             :  * refcounts are kept track of in the array; after that, new array entries
     225             :  * displace old ones into the hash table. That way a frequently used entry
     226             :  * can't get "stuck" in the hashtable while infrequent ones clog the array.
     227             :  *
     228             :  * Note that in most scenarios the number of pinned buffers will not exceed
     229             :  * REFCOUNT_ARRAY_ENTRIES.
     230             :  *
     231             :  *
     232             :  * To enter a buffer into the refcount tracking mechanism first reserve a free
     233             :  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
     234             :  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
     235             :  * memory allocations in NewPrivateRefCountEntry() which can be important
     236             :  * because in some scenarios it's called with a spinlock held...
     237             :  */
     238             : static Buffer PrivateRefCountArrayKeys[REFCOUNT_ARRAY_ENTRIES];
     239             : static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
     240             : static HTAB *PrivateRefCountHash = NULL;
     241             : static int32 PrivateRefCountOverflowed = 0;
     242             : static uint32 PrivateRefCountClock = 0;
     243             : static int  ReservedRefCountSlot = -1;
     244             : static int  PrivateRefCountEntryLast = -1;
     245             : 
     246             : static uint32 MaxProportionalPins;
     247             : 
     248             : static void ReservePrivateRefCountEntry(void);
     249             : static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
     250             : static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
     251             : static inline int32 GetPrivateRefCount(Buffer buffer);
     252             : static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
     253             : 
     254             : /* ResourceOwner callbacks to hold in-progress I/Os and buffer pins */
     255             : static void ResOwnerReleaseBufferIO(Datum res);
     256             : static char *ResOwnerPrintBufferIO(Datum res);
     257             : static void ResOwnerReleaseBufferPin(Datum res);
     258             : static char *ResOwnerPrintBufferPin(Datum res);
     259             : 
     260             : const ResourceOwnerDesc buffer_io_resowner_desc =
     261             : {
     262             :     .name = "buffer io",
     263             :     .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
     264             :     .release_priority = RELEASE_PRIO_BUFFER_IOS,
     265             :     .ReleaseResource = ResOwnerReleaseBufferIO,
     266             :     .DebugPrint = ResOwnerPrintBufferIO
     267             : };
     268             : 
     269             : const ResourceOwnerDesc buffer_pin_resowner_desc =
     270             : {
     271             :     .name = "buffer pin",
     272             :     .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
     273             :     .release_priority = RELEASE_PRIO_BUFFER_PINS,
     274             :     .ReleaseResource = ResOwnerReleaseBufferPin,
     275             :     .DebugPrint = ResOwnerPrintBufferPin
     276             : };
     277             : 
     278             : /*
     279             :  * Ensure that the PrivateRefCountArray has sufficient space to store one more
     280             :  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
     281             :  * a new entry - but it's perfectly fine to not use a reserved entry.
     282             :  */
     283             : static void
     284   131074446 : ReservePrivateRefCountEntry(void)
     285             : {
     286             :     /* Already reserved (or freed), nothing to do */
     287   131074446 :     if (ReservedRefCountSlot != -1)
     288   122796850 :         return;
     289             : 
     290             :     /*
     291             :      * First search for a free entry the array, that'll be sufficient in the
     292             :      * majority of cases.
     293             :      */
     294             :     {
     295             :         int         i;
     296             : 
     297    74498364 :         for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
     298             :         {
     299    66220768 :             if (PrivateRefCountArrayKeys[i] == InvalidBuffer)
     300             :             {
     301    49135294 :                 ReservedRefCountSlot = i;
     302             : 
     303             :                 /*
     304             :                  * We could return immediately, but iterating till the end of
     305             :                  * the array allows compiler-autovectorization.
     306             :                  */
     307             :             }
     308             :         }
     309             : 
     310     8277596 :         if (ReservedRefCountSlot != -1)
     311     7934402 :             return;
     312             :     }
     313             : 
     314             :     /*
     315             :      * No luck. All array entries are full. Move one array entry into the hash
     316             :      * table.
     317             :      */
     318             :     {
     319             :         /*
     320             :          * Move entry from the current clock position in the array into the
     321             :          * hashtable. Use that slot.
     322             :          */
     323             :         int         victim_slot;
     324             :         PrivateRefCountEntry *victim_entry;
     325             :         PrivateRefCountEntry *hashent;
     326             :         bool        found;
     327             : 
     328             :         /* select victim slot */
     329      343194 :         victim_slot = PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES;
     330      343194 :         victim_entry = &PrivateRefCountArray[victim_slot];
     331      343194 :         ReservedRefCountSlot = victim_slot;
     332             : 
     333             :         /* Better be used, otherwise we shouldn't get here. */
     334             :         Assert(PrivateRefCountArrayKeys[victim_slot] != InvalidBuffer);
     335             :         Assert(PrivateRefCountArray[victim_slot].buffer != InvalidBuffer);
     336             :         Assert(PrivateRefCountArrayKeys[victim_slot] == PrivateRefCountArray[victim_slot].buffer);
     337             : 
     338             :         /* enter victim array entry into hashtable */
     339      343194 :         hashent = hash_search(PrivateRefCountHash,
     340      343194 :                               &PrivateRefCountArrayKeys[victim_slot],
     341             :                               HASH_ENTER,
     342             :                               &found);
     343             :         Assert(!found);
     344             :         /* move data from the entry in the array to the hash entry */
     345      343194 :         hashent->data = victim_entry->data;
     346             : 
     347             :         /* clear the now free array slot */
     348      343194 :         PrivateRefCountArrayKeys[victim_slot] = InvalidBuffer;
     349      343194 :         victim_entry->buffer = InvalidBuffer;
     350             : 
     351             :         /* clear the whole data member, just for future proofing */
     352      343194 :         memset(&victim_entry->data, 0, sizeof(victim_entry->data));
     353      343194 :         victim_entry->data.refcount = 0;
     354             : 
     355      343194 :         PrivateRefCountOverflowed++;
     356             :     }
     357             : }
     358             : 
     359             : /*
     360             :  * Fill a previously reserved refcount entry.
     361             :  */
     362             : static PrivateRefCountEntry *
     363   119196050 : NewPrivateRefCountEntry(Buffer buffer)
     364             : {
     365             :     PrivateRefCountEntry *res;
     366             : 
     367             :     /* only allowed to be called when a reservation has been made */
     368             :     Assert(ReservedRefCountSlot != -1);
     369             : 
     370             :     /* use up the reserved entry */
     371   119196050 :     res = &PrivateRefCountArray[ReservedRefCountSlot];
     372             : 
     373             :     /* and fill it */
     374   119196050 :     PrivateRefCountArrayKeys[ReservedRefCountSlot] = buffer;
     375   119196050 :     res->buffer = buffer;
     376   119196050 :     res->data.refcount = 0;
     377             : 
     378             :     /* update cache for the next lookup */
     379   119196050 :     PrivateRefCountEntryLast = ReservedRefCountSlot;
     380             : 
     381   119196050 :     ReservedRefCountSlot = -1;
     382             : 
     383   119196050 :     return res;
     384             : }
     385             : 
     386             : /*
     387             :  * Slow-path for GetPrivateRefCountEntry(). This is big enough to not be worth
     388             :  * inlining. This particularly seems to be true if the compiler is capable of
     389             :  * auto-vectorizing the code, as that imposes additional stack-alignment
     390             :  * requirements etc.
     391             :  */
     392             : static pg_noinline PrivateRefCountEntry *
     393   139143474 : GetPrivateRefCountEntrySlow(Buffer buffer, bool do_move)
     394             : {
     395             :     PrivateRefCountEntry *res;
     396   139143474 :     int         match = -1;
     397             :     int         i;
     398             : 
     399             :     /*
     400             :      * First search for references in the array, that'll be sufficient in the
     401             :      * majority of cases.
     402             :      */
     403  1252291266 :     for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
     404             :     {
     405  1113147792 :         if (PrivateRefCountArrayKeys[i] == buffer)
     406             :         {
     407    24138156 :             match = i;
     408             :             /* see ReservePrivateRefCountEntry() for why we don't return */
     409             :         }
     410             :     }
     411             : 
     412   139143474 :     if (likely(match != -1))
     413             :     {
     414             :         /* update cache for the next lookup */
     415    24138156 :         PrivateRefCountEntryLast = match;
     416             : 
     417    24138156 :         return &PrivateRefCountArray[match];
     418             :     }
     419             : 
     420             :     /*
     421             :      * By here we know that the buffer, if already pinned, isn't residing in
     422             :      * the array.
     423             :      *
     424             :      * Only look up the buffer in the hashtable if we've previously overflowed
     425             :      * into it.
     426             :      */
     427   115005318 :     if (PrivateRefCountOverflowed == 0)
     428   114211226 :         return NULL;
     429             : 
     430      794092 :     res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
     431             : 
     432      794092 :     if (res == NULL)
     433      446068 :         return NULL;
     434      348024 :     else if (!do_move)
     435             :     {
     436             :         /* caller doesn't want us to move the hash entry into the array */
     437      304530 :         return res;
     438             :     }
     439             :     else
     440             :     {
     441             :         /* move buffer from hashtable into the free array slot */
     442             :         bool        found;
     443             :         PrivateRefCountEntry *free;
     444             : 
     445             :         /* Ensure there's a free array slot */
     446       43494 :         ReservePrivateRefCountEntry();
     447             : 
     448             :         /* Use up the reserved slot */
     449             :         Assert(ReservedRefCountSlot != -1);
     450       43494 :         free = &PrivateRefCountArray[ReservedRefCountSlot];
     451             :         Assert(PrivateRefCountArrayKeys[ReservedRefCountSlot] == free->buffer);
     452             :         Assert(free->buffer == InvalidBuffer);
     453             : 
     454             :         /* and fill it */
     455       43494 :         free->buffer = buffer;
     456       43494 :         free->data = res->data;
     457       43494 :         PrivateRefCountArrayKeys[ReservedRefCountSlot] = buffer;
     458             :         /* update cache for the next lookup */
     459       43494 :         PrivateRefCountEntryLast = match;
     460             : 
     461       43494 :         ReservedRefCountSlot = -1;
     462             : 
     463             : 
     464             :         /* delete from hashtable */
     465       43494 :         hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
     466             :         Assert(found);
     467             :         Assert(PrivateRefCountOverflowed > 0);
     468       43494 :         PrivateRefCountOverflowed--;
     469             : 
     470       43494 :         return free;
     471             :     }
     472             : }
     473             : 
     474             : /*
     475             :  * Return the PrivateRefCount entry for the passed buffer.
     476             :  *
     477             :  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
     478             :  * do_move is true, and the entry resides in the hashtable the entry is
     479             :  * optimized for frequent access by moving it to the array.
     480             :  */
     481             : static inline PrivateRefCountEntry *
     482   294258130 : GetPrivateRefCountEntry(Buffer buffer, bool do_move)
     483             : {
     484             :     Assert(BufferIsValid(buffer));
     485             :     Assert(!BufferIsLocal(buffer));
     486             : 
     487             :     /*
     488             :      * It's very common to look up the same buffer repeatedly. To make that
     489             :      * fast, we have a one-entry cache.
     490             :      *
     491             :      * In contrast to the loop in GetPrivateRefCountEntrySlow(), here it
     492             :      * faster to check PrivateRefCountArray[].buffer, as in the case of a hit
     493             :      * fewer addresses are computed and fewer cachelines are accessed. Whereas
     494             :      * in GetPrivateRefCountEntrySlow()'s case, checking
     495             :      * PrivateRefCountArrayKeys saves a lot of memory accesses.
     496             :      */
     497   294258130 :     if (likely(PrivateRefCountEntryLast != -1) &&
     498   294185528 :         likely(PrivateRefCountArray[PrivateRefCountEntryLast].buffer == buffer))
     499             :     {
     500   155114656 :         return &PrivateRefCountArray[PrivateRefCountEntryLast];
     501             :     }
     502             : 
     503             :     /*
     504             :      * The code for the cached lookup is small enough to be worth inlining
     505             :      * into the caller. In the miss case however, that empirically doesn't
     506             :      * seem worth it.
     507             :      */
     508   139143474 :     return GetPrivateRefCountEntrySlow(buffer, do_move);
     509             : }
     510             : 
     511             : /*
     512             :  * Returns how many times the passed buffer is pinned by this backend.
     513             :  *
     514             :  * Only works for shared memory buffers!
     515             :  */
     516             : static inline int32
     517     5736942 : GetPrivateRefCount(Buffer buffer)
     518             : {
     519             :     PrivateRefCountEntry *ref;
     520             : 
     521             :     Assert(BufferIsValid(buffer));
     522             :     Assert(!BufferIsLocal(buffer));
     523             : 
     524             :     /*
     525             :      * Not moving the entry - that's ok for the current users, but we might
     526             :      * want to change this one day.
     527             :      */
     528     5736942 :     ref = GetPrivateRefCountEntry(buffer, false);
     529             : 
     530     5736942 :     if (ref == NULL)
     531          48 :         return 0;
     532     5736894 :     return ref->data.refcount;
     533             : }
     534             : 
     535             : /*
     536             :  * Release resources used to track the reference count of a buffer which we no
     537             :  * longer have pinned and don't want to pin again immediately.
     538             :  */
     539             : static void
     540   119196050 : ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
     541             : {
     542             :     Assert(ref->data.refcount == 0);
     543             : 
     544   119196050 :     if (ref >= &PrivateRefCountArray[0] &&
     545             :         ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
     546             :     {
     547   118896350 :         ref->buffer = InvalidBuffer;
     548   118896350 :         PrivateRefCountArrayKeys[ref - PrivateRefCountArray] = InvalidBuffer;
     549             : 
     550             : 
     551             :         /*
     552             :          * Mark the just used entry as reserved - in many scenarios that
     553             :          * allows us to avoid ever having to search the array/hash for free
     554             :          * entries.
     555             :          */
     556   118896350 :         ReservedRefCountSlot = ref - PrivateRefCountArray;
     557             :     }
     558             :     else
     559             :     {
     560             :         bool        found;
     561      299700 :         Buffer      buffer = ref->buffer;
     562             : 
     563      299700 :         hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
     564             :         Assert(found);
     565             :         Assert(PrivateRefCountOverflowed > 0);
     566      299700 :         PrivateRefCountOverflowed--;
     567             :     }
     568   119196050 : }
     569             : 
     570             : /*
     571             :  * BufferIsPinned
     572             :  *      True iff the buffer is pinned (also checks for valid buffer number).
     573             :  *
     574             :  *      NOTE: what we check here is that *this* backend holds a pin on
     575             :  *      the buffer.  We do not care whether some other backend does.
     576             :  */
     577             : #define BufferIsPinned(bufnum) \
     578             : ( \
     579             :     !BufferIsValid(bufnum) ? \
     580             :         false \
     581             :     : \
     582             :         BufferIsLocal(bufnum) ? \
     583             :             (LocalRefCount[-(bufnum) - 1] > 0) \
     584             :         : \
     585             :     (GetPrivateRefCount(bufnum) > 0) \
     586             : )
     587             : 
     588             : 
     589             : static Buffer ReadBuffer_common(Relation rel,
     590             :                                 SMgrRelation smgr, char smgr_persistence,
     591             :                                 ForkNumber forkNum, BlockNumber blockNum,
     592             :                                 ReadBufferMode mode, BufferAccessStrategy strategy);
     593             : static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr,
     594             :                                            ForkNumber fork,
     595             :                                            BufferAccessStrategy strategy,
     596             :                                            uint32 flags,
     597             :                                            uint32 extend_by,
     598             :                                            BlockNumber extend_upto,
     599             :                                            Buffer *buffers,
     600             :                                            uint32 *extended_by);
     601             : static BlockNumber ExtendBufferedRelShared(BufferManagerRelation bmr,
     602             :                                            ForkNumber fork,
     603             :                                            BufferAccessStrategy strategy,
     604             :                                            uint32 flags,
     605             :                                            uint32 extend_by,
     606             :                                            BlockNumber extend_upto,
     607             :                                            Buffer *buffers,
     608             :                                            uint32 *extended_by);
     609             : static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
     610             :                       bool skip_if_not_valid);
     611             : static void PinBuffer_Locked(BufferDesc *buf);
     612             : static void UnpinBuffer(BufferDesc *buf);
     613             : static void UnpinBufferNoOwner(BufferDesc *buf);
     614             : static void BufferSync(int flags);
     615             : static int  SyncOneBuffer(int buf_id, bool skip_recently_used,
     616             :                           WritebackContext *wb_context);
     617             : static void WaitIO(BufferDesc *buf);
     618             : static void AbortBufferIO(Buffer buffer);
     619             : static void shared_buffer_write_error_callback(void *arg);
     620             : static void local_buffer_write_error_callback(void *arg);
     621             : static inline BufferDesc *BufferAlloc(SMgrRelation smgr,
     622             :                                       char relpersistence,
     623             :                                       ForkNumber forkNum,
     624             :                                       BlockNumber blockNum,
     625             :                                       BufferAccessStrategy strategy,
     626             :                                       bool *foundPtr, IOContext io_context);
     627             : static bool AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress);
     628             : static void CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete);
     629             : static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
     630             : static void FlushUnlockedBuffer(BufferDesc *buf, SMgrRelation reln,
     631             :                                 IOObject io_object, IOContext io_context);
     632             : static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
     633             :                         IOObject io_object, IOContext io_context);
     634             : static void FindAndDropRelationBuffers(RelFileLocator rlocator,
     635             :                                        ForkNumber forkNum,
     636             :                                        BlockNumber nForkBlock,
     637             :                                        BlockNumber firstDelBlock);
     638             : static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
     639             :                                            RelFileLocator dstlocator,
     640             :                                            ForkNumber forkNum, bool permanent);
     641             : static void AtProcExit_Buffers(int code, Datum arg);
     642             : static void CheckForBufferLeaks(void);
     643             : #ifdef USE_ASSERT_CHECKING
     644             : static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
     645             :                                        void *unused_context);
     646             : #endif
     647             : static int  rlocator_comparator(const void *p1, const void *p2);
     648             : static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
     649             : static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
     650             : static int  ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
     651             : 
     652             : 
     653             : /*
     654             :  * Implementation of PrefetchBuffer() for shared buffers.
     655             :  */
     656             : PrefetchBufferResult
     657       64138 : PrefetchSharedBuffer(SMgrRelation smgr_reln,
     658             :                      ForkNumber forkNum,
     659             :                      BlockNumber blockNum)
     660             : {
     661       64138 :     PrefetchBufferResult result = {InvalidBuffer, false};
     662             :     BufferTag   newTag;         /* identity of requested block */
     663             :     uint32      newHash;        /* hash value for newTag */
     664             :     LWLock     *newPartitionLock;   /* buffer partition lock for it */
     665             :     int         buf_id;
     666             : 
     667             :     Assert(BlockNumberIsValid(blockNum));
     668             : 
     669             :     /* create a tag so we can lookup the buffer */
     670       64138 :     InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
     671             :                   forkNum, blockNum);
     672             : 
     673             :     /* determine its hash code and partition lock ID */
     674       64138 :     newHash = BufTableHashCode(&newTag);
     675       64138 :     newPartitionLock = BufMappingPartitionLock(newHash);
     676             : 
     677             :     /* see if the block is in the buffer pool already */
     678       64138 :     LWLockAcquire(newPartitionLock, LW_SHARED);
     679       64138 :     buf_id = BufTableLookup(&newTag, newHash);
     680       64138 :     LWLockRelease(newPartitionLock);
     681             : 
     682             :     /* If not in buffers, initiate prefetch */
     683       64138 :     if (buf_id < 0)
     684             :     {
     685             : #ifdef USE_PREFETCH
     686             :         /*
     687             :          * Try to initiate an asynchronous read.  This returns false in
     688             :          * recovery if the relation file doesn't exist.
     689             :          */
     690       35630 :         if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
     691       17590 :             smgrprefetch(smgr_reln, forkNum, blockNum, 1))
     692             :         {
     693       17590 :             result.initiated_io = true;
     694             :         }
     695             : #endif                          /* USE_PREFETCH */
     696             :     }
     697             :     else
     698             :     {
     699             :         /*
     700             :          * Report the buffer it was in at that time.  The caller may be able
     701             :          * to avoid a buffer table lookup, but it's not pinned and it must be
     702             :          * rechecked!
     703             :          */
     704       46098 :         result.recent_buffer = buf_id + 1;
     705             :     }
     706             : 
     707             :     /*
     708             :      * If the block *is* in buffers, we do nothing.  This is not really ideal:
     709             :      * the block might be just about to be evicted, which would be stupid
     710             :      * since we know we are going to need it soon.  But the only easy answer
     711             :      * is to bump the usage_count, which does not seem like a great solution:
     712             :      * when the caller does ultimately touch the block, usage_count would get
     713             :      * bumped again, resulting in too much favoritism for blocks that are
     714             :      * involved in a prefetch sequence. A real fix would involve some
     715             :      * additional per-buffer state, and it's not clear that there's enough of
     716             :      * a problem to justify that.
     717             :      */
     718             : 
     719       64138 :     return result;
     720             : }
     721             : 
     722             : /*
     723             :  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
     724             :  *
     725             :  * This is named by analogy to ReadBuffer but doesn't actually allocate a
     726             :  * buffer.  Instead it tries to ensure that a future ReadBuffer for the given
     727             :  * block will not be delayed by the I/O.  Prefetching is optional.
     728             :  *
     729             :  * There are three possible outcomes:
     730             :  *
     731             :  * 1.  If the block is already cached, the result includes a valid buffer that
     732             :  * could be used by the caller to avoid the need for a later buffer lookup, but
     733             :  * it's not pinned, so the caller must recheck it.
     734             :  *
     735             :  * 2.  If the kernel has been asked to initiate I/O, the initiated_io member is
     736             :  * true.  Currently there is no way to know if the data was already cached by
     737             :  * the kernel and therefore didn't really initiate I/O, and no way to know when
     738             :  * the I/O completes other than using synchronous ReadBuffer().
     739             :  *
     740             :  * 3.  Otherwise, the buffer wasn't already cached by PostgreSQL, and
     741             :  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
     742             :  * lack of a kernel facility), direct I/O is enabled, or the underlying
     743             :  * relation file wasn't found and we are in recovery.  (If the relation file
     744             :  * wasn't found and we are not in recovery, an error is raised).
     745             :  */
     746             : PrefetchBufferResult
     747       42516 : PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
     748             : {
     749             :     Assert(RelationIsValid(reln));
     750             :     Assert(BlockNumberIsValid(blockNum));
     751             : 
     752       42516 :     if (RelationUsesLocalBuffers(reln))
     753             :     {
     754             :         /* see comments in ReadBufferExtended */
     755        1566 :         if (RELATION_IS_OTHER_TEMP(reln))
     756           0 :             ereport(ERROR,
     757             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     758             :                      errmsg("cannot access temporary tables of other sessions")));
     759             : 
     760             :         /* pass it off to localbuf.c */
     761        1566 :         return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
     762             :     }
     763             :     else
     764             :     {
     765             :         /* pass it to the shared buffer version */
     766       40950 :         return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
     767             :     }
     768             : }
     769             : 
     770             : /*
     771             :  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
     772             :  *
     773             :  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
     774             :  * successful.  Return true if the buffer is valid and still has the expected
     775             :  * tag.  In that case, the buffer is pinned and the usage count is bumped.
     776             :  */
     777             : bool
     778        9152 : ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
     779             :                  Buffer recent_buffer)
     780             : {
     781             :     BufferDesc *bufHdr;
     782             :     BufferTag   tag;
     783             :     uint32      buf_state;
     784             : 
     785             :     Assert(BufferIsValid(recent_buffer));
     786             : 
     787        9152 :     ResourceOwnerEnlarge(CurrentResourceOwner);
     788        9152 :     ReservePrivateRefCountEntry();
     789        9152 :     InitBufferTag(&tag, &rlocator, forkNum, blockNum);
     790             : 
     791        9152 :     if (BufferIsLocal(recent_buffer))
     792             :     {
     793          64 :         int         b = -recent_buffer - 1;
     794             : 
     795          64 :         bufHdr = GetLocalBufferDescriptor(b);
     796          64 :         buf_state = pg_atomic_read_u32(&bufHdr->state);
     797             : 
     798             :         /* Is it still valid and holding the right tag? */
     799          64 :         if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
     800             :         {
     801          64 :             PinLocalBuffer(bufHdr, true);
     802             : 
     803          64 :             pgBufferUsage.local_blks_hit++;
     804             : 
     805          64 :             return true;
     806             :         }
     807             :     }
     808             :     else
     809             :     {
     810        9088 :         bufHdr = GetBufferDescriptor(recent_buffer - 1);
     811             : 
     812             :         /*
     813             :          * Is it still valid and holding the right tag?  We do an unlocked tag
     814             :          * comparison first, to make it unlikely that we'll increment the
     815             :          * usage counter of the wrong buffer, if someone calls us with a very
     816             :          * out of date recent_buffer.  Then we'll check it again if we get the
     817             :          * pin.
     818             :          */
     819       18096 :         if (BufferTagsEqual(&tag, &bufHdr->tag) &&
     820        9008 :             PinBuffer(bufHdr, NULL, true))
     821             :         {
     822        8996 :             if (BufferTagsEqual(&tag, &bufHdr->tag))
     823             :             {
     824        8996 :                 pgBufferUsage.shared_blks_hit++;
     825        8996 :                 return true;
     826             :             }
     827           0 :             UnpinBuffer(bufHdr);
     828             :         }
     829             :     }
     830             : 
     831          92 :     return false;
     832             : }
     833             : 
     834             : /*
     835             :  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
     836             :  *      fork with RBM_NORMAL mode and default strategy.
     837             :  */
     838             : Buffer
     839    87732594 : ReadBuffer(Relation reln, BlockNumber blockNum)
     840             : {
     841    87732594 :     return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
     842             : }
     843             : 
     844             : /*
     845             :  * ReadBufferExtended -- returns a buffer containing the requested
     846             :  *      block of the requested relation.  If the blknum
     847             :  *      requested is P_NEW, extend the relation file and
     848             :  *      allocate a new block.  (Caller is responsible for
     849             :  *      ensuring that only one backend tries to extend a
     850             :  *      relation at the same time!)
     851             :  *
     852             :  * Returns: the buffer number for the buffer containing
     853             :  *      the block read.  The returned buffer has been pinned.
     854             :  *      Does not return on error --- elog's instead.
     855             :  *
     856             :  * Assume when this function is called, that reln has been opened already.
     857             :  *
     858             :  * In RBM_NORMAL mode, the page is read from disk, and the page header is
     859             :  * validated.  An error is thrown if the page header is not valid.  (But
     860             :  * note that an all-zero page is considered "valid"; see
     861             :  * PageIsVerified().)
     862             :  *
     863             :  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
     864             :  * valid, the page is zeroed instead of throwing an error. This is intended
     865             :  * for non-critical data, where the caller is prepared to repair errors.
     866             :  *
     867             :  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
     868             :  * filled with zeros instead of reading it from disk.  Useful when the caller
     869             :  * is going to fill the page from scratch, since this saves I/O and avoids
     870             :  * unnecessary failure if the page-on-disk has corrupt page headers.
     871             :  * The page is returned locked to ensure that the caller has a chance to
     872             :  * initialize the page before it's made visible to others.
     873             :  * Caution: do not use this mode to read a page that is beyond the relation's
     874             :  * current physical EOF; that is likely to cause problems in md.c when
     875             :  * the page is modified and written out. P_NEW is OK, though.
     876             :  *
     877             :  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
     878             :  * a cleanup-strength lock on the page.
     879             :  *
     880             :  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
     881             :  *
     882             :  * If strategy is not NULL, a nondefault buffer access strategy is used.
     883             :  * See buffer/README for details.
     884             :  */
     885             : inline Buffer
     886   105384530 : ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
     887             :                    ReadBufferMode mode, BufferAccessStrategy strategy)
     888             : {
     889             :     Buffer      buf;
     890             : 
     891             :     /*
     892             :      * Reject attempts to read non-local temporary relations; we would be
     893             :      * likely to get wrong data since we have no visibility into the owning
     894             :      * session's local buffers.
     895             :      */
     896   105384530 :     if (RELATION_IS_OTHER_TEMP(reln))
     897           0 :         ereport(ERROR,
     898             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     899             :                  errmsg("cannot access temporary tables of other sessions")));
     900             : 
     901             :     /*
     902             :      * Read the buffer, and update pgstat counters to reflect a cache hit or
     903             :      * miss.
     904             :      */
     905   105384530 :     buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
     906             :                             forkNum, blockNum, mode, strategy);
     907             : 
     908   105384484 :     return buf;
     909             : }
     910             : 
     911             : 
     912             : /*
     913             :  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
     914             :  *      a relcache entry for the relation.
     915             :  *
     916             :  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
     917             :  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
     918             :  * cannot be used for temporary relations (and making that work might be
     919             :  * difficult, unless we only want to read temporary relations for our own
     920             :  * ProcNumber).
     921             :  */
     922             : Buffer
     923    11539492 : ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
     924             :                           BlockNumber blockNum, ReadBufferMode mode,
     925             :                           BufferAccessStrategy strategy, bool permanent)
     926             : {
     927    11539492 :     SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
     928             : 
     929    11539492 :     return ReadBuffer_common(NULL, smgr,
     930             :                              permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
     931             :                              forkNum, blockNum,
     932             :                              mode, strategy);
     933             : }
     934             : 
     935             : /*
     936             :  * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
     937             :  */
     938             : Buffer
     939       92132 : ExtendBufferedRel(BufferManagerRelation bmr,
     940             :                   ForkNumber forkNum,
     941             :                   BufferAccessStrategy strategy,
     942             :                   uint32 flags)
     943             : {
     944             :     Buffer      buf;
     945       92132 :     uint32      extend_by = 1;
     946             : 
     947       92132 :     ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
     948             :                         &buf, &extend_by);
     949             : 
     950       92132 :     return buf;
     951             : }
     952             : 
     953             : /*
     954             :  * Extend relation by multiple blocks.
     955             :  *
     956             :  * Tries to extend the relation by extend_by blocks. Depending on the
     957             :  * availability of resources the relation may end up being extended by a
     958             :  * smaller number of pages (unless an error is thrown, always by at least one
     959             :  * page). *extended_by is updated to the number of pages the relation has been
     960             :  * extended to.
     961             :  *
     962             :  * buffers needs to be an array that is at least extend_by long. Upon
     963             :  * completion, the first extend_by array elements will point to a pinned
     964             :  * buffer.
     965             :  *
     966             :  * If EB_LOCK_FIRST is part of flags, the first returned buffer is
     967             :  * locked. This is useful for callers that want a buffer that is guaranteed to
     968             :  * be empty.
     969             :  */
     970             : BlockNumber
     971      324340 : ExtendBufferedRelBy(BufferManagerRelation bmr,
     972             :                     ForkNumber fork,
     973             :                     BufferAccessStrategy strategy,
     974             :                     uint32 flags,
     975             :                     uint32 extend_by,
     976             :                     Buffer *buffers,
     977             :                     uint32 *extended_by)
     978             : {
     979             :     Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
     980             :     Assert(bmr.smgr == NULL || bmr.relpersistence != '\0');
     981             :     Assert(extend_by > 0);
     982             : 
     983      324340 :     if (bmr.relpersistence == '\0')
     984      324340 :         bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
     985             : 
     986      324340 :     return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
     987             :                                    extend_by, InvalidBlockNumber,
     988             :                                    buffers, extended_by);
     989             : }
     990             : 
     991             : /*
     992             :  * Extend the relation so it is at least extend_to blocks large, return buffer
     993             :  * (extend_to - 1).
     994             :  *
     995             :  * This is useful for callers that want to write a specific page, regardless
     996             :  * of the current size of the relation (e.g. useful for visibilitymap and for
     997             :  * crash recovery).
     998             :  */
     999             : Buffer
    1000      104582 : ExtendBufferedRelTo(BufferManagerRelation bmr,
    1001             :                     ForkNumber fork,
    1002             :                     BufferAccessStrategy strategy,
    1003             :                     uint32 flags,
    1004             :                     BlockNumber extend_to,
    1005             :                     ReadBufferMode mode)
    1006             : {
    1007             :     BlockNumber current_size;
    1008      104582 :     uint32      extended_by = 0;
    1009      104582 :     Buffer      buffer = InvalidBuffer;
    1010             :     Buffer      buffers[64];
    1011             : 
    1012             :     Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
    1013             :     Assert(bmr.smgr == NULL || bmr.relpersistence != '\0');
    1014             :     Assert(extend_to != InvalidBlockNumber && extend_to > 0);
    1015             : 
    1016      104582 :     if (bmr.relpersistence == '\0')
    1017       14314 :         bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
    1018             : 
    1019             :     /*
    1020             :      * If desired, create the file if it doesn't exist.  If
    1021             :      * smgr_cached_nblocks[fork] is positive then it must exist, no need for
    1022             :      * an smgrexists call.
    1023             :      */
    1024      104582 :     if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
    1025       14314 :         (BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] == 0 ||
    1026          40 :          BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
    1027       14274 :         !smgrexists(BMR_GET_SMGR(bmr), fork))
    1028             :     {
    1029       14238 :         LockRelationForExtension(bmr.rel, ExclusiveLock);
    1030             : 
    1031             :         /* recheck, fork might have been created concurrently */
    1032       14238 :         if (!smgrexists(BMR_GET_SMGR(bmr), fork))
    1033       14236 :             smgrcreate(BMR_GET_SMGR(bmr), fork, flags & EB_PERFORMING_RECOVERY);
    1034             : 
    1035       14238 :         UnlockRelationForExtension(bmr.rel, ExclusiveLock);
    1036             :     }
    1037             : 
    1038             :     /*
    1039             :      * If requested, invalidate size cache, so that smgrnblocks asks the
    1040             :      * kernel.
    1041             :      */
    1042      104582 :     if (flags & EB_CLEAR_SIZE_CACHE)
    1043       14314 :         BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] = InvalidBlockNumber;
    1044             : 
    1045             :     /*
    1046             :      * Estimate how many pages we'll need to extend by. This avoids acquiring
    1047             :      * unnecessarily many victim buffers.
    1048             :      */
    1049      104582 :     current_size = smgrnblocks(BMR_GET_SMGR(bmr), fork);
    1050             : 
    1051             :     /*
    1052             :      * Since no-one else can be looking at the page contents yet, there is no
    1053             :      * difference between an exclusive lock and a cleanup-strength lock. Note
    1054             :      * that we pass the original mode to ReadBuffer_common() below, when
    1055             :      * falling back to reading the buffer to a concurrent relation extension.
    1056             :      */
    1057      104582 :     if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
    1058       89534 :         flags |= EB_LOCK_TARGET;
    1059             : 
    1060      213442 :     while (current_size < extend_to)
    1061             :     {
    1062      108860 :         uint32      num_pages = lengthof(buffers);
    1063             :         BlockNumber first_block;
    1064             : 
    1065      108860 :         if ((uint64) current_size + num_pages > extend_to)
    1066      108728 :             num_pages = extend_to - current_size;
    1067             : 
    1068      108860 :         first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
    1069             :                                               num_pages, extend_to,
    1070             :                                               buffers, &extended_by);
    1071             : 
    1072      108860 :         current_size = first_block + extended_by;
    1073             :         Assert(num_pages != 0 || current_size >= extend_to);
    1074             : 
    1075      232362 :         for (uint32 i = 0; i < extended_by; i++)
    1076             :         {
    1077      123502 :             if (first_block + i != extend_to - 1)
    1078       18946 :                 ReleaseBuffer(buffers[i]);
    1079             :             else
    1080      104556 :                 buffer = buffers[i];
    1081             :         }
    1082             :     }
    1083             : 
    1084             :     /*
    1085             :      * It's possible that another backend concurrently extended the relation.
    1086             :      * In that case read the buffer.
    1087             :      *
    1088             :      * XXX: Should we control this via a flag?
    1089             :      */
    1090      104582 :     if (buffer == InvalidBuffer)
    1091             :     {
    1092             :         Assert(extended_by == 0);
    1093          26 :         buffer = ReadBuffer_common(bmr.rel, BMR_GET_SMGR(bmr), bmr.relpersistence,
    1094             :                                    fork, extend_to - 1, mode, strategy);
    1095             :     }
    1096             : 
    1097      104582 :     return buffer;
    1098             : }
    1099             : 
    1100             : /*
    1101             :  * Lock and optionally zero a buffer, as part of the implementation of
    1102             :  * RBM_ZERO_AND_LOCK or RBM_ZERO_AND_CLEANUP_LOCK.  The buffer must be already
    1103             :  * pinned.  If the buffer is not already valid, it is zeroed and made valid.
    1104             :  */
    1105             : static void
    1106      639856 : ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
    1107             : {
    1108             :     BufferDesc *bufHdr;
    1109             :     bool        need_to_zero;
    1110      639856 :     bool        isLocalBuf = BufferIsLocal(buffer);
    1111             : 
    1112             :     Assert(mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
    1113             : 
    1114      639856 :     if (already_valid)
    1115             :     {
    1116             :         /*
    1117             :          * If the caller already knew the buffer was valid, we can skip some
    1118             :          * header interaction.  The caller just wants to lock the buffer.
    1119             :          */
    1120       74140 :         need_to_zero = false;
    1121             :     }
    1122      565716 :     else if (isLocalBuf)
    1123             :     {
    1124             :         /* Simple case for non-shared buffers. */
    1125          48 :         bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    1126          48 :         need_to_zero = StartLocalBufferIO(bufHdr, true, false);
    1127             :     }
    1128             :     else
    1129             :     {
    1130             :         /*
    1131             :          * Take BM_IO_IN_PROGRESS, or discover that BM_VALID has been set
    1132             :          * concurrently.  Even though we aren't doing I/O, that ensures that
    1133             :          * we don't zero a page that someone else has pinned.  An exclusive
    1134             :          * content lock wouldn't be enough, because readers are allowed to
    1135             :          * drop the content lock after determining that a tuple is visible
    1136             :          * (see buffer access rules in README).
    1137             :          */
    1138      565668 :         bufHdr = GetBufferDescriptor(buffer - 1);
    1139      565668 :         need_to_zero = StartBufferIO(bufHdr, true, false);
    1140             :     }
    1141             : 
    1142      639856 :     if (need_to_zero)
    1143             :     {
    1144      565716 :         memset(BufferGetPage(buffer), 0, BLCKSZ);
    1145             : 
    1146             :         /*
    1147             :          * Grab the buffer content lock before marking the page as valid, to
    1148             :          * make sure that no other backend sees the zeroed page before the
    1149             :          * caller has had a chance to initialize it.
    1150             :          *
    1151             :          * Since no-one else can be looking at the page contents yet, there is
    1152             :          * no difference between an exclusive lock and a cleanup-strength
    1153             :          * lock. (Note that we cannot use LockBuffer() or
    1154             :          * LockBufferForCleanup() here, because they assert that the buffer is
    1155             :          * already valid.)
    1156             :          */
    1157      565716 :         if (!isLocalBuf)
    1158      565668 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    1159             : 
    1160             :         /* Set BM_VALID, terminate IO, and wake up any waiters */
    1161      565716 :         if (isLocalBuf)
    1162          48 :             TerminateLocalBufferIO(bufHdr, false, BM_VALID, false);
    1163             :         else
    1164      565668 :             TerminateBufferIO(bufHdr, false, BM_VALID, true, false);
    1165             :     }
    1166       74140 :     else if (!isLocalBuf)
    1167             :     {
    1168             :         /*
    1169             :          * The buffer is valid, so we can't zero it.  The caller still expects
    1170             :          * the page to be locked on return.
    1171             :          */
    1172       74100 :         if (mode == RBM_ZERO_AND_LOCK)
    1173       73962 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    1174             :         else
    1175         138 :             LockBufferForCleanup(buffer);
    1176             :     }
    1177      639856 : }
    1178             : 
    1179             : /*
    1180             :  * Pin a buffer for a given block.  *foundPtr is set to true if the block was
    1181             :  * already present, or false if more work is required to either read it in or
    1182             :  * zero it.
    1183             :  */
    1184             : static pg_attribute_always_inline Buffer
    1185   125208422 : PinBufferForBlock(Relation rel,
    1186             :                   SMgrRelation smgr,
    1187             :                   char persistence,
    1188             :                   ForkNumber forkNum,
    1189             :                   BlockNumber blockNum,
    1190             :                   BufferAccessStrategy strategy,
    1191             :                   bool *foundPtr)
    1192             : {
    1193             :     BufferDesc *bufHdr;
    1194             :     IOContext   io_context;
    1195             :     IOObject    io_object;
    1196             : 
    1197             :     Assert(blockNum != P_NEW);
    1198             : 
    1199             :     /* Persistence should be set before */
    1200             :     Assert((persistence == RELPERSISTENCE_TEMP ||
    1201             :             persistence == RELPERSISTENCE_PERMANENT ||
    1202             :             persistence == RELPERSISTENCE_UNLOGGED));
    1203             : 
    1204   125208422 :     if (persistence == RELPERSISTENCE_TEMP)
    1205             :     {
    1206     2554400 :         io_context = IOCONTEXT_NORMAL;
    1207     2554400 :         io_object = IOOBJECT_TEMP_RELATION;
    1208             :     }
    1209             :     else
    1210             :     {
    1211   122654022 :         io_context = IOContextForStrategy(strategy);
    1212   122654022 :         io_object = IOOBJECT_RELATION;
    1213             :     }
    1214             : 
    1215             :     TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
    1216             :                                        smgr->smgr_rlocator.locator.spcOid,
    1217             :                                        smgr->smgr_rlocator.locator.dbOid,
    1218             :                                        smgr->smgr_rlocator.locator.relNumber,
    1219             :                                        smgr->smgr_rlocator.backend);
    1220             : 
    1221   125208422 :     if (persistence == RELPERSISTENCE_TEMP)
    1222             :     {
    1223     2554400 :         bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, foundPtr);
    1224     2554388 :         if (*foundPtr)
    1225     2537604 :             pgBufferUsage.local_blks_hit++;
    1226             :     }
    1227             :     else
    1228             :     {
    1229   122654022 :         bufHdr = BufferAlloc(smgr, persistence, forkNum, blockNum,
    1230             :                              strategy, foundPtr, io_context);
    1231   122654022 :         if (*foundPtr)
    1232   119167922 :             pgBufferUsage.shared_blks_hit++;
    1233             :     }
    1234   125208410 :     if (rel)
    1235             :     {
    1236             :         /*
    1237             :          * While pgBufferUsage's "read" counter isn't bumped unless we reach
    1238             :          * WaitReadBuffers() (so, not for hits, and not for buffers that are
    1239             :          * zeroed instead), the per-relation stats always count them.
    1240             :          */
    1241   113196802 :         pgstat_count_buffer_read(rel);
    1242   113196802 :         if (*foundPtr)
    1243   110624066 :             pgstat_count_buffer_hit(rel);
    1244             :     }
    1245   125208410 :     if (*foundPtr)
    1246             :     {
    1247   121705526 :         pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
    1248   121705526 :         if (VacuumCostActive)
    1249     4841208 :             VacuumCostBalance += VacuumCostPageHit;
    1250             : 
    1251             :         TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
    1252             :                                           smgr->smgr_rlocator.locator.spcOid,
    1253             :                                           smgr->smgr_rlocator.locator.dbOid,
    1254             :                                           smgr->smgr_rlocator.locator.relNumber,
    1255             :                                           smgr->smgr_rlocator.backend,
    1256             :                                           true);
    1257             :     }
    1258             : 
    1259   125208410 :     return BufferDescriptorGetBuffer(bufHdr);
    1260             : }
    1261             : 
    1262             : /*
    1263             :  * ReadBuffer_common -- common logic for all ReadBuffer variants
    1264             :  *
    1265             :  * smgr is required, rel is optional unless using P_NEW.
    1266             :  */
    1267             : static pg_attribute_always_inline Buffer
    1268   116924048 : ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence,
    1269             :                   ForkNumber forkNum,
    1270             :                   BlockNumber blockNum, ReadBufferMode mode,
    1271             :                   BufferAccessStrategy strategy)
    1272             : {
    1273             :     ReadBuffersOperation operation;
    1274             :     Buffer      buffer;
    1275             :     int         flags;
    1276             :     char        persistence;
    1277             : 
    1278             :     /*
    1279             :      * Backward compatibility path, most code should use ExtendBufferedRel()
    1280             :      * instead, as acquiring the extension lock inside ExtendBufferedRel()
    1281             :      * scales a lot better.
    1282             :      */
    1283   116924048 :     if (unlikely(blockNum == P_NEW))
    1284             :     {
    1285         522 :         uint32      flags = EB_SKIP_EXTENSION_LOCK;
    1286             : 
    1287             :         /*
    1288             :          * Since no-one else can be looking at the page contents yet, there is
    1289             :          * no difference between an exclusive lock and a cleanup-strength
    1290             :          * lock.
    1291             :          */
    1292         522 :         if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
    1293           0 :             flags |= EB_LOCK_FIRST;
    1294             : 
    1295         522 :         return ExtendBufferedRel(BMR_REL(rel), forkNum, strategy, flags);
    1296             :     }
    1297             : 
    1298   116923526 :     if (rel)
    1299   105384034 :         persistence = rel->rd_rel->relpersistence;
    1300             :     else
    1301    11539492 :         persistence = smgr_persistence;
    1302             : 
    1303   116923526 :     if (unlikely(mode == RBM_ZERO_AND_CLEANUP_LOCK ||
    1304             :                  mode == RBM_ZERO_AND_LOCK))
    1305             :     {
    1306             :         bool        found;
    1307             : 
    1308      639856 :         buffer = PinBufferForBlock(rel, smgr, persistence,
    1309             :                                    forkNum, blockNum, strategy, &found);
    1310      639856 :         ZeroAndLockBuffer(buffer, mode, found);
    1311      639856 :         return buffer;
    1312             :     }
    1313             : 
    1314             :     /*
    1315             :      * Signal that we are going to immediately wait. If we're immediately
    1316             :      * waiting, there is no benefit in actually executing the IO
    1317             :      * asynchronously, it would just add dispatch overhead.
    1318             :      */
    1319   116283670 :     flags = READ_BUFFERS_SYNCHRONOUSLY;
    1320   116283670 :     if (mode == RBM_ZERO_ON_ERROR)
    1321     2672158 :         flags |= READ_BUFFERS_ZERO_ON_ERROR;
    1322   116283670 :     operation.smgr = smgr;
    1323   116283670 :     operation.rel = rel;
    1324   116283670 :     operation.persistence = persistence;
    1325   116283670 :     operation.forknum = forkNum;
    1326   116283670 :     operation.strategy = strategy;
    1327   116283670 :     if (StartReadBuffer(&operation,
    1328             :                         &buffer,
    1329             :                         blockNum,
    1330             :                         flags))
    1331     1425602 :         WaitReadBuffers(&operation);
    1332             : 
    1333   116283624 :     return buffer;
    1334             : }
    1335             : 
    1336             : static pg_attribute_always_inline bool
    1337   124225490 : StartReadBuffersImpl(ReadBuffersOperation *operation,
    1338             :                      Buffer *buffers,
    1339             :                      BlockNumber blockNum,
    1340             :                      int *nblocks,
    1341             :                      int flags,
    1342             :                      bool allow_forwarding)
    1343             : {
    1344   124225490 :     int         actual_nblocks = *nblocks;
    1345   124225490 :     int         maxcombine = 0;
    1346             :     bool        did_start_io;
    1347             : 
    1348             :     Assert(*nblocks == 1 || allow_forwarding);
    1349             :     Assert(*nblocks > 0);
    1350             :     Assert(*nblocks <= MAX_IO_COMBINE_LIMIT);
    1351             : 
    1352   127162658 :     for (int i = 0; i < actual_nblocks; ++i)
    1353             :     {
    1354             :         bool        found;
    1355             : 
    1356   124571506 :         if (allow_forwarding && buffers[i] != InvalidBuffer)
    1357        2940 :         {
    1358             :             BufferDesc *bufHdr;
    1359             : 
    1360             :             /*
    1361             :              * This is a buffer that was pinned by an earlier call to
    1362             :              * StartReadBuffers(), but couldn't be handled in one operation at
    1363             :              * that time.  The operation was split, and the caller has passed
    1364             :              * an already pinned buffer back to us to handle the rest of the
    1365             :              * operation.  It must continue at the expected block number.
    1366             :              */
    1367             :             Assert(BufferGetBlockNumber(buffers[i]) == blockNum + i);
    1368             : 
    1369             :             /*
    1370             :              * It might be an already valid buffer (a hit) that followed the
    1371             :              * final contiguous block of an earlier I/O (a miss) marking the
    1372             :              * end of it, or a buffer that some other backend has since made
    1373             :              * valid by performing the I/O for us, in which case we can handle
    1374             :              * it as a hit now.  It is safe to check for a BM_VALID flag with
    1375             :              * a relaxed load, because we got a fresh view of it while pinning
    1376             :              * it in the previous call.
    1377             :              *
    1378             :              * On the other hand if we don't see BM_VALID yet, it must be an
    1379             :              * I/O that was split by the previous call and we need to try to
    1380             :              * start a new I/O from this block.  We're also racing against any
    1381             :              * other backend that might start the I/O or even manage to mark
    1382             :              * it BM_VALID after this check, but StartBufferIO() will handle
    1383             :              * those cases.
    1384             :              */
    1385        2940 :             if (BufferIsLocal(buffers[i]))
    1386           4 :                 bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
    1387             :             else
    1388        2936 :                 bufHdr = GetBufferDescriptor(buffers[i] - 1);
    1389             :             Assert(pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID);
    1390        2940 :             found = pg_atomic_read_u32(&bufHdr->state) & BM_VALID;
    1391             :         }
    1392             :         else
    1393             :         {
    1394   124568554 :             buffers[i] = PinBufferForBlock(operation->rel,
    1395             :                                            operation->smgr,
    1396   124568566 :                                            operation->persistence,
    1397             :                                            operation->forknum,
    1398             :                                            blockNum + i,
    1399             :                                            operation->strategy,
    1400             :                                            &found);
    1401             :         }
    1402             : 
    1403   124571494 :         if (found)
    1404             :         {
    1405             :             /*
    1406             :              * We have a hit.  If it's the first block in the requested range,
    1407             :              * we can return it immediately and report that WaitReadBuffers()
    1408             :              * does not need to be called.  If the initial value of *nblocks
    1409             :              * was larger, the caller will have to call again for the rest.
    1410             :              */
    1411   121634326 :             if (i == 0)
    1412             :             {
    1413   121631382 :                 *nblocks = 1;
    1414             : 
    1415             : #ifdef USE_ASSERT_CHECKING
    1416             : 
    1417             :                 /*
    1418             :                  * Initialize enough of ReadBuffersOperation to make
    1419             :                  * CheckReadBuffersOperation() work. Outside of assertions
    1420             :                  * that's not necessary when no IO is issued.
    1421             :                  */
    1422             :                 operation->buffers = buffers;
    1423             :                 operation->blocknum = blockNum;
    1424             :                 operation->nblocks = 1;
    1425             :                 operation->nblocks_done = 1;
    1426             :                 CheckReadBuffersOperation(operation, true);
    1427             : #endif
    1428   121631382 :                 return false;
    1429             :             }
    1430             : 
    1431             :             /*
    1432             :              * Otherwise we already have an I/O to perform, but this block
    1433             :              * can't be included as it is already valid.  Split the I/O here.
    1434             :              * There may or may not be more blocks requiring I/O after this
    1435             :              * one, we haven't checked, but they can't be contiguous with this
    1436             :              * one in the way.  We'll leave this buffer pinned, forwarding it
    1437             :              * to the next call, avoiding the need to unpin it here and re-pin
    1438             :              * it in the next call.
    1439             :              */
    1440        2944 :             actual_nblocks = i;
    1441        2944 :             break;
    1442             :         }
    1443             :         else
    1444             :         {
    1445             :             /*
    1446             :              * Check how many blocks we can cover with the same IO. The smgr
    1447             :              * implementation might e.g. be limited due to a segment boundary.
    1448             :              */
    1449     2937168 :             if (i == 0 && actual_nblocks > 1)
    1450             :             {
    1451       69540 :                 maxcombine = smgrmaxcombine(operation->smgr,
    1452             :                                             operation->forknum,
    1453             :                                             blockNum);
    1454       69540 :                 if (unlikely(maxcombine < actual_nblocks))
    1455             :                 {
    1456           0 :                     elog(DEBUG2, "limiting nblocks at %u from %u to %u",
    1457             :                          blockNum, actual_nblocks, maxcombine);
    1458           0 :                     actual_nblocks = maxcombine;
    1459             :                 }
    1460             :             }
    1461             :         }
    1462             :     }
    1463     2594096 :     *nblocks = actual_nblocks;
    1464             : 
    1465             :     /* Populate information needed for I/O. */
    1466     2594096 :     operation->buffers = buffers;
    1467     2594096 :     operation->blocknum = blockNum;
    1468     2594096 :     operation->flags = flags;
    1469     2594096 :     operation->nblocks = actual_nblocks;
    1470     2594096 :     operation->nblocks_done = 0;
    1471     2594096 :     pgaio_wref_clear(&operation->io_wref);
    1472             : 
    1473             :     /*
    1474             :      * When using AIO, start the IO in the background. If not, issue prefetch
    1475             :      * requests if desired by the caller.
    1476             :      *
    1477             :      * The reason we have a dedicated path for IOMETHOD_SYNC here is to
    1478             :      * de-risk the introduction of AIO somewhat. It's a large architectural
    1479             :      * change, with lots of chances for unanticipated performance effects.
    1480             :      *
    1481             :      * Use of IOMETHOD_SYNC already leads to not actually performing IO
    1482             :      * asynchronously, but without the check here we'd execute IO earlier than
    1483             :      * we used to. Eventually this IOMETHOD_SYNC specific path should go away.
    1484             :      */
    1485     2594096 :     if (io_method != IOMETHOD_SYNC)
    1486             :     {
    1487             :         /*
    1488             :          * Try to start IO asynchronously. It's possible that no IO needs to
    1489             :          * be started, if another backend already performed the IO.
    1490             :          *
    1491             :          * Note that if an IO is started, it might not cover the entire
    1492             :          * requested range, e.g. because an intermediary block has been read
    1493             :          * in by another backend.  In that case any "trailing" buffers we
    1494             :          * already pinned above will be "forwarded" by read_stream.c to the
    1495             :          * next call to StartReadBuffers().
    1496             :          *
    1497             :          * This is signalled to the caller by decrementing *nblocks *and*
    1498             :          * reducing operation->nblocks. The latter is done here, but not below
    1499             :          * WaitReadBuffers(), as in WaitReadBuffers() we can't "shorten" the
    1500             :          * overall read size anymore, we need to retry until done in its
    1501             :          * entirety or until failed.
    1502             :          */
    1503     2591940 :         did_start_io = AsyncReadBuffers(operation, nblocks);
    1504             : 
    1505     2591910 :         operation->nblocks = *nblocks;
    1506             :     }
    1507             :     else
    1508             :     {
    1509        2156 :         operation->flags |= READ_BUFFERS_SYNCHRONOUSLY;
    1510             : 
    1511        2156 :         if (flags & READ_BUFFERS_ISSUE_ADVICE)
    1512             :         {
    1513             :             /*
    1514             :              * In theory we should only do this if PinBufferForBlock() had to
    1515             :              * allocate new buffers above.  That way, if two calls to
    1516             :              * StartReadBuffers() were made for the same blocks before
    1517             :              * WaitReadBuffers(), only the first would issue the advice.
    1518             :              * That'd be a better simulation of true asynchronous I/O, which
    1519             :              * would only start the I/O once, but isn't done here for
    1520             :              * simplicity.
    1521             :              */
    1522           4 :             smgrprefetch(operation->smgr,
    1523             :                          operation->forknum,
    1524             :                          blockNum,
    1525             :                          actual_nblocks);
    1526             :         }
    1527             : 
    1528             :         /*
    1529             :          * Indicate that WaitReadBuffers() should be called. WaitReadBuffers()
    1530             :          * will initiate the necessary IO.
    1531             :          */
    1532        2156 :         did_start_io = true;
    1533             :     }
    1534             : 
    1535     2594066 :     CheckReadBuffersOperation(operation, !did_start_io);
    1536             : 
    1537     2594066 :     return did_start_io;
    1538             : }
    1539             : 
    1540             : /*
    1541             :  * Begin reading a range of blocks beginning at blockNum and extending for
    1542             :  * *nblocks.  *nblocks and the buffers array are in/out parameters.  On entry,
    1543             :  * the buffers elements covered by *nblocks must hold either InvalidBuffer or
    1544             :  * buffers forwarded by an earlier call to StartReadBuffers() that was split
    1545             :  * and is now being continued.  On return, *nblocks holds the number of blocks
    1546             :  * accepted by this operation.  If it is less than the original number then
    1547             :  * this operation has been split, but buffer elements up to the original
    1548             :  * requested size may hold forwarded buffers to be used for a continuing
    1549             :  * operation.  The caller must either start a new I/O beginning at the block
    1550             :  * immediately following the blocks accepted by this call and pass those
    1551             :  * buffers back in, or release them if it chooses not to.  It shouldn't make
    1552             :  * any other use of or assumptions about forwarded buffers.
    1553             :  *
    1554             :  * If false is returned, no I/O is necessary and the buffers covered by
    1555             :  * *nblocks on exit are valid and ready to be accessed.  If true is returned,
    1556             :  * an I/O has been started, and WaitReadBuffers() must be called with the same
    1557             :  * operation object before the buffers covered by *nblocks on exit can be
    1558             :  * accessed.  Along with the operation object, the caller-supplied array of
    1559             :  * buffers must remain valid until WaitReadBuffers() is called, and any
    1560             :  * forwarded buffers must also be preserved for a continuing call unless
    1561             :  * they are explicitly released.
    1562             :  */
    1563             : bool
    1564     3665316 : StartReadBuffers(ReadBuffersOperation *operation,
    1565             :                  Buffer *buffers,
    1566             :                  BlockNumber blockNum,
    1567             :                  int *nblocks,
    1568             :                  int flags)
    1569             : {
    1570     3665316 :     return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags,
    1571             :                                 true /* expect forwarded buffers */ );
    1572             : }
    1573             : 
    1574             : /*
    1575             :  * Single block version of the StartReadBuffers().  This might save a few
    1576             :  * instructions when called from another translation unit, because it is
    1577             :  * specialized for nblocks == 1.
    1578             :  *
    1579             :  * This version does not support "forwarded" buffers: they cannot be created
    1580             :  * by reading only one block and *buffer is ignored on entry.
    1581             :  */
    1582             : bool
    1583   120560174 : StartReadBuffer(ReadBuffersOperation *operation,
    1584             :                 Buffer *buffer,
    1585             :                 BlockNumber blocknum,
    1586             :                 int flags)
    1587             : {
    1588   120560174 :     int         nblocks = 1;
    1589             :     bool        result;
    1590             : 
    1591   120560174 :     result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags,
    1592             :                                   false /* single block, no forwarding */ );
    1593             :     Assert(nblocks == 1);       /* single block can't be short */
    1594             : 
    1595   120560144 :     return result;
    1596             : }
    1597             : 
    1598             : /*
    1599             :  * Perform sanity checks on the ReadBuffersOperation.
    1600             :  */
    1601             : static void
    1602     7747084 : CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete)
    1603             : {
    1604             : #ifdef USE_ASSERT_CHECKING
    1605             :     Assert(operation->nblocks_done <= operation->nblocks);
    1606             :     Assert(!is_complete || operation->nblocks == operation->nblocks_done);
    1607             : 
    1608             :     for (int i = 0; i < operation->nblocks; i++)
    1609             :     {
    1610             :         Buffer      buffer = operation->buffers[i];
    1611             :         BufferDesc *buf_hdr = BufferIsLocal(buffer) ?
    1612             :             GetLocalBufferDescriptor(-buffer - 1) :
    1613             :             GetBufferDescriptor(buffer - 1);
    1614             : 
    1615             :         Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
    1616             :         Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_TAG_VALID);
    1617             : 
    1618             :         if (i < operation->nblocks_done)
    1619             :             Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_VALID);
    1620             :     }
    1621             : #endif
    1622     7747084 : }
    1623             : 
    1624             : /* helper for ReadBuffersCanStartIO(), to avoid repetition */
    1625             : static inline bool
    1626     2937204 : ReadBuffersCanStartIOOnce(Buffer buffer, bool nowait)
    1627             : {
    1628     2937204 :     if (BufferIsLocal(buffer))
    1629       16736 :         return StartLocalBufferIO(GetLocalBufferDescriptor(-buffer - 1),
    1630             :                                   true, nowait);
    1631             :     else
    1632     2920468 :         return StartBufferIO(GetBufferDescriptor(buffer - 1), true, nowait);
    1633             : }
    1634             : 
    1635             : /*
    1636             :  * Helper for AsyncReadBuffers that tries to get the buffer ready for IO.
    1637             :  */
    1638             : static inline bool
    1639     2937204 : ReadBuffersCanStartIO(Buffer buffer, bool nowait)
    1640             : {
    1641             :     /*
    1642             :      * If this backend currently has staged IO, we need to submit the pending
    1643             :      * IO before waiting for the right to issue IO, to avoid the potential for
    1644             :      * deadlocks (and, more commonly, unnecessary delays for other backends).
    1645             :      */
    1646     2937204 :     if (!nowait && pgaio_have_staged())
    1647             :     {
    1648        1178 :         if (ReadBuffersCanStartIOOnce(buffer, true))
    1649        1178 :             return true;
    1650             : 
    1651             :         /*
    1652             :          * Unfortunately StartBufferIO() returning false doesn't allow to
    1653             :          * distinguish between the buffer already being valid and IO already
    1654             :          * being in progress. Since IO already being in progress is quite
    1655             :          * rare, this approach seems fine.
    1656             :          */
    1657           0 :         pgaio_submit_staged();
    1658             :     }
    1659             : 
    1660     2936026 :     return ReadBuffersCanStartIOOnce(buffer, nowait);
    1661             : }
    1662             : 
    1663             : /*
    1664             :  * Helper for WaitReadBuffers() that processes the results of a readv
    1665             :  * operation, raising an error if necessary.
    1666             :  */
    1667             : static void
    1668     2575470 : ProcessReadBuffersResult(ReadBuffersOperation *operation)
    1669             : {
    1670     2575470 :     PgAioReturn *aio_ret = &operation->io_return;
    1671     2575470 :     PgAioResultStatus rs = aio_ret->result.status;
    1672     2575470 :     int         newly_read_blocks = 0;
    1673             : 
    1674             :     Assert(pgaio_wref_valid(&operation->io_wref));
    1675             :     Assert(aio_ret->result.status != PGAIO_RS_UNKNOWN);
    1676             : 
    1677             :     /*
    1678             :      * SMGR reports the number of blocks successfully read as the result of
    1679             :      * the IO operation. Thus we can simply add that to ->nblocks_done.
    1680             :      */
    1681             : 
    1682     2575470 :     if (likely(rs != PGAIO_RS_ERROR))
    1683     2575412 :         newly_read_blocks = aio_ret->result.result;
    1684             : 
    1685     2575470 :     if (rs == PGAIO_RS_ERROR || rs == PGAIO_RS_WARNING)
    1686          90 :         pgaio_result_report(aio_ret->result, &aio_ret->target_data,
    1687             :                             rs == PGAIO_RS_ERROR ? ERROR : WARNING);
    1688     2575380 :     else if (aio_ret->result.status == PGAIO_RS_PARTIAL)
    1689             :     {
    1690             :         /*
    1691             :          * We'll retry, so we just emit a debug message to the server log (or
    1692             :          * not even that in prod scenarios).
    1693             :          */
    1694          20 :         pgaio_result_report(aio_ret->result, &aio_ret->target_data, DEBUG1);
    1695          20 :         elog(DEBUG3, "partial read, will retry");
    1696             :     }
    1697             : 
    1698             :     Assert(newly_read_blocks > 0);
    1699             :     Assert(newly_read_blocks <= MAX_IO_COMBINE_LIMIT);
    1700             : 
    1701     2575412 :     operation->nblocks_done += newly_read_blocks;
    1702             : 
    1703             :     Assert(operation->nblocks_done <= operation->nblocks);
    1704     2575412 : }
    1705             : 
    1706             : void
    1707     2575450 : WaitReadBuffers(ReadBuffersOperation *operation)
    1708             : {
    1709     2575450 :     PgAioReturn *aio_ret = &operation->io_return;
    1710             :     IOContext   io_context;
    1711             :     IOObject    io_object;
    1712             : 
    1713     2575450 :     if (operation->persistence == RELPERSISTENCE_TEMP)
    1714             :     {
    1715        2982 :         io_context = IOCONTEXT_NORMAL;
    1716        2982 :         io_object = IOOBJECT_TEMP_RELATION;
    1717             :     }
    1718             :     else
    1719             :     {
    1720     2572468 :         io_context = IOContextForStrategy(operation->strategy);
    1721     2572468 :         io_object = IOOBJECT_RELATION;
    1722             :     }
    1723             : 
    1724             :     /*
    1725             :      * If we get here without an IO operation having been issued, the
    1726             :      * io_method == IOMETHOD_SYNC path must have been used. Otherwise the
    1727             :      * caller should not have called WaitReadBuffers().
    1728             :      *
    1729             :      * In the case of IOMETHOD_SYNC, we start - as we used to before the
    1730             :      * introducing of AIO - the IO in WaitReadBuffers(). This is done as part
    1731             :      * of the retry logic below, no extra code is required.
    1732             :      *
    1733             :      * This path is expected to eventually go away.
    1734             :      */
    1735     2575450 :     if (!pgaio_wref_valid(&operation->io_wref) && io_method != IOMETHOD_SYNC)
    1736           0 :         elog(ERROR, "waiting for read operation that didn't read");
    1737             : 
    1738             :     /*
    1739             :      * To handle partial reads, and IOMETHOD_SYNC, we re-issue IO until we're
    1740             :      * done. We may need multiple retries, not just because we could get
    1741             :      * multiple partial reads, but also because some of the remaining
    1742             :      * to-be-read buffers may have been read in by other backends, limiting
    1743             :      * the IO size.
    1744             :      */
    1745             :     while (true)
    1746        2176 :     {
    1747             :         int         ignored_nblocks_progress;
    1748             : 
    1749     2577626 :         CheckReadBuffersOperation(operation, false);
    1750             : 
    1751             :         /*
    1752             :          * If there is an IO associated with the operation, we may need to
    1753             :          * wait for it.
    1754             :          */
    1755     2577626 :         if (pgaio_wref_valid(&operation->io_wref))
    1756             :         {
    1757             :             /*
    1758             :              * Track the time spent waiting for the IO to complete. As
    1759             :              * tracking a wait even if we don't actually need to wait
    1760             :              *
    1761             :              * a) is not cheap, due to the timestamping overhead
    1762             :              *
    1763             :              * b) reports some time as waiting, even if we never waited
    1764             :              *
    1765             :              * we first check if we already know the IO is complete.
    1766             :              */
    1767     2575470 :             if (aio_ret->result.status == PGAIO_RS_UNKNOWN &&
    1768     1134160 :                 !pgaio_wref_check_done(&operation->io_wref))
    1769             :             {
    1770      439702 :                 instr_time  io_start = pgstat_prepare_io_time(track_io_timing);
    1771             : 
    1772      439702 :                 pgaio_wref_wait(&operation->io_wref);
    1773             : 
    1774             :                 /*
    1775             :                  * The IO operation itself was already counted earlier, in
    1776             :                  * AsyncReadBuffers(), this just accounts for the wait time.
    1777             :                  */
    1778      439702 :                 pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
    1779             :                                         io_start, 0, 0);
    1780             :             }
    1781             :             else
    1782             :             {
    1783             :                 Assert(pgaio_wref_check_done(&operation->io_wref));
    1784             :             }
    1785             : 
    1786             :             /*
    1787             :              * We now are sure the IO completed. Check the results. This
    1788             :              * includes reporting on errors if there were any.
    1789             :              */
    1790     2575470 :             ProcessReadBuffersResult(operation);
    1791             :         }
    1792             : 
    1793             :         /*
    1794             :          * Most of the time, the one IO we already started, will read in
    1795             :          * everything.  But we need to deal with partial reads and buffers not
    1796             :          * needing IO anymore.
    1797             :          */
    1798     2577568 :         if (operation->nblocks_done == operation->nblocks)
    1799     2575392 :             break;
    1800             : 
    1801        2176 :         CHECK_FOR_INTERRUPTS();
    1802             : 
    1803             :         /*
    1804             :          * This may only complete the IO partially, either because some
    1805             :          * buffers were already valid, or because of a partial read.
    1806             :          *
    1807             :          * NB: In contrast to after the AsyncReadBuffers() call in
    1808             :          * StartReadBuffers(), we do *not* reduce
    1809             :          * ReadBuffersOperation->nblocks here, callers expect the full
    1810             :          * operation to be completed at this point (as more operations may
    1811             :          * have been queued).
    1812             :          */
    1813        2176 :         AsyncReadBuffers(operation, &ignored_nblocks_progress);
    1814             :     }
    1815             : 
    1816     2575392 :     CheckReadBuffersOperation(operation, true);
    1817             : 
    1818             :     /* NB: READ_DONE tracepoint was already executed in completion callback */
    1819     2575392 : }
    1820             : 
    1821             : /*
    1822             :  * Initiate IO for the ReadBuffersOperation
    1823             :  *
    1824             :  * This function only starts a single IO at a time. The size of the IO may be
    1825             :  * limited to below the to-be-read blocks, if one of the buffers has
    1826             :  * concurrently been read in. If the first to-be-read buffer is already valid,
    1827             :  * no IO will be issued.
    1828             :  *
    1829             :  * To support retries after partial reads, the first operation->nblocks_done
    1830             :  * buffers are skipped.
    1831             :  *
    1832             :  * On return *nblocks_progress is updated to reflect the number of buffers
    1833             :  * affected by the call. If the first buffer is valid, *nblocks_progress is
    1834             :  * set to 1 and operation->nblocks_done is incremented.
    1835             :  *
    1836             :  * Returns true if IO was initiated, false if no IO was necessary.
    1837             :  */
    1838             : static bool
    1839     2594190 : AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress)
    1840             : {
    1841     2594190 :     Buffer     *buffers = &operation->buffers[0];
    1842     2594190 :     int         flags = operation->flags;
    1843     2594190 :     BlockNumber blocknum = operation->blocknum;
    1844     2594190 :     ForkNumber  forknum = operation->forknum;
    1845     2594190 :     char        persistence = operation->persistence;
    1846     2594190 :     int16       nblocks_done = operation->nblocks_done;
    1847     2594190 :     Buffer     *io_buffers = &operation->buffers[nblocks_done];
    1848     2594190 :     int         io_buffers_len = 0;
    1849             :     PgAioHandle *ioh;
    1850     2594190 :     uint32      ioh_flags = 0;
    1851             :     void       *io_pages[MAX_IO_COMBINE_LIMIT];
    1852             :     IOContext   io_context;
    1853             :     IOObject    io_object;
    1854             :     bool        did_start_io;
    1855             : 
    1856             :     /*
    1857             :      * When this IO is executed synchronously, either because the caller will
    1858             :      * immediately block waiting for the IO or because IOMETHOD_SYNC is used,
    1859             :      * the AIO subsystem needs to know.
    1860             :      */
    1861     2594190 :     if (flags & READ_BUFFERS_SYNCHRONOUSLY)
    1862     1435712 :         ioh_flags |= PGAIO_HF_SYNCHRONOUS;
    1863             : 
    1864     2594190 :     if (persistence == RELPERSISTENCE_TEMP)
    1865             :     {
    1866        3570 :         io_context = IOCONTEXT_NORMAL;
    1867        3570 :         io_object = IOOBJECT_TEMP_RELATION;
    1868        3570 :         ioh_flags |= PGAIO_HF_REFERENCES_LOCAL;
    1869             :     }
    1870             :     else
    1871             :     {
    1872     2590620 :         io_context = IOContextForStrategy(operation->strategy);
    1873     2590620 :         io_object = IOOBJECT_RELATION;
    1874             :     }
    1875             : 
    1876             :     /*
    1877             :      * If zero_damaged_pages is enabled, add the READ_BUFFERS_ZERO_ON_ERROR
    1878             :      * flag. The reason for that is that, hopefully, zero_damaged_pages isn't
    1879             :      * set globally, but on a per-session basis. The completion callback,
    1880             :      * which may be run in other processes, e.g. in IO workers, may have a
    1881             :      * different value of the zero_damaged_pages GUC.
    1882             :      *
    1883             :      * XXX: We probably should eventually use a different flag for
    1884             :      * zero_damaged_pages, so we can report different log levels / error codes
    1885             :      * for zero_damaged_pages and ZERO_ON_ERROR.
    1886             :      */
    1887     2594190 :     if (zero_damaged_pages)
    1888          32 :         flags |= READ_BUFFERS_ZERO_ON_ERROR;
    1889             : 
    1890             :     /*
    1891             :      * For the same reason as with zero_damaged_pages we need to use this
    1892             :      * backend's ignore_checksum_failure value.
    1893             :      */
    1894     2594190 :     if (ignore_checksum_failure)
    1895          16 :         flags |= READ_BUFFERS_IGNORE_CHECKSUM_FAILURES;
    1896             : 
    1897             : 
    1898             :     /*
    1899             :      * To be allowed to report stats in the local completion callback we need
    1900             :      * to prepare to report stats now. This ensures we can safely report the
    1901             :      * checksum failure even in a critical section.
    1902             :      */
    1903     2594190 :     pgstat_prepare_report_checksum_failure(operation->smgr->smgr_rlocator.locator.dbOid);
    1904             : 
    1905             :     /*
    1906             :      * Get IO handle before ReadBuffersCanStartIO(), as pgaio_io_acquire()
    1907             :      * might block, which we don't want after setting IO_IN_PROGRESS.
    1908             :      *
    1909             :      * If we need to wait for IO before we can get a handle, submit
    1910             :      * already-staged IO first, so that other backends don't need to wait.
    1911             :      * There wouldn't be a deadlock risk, as pgaio_io_acquire() just needs to
    1912             :      * wait for already submitted IO, which doesn't require additional locks,
    1913             :      * but it could still cause undesirable waits.
    1914             :      *
    1915             :      * A secondary benefit is that this would allow us to measure the time in
    1916             :      * pgaio_io_acquire() without causing undue timer overhead in the common,
    1917             :      * non-blocking, case.  However, currently the pgstats infrastructure
    1918             :      * doesn't really allow that, as it a) asserts that an operation can't
    1919             :      * have time without operations b) doesn't have an API to report
    1920             :      * "accumulated" time.
    1921             :      */
    1922     2594190 :     ioh = pgaio_io_acquire_nb(CurrentResourceOwner, &operation->io_return);
    1923     2594190 :     if (unlikely(!ioh))
    1924             :     {
    1925        6072 :         pgaio_submit_staged();
    1926             : 
    1927        6072 :         ioh = pgaio_io_acquire(CurrentResourceOwner, &operation->io_return);
    1928             :     }
    1929             : 
    1930             :     /*
    1931             :      * Check if we can start IO on the first to-be-read buffer.
    1932             :      *
    1933             :      * If an I/O is already in progress in another backend, we want to wait
    1934             :      * for the outcome: either done, or something went wrong and we will
    1935             :      * retry.
    1936             :      */
    1937     2594190 :     if (!ReadBuffersCanStartIO(buffers[nblocks_done], false))
    1938             :     {
    1939             :         /*
    1940             :          * Someone else has already completed this block, we're done.
    1941             :          *
    1942             :          * When IO is necessary, ->nblocks_done is updated in
    1943             :          * ProcessReadBuffersResult(), but that is not called if no IO is
    1944             :          * necessary. Thus update here.
    1945             :          */
    1946       18014 :         operation->nblocks_done += 1;
    1947       18014 :         *nblocks_progress = 1;
    1948             : 
    1949       18014 :         pgaio_io_release(ioh);
    1950       18014 :         pgaio_wref_clear(&operation->io_wref);
    1951       18014 :         did_start_io = false;
    1952             : 
    1953             :         /*
    1954             :          * Report and track this as a 'hit' for this backend, even though it
    1955             :          * must have started out as a miss in PinBufferForBlock(). The other
    1956             :          * backend will track this as a 'read'.
    1957             :          */
    1958             :         TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + operation->nblocks_done,
    1959             :                                           operation->smgr->smgr_rlocator.locator.spcOid,
    1960             :                                           operation->smgr->smgr_rlocator.locator.dbOid,
    1961             :                                           operation->smgr->smgr_rlocator.locator.relNumber,
    1962             :                                           operation->smgr->smgr_rlocator.backend,
    1963             :                                           true);
    1964             : 
    1965       18014 :         if (persistence == RELPERSISTENCE_TEMP)
    1966           0 :             pgBufferUsage.local_blks_hit += 1;
    1967             :         else
    1968       18014 :             pgBufferUsage.shared_blks_hit += 1;
    1969             : 
    1970       18014 :         if (operation->rel)
    1971       18014 :             pgstat_count_buffer_hit(operation->rel);
    1972             : 
    1973       18014 :         pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
    1974             : 
    1975       18014 :         if (VacuumCostActive)
    1976         132 :             VacuumCostBalance += VacuumCostPageHit;
    1977             :     }
    1978             :     else
    1979             :     {
    1980             :         instr_time  io_start;
    1981             : 
    1982             :         /* We found a buffer that we need to read in. */
    1983             :         Assert(io_buffers[0] == buffers[nblocks_done]);
    1984     2576176 :         io_pages[0] = BufferGetBlock(buffers[nblocks_done]);
    1985     2576176 :         io_buffers_len = 1;
    1986             : 
    1987             :         /*
    1988             :          * How many neighboring-on-disk blocks can we scatter-read into other
    1989             :          * buffers at the same time?  In this case we don't wait if we see an
    1990             :          * I/O already in progress.  We already set BM_IO_IN_PROGRESS for the
    1991             :          * head block, so we should get on with that I/O as soon as possible.
    1992             :          */
    1993     2919264 :         for (int i = nblocks_done + 1; i < operation->nblocks; i++)
    1994             :         {
    1995      343088 :             if (!ReadBuffersCanStartIO(buffers[i], true))
    1996           0 :                 break;
    1997             :             /* Must be consecutive block numbers. */
    1998             :             Assert(BufferGetBlockNumber(buffers[i - 1]) ==
    1999             :                    BufferGetBlockNumber(buffers[i]) - 1);
    2000             :             Assert(io_buffers[io_buffers_len] == buffers[i]);
    2001             : 
    2002      343088 :             io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
    2003             :         }
    2004             : 
    2005             :         /* get a reference to wait for in WaitReadBuffers() */
    2006     2576176 :         pgaio_io_get_wref(ioh, &operation->io_wref);
    2007             : 
    2008             :         /* provide the list of buffers to the completion callbacks */
    2009     2576176 :         pgaio_io_set_handle_data_32(ioh, (uint32 *) io_buffers, io_buffers_len);
    2010             : 
    2011     2576176 :         pgaio_io_register_callbacks(ioh,
    2012             :                                     persistence == RELPERSISTENCE_TEMP ?
    2013             :                                     PGAIO_HCB_LOCAL_BUFFER_READV :
    2014             :                                     PGAIO_HCB_SHARED_BUFFER_READV,
    2015             :                                     flags);
    2016             : 
    2017     2576176 :         pgaio_io_set_flag(ioh, ioh_flags);
    2018             : 
    2019             :         /* ---
    2020             :          * Even though we're trying to issue IO asynchronously, track the time
    2021             :          * in smgrstartreadv():
    2022             :          * - if io_method == IOMETHOD_SYNC, we will always perform the IO
    2023             :          *   immediately
    2024             :          * - the io method might not support the IO (e.g. worker IO for a temp
    2025             :          *   table)
    2026             :          * ---
    2027             :          */
    2028     2576102 :         io_start = pgstat_prepare_io_time(track_io_timing);
    2029     2576102 :         smgrstartreadv(ioh, operation->smgr, forknum,
    2030             :                        blocknum + nblocks_done,
    2031             :                        io_pages, io_buffers_len);
    2032     2576072 :         pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
    2033     2576072 :                                 io_start, 1, io_buffers_len * BLCKSZ);
    2034             : 
    2035     2576072 :         if (persistence == RELPERSISTENCE_TEMP)
    2036        3570 :             pgBufferUsage.local_blks_read += io_buffers_len;
    2037             :         else
    2038     2572502 :             pgBufferUsage.shared_blks_read += io_buffers_len;
    2039             : 
    2040             :         /*
    2041             :          * Track vacuum cost when issuing IO, not after waiting for it.
    2042             :          * Otherwise we could end up issuing a lot of IO in a short timespan,
    2043             :          * despite a low cost limit.
    2044             :          */
    2045     2576072 :         if (VacuumCostActive)
    2046       33784 :             VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
    2047             : 
    2048     2576072 :         *nblocks_progress = io_buffers_len;
    2049     2576072 :         did_start_io = true;
    2050             :     }
    2051             : 
    2052     2594086 :     return did_start_io;
    2053             : }
    2054             : 
    2055             : /*
    2056             :  * BufferAlloc -- subroutine for PinBufferForBlock.  Handles lookup of a shared
    2057             :  *      buffer.  If no buffer exists already, selects a replacement victim and
    2058             :  *      evicts the old page, but does NOT read in new page.
    2059             :  *
    2060             :  * "strategy" can be a buffer replacement strategy object, or NULL for
    2061             :  * the default strategy.  The selected buffer's usage_count is advanced when
    2062             :  * using the default strategy, but otherwise possibly not (see PinBuffer).
    2063             :  *
    2064             :  * The returned buffer is pinned and is already marked as holding the
    2065             :  * desired page.  If it already did have the desired page, *foundPtr is
    2066             :  * set true.  Otherwise, *foundPtr is set false.
    2067             :  *
    2068             :  * io_context is passed as an output parameter to avoid calling
    2069             :  * IOContextForStrategy() when there is a shared buffers hit and no IO
    2070             :  * statistics need be captured.
    2071             :  *
    2072             :  * No locks are held either at entry or exit.
    2073             :  */
    2074             : static pg_attribute_always_inline BufferDesc *
    2075   122654854 : BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
    2076             :             BlockNumber blockNum,
    2077             :             BufferAccessStrategy strategy,
    2078             :             bool *foundPtr, IOContext io_context)
    2079             : {
    2080             :     BufferTag   newTag;         /* identity of requested block */
    2081             :     uint32      newHash;        /* hash value for newTag */
    2082             :     LWLock     *newPartitionLock;   /* buffer partition lock for it */
    2083             :     int         existing_buf_id;
    2084             :     Buffer      victim_buffer;
    2085             :     BufferDesc *victim_buf_hdr;
    2086             :     uint32      victim_buf_state;
    2087   122654854 :     uint32      set_bits = 0;
    2088             : 
    2089             :     /* Make sure we will have room to remember the buffer pin */
    2090   122654854 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    2091   122654854 :     ReservePrivateRefCountEntry();
    2092             : 
    2093             :     /* create a tag so we can lookup the buffer */
    2094   122654854 :     InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
    2095             : 
    2096             :     /* determine its hash code and partition lock ID */
    2097   122654854 :     newHash = BufTableHashCode(&newTag);
    2098   122654854 :     newPartitionLock = BufMappingPartitionLock(newHash);
    2099             : 
    2100             :     /* see if the block is in the buffer pool already */
    2101   122654854 :     LWLockAcquire(newPartitionLock, LW_SHARED);
    2102   122654854 :     existing_buf_id = BufTableLookup(&newTag, newHash);
    2103   122654854 :     if (existing_buf_id >= 0)
    2104             :     {
    2105             :         BufferDesc *buf;
    2106             :         bool        valid;
    2107             : 
    2108             :         /*
    2109             :          * Found it.  Now, pin the buffer so no one can steal it from the
    2110             :          * buffer pool, and check to see if the correct data has been loaded
    2111             :          * into the buffer.
    2112             :          */
    2113   119183072 :         buf = GetBufferDescriptor(existing_buf_id);
    2114             : 
    2115   119183072 :         valid = PinBuffer(buf, strategy, false);
    2116             : 
    2117             :         /* Can release the mapping lock as soon as we've pinned it */
    2118   119183072 :         LWLockRelease(newPartitionLock);
    2119             : 
    2120   119183072 :         *foundPtr = true;
    2121             : 
    2122   119183072 :         if (!valid)
    2123             :         {
    2124             :             /*
    2125             :              * We can only get here if (a) someone else is still reading in
    2126             :              * the page, (b) a previous read attempt failed, or (c) someone
    2127             :              * called StartReadBuffers() but not yet WaitReadBuffers().
    2128             :              */
    2129       15284 :             *foundPtr = false;
    2130             :         }
    2131             : 
    2132   119183072 :         return buf;
    2133             :     }
    2134             : 
    2135             :     /*
    2136             :      * Didn't find it in the buffer pool.  We'll have to initialize a new
    2137             :      * buffer.  Remember to unlock the mapping lock while doing the work.
    2138             :      */
    2139     3471782 :     LWLockRelease(newPartitionLock);
    2140             : 
    2141             :     /*
    2142             :      * Acquire a victim buffer. Somebody else might try to do the same, we
    2143             :      * don't hold any conflicting locks. If so we'll have to undo our work
    2144             :      * later.
    2145             :      */
    2146     3471782 :     victim_buffer = GetVictimBuffer(strategy, io_context);
    2147     3471782 :     victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
    2148             : 
    2149             :     /*
    2150             :      * Try to make a hashtable entry for the buffer under its new tag. If
    2151             :      * somebody else inserted another buffer for the tag, we'll release the
    2152             :      * victim buffer we acquired and use the already inserted one.
    2153             :      */
    2154     3471782 :     LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
    2155     3471782 :     existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
    2156     3471782 :     if (existing_buf_id >= 0)
    2157             :     {
    2158             :         BufferDesc *existing_buf_hdr;
    2159             :         bool        valid;
    2160             : 
    2161             :         /*
    2162             :          * Got a collision. Someone has already done what we were about to do.
    2163             :          * We'll just handle this as if it were found in the buffer pool in
    2164             :          * the first place.  First, give up the buffer we were planning to
    2165             :          * use.
    2166             :          *
    2167             :          * We could do this after releasing the partition lock, but then we'd
    2168             :          * have to call ResourceOwnerEnlarge() & ReservePrivateRefCountEntry()
    2169             :          * before acquiring the lock, for the rare case of such a collision.
    2170             :          */
    2171        3718 :         UnpinBuffer(victim_buf_hdr);
    2172             : 
    2173             :         /* remaining code should match code at top of routine */
    2174             : 
    2175        3718 :         existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
    2176             : 
    2177        3718 :         valid = PinBuffer(existing_buf_hdr, strategy, false);
    2178             : 
    2179             :         /* Can release the mapping lock as soon as we've pinned it */
    2180        3718 :         LWLockRelease(newPartitionLock);
    2181             : 
    2182        3718 :         *foundPtr = true;
    2183             : 
    2184        3718 :         if (!valid)
    2185             :         {
    2186             :             /*
    2187             :              * We can only get here if (a) someone else is still reading in
    2188             :              * the page, (b) a previous read attempt failed, or (c) someone
    2189             :              * called StartReadBuffers() but not yet WaitReadBuffers().
    2190             :              */
    2191        2826 :             *foundPtr = false;
    2192             :         }
    2193             : 
    2194        3718 :         return existing_buf_hdr;
    2195             :     }
    2196             : 
    2197             :     /*
    2198             :      * Need to lock the buffer header too in order to change its tag.
    2199             :      */
    2200     3468064 :     victim_buf_state = LockBufHdr(victim_buf_hdr);
    2201             : 
    2202             :     /* some sanity checks while we hold the buffer header lock */
    2203             :     Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
    2204             :     Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
    2205             : 
    2206     3468064 :     victim_buf_hdr->tag = newTag;
    2207             : 
    2208             :     /*
    2209             :      * Make sure BM_PERMANENT is set for buffers that must be written at every
    2210             :      * checkpoint.  Unlogged buffers only need to be written at shutdown
    2211             :      * checkpoints, except for their "init" forks, which need to be treated
    2212             :      * just like permanent relations.
    2213             :      */
    2214     3468064 :     set_bits |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
    2215     3468064 :     if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
    2216     3467378 :         set_bits |= BM_PERMANENT;
    2217             : 
    2218     3468064 :     UnlockBufHdrExt(victim_buf_hdr, victim_buf_state,
    2219             :                     set_bits, 0, 0);
    2220             : 
    2221     3468064 :     LWLockRelease(newPartitionLock);
    2222             : 
    2223             :     /*
    2224             :      * Buffer contents are currently invalid.
    2225             :      */
    2226     3468064 :     *foundPtr = false;
    2227             : 
    2228     3468064 :     return victim_buf_hdr;
    2229             : }
    2230             : 
    2231             : /*
    2232             :  * InvalidateBuffer -- mark a shared buffer invalid.
    2233             :  *
    2234             :  * The buffer header spinlock must be held at entry.  We drop it before
    2235             :  * returning.  (This is sane because the caller must have locked the
    2236             :  * buffer in order to be sure it should be dropped.)
    2237             :  *
    2238             :  * This is used only in contexts such as dropping a relation.  We assume
    2239             :  * that no other backend could possibly be interested in using the page,
    2240             :  * so the only reason the buffer might be pinned is if someone else is
    2241             :  * trying to write it out.  We have to let them finish before we can
    2242             :  * reclaim the buffer.
    2243             :  *
    2244             :  * The buffer could get reclaimed by someone else while we are waiting
    2245             :  * to acquire the necessary locks; if so, don't mess it up.
    2246             :  */
    2247             : static void
    2248      208768 : InvalidateBuffer(BufferDesc *buf)
    2249             : {
    2250             :     BufferTag   oldTag;
    2251             :     uint32      oldHash;        /* hash value for oldTag */
    2252             :     LWLock     *oldPartitionLock;   /* buffer partition lock for it */
    2253             :     uint32      oldFlags;
    2254             :     uint32      buf_state;
    2255             : 
    2256             :     /* Save the original buffer tag before dropping the spinlock */
    2257      208768 :     oldTag = buf->tag;
    2258             : 
    2259      208768 :     UnlockBufHdr(buf);
    2260             : 
    2261             :     /*
    2262             :      * Need to compute the old tag's hashcode and partition lock ID. XXX is it
    2263             :      * worth storing the hashcode in BufferDesc so we need not recompute it
    2264             :      * here?  Probably not.
    2265             :      */
    2266      208768 :     oldHash = BufTableHashCode(&oldTag);
    2267      208768 :     oldPartitionLock = BufMappingPartitionLock(oldHash);
    2268             : 
    2269      208768 : retry:
    2270             : 
    2271             :     /*
    2272             :      * Acquire exclusive mapping lock in preparation for changing the buffer's
    2273             :      * association.
    2274             :      */
    2275      208768 :     LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
    2276             : 
    2277             :     /* Re-lock the buffer header */
    2278      208768 :     buf_state = LockBufHdr(buf);
    2279             : 
    2280             :     /* If it's changed while we were waiting for lock, do nothing */
    2281      208768 :     if (!BufferTagsEqual(&buf->tag, &oldTag))
    2282             :     {
    2283           0 :         UnlockBufHdr(buf);
    2284           0 :         LWLockRelease(oldPartitionLock);
    2285           0 :         return;
    2286             :     }
    2287             : 
    2288             :     /*
    2289             :      * We assume the reason for it to be pinned is that either we were
    2290             :      * asynchronously reading the page in before erroring out or someone else
    2291             :      * is flushing the page out.  Wait for the IO to finish.  (This could be
    2292             :      * an infinite loop if the refcount is messed up... it would be nice to
    2293             :      * time out after awhile, but there seems no way to be sure how many loops
    2294             :      * may be needed.  Note that if the other guy has pinned the buffer but
    2295             :      * not yet done StartBufferIO, WaitIO will fall through and we'll
    2296             :      * effectively be busy-looping here.)
    2297             :      */
    2298      208768 :     if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
    2299             :     {
    2300           0 :         UnlockBufHdr(buf);
    2301           0 :         LWLockRelease(oldPartitionLock);
    2302             :         /* safety check: should definitely not be our *own* pin */
    2303           0 :         if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
    2304           0 :             elog(ERROR, "buffer is pinned in InvalidateBuffer");
    2305           0 :         WaitIO(buf);
    2306           0 :         goto retry;
    2307             :     }
    2308             : 
    2309             :     /*
    2310             :      * Clear out the buffer's tag and flags.  We must do this to ensure that
    2311             :      * linear scans of the buffer array don't think the buffer is valid.
    2312             :      */
    2313      208768 :     oldFlags = buf_state & BUF_FLAG_MASK;
    2314      208768 :     ClearBufferTag(&buf->tag);
    2315             : 
    2316      208768 :     UnlockBufHdrExt(buf, buf_state,
    2317             :                     0,
    2318             :                     BUF_FLAG_MASK | BUF_USAGECOUNT_MASK,
    2319             :                     0);
    2320             : 
    2321             :     /*
    2322             :      * Remove the buffer from the lookup hashtable, if it was in there.
    2323             :      */
    2324      208768 :     if (oldFlags & BM_TAG_VALID)
    2325      208768 :         BufTableDelete(&oldTag, oldHash);
    2326             : 
    2327             :     /*
    2328             :      * Done with mapping lock.
    2329             :      */
    2330      208768 :     LWLockRelease(oldPartitionLock);
    2331             : }
    2332             : 
    2333             : /*
    2334             :  * Helper routine for GetVictimBuffer()
    2335             :  *
    2336             :  * Needs to be called on a buffer with a valid tag, pinned, but without the
    2337             :  * buffer header spinlock held.
    2338             :  *
    2339             :  * Returns true if the buffer can be reused, in which case the buffer is only
    2340             :  * pinned by this backend and marked as invalid, false otherwise.
    2341             :  */
    2342             : static bool
    2343     2448040 : InvalidateVictimBuffer(BufferDesc *buf_hdr)
    2344             : {
    2345             :     uint32      buf_state;
    2346             :     uint32      hash;
    2347             :     LWLock     *partition_lock;
    2348             :     BufferTag   tag;
    2349             : 
    2350             :     Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
    2351             : 
    2352             :     /* have buffer pinned, so it's safe to read tag without lock */
    2353     2448040 :     tag = buf_hdr->tag;
    2354             : 
    2355     2448040 :     hash = BufTableHashCode(&tag);
    2356     2448040 :     partition_lock = BufMappingPartitionLock(hash);
    2357             : 
    2358     2448040 :     LWLockAcquire(partition_lock, LW_EXCLUSIVE);
    2359             : 
    2360             :     /* lock the buffer header */
    2361     2448040 :     buf_state = LockBufHdr(buf_hdr);
    2362             : 
    2363             :     /*
    2364             :      * We have the buffer pinned nobody else should have been able to unset
    2365             :      * this concurrently.
    2366             :      */
    2367             :     Assert(buf_state & BM_TAG_VALID);
    2368             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    2369             :     Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
    2370             : 
    2371             :     /*
    2372             :      * If somebody else pinned the buffer since, or even worse, dirtied it,
    2373             :      * give up on this buffer: It's clearly in use.
    2374             :      */
    2375     2448040 :     if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
    2376             :     {
    2377             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    2378             : 
    2379        1100 :         UnlockBufHdr(buf_hdr);
    2380        1100 :         LWLockRelease(partition_lock);
    2381             : 
    2382        1100 :         return false;
    2383             :     }
    2384             : 
    2385             :     /*
    2386             :      * Clear out the buffer's tag and flags and usagecount.  This is not
    2387             :      * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
    2388             :      * doing anything with the buffer. But currently it's beneficial, as the
    2389             :      * cheaper pre-check for several linear scans of shared buffers use the
    2390             :      * tag (see e.g. FlushDatabaseBuffers()).
    2391             :      */
    2392     2446940 :     ClearBufferTag(&buf_hdr->tag);
    2393     2446940 :     UnlockBufHdrExt(buf_hdr, buf_state,
    2394             :                     0,
    2395             :                     BUF_FLAG_MASK | BUF_USAGECOUNT_MASK,
    2396             :                     0);
    2397             : 
    2398             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    2399             : 
    2400             :     /* finally delete buffer from the buffer mapping table */
    2401     2446940 :     BufTableDelete(&tag, hash);
    2402             : 
    2403     2446940 :     LWLockRelease(partition_lock);
    2404             : 
    2405     2446940 :     buf_state = pg_atomic_read_u32(&buf_hdr->state);
    2406             :     Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
    2407             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    2408             :     Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
    2409             : 
    2410     2446940 :     return true;
    2411             : }
    2412             : 
    2413             : static Buffer
    2414     3924816 : GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
    2415             : {
    2416             :     BufferDesc *buf_hdr;
    2417             :     Buffer      buf;
    2418             :     uint32      buf_state;
    2419             :     bool        from_ring;
    2420             : 
    2421             :     /*
    2422             :      * Ensure, before we pin a victim buffer, that there's a free refcount
    2423             :      * entry and resource owner slot for the pin.
    2424             :      */
    2425     3924816 :     ReservePrivateRefCountEntry();
    2426     3924816 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    2427             : 
    2428             :     /* we return here if a prospective victim buffer gets used concurrently */
    2429       13280 : again:
    2430             : 
    2431             :     /*
    2432             :      * Select a victim buffer.  The buffer is returned pinned and owned by
    2433             :      * this backend.
    2434             :      */
    2435     3938096 :     buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
    2436     3938096 :     buf = BufferDescriptorGetBuffer(buf_hdr);
    2437             : 
    2438             :     /*
    2439             :      * We shouldn't have any other pins for this buffer.
    2440             :      */
    2441     3938096 :     CheckBufferIsPinnedOnce(buf);
    2442             : 
    2443             :     /*
    2444             :      * If the buffer was dirty, try to write it out.  There is a race
    2445             :      * condition here, in that someone might dirty it after we released the
    2446             :      * buffer header lock above, or even while we are writing it out (since
    2447             :      * our share-lock won't prevent hint-bit updates).  We will recheck the
    2448             :      * dirty bit after re-locking the buffer header.
    2449             :      */
    2450     3938096 :     if (buf_state & BM_DIRTY)
    2451             :     {
    2452             :         LWLock     *content_lock;
    2453             : 
    2454             :         Assert(buf_state & BM_TAG_VALID);
    2455             :         Assert(buf_state & BM_VALID);
    2456             : 
    2457             :         /*
    2458             :          * We need a share-lock on the buffer contents to write it out (else
    2459             :          * we might write invalid data, eg because someone else is compacting
    2460             :          * the page contents while we write).  We must use a conditional lock
    2461             :          * acquisition here to avoid deadlock.  Even though the buffer was not
    2462             :          * pinned (and therefore surely not locked) when StrategyGetBuffer
    2463             :          * returned it, someone else could have pinned and exclusive-locked it
    2464             :          * by the time we get here. If we try to get the lock unconditionally,
    2465             :          * we'd block waiting for them; if they later block waiting for us,
    2466             :          * deadlock ensues. (This has been observed to happen when two
    2467             :          * backends are both trying to split btree index pages, and the second
    2468             :          * one just happens to be trying to split the page the first one got
    2469             :          * from StrategyGetBuffer.)
    2470             :          */
    2471      553352 :         content_lock = BufferDescriptorGetContentLock(buf_hdr);
    2472      553352 :         if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
    2473             :         {
    2474             :             /*
    2475             :              * Someone else has locked the buffer, so give it up and loop back
    2476             :              * to get another one.
    2477             :              */
    2478           0 :             UnpinBuffer(buf_hdr);
    2479           0 :             goto again;
    2480             :         }
    2481             : 
    2482             :         /*
    2483             :          * If using a nondefault strategy, and writing the buffer would
    2484             :          * require a WAL flush, let the strategy decide whether to go ahead
    2485             :          * and write/reuse the buffer or to choose another victim.  We need a
    2486             :          * lock to inspect the page LSN, so this can't be done inside
    2487             :          * StrategyGetBuffer.
    2488             :          */
    2489      553352 :         if (strategy != NULL)
    2490             :         {
    2491             :             XLogRecPtr  lsn;
    2492             : 
    2493             :             /* Read the LSN while holding buffer header lock */
    2494      156982 :             buf_state = LockBufHdr(buf_hdr);
    2495      156982 :             lsn = BufferGetLSN(buf_hdr);
    2496      156982 :             UnlockBufHdr(buf_hdr);
    2497             : 
    2498      156982 :             if (XLogNeedsFlush(lsn)
    2499       18616 :                 && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
    2500             :             {
    2501       12180 :                 LWLockRelease(content_lock);
    2502       12180 :                 UnpinBuffer(buf_hdr);
    2503       12180 :                 goto again;
    2504             :             }
    2505             :         }
    2506             : 
    2507             :         /* OK, do the I/O */
    2508      541172 :         FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
    2509      541172 :         LWLockRelease(content_lock);
    2510             : 
    2511      541172 :         ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
    2512             :                                       &buf_hdr->tag);
    2513             :     }
    2514             : 
    2515             : 
    2516     3925916 :     if (buf_state & BM_VALID)
    2517             :     {
    2518             :         /*
    2519             :          * When a BufferAccessStrategy is in use, blocks evicted from shared
    2520             :          * buffers are counted as IOOP_EVICT in the corresponding context
    2521             :          * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
    2522             :          * strategy in two cases: 1) while initially claiming buffers for the
    2523             :          * strategy ring 2) to replace an existing strategy ring buffer
    2524             :          * because it is pinned or in use and cannot be reused.
    2525             :          *
    2526             :          * Blocks evicted from buffers already in the strategy ring are
    2527             :          * counted as IOOP_REUSE in the corresponding strategy context.
    2528             :          *
    2529             :          * At this point, we can accurately count evictions and reuses,
    2530             :          * because we have successfully claimed the valid buffer. Previously,
    2531             :          * we may have been forced to release the buffer due to concurrent
    2532             :          * pinners or erroring out.
    2533             :          */
    2534     2443758 :         pgstat_count_io_op(IOOBJECT_RELATION, io_context,
    2535     2443758 :                            from_ring ? IOOP_REUSE : IOOP_EVICT, 1, 0);
    2536             :     }
    2537             : 
    2538             :     /*
    2539             :      * If the buffer has an entry in the buffer mapping table, delete it. This
    2540             :      * can fail because another backend could have pinned or dirtied the
    2541             :      * buffer.
    2542             :      */
    2543     3925916 :     if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
    2544             :     {
    2545        1100 :         UnpinBuffer(buf_hdr);
    2546        1100 :         goto again;
    2547             :     }
    2548             : 
    2549             :     /* a final set of sanity checks */
    2550             : #ifdef USE_ASSERT_CHECKING
    2551             :     buf_state = pg_atomic_read_u32(&buf_hdr->state);
    2552             : 
    2553             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
    2554             :     Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
    2555             : 
    2556             :     CheckBufferIsPinnedOnce(buf);
    2557             : #endif
    2558             : 
    2559     3924816 :     return buf;
    2560             : }
    2561             : 
    2562             : /*
    2563             :  * Return the maximum number of buffers that a backend should try to pin once,
    2564             :  * to avoid exceeding its fair share.  This is the highest value that
    2565             :  * GetAdditionalPinLimit() could ever return.  Note that it may be zero on a
    2566             :  * system with a very small buffer pool relative to max_connections.
    2567             :  */
    2568             : uint32
    2569     1291674 : GetPinLimit(void)
    2570             : {
    2571     1291674 :     return MaxProportionalPins;
    2572             : }
    2573             : 
    2574             : /*
    2575             :  * Return the maximum number of additional buffers that this backend should
    2576             :  * pin if it wants to stay under the per-backend limit, considering the number
    2577             :  * of buffers it has already pinned.  Unlike LimitAdditionalPins(), the limit
    2578             :  * return by this function can be zero.
    2579             :  */
    2580             : uint32
    2581     7308520 : GetAdditionalPinLimit(void)
    2582             : {
    2583             :     uint32      estimated_pins_held;
    2584             : 
    2585             :     /*
    2586             :      * We get the number of "overflowed" pins for free, but don't know the
    2587             :      * number of pins in PrivateRefCountArray.  The cost of calculating that
    2588             :      * exactly doesn't seem worth it, so just assume the max.
    2589             :      */
    2590     7308520 :     estimated_pins_held = PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
    2591             : 
    2592             :     /* Is this backend already holding more than its fair share? */
    2593     7308520 :     if (estimated_pins_held > MaxProportionalPins)
    2594     2477310 :         return 0;
    2595             : 
    2596     4831210 :     return MaxProportionalPins - estimated_pins_held;
    2597             : }
    2598             : 
    2599             : /*
    2600             :  * Limit the number of pins a batch operation may additionally acquire, to
    2601             :  * avoid running out of pinnable buffers.
    2602             :  *
    2603             :  * One additional pin is always allowed, on the assumption that the operation
    2604             :  * requires at least one to make progress.
    2605             :  */
    2606             : void
    2607      410322 : LimitAdditionalPins(uint32 *additional_pins)
    2608             : {
    2609             :     uint32      limit;
    2610             : 
    2611      410322 :     if (*additional_pins <= 1)
    2612      390162 :         return;
    2613             : 
    2614       20160 :     limit = GetAdditionalPinLimit();
    2615       20160 :     limit = Max(limit, 1);
    2616       20160 :     if (limit < *additional_pins)
    2617       11026 :         *additional_pins = limit;
    2618             : }
    2619             : 
    2620             : /*
    2621             :  * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
    2622             :  * avoid duplicating the tracing and relpersistence related logic.
    2623             :  */
    2624             : static BlockNumber
    2625      433200 : ExtendBufferedRelCommon(BufferManagerRelation bmr,
    2626             :                         ForkNumber fork,
    2627             :                         BufferAccessStrategy strategy,
    2628             :                         uint32 flags,
    2629             :                         uint32 extend_by,
    2630             :                         BlockNumber extend_upto,
    2631             :                         Buffer *buffers,
    2632             :                         uint32 *extended_by)
    2633             : {
    2634             :     BlockNumber first_block;
    2635             : 
    2636             :     TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
    2637             :                                          BMR_GET_SMGR(bmr)->smgr_rlocator.locator.spcOid,
    2638             :                                          BMR_GET_SMGR(bmr)->smgr_rlocator.locator.dbOid,
    2639             :                                          BMR_GET_SMGR(bmr)->smgr_rlocator.locator.relNumber,
    2640             :                                          BMR_GET_SMGR(bmr)->smgr_rlocator.backend,
    2641             :                                          extend_by);
    2642             : 
    2643      433200 :     if (bmr.relpersistence == RELPERSISTENCE_TEMP)
    2644       22878 :         first_block = ExtendBufferedRelLocal(bmr, fork, flags,
    2645             :                                              extend_by, extend_upto,
    2646             :                                              buffers, &extend_by);
    2647             :     else
    2648      410322 :         first_block = ExtendBufferedRelShared(bmr, fork, strategy, flags,
    2649             :                                               extend_by, extend_upto,
    2650             :                                               buffers, &extend_by);
    2651      433200 :     *extended_by = extend_by;
    2652             : 
    2653             :     TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
    2654             :                                         BMR_GET_SMGR(bmr)->smgr_rlocator.locator.spcOid,
    2655             :                                         BMR_GET_SMGR(bmr)->smgr_rlocator.locator.dbOid,
    2656             :                                         BMR_GET_SMGR(bmr)->smgr_rlocator.locator.relNumber,
    2657             :                                         BMR_GET_SMGR(bmr)->smgr_rlocator.backend,
    2658             :                                         *extended_by,
    2659             :                                         first_block);
    2660             : 
    2661      433200 :     return first_block;
    2662             : }
    2663             : 
    2664             : /*
    2665             :  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
    2666             :  * shared buffers.
    2667             :  */
    2668             : static BlockNumber
    2669      410322 : ExtendBufferedRelShared(BufferManagerRelation bmr,
    2670             :                         ForkNumber fork,
    2671             :                         BufferAccessStrategy strategy,
    2672             :                         uint32 flags,
    2673             :                         uint32 extend_by,
    2674             :                         BlockNumber extend_upto,
    2675             :                         Buffer *buffers,
    2676             :                         uint32 *extended_by)
    2677             : {
    2678             :     BlockNumber first_block;
    2679      410322 :     IOContext   io_context = IOContextForStrategy(strategy);
    2680             :     instr_time  io_start;
    2681             : 
    2682      410322 :     LimitAdditionalPins(&extend_by);
    2683             : 
    2684             :     /*
    2685             :      * Acquire victim buffers for extension without holding extension lock.
    2686             :      * Writing out victim buffers is the most expensive part of extending the
    2687             :      * relation, particularly when doing so requires WAL flushes. Zeroing out
    2688             :      * the buffers is also quite expensive, so do that before holding the
    2689             :      * extension lock as well.
    2690             :      *
    2691             :      * These pages are pinned by us and not valid. While we hold the pin they
    2692             :      * can't be acquired as victim buffers by another backend.
    2693             :      */
    2694      863356 :     for (uint32 i = 0; i < extend_by; i++)
    2695             :     {
    2696             :         Block       buf_block;
    2697             : 
    2698      453034 :         buffers[i] = GetVictimBuffer(strategy, io_context);
    2699      453034 :         buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
    2700             : 
    2701             :         /* new buffers are zero-filled */
    2702      453034 :         MemSet(buf_block, 0, BLCKSZ);
    2703             :     }
    2704             : 
    2705             :     /*
    2706             :      * Lock relation against concurrent extensions, unless requested not to.
    2707             :      *
    2708             :      * We use the same extension lock for all forks. That's unnecessarily
    2709             :      * restrictive, but currently extensions for forks don't happen often
    2710             :      * enough to make it worth locking more granularly.
    2711             :      *
    2712             :      * Note that another backend might have extended the relation by the time
    2713             :      * we get the lock.
    2714             :      */
    2715      410322 :     if (!(flags & EB_SKIP_EXTENSION_LOCK))
    2716      305372 :         LockRelationForExtension(bmr.rel, ExclusiveLock);
    2717             : 
    2718             :     /*
    2719             :      * If requested, invalidate size cache, so that smgrnblocks asks the
    2720             :      * kernel.
    2721             :      */
    2722      410322 :     if (flags & EB_CLEAR_SIZE_CACHE)
    2723       15690 :         BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] = InvalidBlockNumber;
    2724             : 
    2725      410322 :     first_block = smgrnblocks(BMR_GET_SMGR(bmr), fork);
    2726             : 
    2727             :     /*
    2728             :      * Now that we have the accurate relation size, check if the caller wants
    2729             :      * us to extend to only up to a specific size. If there were concurrent
    2730             :      * extensions, we might have acquired too many buffers and need to release
    2731             :      * them.
    2732             :      */
    2733      410322 :     if (extend_upto != InvalidBlockNumber)
    2734             :     {
    2735      108528 :         uint32      orig_extend_by = extend_by;
    2736             : 
    2737      108528 :         if (first_block > extend_upto)
    2738           0 :             extend_by = 0;
    2739      108528 :         else if ((uint64) first_block + extend_by > extend_upto)
    2740          26 :             extend_by = extend_upto - first_block;
    2741             : 
    2742      108596 :         for (uint32 i = extend_by; i < orig_extend_by; i++)
    2743             :         {
    2744          68 :             BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
    2745             : 
    2746          68 :             UnpinBuffer(buf_hdr);
    2747             :         }
    2748             : 
    2749      108528 :         if (extend_by == 0)
    2750             :         {
    2751          26 :             if (!(flags & EB_SKIP_EXTENSION_LOCK))
    2752          26 :                 UnlockRelationForExtension(bmr.rel, ExclusiveLock);
    2753          26 :             *extended_by = extend_by;
    2754          26 :             return first_block;
    2755             :         }
    2756             :     }
    2757             : 
    2758             :     /* Fail if relation is already at maximum possible length */
    2759      410296 :     if ((uint64) first_block + extend_by >= MaxBlockNumber)
    2760           0 :         ereport(ERROR,
    2761             :                 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
    2762             :                  errmsg("cannot extend relation %s beyond %u blocks",
    2763             :                         relpath(BMR_GET_SMGR(bmr)->smgr_rlocator, fork).str,
    2764             :                         MaxBlockNumber)));
    2765             : 
    2766             :     /*
    2767             :      * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
    2768             :      *
    2769             :      * This needs to happen before we extend the relation, because as soon as
    2770             :      * we do, other backends can start to read in those pages.
    2771             :      */
    2772      863262 :     for (uint32 i = 0; i < extend_by; i++)
    2773             :     {
    2774      452966 :         Buffer      victim_buf = buffers[i];
    2775      452966 :         BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
    2776             :         BufferTag   tag;
    2777             :         uint32      hash;
    2778             :         LWLock     *partition_lock;
    2779             :         int         existing_id;
    2780             : 
    2781             :         /* in case we need to pin an existing buffer below */
    2782      452966 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    2783      452966 :         ReservePrivateRefCountEntry();
    2784             : 
    2785      452966 :         InitBufferTag(&tag, &BMR_GET_SMGR(bmr)->smgr_rlocator.locator, fork,
    2786             :                       first_block + i);
    2787      452966 :         hash = BufTableHashCode(&tag);
    2788      452966 :         partition_lock = BufMappingPartitionLock(hash);
    2789             : 
    2790      452966 :         LWLockAcquire(partition_lock, LW_EXCLUSIVE);
    2791             : 
    2792      452966 :         existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
    2793             : 
    2794             :         /*
    2795             :          * We get here only in the corner case where we are trying to extend
    2796             :          * the relation but we found a pre-existing buffer. This can happen
    2797             :          * because a prior attempt at extending the relation failed, and
    2798             :          * because mdread doesn't complain about reads beyond EOF (when
    2799             :          * zero_damaged_pages is ON) and so a previous attempt to read a block
    2800             :          * beyond EOF could have left a "valid" zero-filled buffer.
    2801             :          *
    2802             :          * This has also been observed when relation was overwritten by
    2803             :          * external process. Since the legitimate cases should always have
    2804             :          * left a zero-filled buffer, complain if not PageIsNew.
    2805             :          */
    2806      452966 :         if (existing_id >= 0)
    2807             :         {
    2808           0 :             BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
    2809             :             Block       buf_block;
    2810             :             bool        valid;
    2811             : 
    2812             :             /*
    2813             :              * Pin the existing buffer before releasing the partition lock,
    2814             :              * preventing it from being evicted.
    2815             :              */
    2816           0 :             valid = PinBuffer(existing_hdr, strategy, false);
    2817             : 
    2818           0 :             LWLockRelease(partition_lock);
    2819           0 :             UnpinBuffer(victim_buf_hdr);
    2820             : 
    2821           0 :             buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
    2822           0 :             buf_block = BufHdrGetBlock(existing_hdr);
    2823             : 
    2824           0 :             if (valid && !PageIsNew((Page) buf_block))
    2825           0 :                 ereport(ERROR,
    2826             :                         (errmsg("unexpected data beyond EOF in block %u of relation \"%s\"",
    2827             :                                 existing_hdr->tag.blockNum,
    2828             :                                 relpath(BMR_GET_SMGR(bmr)->smgr_rlocator, fork).str)));
    2829             : 
    2830             :             /*
    2831             :              * We *must* do smgr[zero]extend before succeeding, else the page
    2832             :              * will not be reserved by the kernel, and the next P_NEW call
    2833             :              * will decide to return the same page.  Clear the BM_VALID bit,
    2834             :              * do StartBufferIO() and proceed.
    2835             :              *
    2836             :              * Loop to handle the very small possibility that someone re-sets
    2837             :              * BM_VALID between our clearing it and StartBufferIO inspecting
    2838             :              * it.
    2839             :              */
    2840             :             do
    2841             :             {
    2842           0 :                 pg_atomic_fetch_and_u32(&existing_hdr->state, ~BM_VALID);
    2843           0 :             } while (!StartBufferIO(existing_hdr, true, false));
    2844             :         }
    2845             :         else
    2846             :         {
    2847             :             uint32      buf_state;
    2848      452966 :             uint32      set_bits = 0;
    2849             : 
    2850      452966 :             buf_state = LockBufHdr(victim_buf_hdr);
    2851             : 
    2852             :             /* some sanity checks while we hold the buffer header lock */
    2853             :             Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
    2854             :             Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
    2855             : 
    2856      452966 :             victim_buf_hdr->tag = tag;
    2857             : 
    2858      452966 :             set_bits |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
    2859      452966 :             if (bmr.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
    2860      442358 :                 set_bits |= BM_PERMANENT;
    2861             : 
    2862      452966 :             UnlockBufHdrExt(victim_buf_hdr, buf_state,
    2863             :                             set_bits, 0,
    2864             :                             0);
    2865             : 
    2866      452966 :             LWLockRelease(partition_lock);
    2867             : 
    2868             :             /* XXX: could combine the locked operations in it with the above */
    2869      452966 :             StartBufferIO(victim_buf_hdr, true, false);
    2870             :         }
    2871             :     }
    2872             : 
    2873      410296 :     io_start = pgstat_prepare_io_time(track_io_timing);
    2874             : 
    2875             :     /*
    2876             :      * Note: if smgrzeroextend fails, we will end up with buffers that are
    2877             :      * allocated but not marked BM_VALID.  The next relation extension will
    2878             :      * still select the same block number (because the relation didn't get any
    2879             :      * longer on disk) and so future attempts to extend the relation will find
    2880             :      * the same buffers (if they have not been recycled) but come right back
    2881             :      * here to try smgrzeroextend again.
    2882             :      *
    2883             :      * We don't need to set checksum for all-zero pages.
    2884             :      */
    2885      410296 :     smgrzeroextend(BMR_GET_SMGR(bmr), fork, first_block, extend_by, false);
    2886             : 
    2887             :     /*
    2888             :      * Release the file-extension lock; it's now OK for someone else to extend
    2889             :      * the relation some more.
    2890             :      *
    2891             :      * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
    2892             :      * take noticeable time.
    2893             :      */
    2894      410296 :     if (!(flags & EB_SKIP_EXTENSION_LOCK))
    2895      305346 :         UnlockRelationForExtension(bmr.rel, ExclusiveLock);
    2896             : 
    2897      410296 :     pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
    2898      410296 :                             io_start, 1, extend_by * BLCKSZ);
    2899             : 
    2900             :     /* Set BM_VALID, terminate IO, and wake up any waiters */
    2901      863262 :     for (uint32 i = 0; i < extend_by; i++)
    2902             :     {
    2903      452966 :         Buffer      buf = buffers[i];
    2904      452966 :         BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
    2905      452966 :         bool        lock = false;
    2906             : 
    2907      452966 :         if (flags & EB_LOCK_FIRST && i == 0)
    2908      301260 :             lock = true;
    2909      151706 :         else if (flags & EB_LOCK_TARGET)
    2910             :         {
    2911             :             Assert(extend_upto != InvalidBlockNumber);
    2912       90704 :             if (first_block + i + 1 == extend_upto)
    2913       89534 :                 lock = true;
    2914             :         }
    2915             : 
    2916      452966 :         if (lock)
    2917      390794 :             LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    2918             : 
    2919      452966 :         TerminateBufferIO(buf_hdr, false, BM_VALID, true, false);
    2920             :     }
    2921             : 
    2922      410296 :     pgBufferUsage.shared_blks_written += extend_by;
    2923             : 
    2924      410296 :     *extended_by = extend_by;
    2925             : 
    2926      410296 :     return first_block;
    2927             : }
    2928             : 
    2929             : /*
    2930             :  * BufferIsLockedByMe
    2931             :  *
    2932             :  *      Checks if this backend has the buffer locked in any mode.
    2933             :  *
    2934             :  * Buffer must be pinned.
    2935             :  */
    2936             : bool
    2937           0 : BufferIsLockedByMe(Buffer buffer)
    2938             : {
    2939             :     BufferDesc *bufHdr;
    2940             : 
    2941             :     Assert(BufferIsPinned(buffer));
    2942             : 
    2943           0 :     if (BufferIsLocal(buffer))
    2944             :     {
    2945             :         /* Content locks are not maintained for local buffers. */
    2946           0 :         return true;
    2947             :     }
    2948             :     else
    2949             :     {
    2950           0 :         bufHdr = GetBufferDescriptor(buffer - 1);
    2951           0 :         return LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr));
    2952             :     }
    2953             : }
    2954             : 
    2955             : /*
    2956             :  * BufferIsLockedByMeInMode
    2957             :  *
    2958             :  *      Checks if this backend has the buffer locked in the specified mode.
    2959             :  *
    2960             :  * Buffer must be pinned.
    2961             :  */
    2962             : bool
    2963           0 : BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
    2964             : {
    2965             :     BufferDesc *bufHdr;
    2966             : 
    2967             :     Assert(BufferIsPinned(buffer));
    2968             : 
    2969           0 :     if (BufferIsLocal(buffer))
    2970             :     {
    2971             :         /* Content locks are not maintained for local buffers. */
    2972           0 :         return true;
    2973             :     }
    2974             :     else
    2975             :     {
    2976             :         LWLockMode  lw_mode;
    2977             : 
    2978           0 :         switch (mode)
    2979             :         {
    2980           0 :             case BUFFER_LOCK_EXCLUSIVE:
    2981           0 :                 lw_mode = LW_EXCLUSIVE;
    2982           0 :                 break;
    2983           0 :             case BUFFER_LOCK_SHARE:
    2984           0 :                 lw_mode = LW_SHARED;
    2985           0 :                 break;
    2986           0 :             default:
    2987           0 :                 pg_unreachable();
    2988             :         }
    2989             : 
    2990           0 :         bufHdr = GetBufferDescriptor(buffer - 1);
    2991           0 :         return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
    2992             :                                     lw_mode);
    2993             :     }
    2994             : }
    2995             : 
    2996             : /*
    2997             :  * BufferIsDirty
    2998             :  *
    2999             :  *      Checks if buffer is already dirty.
    3000             :  *
    3001             :  * Buffer must be pinned and exclusive-locked.  (Without an exclusive lock,
    3002             :  * the result may be stale before it's returned.)
    3003             :  */
    3004             : bool
    3005           0 : BufferIsDirty(Buffer buffer)
    3006             : {
    3007             :     BufferDesc *bufHdr;
    3008             : 
    3009             :     Assert(BufferIsPinned(buffer));
    3010             : 
    3011           0 :     if (BufferIsLocal(buffer))
    3012             :     {
    3013           0 :         int         bufid = -buffer - 1;
    3014             : 
    3015           0 :         bufHdr = GetLocalBufferDescriptor(bufid);
    3016             :         /* Content locks are not maintained for local buffers. */
    3017             :     }
    3018             :     else
    3019             :     {
    3020           0 :         bufHdr = GetBufferDescriptor(buffer - 1);
    3021             :         Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
    3022             :     }
    3023             : 
    3024           0 :     return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
    3025             : }
    3026             : 
    3027             : /*
    3028             :  * MarkBufferDirty
    3029             :  *
    3030             :  *      Marks buffer contents as dirty (actual write happens later).
    3031             :  *
    3032             :  * Buffer must be pinned and exclusive-locked.  (If caller does not hold
    3033             :  * exclusive lock, then somebody could be in process of writing the buffer,
    3034             :  * leading to risk of bad data written to disk.)
    3035             :  */
    3036             : void
    3037    43614820 : MarkBufferDirty(Buffer buffer)
    3038             : {
    3039             :     BufferDesc *bufHdr;
    3040             :     uint32      buf_state;
    3041             :     uint32      old_buf_state;
    3042             : 
    3043    43614820 :     if (!BufferIsValid(buffer))
    3044           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    3045             : 
    3046    43614820 :     if (BufferIsLocal(buffer))
    3047             :     {
    3048     2444582 :         MarkLocalBufferDirty(buffer);
    3049     2444582 :         return;
    3050             :     }
    3051             : 
    3052    41170238 :     bufHdr = GetBufferDescriptor(buffer - 1);
    3053             : 
    3054             :     Assert(BufferIsPinned(buffer));
    3055             :     Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
    3056             : 
    3057             :     /*
    3058             :      * NB: We have to wait for the buffer header spinlock to be not held, as
    3059             :      * TerminateBufferIO() relies on the spinlock.
    3060             :      */
    3061    41170238 :     old_buf_state = pg_atomic_read_u32(&bufHdr->state);
    3062             :     for (;;)
    3063             :     {
    3064    41170492 :         if (old_buf_state & BM_LOCKED)
    3065          12 :             old_buf_state = WaitBufHdrUnlocked(bufHdr);
    3066             : 
    3067    41170492 :         buf_state = old_buf_state;
    3068             : 
    3069             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    3070    41170492 :         buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
    3071             : 
    3072    41170492 :         if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
    3073             :                                            buf_state))
    3074    41170238 :             break;
    3075             :     }
    3076             : 
    3077             :     /*
    3078             :      * If the buffer was not dirty already, do vacuum accounting.
    3079             :      */
    3080    41170238 :     if (!(old_buf_state & BM_DIRTY))
    3081             :     {
    3082     1315826 :         pgBufferUsage.shared_blks_dirtied++;
    3083     1315826 :         if (VacuumCostActive)
    3084       17358 :             VacuumCostBalance += VacuumCostPageDirty;
    3085             :     }
    3086             : }
    3087             : 
    3088             : /*
    3089             :  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
    3090             :  *
    3091             :  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
    3092             :  * compared to calling the two routines separately.  Now it's mainly just
    3093             :  * a convenience function.  However, if the passed buffer is valid and
    3094             :  * already contains the desired block, we just return it as-is; and that
    3095             :  * does save considerable work compared to a full release and reacquire.
    3096             :  *
    3097             :  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
    3098             :  * buffer actually needs to be released.  This case is the same as ReadBuffer,
    3099             :  * but can save some tests in the caller.
    3100             :  */
    3101             : Buffer
    3102    58678072 : ReleaseAndReadBuffer(Buffer buffer,
    3103             :                      Relation relation,
    3104             :                      BlockNumber blockNum)
    3105             : {
    3106    58678072 :     ForkNumber  forkNum = MAIN_FORKNUM;
    3107             :     BufferDesc *bufHdr;
    3108             : 
    3109    58678072 :     if (BufferIsValid(buffer))
    3110             :     {
    3111             :         Assert(BufferIsPinned(buffer));
    3112    35328168 :         if (BufferIsLocal(buffer))
    3113             :         {
    3114       73728 :             bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    3115       80772 :             if (bufHdr->tag.blockNum == blockNum &&
    3116       14088 :                 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
    3117        7044 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum)
    3118        7044 :                 return buffer;
    3119       66684 :             UnpinLocalBuffer(buffer);
    3120             :         }
    3121             :         else
    3122             :         {
    3123    35254440 :             bufHdr = GetBufferDescriptor(buffer - 1);
    3124             :             /* we have pin, so it's ok to examine tag without spinlock */
    3125    47421882 :             if (bufHdr->tag.blockNum == blockNum &&
    3126    24334884 :                 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
    3127    12167442 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum)
    3128    12167442 :                 return buffer;
    3129    23086998 :             UnpinBuffer(bufHdr);
    3130             :         }
    3131             :     }
    3132             : 
    3133    46503586 :     return ReadBuffer(relation, blockNum);
    3134             : }
    3135             : 
    3136             : /*
    3137             :  * PinBuffer -- make buffer unavailable for replacement.
    3138             :  *
    3139             :  * For the default access strategy, the buffer's usage_count is incremented
    3140             :  * when we first pin it; for other strategies we just make sure the usage_count
    3141             :  * isn't zero.  (The idea of the latter is that we don't want synchronized
    3142             :  * heap scans to inflate the count, but we need it to not be zero to discourage
    3143             :  * other backends from stealing buffers from our ring.  As long as we cycle
    3144             :  * through the ring faster than the global clock-sweep cycles, buffers in
    3145             :  * our ring won't be chosen as victims for replacement by other backends.)
    3146             :  *
    3147             :  * This should be applied only to shared buffers, never local ones.
    3148             :  *
    3149             :  * Since buffers are pinned/unpinned very frequently, pin buffers without
    3150             :  * taking the buffer header lock; instead update the state variable in loop of
    3151             :  * CAS operations. Hopefully it's just a single CAS.
    3152             :  *
    3153             :  * Note that ResourceOwnerEnlarge() and ReservePrivateRefCountEntry()
    3154             :  * must have been done already.
    3155             :  *
    3156             :  * Returns true if buffer is BM_VALID, else false.  This provision allows
    3157             :  * some callers to avoid an extra spinlock cycle.  If skip_if_not_valid is
    3158             :  * true, then a false return value also indicates that the buffer was
    3159             :  * (recently) invalid and has not been pinned.
    3160             :  */
    3161             : static bool
    3162   119195798 : PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
    3163             :           bool skip_if_not_valid)
    3164             : {
    3165   119195798 :     Buffer      b = BufferDescriptorGetBuffer(buf);
    3166             :     bool        result;
    3167             :     PrivateRefCountEntry *ref;
    3168             : 
    3169             :     Assert(!BufferIsLocal(b));
    3170             :     Assert(ReservedRefCountSlot != -1);
    3171             : 
    3172   119195798 :     ref = GetPrivateRefCountEntry(b, true);
    3173             : 
    3174   119195798 :     if (ref == NULL)
    3175             :     {
    3176             :         uint32      buf_state;
    3177             :         uint32      old_buf_state;
    3178             : 
    3179   114657956 :         old_buf_state = pg_atomic_read_u32(&buf->state);
    3180             :         for (;;)
    3181             :         {
    3182   114704086 :             if (unlikely(skip_if_not_valid && !(old_buf_state & BM_VALID)))
    3183          12 :                 return false;
    3184             : 
    3185             :             /*
    3186             :              * We're not allowed to increase the refcount while the buffer
    3187             :              * header spinlock is held. Wait for the lock to be released.
    3188             :              */
    3189   114704074 :             if (old_buf_state & BM_LOCKED)
    3190        1366 :                 old_buf_state = WaitBufHdrUnlocked(buf);
    3191             : 
    3192   114704074 :             buf_state = old_buf_state;
    3193             : 
    3194             :             /* increase refcount */
    3195   114704074 :             buf_state += BUF_REFCOUNT_ONE;
    3196             : 
    3197   114704074 :             if (strategy == NULL)
    3198             :             {
    3199             :                 /* Default case: increase usagecount unless already max. */
    3200   113293280 :                 if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
    3201     6811902 :                     buf_state += BUF_USAGECOUNT_ONE;
    3202             :             }
    3203             :             else
    3204             :             {
    3205             :                 /*
    3206             :                  * Ring buffers shouldn't evict others from pool.  Thus we
    3207             :                  * don't make usagecount more than 1.
    3208             :                  */
    3209     1410794 :                 if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
    3210       69760 :                     buf_state += BUF_USAGECOUNT_ONE;
    3211             :             }
    3212             : 
    3213   114704074 :             if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
    3214             :                                                buf_state))
    3215             :             {
    3216   114657944 :                 result = (buf_state & BM_VALID) != 0;
    3217             : 
    3218   114657944 :                 TrackNewBufferPin(b);
    3219   114657944 :                 break;
    3220             :             }
    3221             :         }
    3222             :     }
    3223             :     else
    3224             :     {
    3225             :         /*
    3226             :          * If we previously pinned the buffer, it is likely to be valid, but
    3227             :          * it may not be if StartReadBuffers() was called and
    3228             :          * WaitReadBuffers() hasn't been called yet.  We'll check by loading
    3229             :          * the flags without locking.  This is racy, but it's OK to return
    3230             :          * false spuriously: when WaitReadBuffers() calls StartBufferIO(),
    3231             :          * it'll see that it's now valid.
    3232             :          *
    3233             :          * Note: We deliberately avoid a Valgrind client request here.
    3234             :          * Individual access methods can optionally superimpose buffer page
    3235             :          * client requests on top of our client requests to enforce that
    3236             :          * buffers are only accessed while locked (and pinned).  It's possible
    3237             :          * that the buffer page is legitimately non-accessible here.  We
    3238             :          * cannot meddle with that.
    3239             :          */
    3240     4537842 :         result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
    3241             : 
    3242             :         Assert(ref->data.refcount > 0);
    3243     4537842 :         ref->data.refcount++;
    3244     4537842 :         ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
    3245             :     }
    3246             : 
    3247   119195786 :     return result;
    3248             : }
    3249             : 
    3250             : /*
    3251             :  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
    3252             :  * The spinlock is released before return.
    3253             :  *
    3254             :  * As this function is called with the spinlock held, the caller has to
    3255             :  * previously call ReservePrivateRefCountEntry() and
    3256             :  * ResourceOwnerEnlarge(CurrentResourceOwner);
    3257             :  *
    3258             :  * Currently, no callers of this function want to modify the buffer's
    3259             :  * usage_count at all, so there's no need for a strategy parameter.
    3260             :  * Also we don't bother with a BM_VALID test (the caller could check that for
    3261             :  * itself).
    3262             :  *
    3263             :  * Also all callers only ever use this function when it's known that the
    3264             :  * buffer can't have a preexisting pin by this backend. That allows us to skip
    3265             :  * searching the private refcount array & hash, which is a boon, because the
    3266             :  * spinlock is still held.
    3267             :  *
    3268             :  * Note: use of this routine is frequently mandatory, not just an optimization
    3269             :  * to save a spin lock/unlock cycle, because we need to pin a buffer before
    3270             :  * its state can change under us.
    3271             :  */
    3272             : static void
    3273      600794 : PinBuffer_Locked(BufferDesc *buf)
    3274             : {
    3275             :     uint32      old_buf_state;
    3276             : 
    3277             :     /*
    3278             :      * As explained, We don't expect any preexisting pins. That allows us to
    3279             :      * manipulate the PrivateRefCount after releasing the spinlock
    3280             :      */
    3281             :     Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
    3282             : 
    3283             :     /*
    3284             :      * Since we hold the buffer spinlock, we can update the buffer state and
    3285             :      * release the lock in one operation.
    3286             :      */
    3287      600794 :     old_buf_state = pg_atomic_read_u32(&buf->state);
    3288             : 
    3289      600794 :     UnlockBufHdrExt(buf, old_buf_state,
    3290             :                     0, 0, 1);
    3291             : 
    3292      600794 :     TrackNewBufferPin(BufferDescriptorGetBuffer(buf));
    3293      600794 : }
    3294             : 
    3295             : /*
    3296             :  * Support for waking up another backend that is waiting for the cleanup lock
    3297             :  * to be released using BM_PIN_COUNT_WAITER.
    3298             :  *
    3299             :  * See LockBufferForCleanup().
    3300             :  *
    3301             :  * Expected to be called just after releasing a buffer pin (in a BufferDesc,
    3302             :  * not just reducing the backend-local pincount for the buffer).
    3303             :  */
    3304             : static void
    3305         198 : WakePinCountWaiter(BufferDesc *buf)
    3306             : {
    3307             :     /*
    3308             :      * Acquire the buffer header lock, re-check that there's a waiter. Another
    3309             :      * backend could have unpinned this buffer, and already woken up the
    3310             :      * waiter.
    3311             :      *
    3312             :      * There's no danger of the buffer being replaced after we unpinned it
    3313             :      * above, as it's pinned by the waiter. The waiter removes
    3314             :      * BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
    3315             :      * backend waking it up.
    3316             :      */
    3317         198 :     uint32      buf_state = LockBufHdr(buf);
    3318             : 
    3319         198 :     if ((buf_state & BM_PIN_COUNT_WAITER) &&
    3320         198 :         BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    3321         198 :     {
    3322             :         /* we just released the last pin other than the waiter's */
    3323         198 :         int         wait_backend_pgprocno = buf->wait_backend_pgprocno;
    3324             : 
    3325         198 :         UnlockBufHdrExt(buf, buf_state,
    3326             :                         0, BM_PIN_COUNT_WAITER,
    3327             :                         0);
    3328         198 :         ProcSendSignal(wait_backend_pgprocno);
    3329             :     }
    3330             :     else
    3331           0 :         UnlockBufHdr(buf);
    3332         198 : }
    3333             : 
    3334             : /*
    3335             :  * UnpinBuffer -- make buffer available for replacement.
    3336             :  *
    3337             :  * This should be applied only to shared buffers, never local ones.  This
    3338             :  * always adjusts CurrentResourceOwner.
    3339             :  */
    3340             : static void
    3341   146521920 : UnpinBuffer(BufferDesc *buf)
    3342             : {
    3343   146521920 :     Buffer      b = BufferDescriptorGetBuffer(buf);
    3344             : 
    3345   146521920 :     ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
    3346   146521920 :     UnpinBufferNoOwner(buf);
    3347   146521920 : }
    3348             : 
    3349             : static void
    3350   146531050 : UnpinBufferNoOwner(BufferDesc *buf)
    3351             : {
    3352             :     PrivateRefCountEntry *ref;
    3353   146531050 :     Buffer      b = BufferDescriptorGetBuffer(buf);
    3354             : 
    3355             :     Assert(!BufferIsLocal(b));
    3356             : 
    3357             :     /* not moving as we're likely deleting it soon anyway */
    3358   146531050 :     ref = GetPrivateRefCountEntry(b, false);
    3359             :     Assert(ref != NULL);
    3360             :     Assert(ref->data.refcount > 0);
    3361   146531050 :     ref->data.refcount--;
    3362   146531050 :     if (ref->data.refcount == 0)
    3363             :     {
    3364             :         uint32      old_buf_state;
    3365             : 
    3366             :         /*
    3367             :          * Mark buffer non-accessible to Valgrind.
    3368             :          *
    3369             :          * Note that the buffer may have already been marked non-accessible
    3370             :          * within access method code that enforces that buffers are only
    3371             :          * accessed while a buffer lock is held.
    3372             :          */
    3373             :         VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
    3374             : 
    3375             :         /*
    3376             :          * I'd better not still hold the buffer content lock. Can't use
    3377             :          * BufferIsLockedByMe(), as that asserts the buffer is pinned.
    3378             :          */
    3379             :         Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
    3380             : 
    3381             :         /* decrement the shared reference count */
    3382   119196834 :         old_buf_state = pg_atomic_fetch_sub_u32(&buf->state, BUF_REFCOUNT_ONE);
    3383             : 
    3384             :         /* Support LockBufferForCleanup() */
    3385   119196834 :         if (old_buf_state & BM_PIN_COUNT_WAITER)
    3386         198 :             WakePinCountWaiter(buf);
    3387             : 
    3388   119196834 :         ForgetPrivateRefCountEntry(ref);
    3389             :     }
    3390   146531050 : }
    3391             : 
    3392             : /*
    3393             :  * Set up backend-local tracking of a buffer pinned the first time by this
    3394             :  * backend.
    3395             :  */
    3396             : inline void
    3397   119196834 : TrackNewBufferPin(Buffer buf)
    3398             : {
    3399             :     PrivateRefCountEntry *ref;
    3400             : 
    3401   119196834 :     ref = NewPrivateRefCountEntry(buf);
    3402   119196834 :     ref->data.refcount++;
    3403             : 
    3404   119196834 :     ResourceOwnerRememberBuffer(CurrentResourceOwner, buf);
    3405             : 
    3406             :     /*
    3407             :      * This is the first pin for this page by this backend, mark its page as
    3408             :      * defined to valgrind. While the page contents might not actually be
    3409             :      * valid yet, we don't currently guarantee that such pages are marked
    3410             :      * undefined or non-accessible.
    3411             :      *
    3412             :      * It's not necessarily the prettiest to do this here, but otherwise we'd
    3413             :      * need this block of code in multiple places.
    3414             :      */
    3415             :     VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(GetBufferDescriptor(buf - 1)),
    3416             :                               BLCKSZ);
    3417   119196834 : }
    3418             : 
    3419             : #define ST_SORT sort_checkpoint_bufferids
    3420             : #define ST_ELEMENT_TYPE CkptSortItem
    3421             : #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
    3422             : #define ST_SCOPE static
    3423             : #define ST_DEFINE
    3424             : #include "lib/sort_template.h"
    3425             : 
    3426             : /*
    3427             :  * BufferSync -- Write out all dirty buffers in the pool.
    3428             :  *
    3429             :  * This is called at checkpoint time to write out all dirty shared buffers.
    3430             :  * The checkpoint request flags should be passed in.  If CHECKPOINT_FAST is
    3431             :  * set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
    3432             :  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_UNLOGGED is set, we write
    3433             :  * even unlogged buffers, which are otherwise skipped.  The remaining flags
    3434             :  * currently have no effect here.
    3435             :  */
    3436             : static void
    3437        3546 : BufferSync(int flags)
    3438             : {
    3439             :     uint32      buf_state;
    3440             :     int         buf_id;
    3441             :     int         num_to_scan;
    3442             :     int         num_spaces;
    3443             :     int         num_processed;
    3444             :     int         num_written;
    3445        3546 :     CkptTsStatus *per_ts_stat = NULL;
    3446             :     Oid         last_tsid;
    3447             :     binaryheap *ts_heap;
    3448             :     int         i;
    3449        3546 :     uint32      mask = BM_DIRTY;
    3450             :     WritebackContext wb_context;
    3451             : 
    3452             :     /*
    3453             :      * Unless this is a shutdown checkpoint or we have been explicitly told,
    3454             :      * we write only permanent, dirty buffers.  But at shutdown or end of
    3455             :      * recovery, we write all dirty buffers.
    3456             :      */
    3457        3546 :     if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
    3458             :                     CHECKPOINT_FLUSH_UNLOGGED))))
    3459        1978 :         mask |= BM_PERMANENT;
    3460             : 
    3461             :     /*
    3462             :      * Loop over all buffers, and mark the ones that need to be written with
    3463             :      * BM_CHECKPOINT_NEEDED.  Count them as we go (num_to_scan), so that we
    3464             :      * can estimate how much work needs to be done.
    3465             :      *
    3466             :      * This allows us to write only those pages that were dirty when the
    3467             :      * checkpoint began, and not those that get dirtied while it proceeds.
    3468             :      * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
    3469             :      * later in this function, or by normal backends or the bgwriter cleaning
    3470             :      * scan, the flag is cleared.  Any buffer dirtied after this point won't
    3471             :      * have the flag set.
    3472             :      *
    3473             :      * Note that if we fail to write some buffer, we may leave buffers with
    3474             :      * BM_CHECKPOINT_NEEDED still set.  This is OK since any such buffer would
    3475             :      * certainly need to be written for the next checkpoint attempt, too.
    3476             :      */
    3477        3546 :     num_to_scan = 0;
    3478    24678266 :     for (buf_id = 0; buf_id < NBuffers; buf_id++)
    3479             :     {
    3480    24674720 :         BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
    3481    24674720 :         uint32      set_bits = 0;
    3482             : 
    3483             :         /*
    3484             :          * Header spinlock is enough to examine BM_DIRTY, see comment in
    3485             :          * SyncOneBuffer.
    3486             :          */
    3487    24674720 :         buf_state = LockBufHdr(bufHdr);
    3488             : 
    3489    24674720 :         if ((buf_state & mask) == mask)
    3490             :         {
    3491             :             CkptSortItem *item;
    3492             : 
    3493      593378 :             set_bits = BM_CHECKPOINT_NEEDED;
    3494             : 
    3495      593378 :             item = &CkptBufferIds[num_to_scan++];
    3496      593378 :             item->buf_id = buf_id;
    3497      593378 :             item->tsId = bufHdr->tag.spcOid;
    3498      593378 :             item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
    3499      593378 :             item->forkNum = BufTagGetForkNum(&bufHdr->tag);
    3500      593378 :             item->blockNum = bufHdr->tag.blockNum;
    3501             :         }
    3502             : 
    3503    24674720 :         UnlockBufHdrExt(bufHdr, buf_state,
    3504             :                         set_bits, 0,
    3505             :                         0);
    3506             : 
    3507             :         /* Check for barrier events in case NBuffers is large. */
    3508    24674720 :         if (ProcSignalBarrierPending)
    3509           0 :             ProcessProcSignalBarrier();
    3510             :     }
    3511             : 
    3512        3546 :     if (num_to_scan == 0)
    3513        1372 :         return;                 /* nothing to do */
    3514             : 
    3515        2174 :     WritebackContextInit(&wb_context, &checkpoint_flush_after);
    3516             : 
    3517             :     TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
    3518             : 
    3519             :     /*
    3520             :      * Sort buffers that need to be written to reduce the likelihood of random
    3521             :      * IO. The sorting is also important for the implementation of balancing
    3522             :      * writes between tablespaces. Without balancing writes we'd potentially
    3523             :      * end up writing to the tablespaces one-by-one; possibly overloading the
    3524             :      * underlying system.
    3525             :      */
    3526        2174 :     sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
    3527             : 
    3528        2174 :     num_spaces = 0;
    3529             : 
    3530             :     /*
    3531             :      * Allocate progress status for each tablespace with buffers that need to
    3532             :      * be flushed. This requires the to-be-flushed array to be sorted.
    3533             :      */
    3534        2174 :     last_tsid = InvalidOid;
    3535      595552 :     for (i = 0; i < num_to_scan; i++)
    3536             :     {
    3537             :         CkptTsStatus *s;
    3538             :         Oid         cur_tsid;
    3539             : 
    3540      593378 :         cur_tsid = CkptBufferIds[i].tsId;
    3541             : 
    3542             :         /*
    3543             :          * Grow array of per-tablespace status structs, every time a new
    3544             :          * tablespace is found.
    3545             :          */
    3546      593378 :         if (last_tsid == InvalidOid || last_tsid != cur_tsid)
    3547        3254 :         {
    3548             :             Size        sz;
    3549             : 
    3550        3254 :             num_spaces++;
    3551             : 
    3552             :             /*
    3553             :              * Not worth adding grow-by-power-of-2 logic here - even with a
    3554             :              * few hundred tablespaces this should be fine.
    3555             :              */
    3556        3254 :             sz = sizeof(CkptTsStatus) * num_spaces;
    3557             : 
    3558        3254 :             if (per_ts_stat == NULL)
    3559        2174 :                 per_ts_stat = (CkptTsStatus *) palloc(sz);
    3560             :             else
    3561        1080 :                 per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
    3562             : 
    3563        3254 :             s = &per_ts_stat[num_spaces - 1];
    3564        3254 :             memset(s, 0, sizeof(*s));
    3565        3254 :             s->tsId = cur_tsid;
    3566             : 
    3567             :             /*
    3568             :              * The first buffer in this tablespace. As CkptBufferIds is sorted
    3569             :              * by tablespace all (s->num_to_scan) buffers in this tablespace
    3570             :              * will follow afterwards.
    3571             :              */
    3572        3254 :             s->index = i;
    3573             : 
    3574             :             /*
    3575             :              * progress_slice will be determined once we know how many buffers
    3576             :              * are in each tablespace, i.e. after this loop.
    3577             :              */
    3578             : 
    3579        3254 :             last_tsid = cur_tsid;
    3580             :         }
    3581             :         else
    3582             :         {
    3583      590124 :             s = &per_ts_stat[num_spaces - 1];
    3584             :         }
    3585             : 
    3586      593378 :         s->num_to_scan++;
    3587             : 
    3588             :         /* Check for barrier events. */
    3589      593378 :         if (ProcSignalBarrierPending)
    3590           0 :             ProcessProcSignalBarrier();
    3591             :     }
    3592             : 
    3593             :     Assert(num_spaces > 0);
    3594             : 
    3595             :     /*
    3596             :      * Build a min-heap over the write-progress in the individual tablespaces,
    3597             :      * and compute how large a portion of the total progress a single
    3598             :      * processed buffer is.
    3599             :      */
    3600        2174 :     ts_heap = binaryheap_allocate(num_spaces,
    3601             :                                   ts_ckpt_progress_comparator,
    3602             :                                   NULL);
    3603             : 
    3604        5428 :     for (i = 0; i < num_spaces; i++)
    3605             :     {
    3606        3254 :         CkptTsStatus *ts_stat = &per_ts_stat[i];
    3607             : 
    3608        3254 :         ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
    3609             : 
    3610        3254 :         binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
    3611             :     }
    3612             : 
    3613        2174 :     binaryheap_build(ts_heap);
    3614             : 
    3615             :     /*
    3616             :      * Iterate through to-be-checkpointed buffers and write the ones (still)
    3617             :      * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
    3618             :      * tablespaces; otherwise the sorting would lead to only one tablespace
    3619             :      * receiving writes at a time, making inefficient use of the hardware.
    3620             :      */
    3621        2174 :     num_processed = 0;
    3622        2174 :     num_written = 0;
    3623      595552 :     while (!binaryheap_empty(ts_heap))
    3624             :     {
    3625      593378 :         BufferDesc *bufHdr = NULL;
    3626             :         CkptTsStatus *ts_stat = (CkptTsStatus *)
    3627      593378 :             DatumGetPointer(binaryheap_first(ts_heap));
    3628             : 
    3629      593378 :         buf_id = CkptBufferIds[ts_stat->index].buf_id;
    3630             :         Assert(buf_id != -1);
    3631             : 
    3632      593378 :         bufHdr = GetBufferDescriptor(buf_id);
    3633             : 
    3634      593378 :         num_processed++;
    3635             : 
    3636             :         /*
    3637             :          * We don't need to acquire the lock here, because we're only looking
    3638             :          * at a single bit. It's possible that someone else writes the buffer
    3639             :          * and clears the flag right after we check, but that doesn't matter
    3640             :          * since SyncOneBuffer will then do nothing.  However, there is a
    3641             :          * further race condition: it's conceivable that between the time we
    3642             :          * examine the bit here and the time SyncOneBuffer acquires the lock,
    3643             :          * someone else not only wrote the buffer but replaced it with another
    3644             :          * page and dirtied it.  In that improbable case, SyncOneBuffer will
    3645             :          * write the buffer though we didn't need to.  It doesn't seem worth
    3646             :          * guarding against this, though.
    3647             :          */
    3648      593378 :         if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
    3649             :         {
    3650      555326 :             if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
    3651             :             {
    3652             :                 TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
    3653      555326 :                 PendingCheckpointerStats.buffers_written++;
    3654      555326 :                 num_written++;
    3655             :             }
    3656             :         }
    3657             : 
    3658             :         /*
    3659             :          * Measure progress independent of actually having to flush the buffer
    3660             :          * - otherwise writing become unbalanced.
    3661             :          */
    3662      593378 :         ts_stat->progress += ts_stat->progress_slice;
    3663      593378 :         ts_stat->num_scanned++;
    3664      593378 :         ts_stat->index++;
    3665             : 
    3666             :         /* Have all the buffers from the tablespace been processed? */
    3667      593378 :         if (ts_stat->num_scanned == ts_stat->num_to_scan)
    3668             :         {
    3669        3254 :             binaryheap_remove_first(ts_heap);
    3670             :         }
    3671             :         else
    3672             :         {
    3673             :             /* update heap with the new progress */
    3674      590124 :             binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
    3675             :         }
    3676             : 
    3677             :         /*
    3678             :          * Sleep to throttle our I/O rate.
    3679             :          *
    3680             :          * (This will check for barrier events even if it doesn't sleep.)
    3681             :          */
    3682      593378 :         CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
    3683             :     }
    3684             : 
    3685             :     /*
    3686             :      * Issue all pending flushes. Only checkpointer calls BufferSync(), so
    3687             :      * IOContext will always be IOCONTEXT_NORMAL.
    3688             :      */
    3689        2174 :     IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
    3690             : 
    3691        2174 :     pfree(per_ts_stat);
    3692        2174 :     per_ts_stat = NULL;
    3693        2174 :     binaryheap_free(ts_heap);
    3694             : 
    3695             :     /*
    3696             :      * Update checkpoint statistics. As noted above, this doesn't include
    3697             :      * buffers written by other backends or bgwriter scan.
    3698             :      */
    3699        2174 :     CheckpointStats.ckpt_bufs_written += num_written;
    3700             : 
    3701             :     TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
    3702             : }
    3703             : 
    3704             : /*
    3705             :  * BgBufferSync -- Write out some dirty buffers in the pool.
    3706             :  *
    3707             :  * This is called periodically by the background writer process.
    3708             :  *
    3709             :  * Returns true if it's appropriate for the bgwriter process to go into
    3710             :  * low-power hibernation mode.  (This happens if the strategy clock-sweep
    3711             :  * has been "lapped" and no buffer allocations have occurred recently,
    3712             :  * or if the bgwriter has been effectively disabled by setting
    3713             :  * bgwriter_lru_maxpages to 0.)
    3714             :  */
    3715             : bool
    3716       23482 : BgBufferSync(WritebackContext *wb_context)
    3717             : {
    3718             :     /* info obtained from freelist.c */
    3719             :     int         strategy_buf_id;
    3720             :     uint32      strategy_passes;
    3721             :     uint32      recent_alloc;
    3722             : 
    3723             :     /*
    3724             :      * Information saved between calls so we can determine the strategy
    3725             :      * point's advance rate and avoid scanning already-cleaned buffers.
    3726             :      */
    3727             :     static bool saved_info_valid = false;
    3728             :     static int  prev_strategy_buf_id;
    3729             :     static uint32 prev_strategy_passes;
    3730             :     static int  next_to_clean;
    3731             :     static uint32 next_passes;
    3732             : 
    3733             :     /* Moving averages of allocation rate and clean-buffer density */
    3734             :     static float smoothed_alloc = 0;
    3735             :     static float smoothed_density = 10.0;
    3736             : 
    3737             :     /* Potentially these could be tunables, but for now, not */
    3738       23482 :     float       smoothing_samples = 16;
    3739       23482 :     float       scan_whole_pool_milliseconds = 120000.0;
    3740             : 
    3741             :     /* Used to compute how far we scan ahead */
    3742             :     long        strategy_delta;
    3743             :     int         bufs_to_lap;
    3744             :     int         bufs_ahead;
    3745             :     float       scans_per_alloc;
    3746             :     int         reusable_buffers_est;
    3747             :     int         upcoming_alloc_est;
    3748             :     int         min_scan_buffers;
    3749             : 
    3750             :     /* Variables for the scanning loop proper */
    3751             :     int         num_to_scan;
    3752             :     int         num_written;
    3753             :     int         reusable_buffers;
    3754             : 
    3755             :     /* Variables for final smoothed_density update */
    3756             :     long        new_strategy_delta;
    3757             :     uint32      new_recent_alloc;
    3758             : 
    3759             :     /*
    3760             :      * Find out where the clock-sweep currently is, and how many buffer
    3761             :      * allocations have happened since our last call.
    3762             :      */
    3763       23482 :     strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
    3764             : 
    3765             :     /* Report buffer alloc counts to pgstat */
    3766       23482 :     PendingBgWriterStats.buf_alloc += recent_alloc;
    3767             : 
    3768             :     /*
    3769             :      * If we're not running the LRU scan, just stop after doing the stats
    3770             :      * stuff.  We mark the saved state invalid so that we can recover sanely
    3771             :      * if LRU scan is turned back on later.
    3772             :      */
    3773       23482 :     if (bgwriter_lru_maxpages <= 0)
    3774             :     {
    3775          80 :         saved_info_valid = false;
    3776          80 :         return true;
    3777             :     }
    3778             : 
    3779             :     /*
    3780             :      * Compute strategy_delta = how many buffers have been scanned by the
    3781             :      * clock-sweep since last time.  If first time through, assume none. Then
    3782             :      * see if we are still ahead of the clock-sweep, and if so, how many
    3783             :      * buffers we could scan before we'd catch up with it and "lap" it. Note:
    3784             :      * weird-looking coding of xxx_passes comparisons are to avoid bogus
    3785             :      * behavior when the passes counts wrap around.
    3786             :      */
    3787       23402 :     if (saved_info_valid)
    3788             :     {
    3789       22280 :         int32       passes_delta = strategy_passes - prev_strategy_passes;
    3790             : 
    3791       22280 :         strategy_delta = strategy_buf_id - prev_strategy_buf_id;
    3792       22280 :         strategy_delta += (long) passes_delta * NBuffers;
    3793             : 
    3794             :         Assert(strategy_delta >= 0);
    3795             : 
    3796       22280 :         if ((int32) (next_passes - strategy_passes) > 0)
    3797             :         {
    3798             :             /* we're one pass ahead of the strategy point */
    3799        3892 :             bufs_to_lap = strategy_buf_id - next_to_clean;
    3800             : #ifdef BGW_DEBUG
    3801             :             elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
    3802             :                  next_passes, next_to_clean,
    3803             :                  strategy_passes, strategy_buf_id,
    3804             :                  strategy_delta, bufs_to_lap);
    3805             : #endif
    3806             :         }
    3807       18388 :         else if (next_passes == strategy_passes &&
    3808       14532 :                  next_to_clean >= strategy_buf_id)
    3809             :         {
    3810             :             /* on same pass, but ahead or at least not behind */
    3811       13146 :             bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
    3812             : #ifdef BGW_DEBUG
    3813             :             elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
    3814             :                  next_passes, next_to_clean,
    3815             :                  strategy_passes, strategy_buf_id,
    3816             :                  strategy_delta, bufs_to_lap);
    3817             : #endif
    3818             :         }
    3819             :         else
    3820             :         {
    3821             :             /*
    3822             :              * We're behind, so skip forward to the strategy point and start
    3823             :              * cleaning from there.
    3824             :              */
    3825             : #ifdef BGW_DEBUG
    3826             :             elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
    3827             :                  next_passes, next_to_clean,
    3828             :                  strategy_passes, strategy_buf_id,
    3829             :                  strategy_delta);
    3830             : #endif
    3831        5242 :             next_to_clean = strategy_buf_id;
    3832        5242 :             next_passes = strategy_passes;
    3833        5242 :             bufs_to_lap = NBuffers;
    3834             :         }
    3835             :     }
    3836             :     else
    3837             :     {
    3838             :         /*
    3839             :          * Initializing at startup or after LRU scanning had been off. Always
    3840             :          * start at the strategy point.
    3841             :          */
    3842             : #ifdef BGW_DEBUG
    3843             :         elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
    3844             :              strategy_passes, strategy_buf_id);
    3845             : #endif
    3846        1122 :         strategy_delta = 0;
    3847        1122 :         next_to_clean = strategy_buf_id;
    3848        1122 :         next_passes = strategy_passes;
    3849        1122 :         bufs_to_lap = NBuffers;
    3850             :     }
    3851             : 
    3852             :     /* Update saved info for next time */
    3853       23402 :     prev_strategy_buf_id = strategy_buf_id;
    3854       23402 :     prev_strategy_passes = strategy_passes;
    3855       23402 :     saved_info_valid = true;
    3856             : 
    3857             :     /*
    3858             :      * Compute how many buffers had to be scanned for each new allocation, ie,
    3859             :      * 1/density of reusable buffers, and track a moving average of that.
    3860             :      *
    3861             :      * If the strategy point didn't move, we don't update the density estimate
    3862             :      */
    3863       23402 :     if (strategy_delta > 0 && recent_alloc > 0)
    3864             :     {
    3865       11990 :         scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
    3866       11990 :         smoothed_density += (scans_per_alloc - smoothed_density) /
    3867             :             smoothing_samples;
    3868             :     }
    3869             : 
    3870             :     /*
    3871             :      * Estimate how many reusable buffers there are between the current
    3872             :      * strategy point and where we've scanned ahead to, based on the smoothed
    3873             :      * density estimate.
    3874             :      */
    3875       23402 :     bufs_ahead = NBuffers - bufs_to_lap;
    3876       23402 :     reusable_buffers_est = (float) bufs_ahead / smoothed_density;
    3877             : 
    3878             :     /*
    3879             :      * Track a moving average of recent buffer allocations.  Here, rather than
    3880             :      * a true average we want a fast-attack, slow-decline behavior: we
    3881             :      * immediately follow any increase.
    3882             :      */
    3883       23402 :     if (smoothed_alloc <= (float) recent_alloc)
    3884        7036 :         smoothed_alloc = recent_alloc;
    3885             :     else
    3886       16366 :         smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
    3887             :             smoothing_samples;
    3888             : 
    3889             :     /* Scale the estimate by a GUC to allow more aggressive tuning. */
    3890       23402 :     upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
    3891             : 
    3892             :     /*
    3893             :      * If recent_alloc remains at zero for many cycles, smoothed_alloc will
    3894             :      * eventually underflow to zero, and the underflows produce annoying
    3895             :      * kernel warnings on some platforms.  Once upcoming_alloc_est has gone to
    3896             :      * zero, there's no point in tracking smaller and smaller values of
    3897             :      * smoothed_alloc, so just reset it to exactly zero to avoid this
    3898             :      * syndrome.  It will pop back up as soon as recent_alloc increases.
    3899             :      */
    3900       23402 :     if (upcoming_alloc_est == 0)
    3901        4374 :         smoothed_alloc = 0;
    3902             : 
    3903             :     /*
    3904             :      * Even in cases where there's been little or no buffer allocation
    3905             :      * activity, we want to make a small amount of progress through the buffer
    3906             :      * cache so that as many reusable buffers as possible are clean after an
    3907             :      * idle period.
    3908             :      *
    3909             :      * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
    3910             :      * the BGW will be called during the scan_whole_pool time; slice the
    3911             :      * buffer pool into that many sections.
    3912             :      */
    3913       23402 :     min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
    3914             : 
    3915       23402 :     if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
    3916             :     {
    3917             : #ifdef BGW_DEBUG
    3918             :         elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
    3919             :              upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
    3920             : #endif
    3921       11252 :         upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
    3922             :     }
    3923             : 
    3924             :     /*
    3925             :      * Now write out dirty reusable buffers, working forward from the
    3926             :      * next_to_clean point, until we have lapped the strategy scan, or cleaned
    3927             :      * enough buffers to match our estimate of the next cycle's allocation
    3928             :      * requirements, or hit the bgwriter_lru_maxpages limit.
    3929             :      */
    3930             : 
    3931       23402 :     num_to_scan = bufs_to_lap;
    3932       23402 :     num_written = 0;
    3933       23402 :     reusable_buffers = reusable_buffers_est;
    3934             : 
    3935             :     /* Execute the LRU scan */
    3936     3447854 :     while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
    3937             :     {
    3938     3424452 :         int         sync_state = SyncOneBuffer(next_to_clean, true,
    3939             :                                                wb_context);
    3940             : 
    3941     3424452 :         if (++next_to_clean >= NBuffers)
    3942             :         {
    3943        4716 :             next_to_clean = 0;
    3944        4716 :             next_passes++;
    3945             :         }
    3946     3424452 :         num_to_scan--;
    3947             : 
    3948     3424452 :         if (sync_state & BUF_WRITTEN)
    3949             :         {
    3950       36160 :             reusable_buffers++;
    3951       36160 :             if (++num_written >= bgwriter_lru_maxpages)
    3952             :             {
    3953           0 :                 PendingBgWriterStats.maxwritten_clean++;
    3954           0 :                 break;
    3955             :             }
    3956             :         }
    3957     3388292 :         else if (sync_state & BUF_REUSABLE)
    3958     2755144 :             reusable_buffers++;
    3959             :     }
    3960             : 
    3961       23402 :     PendingBgWriterStats.buf_written_clean += num_written;
    3962             : 
    3963             : #ifdef BGW_DEBUG
    3964             :     elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
    3965             :          recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
    3966             :          smoothed_density, reusable_buffers_est, upcoming_alloc_est,
    3967             :          bufs_to_lap - num_to_scan,
    3968             :          num_written,
    3969             :          reusable_buffers - reusable_buffers_est);
    3970             : #endif
    3971             : 
    3972             :     /*
    3973             :      * Consider the above scan as being like a new allocation scan.
    3974             :      * Characterize its density and update the smoothed one based on it. This
    3975             :      * effectively halves the moving average period in cases where both the
    3976             :      * strategy and the background writer are doing some useful scanning,
    3977             :      * which is helpful because a long memory isn't as desirable on the
    3978             :      * density estimates.
    3979             :      */
    3980       23402 :     new_strategy_delta = bufs_to_lap - num_to_scan;
    3981       23402 :     new_recent_alloc = reusable_buffers - reusable_buffers_est;
    3982       23402 :     if (new_strategy_delta > 0 && new_recent_alloc > 0)
    3983             :     {
    3984       19232 :         scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
    3985       19232 :         smoothed_density += (scans_per_alloc - smoothed_density) /
    3986             :             smoothing_samples;
    3987             : 
    3988             : #ifdef BGW_DEBUG
    3989             :         elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
    3990             :              new_recent_alloc, new_strategy_delta,
    3991             :              scans_per_alloc, smoothed_density);
    3992             : #endif
    3993             :     }
    3994             : 
    3995             :     /* Return true if OK to hibernate */
    3996       23402 :     return (bufs_to_lap == 0 && recent_alloc == 0);
    3997             : }
    3998             : 
    3999             : /*
    4000             :  * SyncOneBuffer -- process a single buffer during syncing.
    4001             :  *
    4002             :  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
    4003             :  * buffers marked recently used, as these are not replacement candidates.
    4004             :  *
    4005             :  * Returns a bitmask containing the following flag bits:
    4006             :  *  BUF_WRITTEN: we wrote the buffer.
    4007             :  *  BUF_REUSABLE: buffer is available for replacement, ie, it has
    4008             :  *      pin count 0 and usage count 0.
    4009             :  *
    4010             :  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
    4011             :  * after locking it, but we don't care all that much.)
    4012             :  */
    4013             : static int
    4014     3979778 : SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
    4015             : {
    4016     3979778 :     BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
    4017     3979778 :     int         result = 0;
    4018             :     uint32      buf_state;
    4019             :     BufferTag   tag;
    4020             : 
    4021             :     /* Make sure we can handle the pin */
    4022     3979778 :     ReservePrivateRefCountEntry();
    4023     3979778 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    4024             : 
    4025             :     /*
    4026             :      * Check whether buffer needs writing.
    4027             :      *
    4028             :      * We can make this check without taking the buffer content lock so long
    4029             :      * as we mark pages dirty in access methods *before* logging changes with
    4030             :      * XLogInsert(): if someone marks the buffer dirty just after our check we
    4031             :      * don't worry because our checkpoint.redo points before log record for
    4032             :      * upcoming changes and so we are not required to write such dirty buffer.
    4033             :      */
    4034     3979778 :     buf_state = LockBufHdr(bufHdr);
    4035             : 
    4036     3979778 :     if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
    4037     3972654 :         BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
    4038             :     {
    4039     2795422 :         result |= BUF_REUSABLE;
    4040             :     }
    4041     1184356 :     else if (skip_recently_used)
    4042             :     {
    4043             :         /* Caller told us not to write recently-used buffers */
    4044      633148 :         UnlockBufHdr(bufHdr);
    4045      633148 :         return result;
    4046             :     }
    4047             : 
    4048     3346630 :     if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
    4049             :     {
    4050             :         /* It's clean, so nothing to do */
    4051     2755144 :         UnlockBufHdr(bufHdr);
    4052     2755144 :         return result;
    4053             :     }
    4054             : 
    4055             :     /*
    4056             :      * Pin it, share-lock it, write it.  (FlushBuffer will do nothing if the
    4057             :      * buffer is clean by the time we've locked it.)
    4058             :      */
    4059      591486 :     PinBuffer_Locked(bufHdr);
    4060             : 
    4061      591486 :     FlushUnlockedBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4062             : 
    4063      591486 :     tag = bufHdr->tag;
    4064             : 
    4065      591486 :     UnpinBuffer(bufHdr);
    4066             : 
    4067             :     /*
    4068             :      * SyncOneBuffer() is only called by checkpointer and bgwriter, so
    4069             :      * IOContext will always be IOCONTEXT_NORMAL.
    4070             :      */
    4071      591486 :     ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
    4072             : 
    4073      591486 :     return result | BUF_WRITTEN;
    4074             : }
    4075             : 
    4076             : /*
    4077             :  *      AtEOXact_Buffers - clean up at end of transaction.
    4078             :  *
    4079             :  *      As of PostgreSQL 8.0, buffer pins should get released by the
    4080             :  *      ResourceOwner mechanism.  This routine is just a debugging
    4081             :  *      cross-check that no pins remain.
    4082             :  */
    4083             : void
    4084     1052322 : AtEOXact_Buffers(bool isCommit)
    4085             : {
    4086     1052322 :     CheckForBufferLeaks();
    4087             : 
    4088     1052322 :     AtEOXact_LocalBuffers(isCommit);
    4089             : 
    4090             :     Assert(PrivateRefCountOverflowed == 0);
    4091     1052322 : }
    4092             : 
    4093             : /*
    4094             :  * Initialize access to shared buffer pool
    4095             :  *
    4096             :  * This is called during backend startup (whether standalone or under the
    4097             :  * postmaster).  It sets up for this backend's access to the already-existing
    4098             :  * buffer pool.
    4099             :  */
    4100             : void
    4101       45208 : InitBufferManagerAccess(void)
    4102             : {
    4103             :     HASHCTL     hash_ctl;
    4104             : 
    4105             :     /*
    4106             :      * An advisory limit on the number of pins each backend should hold, based
    4107             :      * on shared_buffers and the maximum number of connections possible.
    4108             :      * That's very pessimistic, but outside toy-sized shared_buffers it should
    4109             :      * allow plenty of pins.  LimitAdditionalPins() and
    4110             :      * GetAdditionalPinLimit() can be used to check the remaining balance.
    4111             :      */
    4112       45208 :     MaxProportionalPins = NBuffers / (MaxBackends + NUM_AUXILIARY_PROCS);
    4113             : 
    4114       45208 :     memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
    4115       45208 :     memset(&PrivateRefCountArrayKeys, 0, sizeof(PrivateRefCountArrayKeys));
    4116             : 
    4117       45208 :     hash_ctl.keysize = sizeof(Buffer);
    4118       45208 :     hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
    4119             : 
    4120       45208 :     PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
    4121             :                                       HASH_ELEM | HASH_BLOBS);
    4122             : 
    4123             :     /*
    4124             :      * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
    4125             :      * the corresponding phase of backend shutdown.
    4126             :      */
    4127             :     Assert(MyProc != NULL);
    4128       45208 :     on_shmem_exit(AtProcExit_Buffers, 0);
    4129       45208 : }
    4130             : 
    4131             : /*
    4132             :  * During backend exit, ensure that we released all shared-buffer locks and
    4133             :  * assert that we have no remaining pins.
    4134             :  */
    4135             : static void
    4136       45208 : AtProcExit_Buffers(int code, Datum arg)
    4137             : {
    4138       45208 :     UnlockBuffers();
    4139             : 
    4140       45208 :     CheckForBufferLeaks();
    4141             : 
    4142             :     /* localbuf.c needs a chance too */
    4143       45208 :     AtProcExit_LocalBuffers();
    4144       45208 : }
    4145             : 
    4146             : /*
    4147             :  *      CheckForBufferLeaks - ensure this backend holds no buffer pins
    4148             :  *
    4149             :  *      As of PostgreSQL 8.0, buffer pins should get released by the
    4150             :  *      ResourceOwner mechanism.  This routine is just a debugging
    4151             :  *      cross-check that no pins remain.
    4152             :  */
    4153             : static void
    4154     1097530 : CheckForBufferLeaks(void)
    4155             : {
    4156             : #ifdef USE_ASSERT_CHECKING
    4157             :     int         RefCountErrors = 0;
    4158             :     PrivateRefCountEntry *res;
    4159             :     int         i;
    4160             :     char       *s;
    4161             : 
    4162             :     /* check the array */
    4163             :     for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
    4164             :     {
    4165             :         if (PrivateRefCountArrayKeys[i] != InvalidBuffer)
    4166             :         {
    4167             :             res = &PrivateRefCountArray[i];
    4168             : 
    4169             :             s = DebugPrintBufferRefcount(res->buffer);
    4170             :             elog(WARNING, "buffer refcount leak: %s", s);
    4171             :             pfree(s);
    4172             : 
    4173             :             RefCountErrors++;
    4174             :         }
    4175             :     }
    4176             : 
    4177             :     /* if necessary search the hash */
    4178             :     if (PrivateRefCountOverflowed)
    4179             :     {
    4180             :         HASH_SEQ_STATUS hstat;
    4181             : 
    4182             :         hash_seq_init(&hstat, PrivateRefCountHash);
    4183             :         while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
    4184             :         {
    4185             :             s = DebugPrintBufferRefcount(res->buffer);
    4186             :             elog(WARNING, "buffer refcount leak: %s", s);
    4187             :             pfree(s);
    4188             :             RefCountErrors++;
    4189             :         }
    4190             :     }
    4191             : 
    4192             :     Assert(RefCountErrors == 0);
    4193             : #endif
    4194     1097530 : }
    4195             : 
    4196             : #ifdef USE_ASSERT_CHECKING
    4197             : /*
    4198             :  * Check for exclusive-locked catalog buffers.  This is the core of
    4199             :  * AssertCouldGetRelation().
    4200             :  *
    4201             :  * A backend would self-deadlock on LWLocks if the catalog scan read the
    4202             :  * exclusive-locked buffer.  The main threat is exclusive-locked buffers of
    4203             :  * catalogs used in relcache, because a catcache search on any catalog may
    4204             :  * build that catalog's relcache entry.  We don't have an inventory of
    4205             :  * catalogs relcache uses, so just check buffers of most catalogs.
    4206             :  *
    4207             :  * It's better to minimize waits while holding an exclusive buffer lock, so it
    4208             :  * would be nice to broaden this check not to be catalog-specific.  However,
    4209             :  * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
    4210             :  * read tables.  That is deadlock-free as long as there's no loop in the
    4211             :  * dependency graph: modifying table A may cause an opclass to read table B,
    4212             :  * but it must not cause a read of table A.
    4213             :  */
    4214             : void
    4215             : AssertBufferLocksPermitCatalogRead(void)
    4216             : {
    4217             :     ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
    4218             : }
    4219             : 
    4220             : static void
    4221             : AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
    4222             :                            void *unused_context)
    4223             : {
    4224             :     BufferDesc *bufHdr;
    4225             :     BufferTag   tag;
    4226             :     Oid         relid;
    4227             : 
    4228             :     if (mode != LW_EXCLUSIVE)
    4229             :         return;
    4230             : 
    4231             :     if (!((BufferDescPadded *) lock > BufferDescriptors &&
    4232             :           (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
    4233             :         return;                 /* not a buffer lock */
    4234             : 
    4235             :     bufHdr = (BufferDesc *)
    4236             :         ((char *) lock - offsetof(BufferDesc, content_lock));
    4237             :     tag = bufHdr->tag;
    4238             : 
    4239             :     /*
    4240             :      * This relNumber==relid assumption holds until a catalog experiences
    4241             :      * VACUUM FULL or similar.  After a command like that, relNumber will be
    4242             :      * in the normal (non-catalog) range, and we lose the ability to detect
    4243             :      * hazardous access to that catalog.  Calling RelidByRelfilenumber() would
    4244             :      * close that gap, but RelidByRelfilenumber() might then deadlock with a
    4245             :      * held lock.
    4246             :      */
    4247             :     relid = tag.relNumber;
    4248             : 
    4249             :     if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
    4250             :         return;
    4251             : 
    4252             :     Assert(!IsCatalogRelationOid(relid));
    4253             : }
    4254             : #endif
    4255             : 
    4256             : 
    4257             : /*
    4258             :  * Helper routine to issue warnings when a buffer is unexpectedly pinned
    4259             :  */
    4260             : char *
    4261          80 : DebugPrintBufferRefcount(Buffer buffer)
    4262             : {
    4263             :     BufferDesc *buf;
    4264             :     int32       loccount;
    4265             :     char       *result;
    4266             :     ProcNumber  backend;
    4267             :     uint32      buf_state;
    4268             : 
    4269             :     Assert(BufferIsValid(buffer));
    4270          80 :     if (BufferIsLocal(buffer))
    4271             :     {
    4272          32 :         buf = GetLocalBufferDescriptor(-buffer - 1);
    4273          32 :         loccount = LocalRefCount[-buffer - 1];
    4274          32 :         backend = MyProcNumber;
    4275             :     }
    4276             :     else
    4277             :     {
    4278          48 :         buf = GetBufferDescriptor(buffer - 1);
    4279          48 :         loccount = GetPrivateRefCount(buffer);
    4280          48 :         backend = INVALID_PROC_NUMBER;
    4281             :     }
    4282             : 
    4283             :     /* theoretically we should lock the bufhdr here */
    4284          80 :     buf_state = pg_atomic_read_u32(&buf->state);
    4285             : 
    4286          80 :     result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
    4287             :                       buffer,
    4288          80 :                       relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
    4289             :                                      BufTagGetForkNum(&buf->tag)).str,
    4290             :                       buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
    4291             :                       BUF_STATE_GET_REFCOUNT(buf_state), loccount);
    4292          80 :     return result;
    4293             : }
    4294             : 
    4295             : /*
    4296             :  * CheckPointBuffers
    4297             :  *
    4298             :  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
    4299             :  *
    4300             :  * Note: temporary relations do not participate in checkpoints, so they don't
    4301             :  * need to be flushed.
    4302             :  */
    4303             : void
    4304        3546 : CheckPointBuffers(int flags)
    4305             : {
    4306        3546 :     BufferSync(flags);
    4307        3546 : }
    4308             : 
    4309             : /*
    4310             :  * BufferGetBlockNumber
    4311             :  *      Returns the block number associated with a buffer.
    4312             :  *
    4313             :  * Note:
    4314             :  *      Assumes that the buffer is valid and pinned, else the
    4315             :  *      value may be obsolete immediately...
    4316             :  */
    4317             : BlockNumber
    4318   100710140 : BufferGetBlockNumber(Buffer buffer)
    4319             : {
    4320             :     BufferDesc *bufHdr;
    4321             : 
    4322             :     Assert(BufferIsPinned(buffer));
    4323             : 
    4324   100710140 :     if (BufferIsLocal(buffer))
    4325     3810286 :         bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    4326             :     else
    4327    96899854 :         bufHdr = GetBufferDescriptor(buffer - 1);
    4328             : 
    4329             :     /* pinned, so OK to read tag without spinlock */
    4330   100710140 :     return bufHdr->tag.blockNum;
    4331             : }
    4332             : 
    4333             : /*
    4334             :  * BufferGetTag
    4335             :  *      Returns the relfilelocator, fork number and block number associated with
    4336             :  *      a buffer.
    4337             :  */
    4338             : void
    4339    31721372 : BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
    4340             :              BlockNumber *blknum)
    4341             : {
    4342             :     BufferDesc *bufHdr;
    4343             : 
    4344             :     /* Do the same checks as BufferGetBlockNumber. */
    4345             :     Assert(BufferIsPinned(buffer));
    4346             : 
    4347    31721372 :     if (BufferIsLocal(buffer))
    4348           0 :         bufHdr = GetLocalBufferDescriptor(-buffer - 1);
    4349             :     else
    4350    31721372 :         bufHdr = GetBufferDescriptor(buffer - 1);
    4351             : 
    4352             :     /* pinned, so OK to read tag without spinlock */
    4353    31721372 :     *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
    4354    31721372 :     *forknum = BufTagGetForkNum(&bufHdr->tag);
    4355    31721372 :     *blknum = bufHdr->tag.blockNum;
    4356    31721372 : }
    4357             : 
    4358             : /*
    4359             :  * FlushBuffer
    4360             :  *      Physically write out a shared buffer.
    4361             :  *
    4362             :  * NOTE: this actually just passes the buffer contents to the kernel; the
    4363             :  * real write to disk won't happen until the kernel feels like it.  This
    4364             :  * is okay from our point of view since we can redo the changes from WAL.
    4365             :  * However, we will need to force the changes to disk via fsync before
    4366             :  * we can checkpoint WAL.
    4367             :  *
    4368             :  * The caller must hold a pin on the buffer and have share-locked the
    4369             :  * buffer contents.  (Note: a share-lock does not prevent updates of
    4370             :  * hint bits in the buffer, so the page could change while the write
    4371             :  * is in progress, but we assume that that will not invalidate the data
    4372             :  * written.)
    4373             :  *
    4374             :  * If the caller has an smgr reference for the buffer's relation, pass it
    4375             :  * as the second parameter.  If not, pass NULL.
    4376             :  */
    4377             : static void
    4378     1139716 : FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
    4379             :             IOContext io_context)
    4380             : {
    4381             :     XLogRecPtr  recptr;
    4382             :     ErrorContextCallback errcallback;
    4383             :     instr_time  io_start;
    4384             :     Block       bufBlock;
    4385             :     char       *bufToWrite;
    4386             :     uint32      buf_state;
    4387             : 
    4388             :     /*
    4389             :      * Try to start an I/O operation.  If StartBufferIO returns false, then
    4390             :      * someone else flushed the buffer before we could, so we need not do
    4391             :      * anything.
    4392             :      */
    4393     1139716 :     if (!StartBufferIO(buf, false, false))
    4394          28 :         return;
    4395             : 
    4396             :     /* Setup error traceback support for ereport() */
    4397     1139688 :     errcallback.callback = shared_buffer_write_error_callback;
    4398     1139688 :     errcallback.arg = buf;
    4399     1139688 :     errcallback.previous = error_context_stack;
    4400     1139688 :     error_context_stack = &errcallback;
    4401             : 
    4402             :     /* Find smgr relation for buffer */
    4403     1139688 :     if (reln == NULL)
    4404     1134886 :         reln = smgropen(BufTagGetRelFileLocator(&buf->tag), INVALID_PROC_NUMBER);
    4405             : 
    4406             :     TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
    4407             :                                         buf->tag.blockNum,
    4408             :                                         reln->smgr_rlocator.locator.spcOid,
    4409             :                                         reln->smgr_rlocator.locator.dbOid,
    4410             :                                         reln->smgr_rlocator.locator.relNumber);
    4411             : 
    4412     1139688 :     buf_state = LockBufHdr(buf);
    4413             : 
    4414             :     /*
    4415             :      * Run PageGetLSN while holding header lock, since we don't have the
    4416             :      * buffer locked exclusively in all cases.
    4417             :      */
    4418     1139688 :     recptr = BufferGetLSN(buf);
    4419             : 
    4420             :     /* To check if block content changes while flushing. - vadim 01/17/97 */
    4421     1139688 :     UnlockBufHdrExt(buf, buf_state,
    4422             :                     0, BM_JUST_DIRTIED,
    4423             :                     0);
    4424             : 
    4425             :     /*
    4426             :      * Force XLOG flush up to buffer's LSN.  This implements the basic WAL
    4427             :      * rule that log updates must hit disk before any of the data-file changes
    4428             :      * they describe do.
    4429             :      *
    4430             :      * However, this rule does not apply to unlogged relations, which will be
    4431             :      * lost after a crash anyway.  Most unlogged relation pages do not bear
    4432             :      * LSNs since we never emit WAL records for them, and therefore flushing
    4433             :      * up through the buffer LSN would be useless, but harmless.  However,
    4434             :      * GiST indexes use LSNs internally to track page-splits, and therefore
    4435             :      * unlogged GiST pages bear "fake" LSNs generated by
    4436             :      * GetFakeLSNForUnloggedRel.  It is unlikely but possible that the fake
    4437             :      * LSN counter could advance past the WAL insertion point; and if it did
    4438             :      * happen, attempting to flush WAL through that location would fail, with
    4439             :      * disastrous system-wide consequences.  To make sure that can't happen,
    4440             :      * skip the flush if the buffer isn't permanent.
    4441             :      */
    4442     1139688 :     if (buf_state & BM_PERMANENT)
    4443     1136088 :         XLogFlush(recptr);
    4444             : 
    4445             :     /*
    4446             :      * Now it's safe to write the buffer to disk. Note that no one else should
    4447             :      * have been able to write it, while we were busy with log flushing,
    4448             :      * because we got the exclusive right to perform I/O by setting the
    4449             :      * BM_IO_IN_PROGRESS bit.
    4450             :      */
    4451     1139688 :     bufBlock = BufHdrGetBlock(buf);
    4452             : 
    4453             :     /*
    4454             :      * Update page checksum if desired.  Since we have only shared lock on the
    4455             :      * buffer, other processes might be updating hint bits in it, so we must
    4456             :      * copy the page to private storage if we do checksumming.
    4457             :      */
    4458     1139688 :     bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
    4459             : 
    4460     1139688 :     io_start = pgstat_prepare_io_time(track_io_timing);
    4461             : 
    4462             :     /*
    4463             :      * bufToWrite is either the shared buffer or a copy, as appropriate.
    4464             :      */
    4465     1139688 :     smgrwrite(reln,
    4466     1139688 :               BufTagGetForkNum(&buf->tag),
    4467             :               buf->tag.blockNum,
    4468             :               bufToWrite,
    4469             :               false);
    4470             : 
    4471             :     /*
    4472             :      * When a strategy is in use, only flushes of dirty buffers already in the
    4473             :      * strategy ring are counted as strategy writes (IOCONTEXT
    4474             :      * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
    4475             :      * statistics tracking.
    4476             :      *
    4477             :      * If a shared buffer initially added to the ring must be flushed before
    4478             :      * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
    4479             :      *
    4480             :      * If a shared buffer which was added to the ring later because the
    4481             :      * current strategy buffer is pinned or in use or because all strategy
    4482             :      * buffers were dirty and rejected (for BAS_BULKREAD operations only)
    4483             :      * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
    4484             :      * (from_ring will be false).
    4485             :      *
    4486             :      * When a strategy is not in use, the write can only be a "regular" write
    4487             :      * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
    4488             :      */
    4489     1139688 :     pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
    4490             :                             IOOP_WRITE, io_start, 1, BLCKSZ);
    4491             : 
    4492     1139688 :     pgBufferUsage.shared_blks_written++;
    4493             : 
    4494             :     /*
    4495             :      * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
    4496             :      * end the BM_IO_IN_PROGRESS state.
    4497             :      */
    4498     1139688 :     TerminateBufferIO(buf, true, 0, true, false);
    4499             : 
    4500             :     TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
    4501             :                                        buf->tag.blockNum,
    4502             :                                        reln->smgr_rlocator.locator.spcOid,
    4503             :                                        reln->smgr_rlocator.locator.dbOid,
    4504             :                                        reln->smgr_rlocator.locator.relNumber);
    4505             : 
    4506             :     /* Pop the error context stack */
    4507     1139688 :     error_context_stack = errcallback.previous;
    4508             : }
    4509             : 
    4510             : /*
    4511             :  * Convenience wrapper around FlushBuffer() that locks/unlocks the buffer
    4512             :  * before/after calling FlushBuffer().
    4513             :  */
    4514             : static void
    4515      598386 : FlushUnlockedBuffer(BufferDesc *buf, SMgrRelation reln,
    4516             :                     IOObject io_object, IOContext io_context)
    4517             : {
    4518      598386 :     LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
    4519      598386 :     FlushBuffer(buf, reln, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    4520      598386 :     LWLockRelease(BufferDescriptorGetContentLock(buf));
    4521      598386 : }
    4522             : 
    4523             : /*
    4524             :  * RelationGetNumberOfBlocksInFork
    4525             :  *      Determines the current number of pages in the specified relation fork.
    4526             :  *
    4527             :  * Note that the accuracy of the result will depend on the details of the
    4528             :  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
    4529             :  * it might not be.
    4530             :  */
    4531             : BlockNumber
    4532     3795930 : RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
    4533             : {
    4534     3795930 :     if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
    4535             :     {
    4536             :         /*
    4537             :          * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
    4538             :          * tableam returns the size in bytes - but for the purpose of this
    4539             :          * routine, we want the number of blocks. Therefore divide, rounding
    4540             :          * up.
    4541             :          */
    4542             :         uint64      szbytes;
    4543             : 
    4544     2875040 :         szbytes = table_relation_size(relation, forkNum);
    4545             : 
    4546     2875002 :         return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
    4547             :     }
    4548      920890 :     else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
    4549             :     {
    4550      920890 :         return smgrnblocks(RelationGetSmgr(relation), forkNum);
    4551             :     }
    4552             :     else
    4553             :         Assert(false);
    4554             : 
    4555           0 :     return 0;                   /* keep compiler quiet */
    4556             : }
    4557             : 
    4558             : /*
    4559             :  * BufferIsPermanent
    4560             :  *      Determines whether a buffer will potentially still be around after
    4561             :  *      a crash.  Caller must hold a buffer pin.
    4562             :  */
    4563             : bool
    4564    19325654 : BufferIsPermanent(Buffer buffer)
    4565             : {
    4566             :     BufferDesc *bufHdr;
    4567             : 
    4568             :     /* Local buffers are used only for temp relations. */
    4569    19325654 :     if (BufferIsLocal(buffer))
    4570     1253958 :         return false;
    4571             : 
    4572             :     /* Make sure we've got a real buffer, and that we hold a pin on it. */
    4573             :     Assert(BufferIsValid(buffer));
    4574             :     Assert(BufferIsPinned(buffer));
    4575             : 
    4576             :     /*
    4577             :      * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
    4578             :      * need not bother with the buffer header spinlock.  Even if someone else
    4579             :      * changes the buffer header state while we're doing this, the state is
    4580             :      * changed atomically, so we'll read the old value or the new value, but
    4581             :      * not random garbage.
    4582             :      */
    4583    18071696 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4584    18071696 :     return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
    4585             : }
    4586             : 
    4587             : /*
    4588             :  * BufferGetLSNAtomic
    4589             :  *      Retrieves the LSN of the buffer atomically using a buffer header lock.
    4590             :  *      This is necessary for some callers who may not have an exclusive lock
    4591             :  *      on the buffer.
    4592             :  */
    4593             : XLogRecPtr
    4594    14317034 : BufferGetLSNAtomic(Buffer buffer)
    4595             : {
    4596    14317034 :     char       *page = BufferGetPage(buffer);
    4597             :     BufferDesc *bufHdr;
    4598             :     XLogRecPtr  lsn;
    4599             : 
    4600             :     /*
    4601             :      * If we don't need locking for correctness, fastpath out.
    4602             :      */
    4603    14317034 :     if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
    4604      477976 :         return PageGetLSN(page);
    4605             : 
    4606             :     /* Make sure we've got a real buffer, and that we hold a pin on it. */
    4607             :     Assert(BufferIsValid(buffer));
    4608             :     Assert(BufferIsPinned(buffer));
    4609             : 
    4610    13839058 :     bufHdr = GetBufferDescriptor(buffer - 1);
    4611    13839058 :     LockBufHdr(bufHdr);
    4612    13839058 :     lsn = PageGetLSN(page);
    4613    13839058 :     UnlockBufHdr(bufHdr);
    4614             : 
    4615    13839058 :     return lsn;
    4616             : }
    4617             : 
    4618             : /* ---------------------------------------------------------------------
    4619             :  *      DropRelationBuffers
    4620             :  *
    4621             :  *      This function removes from the buffer pool all the pages of the
    4622             :  *      specified relation forks that have block numbers >= firstDelBlock.
    4623             :  *      (In particular, with firstDelBlock = 0, all pages are removed.)
    4624             :  *      Dirty pages are simply dropped, without bothering to write them
    4625             :  *      out first.  Therefore, this is NOT rollback-able, and so should be
    4626             :  *      used only with extreme caution!
    4627             :  *
    4628             :  *      Currently, this is called only from smgr.c when the underlying file
    4629             :  *      is about to be deleted or truncated (firstDelBlock is needed for
    4630             :  *      the truncation case).  The data in the affected pages would therefore
    4631             :  *      be deleted momentarily anyway, and there is no point in writing it.
    4632             :  *      It is the responsibility of higher-level code to ensure that the
    4633             :  *      deletion or truncation does not lose any data that could be needed
    4634             :  *      later.  It is also the responsibility of higher-level code to ensure
    4635             :  *      that no other process could be trying to load more pages of the
    4636             :  *      relation into buffers.
    4637             :  * --------------------------------------------------------------------
    4638             :  */
    4639             : void
    4640        1252 : DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
    4641             :                     int nforks, BlockNumber *firstDelBlock)
    4642             : {
    4643             :     int         i;
    4644             :     int         j;
    4645             :     RelFileLocatorBackend rlocator;
    4646             :     BlockNumber nForkBlock[MAX_FORKNUM];
    4647        1252 :     uint64      nBlocksToInvalidate = 0;
    4648             : 
    4649        1252 :     rlocator = smgr_reln->smgr_rlocator;
    4650             : 
    4651             :     /* If it's a local relation, it's localbuf.c's problem. */
    4652        1252 :     if (RelFileLocatorBackendIsTemp(rlocator))
    4653             :     {
    4654         748 :         if (rlocator.backend == MyProcNumber)
    4655         748 :             DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
    4656             :                                      firstDelBlock);
    4657             : 
    4658         830 :         return;
    4659             :     }
    4660             : 
    4661             :     /*
    4662             :      * To remove all the pages of the specified relation forks from the buffer
    4663             :      * pool, we need to scan the entire buffer pool but we can optimize it by
    4664             :      * finding the buffers from BufMapping table provided we know the exact
    4665             :      * size of each fork of the relation. The exact size is required to ensure
    4666             :      * that we don't leave any buffer for the relation being dropped as
    4667             :      * otherwise the background writer or checkpointer can lead to a PANIC
    4668             :      * error while flushing buffers corresponding to files that don't exist.
    4669             :      *
    4670             :      * To know the exact size, we rely on the size cached for each fork by us
    4671             :      * during recovery which limits the optimization to recovery and on
    4672             :      * standbys but we can easily extend it once we have shared cache for
    4673             :      * relation size.
    4674             :      *
    4675             :      * In recovery, we cache the value returned by the first lseek(SEEK_END)
    4676             :      * and the future writes keeps the cached value up-to-date. See
    4677             :      * smgrextend. It is possible that the value of the first lseek is smaller
    4678             :      * than the actual number of existing blocks in the file due to buggy
    4679             :      * Linux kernels that might not have accounted for the recent write. But
    4680             :      * that should be fine because there must not be any buffers after that
    4681             :      * file size.
    4682             :      */
    4683         698 :     for (i = 0; i < nforks; i++)
    4684             :     {
    4685             :         /* Get the number of blocks for a relation's fork */
    4686         594 :         nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
    4687             : 
    4688         594 :         if (nForkBlock[i] == InvalidBlockNumber)
    4689             :         {
    4690         400 :             nBlocksToInvalidate = InvalidBlockNumber;
    4691         400 :             break;
    4692             :         }
    4693             : 
    4694             :         /* calculate the number of blocks to be invalidated */
    4695         194 :         nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
    4696             :     }
    4697             : 
    4698             :     /*
    4699             :      * We apply the optimization iff the total number of blocks to invalidate
    4700             :      * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
    4701             :      */
    4702         504 :     if (BlockNumberIsValid(nBlocksToInvalidate) &&
    4703         104 :         nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
    4704             :     {
    4705         228 :         for (j = 0; j < nforks; j++)
    4706         146 :             FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
    4707         146 :                                        nForkBlock[j], firstDelBlock[j]);
    4708          82 :         return;
    4709             :     }
    4710             : 
    4711     5483942 :     for (i = 0; i < NBuffers; i++)
    4712             :     {
    4713     5483520 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    4714             : 
    4715             :         /*
    4716             :          * We can make this a tad faster by prechecking the buffer tag before
    4717             :          * we attempt to lock the buffer; this saves a lot of lock
    4718             :          * acquisitions in typical cases.  It should be safe because the
    4719             :          * caller must have AccessExclusiveLock on the relation, or some other
    4720             :          * reason to be certain that no one is loading new pages of the rel
    4721             :          * into the buffer pool.  (Otherwise we might well miss such pages
    4722             :          * entirely.)  Therefore, while the tag might be changing while we
    4723             :          * look at it, it can't be changing *to* a value we care about, only
    4724             :          * *away* from such a value.  So false negatives are impossible, and
    4725             :          * false positives are safe because we'll recheck after getting the
    4726             :          * buffer lock.
    4727             :          *
    4728             :          * We could check forkNum and blockNum as well as the rlocator, but
    4729             :          * the incremental win from doing so seems small.
    4730             :          */
    4731     5483520 :         if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
    4732     5469434 :             continue;
    4733             : 
    4734       14086 :         LockBufHdr(bufHdr);
    4735             : 
    4736       36476 :         for (j = 0; j < nforks; j++)
    4737             :         {
    4738       25542 :             if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
    4739       25542 :                 BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
    4740       13906 :                 bufHdr->tag.blockNum >= firstDelBlock[j])
    4741             :             {
    4742        3152 :                 InvalidateBuffer(bufHdr);   /* releases spinlock */
    4743        3152 :                 break;
    4744             :             }
    4745             :         }
    4746       14086 :         if (j >= nforks)
    4747       10934 :             UnlockBufHdr(bufHdr);
    4748             :     }
    4749             : }
    4750             : 
    4751             : /* ---------------------------------------------------------------------
    4752             :  *      DropRelationsAllBuffers
    4753             :  *
    4754             :  *      This function removes from the buffer pool all the pages of all
    4755             :  *      forks of the specified relations.  It's equivalent to calling
    4756             :  *      DropRelationBuffers once per fork per relation with firstDelBlock = 0.
    4757             :  *      --------------------------------------------------------------------
    4758             :  */
    4759             : void
    4760       28496 : DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
    4761             : {
    4762             :     int         i;
    4763       28496 :     int         n = 0;
    4764             :     SMgrRelation *rels;
    4765             :     BlockNumber (*block)[MAX_FORKNUM + 1];
    4766       28496 :     uint64      nBlocksToInvalidate = 0;
    4767             :     RelFileLocator *locators;
    4768       28496 :     bool        cached = true;
    4769             :     bool        use_bsearch;
    4770             : 
    4771       28496 :     if (nlocators == 0)
    4772           0 :         return;
    4773             : 
    4774       28496 :     rels = palloc_array(SMgrRelation, nlocators);   /* non-local relations */
    4775             : 
    4776             :     /* If it's a local relation, it's localbuf.c's problem. */
    4777      124654 :     for (i = 0; i < nlocators; i++)
    4778             :     {
    4779       96158 :         if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
    4780             :         {
    4781        6504 :             if (smgr_reln[i]->smgr_rlocator.backend == MyProcNumber)
    4782        6504 :                 DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
    4783             :         }
    4784             :         else
    4785       89654 :             rels[n++] = smgr_reln[i];
    4786             :     }
    4787             : 
    4788             :     /*
    4789             :      * If there are no non-local relations, then we're done. Release the
    4790             :      * memory and return.
    4791             :      */
    4792       28496 :     if (n == 0)
    4793             :     {
    4794        1702 :         pfree(rels);
    4795        1702 :         return;
    4796             :     }
    4797             : 
    4798             :     /*
    4799             :      * This is used to remember the number of blocks for all the relations
    4800             :      * forks.
    4801             :      */
    4802             :     block = (BlockNumber (*)[MAX_FORKNUM + 1])
    4803       26794 :         palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
    4804             : 
    4805             :     /*
    4806             :      * We can avoid scanning the entire buffer pool if we know the exact size
    4807             :      * of each of the given relation forks. See DropRelationBuffers.
    4808             :      */
    4809       56282 :     for (i = 0; i < n && cached; i++)
    4810             :     {
    4811       47066 :         for (int j = 0; j <= MAX_FORKNUM; j++)
    4812             :         {
    4813             :             /* Get the number of blocks for a relation's fork. */
    4814       42702 :             block[i][j] = smgrnblocks_cached(rels[i], j);
    4815             : 
    4816             :             /* We need to only consider the relation forks that exists. */
    4817       42702 :             if (block[i][j] == InvalidBlockNumber)
    4818             :             {
    4819       38002 :                 if (!smgrexists(rels[i], j))
    4820       12878 :                     continue;
    4821       25124 :                 cached = false;
    4822       25124 :                 break;
    4823             :             }
    4824             : 
    4825             :             /* calculate the total number of blocks to be invalidated */
    4826        4700 :             nBlocksToInvalidate += block[i][j];
    4827             :         }
    4828             :     }
    4829             : 
    4830             :     /*
    4831             :      * We apply the optimization iff the total number of blocks to invalidate
    4832             :      * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
    4833             :      */
    4834       26794 :     if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
    4835             :     {
    4836        2762 :         for (i = 0; i < n; i++)
    4837             :         {
    4838        7610 :             for (int j = 0; j <= MAX_FORKNUM; j++)
    4839             :             {
    4840             :                 /* ignore relation forks that doesn't exist */
    4841        6088 :                 if (!BlockNumberIsValid(block[i][j]))
    4842        4548 :                     continue;
    4843             : 
    4844             :                 /* drop all the buffers for a particular relation fork */
    4845        1540 :                 FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
    4846        1540 :                                            j, block[i][j], 0);
    4847             :             }
    4848             :         }
    4849             : 
    4850        1240 :         pfree(block);
    4851        1240 :         pfree(rels);
    4852        1240 :         return;
    4853             :     }
    4854             : 
    4855       25554 :     pfree(block);
    4856       25554 :     locators = palloc_array(RelFileLocator, n); /* non-local relations */
    4857      113686 :     for (i = 0; i < n; i++)
    4858       88132 :         locators[i] = rels[i]->smgr_rlocator.locator;
    4859             : 
    4860             :     /*
    4861             :      * For low number of relations to drop just use a simple walk through, to
    4862             :      * save the bsearch overhead. The threshold to use is rather a guess than
    4863             :      * an exactly determined value, as it depends on many factors (CPU and RAM
    4864             :      * speeds, amount of shared buffers etc.).
    4865             :      */
    4866       25554 :     use_bsearch = n > RELS_BSEARCH_THRESHOLD;
    4867             : 
    4868             :     /* sort the list of rlocators if necessary */
    4869       25554 :     if (use_bsearch)
    4870         346 :         qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
    4871             : 
    4872   275486930 :     for (i = 0; i < NBuffers; i++)
    4873             :     {
    4874   275461376 :         RelFileLocator *rlocator = NULL;
    4875   275461376 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    4876             : 
    4877             :         /*
    4878             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    4879             :          * saves some cycles.
    4880             :          */
    4881             : 
    4882   275461376 :         if (!use_bsearch)
    4883             :         {
    4884             :             int         j;
    4885             : 
    4886  1105033090 :             for (j = 0; j < n; j++)
    4887             :             {
    4888   833464772 :                 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
    4889             :                 {
    4890      174914 :                     rlocator = &locators[j];
    4891      174914 :                     break;
    4892             :                 }
    4893             :             }
    4894             :         }
    4895             :         else
    4896             :         {
    4897             :             RelFileLocator locator;
    4898             : 
    4899     3718144 :             locator = BufTagGetRelFileLocator(&bufHdr->tag);
    4900     3718144 :             rlocator = bsearch(&locator,
    4901             :                                locators, n, sizeof(RelFileLocator),
    4902             :                                rlocator_comparator);
    4903             :         }
    4904             : 
    4905             :         /* buffer doesn't belong to any of the given relfilelocators; skip it */
    4906   275461376 :         if (rlocator == NULL)
    4907   275283518 :             continue;
    4908             : 
    4909      177858 :         LockBufHdr(bufHdr);
    4910      177858 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
    4911      177858 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    4912             :         else
    4913           0 :             UnlockBufHdr(bufHdr);
    4914             :     }
    4915             : 
    4916       25554 :     pfree(locators);
    4917       25554 :     pfree(rels);
    4918             : }
    4919             : 
    4920             : /* ---------------------------------------------------------------------
    4921             :  *      FindAndDropRelationBuffers
    4922             :  *
    4923             :  *      This function performs look up in BufMapping table and removes from the
    4924             :  *      buffer pool all the pages of the specified relation fork that has block
    4925             :  *      number >= firstDelBlock. (In particular, with firstDelBlock = 0, all
    4926             :  *      pages are removed.)
    4927             :  * --------------------------------------------------------------------
    4928             :  */
    4929             : static void
    4930        1686 : FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
    4931             :                            BlockNumber nForkBlock,
    4932             :                            BlockNumber firstDelBlock)
    4933             : {
    4934             :     BlockNumber curBlock;
    4935             : 
    4936        4054 :     for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
    4937             :     {
    4938             :         uint32      bufHash;    /* hash value for tag */
    4939             :         BufferTag   bufTag;     /* identity of requested block */
    4940             :         LWLock     *bufPartitionLock;   /* buffer partition lock for it */
    4941             :         int         buf_id;
    4942             :         BufferDesc *bufHdr;
    4943             : 
    4944             :         /* create a tag so we can lookup the buffer */
    4945        2368 :         InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
    4946             : 
    4947             :         /* determine its hash code and partition lock ID */
    4948        2368 :         bufHash = BufTableHashCode(&bufTag);
    4949        2368 :         bufPartitionLock = BufMappingPartitionLock(bufHash);
    4950             : 
    4951             :         /* Check that it is in the buffer pool. If not, do nothing. */
    4952        2368 :         LWLockAcquire(bufPartitionLock, LW_SHARED);
    4953        2368 :         buf_id = BufTableLookup(&bufTag, bufHash);
    4954        2368 :         LWLockRelease(bufPartitionLock);
    4955             : 
    4956        2368 :         if (buf_id < 0)
    4957         214 :             continue;
    4958             : 
    4959        2154 :         bufHdr = GetBufferDescriptor(buf_id);
    4960             : 
    4961             :         /*
    4962             :          * We need to lock the buffer header and recheck if the buffer is
    4963             :          * still associated with the same block because the buffer could be
    4964             :          * evicted by some other backend loading blocks for a different
    4965             :          * relation after we release lock on the BufMapping table.
    4966             :          */
    4967        2154 :         LockBufHdr(bufHdr);
    4968             : 
    4969        4308 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
    4970        2154 :             BufTagGetForkNum(&bufHdr->tag) == forkNum &&
    4971        2154 :             bufHdr->tag.blockNum >= firstDelBlock)
    4972        2154 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    4973             :         else
    4974           0 :             UnlockBufHdr(bufHdr);
    4975             :     }
    4976        1686 : }
    4977             : 
    4978             : /* ---------------------------------------------------------------------
    4979             :  *      DropDatabaseBuffers
    4980             :  *
    4981             :  *      This function removes all the buffers in the buffer cache for a
    4982             :  *      particular database.  Dirty pages are simply dropped, without
    4983             :  *      bothering to write them out first.  This is used when we destroy a
    4984             :  *      database, to avoid trying to flush data to disk when the directory
    4985             :  *      tree no longer exists.  Implementation is pretty similar to
    4986             :  *      DropRelationBuffers() which is for destroying just one relation.
    4987             :  * --------------------------------------------------------------------
    4988             :  */
    4989             : void
    4990         148 : DropDatabaseBuffers(Oid dbid)
    4991             : {
    4992             :     int         i;
    4993             : 
    4994             :     /*
    4995             :      * We needn't consider local buffers, since by assumption the target
    4996             :      * database isn't our own.
    4997             :      */
    4998             : 
    4999     1026964 :     for (i = 0; i < NBuffers; i++)
    5000             :     {
    5001     1026816 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    5002             : 
    5003             :         /*
    5004             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    5005             :          * saves some cycles.
    5006             :          */
    5007     1026816 :         if (bufHdr->tag.dbOid != dbid)
    5008     1001212 :             continue;
    5009             : 
    5010       25604 :         LockBufHdr(bufHdr);
    5011       25604 :         if (bufHdr->tag.dbOid == dbid)
    5012       25604 :             InvalidateBuffer(bufHdr);   /* releases spinlock */
    5013             :         else
    5014           0 :             UnlockBufHdr(bufHdr);
    5015             :     }
    5016         148 : }
    5017             : 
    5018             : /* ---------------------------------------------------------------------
    5019             :  *      FlushRelationBuffers
    5020             :  *
    5021             :  *      This function writes all dirty pages of a relation out to disk
    5022             :  *      (or more accurately, out to kernel disk buffers), ensuring that the
    5023             :  *      kernel has an up-to-date view of the relation.
    5024             :  *
    5025             :  *      Generally, the caller should be holding AccessExclusiveLock on the
    5026             :  *      target relation to ensure that no other backend is busy dirtying
    5027             :  *      more blocks of the relation; the effects can't be expected to last
    5028             :  *      after the lock is released.
    5029             :  *
    5030             :  *      XXX currently it sequentially searches the buffer pool, should be
    5031             :  *      changed to more clever ways of searching.  This routine is not
    5032             :  *      used in any performance-critical code paths, so it's not worth
    5033             :  *      adding additional overhead to normal paths to make it go faster.
    5034             :  * --------------------------------------------------------------------
    5035             :  */
    5036             : void
    5037         276 : FlushRelationBuffers(Relation rel)
    5038             : {
    5039             :     int         i;
    5040             :     BufferDesc *bufHdr;
    5041         276 :     SMgrRelation srel = RelationGetSmgr(rel);
    5042             : 
    5043         276 :     if (RelationUsesLocalBuffers(rel))
    5044             :     {
    5045        1818 :         for (i = 0; i < NLocBuffer; i++)
    5046             :         {
    5047             :             uint32      buf_state;
    5048             : 
    5049        1800 :             bufHdr = GetLocalBufferDescriptor(i);
    5050        1800 :             if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
    5051         600 :                 ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
    5052             :                  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    5053             :             {
    5054             :                 ErrorContextCallback errcallback;
    5055             : 
    5056             :                 /* Setup error traceback support for ereport() */
    5057         600 :                 errcallback.callback = local_buffer_write_error_callback;
    5058         600 :                 errcallback.arg = bufHdr;
    5059         600 :                 errcallback.previous = error_context_stack;
    5060         600 :                 error_context_stack = &errcallback;
    5061             : 
    5062             :                 /* Make sure we can handle the pin */
    5063         600 :                 ReservePrivateRefCountEntry();
    5064         600 :                 ResourceOwnerEnlarge(CurrentResourceOwner);
    5065             : 
    5066             :                 /*
    5067             :                  * Pin/unpin mostly to make valgrind work, but it also seems
    5068             :                  * like the right thing to do.
    5069             :                  */
    5070         600 :                 PinLocalBuffer(bufHdr, false);
    5071             : 
    5072             : 
    5073         600 :                 FlushLocalBuffer(bufHdr, srel);
    5074             : 
    5075         600 :                 UnpinLocalBuffer(BufferDescriptorGetBuffer(bufHdr));
    5076             : 
    5077             :                 /* Pop the error context stack */
    5078         600 :                 error_context_stack = errcallback.previous;
    5079             :             }
    5080             :         }
    5081             : 
    5082          18 :         return;
    5083             :     }
    5084             : 
    5085     3024386 :     for (i = 0; i < NBuffers; i++)
    5086             :     {
    5087             :         uint32      buf_state;
    5088             : 
    5089     3024128 :         bufHdr = GetBufferDescriptor(i);
    5090             : 
    5091             :         /*
    5092             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    5093             :          * saves some cycles.
    5094             :          */
    5095     3024128 :         if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
    5096     3023708 :             continue;
    5097             : 
    5098             :         /* Make sure we can handle the pin */
    5099         420 :         ReservePrivateRefCountEntry();
    5100         420 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    5101             : 
    5102         420 :         buf_state = LockBufHdr(bufHdr);
    5103         420 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
    5104         420 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    5105             :         {
    5106         340 :             PinBuffer_Locked(bufHdr);
    5107         340 :             FlushUnlockedBuffer(bufHdr, srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    5108         340 :             UnpinBuffer(bufHdr);
    5109             :         }
    5110             :         else
    5111          80 :             UnlockBufHdr(bufHdr);
    5112             :     }
    5113             : }
    5114             : 
    5115             : /* ---------------------------------------------------------------------
    5116             :  *      FlushRelationsAllBuffers
    5117             :  *
    5118             :  *      This function flushes out of the buffer pool all the pages of all
    5119             :  *      forks of the specified smgr relations.  It's equivalent to calling
    5120             :  *      FlushRelationBuffers once per relation.  The relations are assumed not
    5121             :  *      to use local buffers.
    5122             :  * --------------------------------------------------------------------
    5123             :  */
    5124             : void
    5125           8 : FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
    5126             : {
    5127             :     int         i;
    5128             :     SMgrSortArray *srels;
    5129             :     bool        use_bsearch;
    5130             : 
    5131           8 :     if (nrels == 0)
    5132           0 :         return;
    5133             : 
    5134             :     /* fill-in array for qsort */
    5135           8 :     srels = palloc_array(SMgrSortArray, nrels);
    5136             : 
    5137          16 :     for (i = 0; i < nrels; i++)
    5138             :     {
    5139             :         Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
    5140             : 
    5141           8 :         srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
    5142           8 :         srels[i].srel = smgrs[i];
    5143             :     }
    5144             : 
    5145             :     /*
    5146             :      * Save the bsearch overhead for low number of relations to sync. See
    5147             :      * DropRelationsAllBuffers for details.
    5148             :      */
    5149           8 :     use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
    5150             : 
    5151             :     /* sort the list of SMgrRelations if necessary */
    5152           8 :     if (use_bsearch)
    5153           0 :         qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
    5154             : 
    5155      131080 :     for (i = 0; i < NBuffers; i++)
    5156             :     {
    5157      131072 :         SMgrSortArray *srelent = NULL;
    5158      131072 :         BufferDesc *bufHdr = GetBufferDescriptor(i);
    5159             :         uint32      buf_state;
    5160             : 
    5161             :         /*
    5162             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    5163             :          * saves some cycles.
    5164             :          */
    5165             : 
    5166      131072 :         if (!use_bsearch)
    5167             :         {
    5168             :             int         j;
    5169             : 
    5170      257614 :             for (j = 0; j < nrels; j++)
    5171             :             {
    5172      131072 :                 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
    5173             :                 {
    5174        4530 :                     srelent = &srels[j];
    5175        4530 :                     break;
    5176             :                 }
    5177             :             }
    5178             :         }
    5179             :         else
    5180             :         {
    5181             :             RelFileLocator rlocator;
    5182             : 
    5183           0 :             rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
    5184           0 :             srelent = bsearch(&rlocator,
    5185             :                               srels, nrels, sizeof(SMgrSortArray),
    5186             :                               rlocator_comparator);
    5187             :         }
    5188             : 
    5189             :         /* buffer doesn't belong to any of the given relfilelocators; skip it */
    5190      131072 :         if (srelent == NULL)
    5191      126542 :             continue;
    5192             : 
    5193             :         /* Make sure we can handle the pin */
    5194        4530 :         ReservePrivateRefCountEntry();
    5195        4530 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    5196             : 
    5197        4530 :         buf_state = LockBufHdr(bufHdr);
    5198        4530 :         if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
    5199        4530 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    5200             :         {
    5201        4462 :             PinBuffer_Locked(bufHdr);
    5202        4462 :             FlushUnlockedBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    5203        4462 :             UnpinBuffer(bufHdr);
    5204             :         }
    5205             :         else
    5206          68 :             UnlockBufHdr(bufHdr);
    5207             :     }
    5208             : 
    5209           8 :     pfree(srels);
    5210             : }
    5211             : 
    5212             : /* ---------------------------------------------------------------------
    5213             :  *      RelationCopyStorageUsingBuffer
    5214             :  *
    5215             :  *      Copy fork's data using bufmgr.  Same as RelationCopyStorage but instead
    5216             :  *      of using smgrread and smgrextend this will copy using bufmgr APIs.
    5217             :  *
    5218             :  *      Refer comments atop CreateAndCopyRelationData() for details about
    5219             :  *      'permanent' parameter.
    5220             :  * --------------------------------------------------------------------
    5221             :  */
    5222             : static void
    5223      149644 : RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
    5224             :                                RelFileLocator dstlocator,
    5225             :                                ForkNumber forkNum, bool permanent)
    5226             : {
    5227             :     Buffer      srcBuf;
    5228             :     Buffer      dstBuf;
    5229             :     Page        srcPage;
    5230             :     Page        dstPage;
    5231             :     bool        use_wal;
    5232             :     BlockNumber nblocks;
    5233             :     BlockNumber blkno;
    5234             :     PGIOAlignedBlock buf;
    5235             :     BufferAccessStrategy bstrategy_src;
    5236             :     BufferAccessStrategy bstrategy_dst;
    5237             :     BlockRangeReadStreamPrivate p;
    5238             :     ReadStream *src_stream;
    5239             :     SMgrRelation src_smgr;
    5240             : 
    5241             :     /*
    5242             :      * In general, we want to write WAL whenever wal_level > 'minimal', but we
    5243             :      * can skip it when copying any fork of an unlogged relation other than
    5244             :      * the init fork.
    5245             :      */
    5246      149644 :     use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
    5247             : 
    5248             :     /* Get number of blocks in the source relation. */
    5249      149644 :     nblocks = smgrnblocks(smgropen(srclocator, INVALID_PROC_NUMBER),
    5250             :                           forkNum);
    5251             : 
    5252             :     /* Nothing to copy; just return. */
    5253      149644 :     if (nblocks == 0)
    5254       26102 :         return;
    5255             : 
    5256             :     /*
    5257             :      * Bulk extend the destination relation of the same size as the source
    5258             :      * relation before starting to copy block by block.
    5259             :      */
    5260      123542 :     memset(buf.data, 0, BLCKSZ);
    5261      123542 :     smgrextend(smgropen(dstlocator, INVALID_PROC_NUMBER), forkNum, nblocks - 1,
    5262             :                buf.data, true);
    5263             : 
    5264             :     /* This is a bulk operation, so use buffer access strategies. */
    5265      123542 :     bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
    5266      123542 :     bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
    5267             : 
    5268             :     /* Initialize streaming read */
    5269      123542 :     p.current_blocknum = 0;
    5270      123542 :     p.last_exclusive = nblocks;
    5271      123542 :     src_smgr = smgropen(srclocator, INVALID_PROC_NUMBER);
    5272             : 
    5273             :     /*
    5274             :      * It is safe to use batchmode as block_range_read_stream_cb takes no
    5275             :      * locks.
    5276             :      */
    5277      123542 :     src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL |
    5278             :                                                  READ_STREAM_USE_BATCHING,
    5279             :                                                  bstrategy_src,
    5280             :                                                  src_smgr,
    5281             :                                                  permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
    5282             :                                                  forkNum,
    5283             :                                                  block_range_read_stream_cb,
    5284             :                                                  &p,
    5285             :                                                  0);
    5286             : 
    5287             :     /* Iterate over each block of the source relation file. */
    5288      595650 :     for (blkno = 0; blkno < nblocks; blkno++)
    5289             :     {
    5290      472112 :         CHECK_FOR_INTERRUPTS();
    5291             : 
    5292             :         /* Read block from source relation. */
    5293      472112 :         srcBuf = read_stream_next_buffer(src_stream, NULL);
    5294      472108 :         LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
    5295      472108 :         srcPage = BufferGetPage(srcBuf);
    5296             : 
    5297      472108 :         dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum,
    5298             :                                            BufferGetBlockNumber(srcBuf),
    5299             :                                            RBM_ZERO_AND_LOCK, bstrategy_dst,
    5300             :                                            permanent);
    5301      472108 :         dstPage = BufferGetPage(dstBuf);
    5302             : 
    5303      472108 :         START_CRIT_SECTION();
    5304             : 
    5305             :         /* Copy page data from the source to the destination. */
    5306      472108 :         memcpy(dstPage, srcPage, BLCKSZ);
    5307      472108 :         MarkBufferDirty(dstBuf);
    5308             : 
    5309             :         /* WAL-log the copied page. */
    5310      472108 :         if (use_wal)
    5311      272784 :             log_newpage_buffer(dstBuf, true);
    5312             : 
    5313      472108 :         END_CRIT_SECTION();
    5314             : 
    5315      472108 :         UnlockReleaseBuffer(dstBuf);
    5316      472108 :         UnlockReleaseBuffer(srcBuf);
    5317             :     }
    5318             :     Assert(read_stream_next_buffer(src_stream, NULL) == InvalidBuffer);
    5319      123538 :     read_stream_end(src_stream);
    5320             : 
    5321      123538 :     FreeAccessStrategy(bstrategy_src);
    5322      123538 :     FreeAccessStrategy(bstrategy_dst);
    5323             : }
    5324             : 
    5325             : /* ---------------------------------------------------------------------
    5326             :  *      CreateAndCopyRelationData
    5327             :  *
    5328             :  *      Create destination relation storage and copy all forks from the
    5329             :  *      source relation to the destination.
    5330             :  *
    5331             :  *      Pass permanent as true for permanent relations and false for
    5332             :  *      unlogged relations.  Currently this API is not supported for
    5333             :  *      temporary relations.
    5334             :  * --------------------------------------------------------------------
    5335             :  */
    5336             : void
    5337      112488 : CreateAndCopyRelationData(RelFileLocator src_rlocator,
    5338             :                           RelFileLocator dst_rlocator, bool permanent)
    5339             : {
    5340             :     char        relpersistence;
    5341             :     SMgrRelation src_rel;
    5342             :     SMgrRelation dst_rel;
    5343             : 
    5344             :     /* Set the relpersistence. */
    5345      112488 :     relpersistence = permanent ?
    5346             :         RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
    5347             : 
    5348      112488 :     src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
    5349      112488 :     dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
    5350             : 
    5351             :     /*
    5352             :      * Create and copy all forks of the relation.  During create database we
    5353             :      * have a separate cleanup mechanism which deletes complete database
    5354             :      * directory.  Therefore, each individual relation doesn't need to be
    5355             :      * registered for cleanup.
    5356             :      */
    5357      112488 :     RelationCreateStorage(dst_rlocator, relpersistence, false);
    5358             : 
    5359             :     /* copy main fork. */
    5360      112488 :     RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
    5361             :                                    permanent);
    5362             : 
    5363             :     /* copy those extra forks that exist */
    5364      112484 :     for (ForkNumber forkNum = MAIN_FORKNUM + 1;
    5365      449936 :          forkNum <= MAX_FORKNUM; forkNum++)
    5366             :     {
    5367      337452 :         if (smgrexists(src_rel, forkNum))
    5368             :         {
    5369       37156 :             smgrcreate(dst_rel, forkNum, false);
    5370             : 
    5371             :             /*
    5372             :              * WAL log creation if the relation is persistent, or this is the
    5373             :              * init fork of an unlogged relation.
    5374             :              */
    5375       37156 :             if (permanent || forkNum == INIT_FORKNUM)
    5376       37156 :                 log_smgrcreate(&dst_rlocator, forkNum);
    5377             : 
    5378             :             /* Copy a fork's data, block by block. */
    5379       37156 :             RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
    5380             :                                            permanent);
    5381             :         }
    5382             :     }
    5383      112484 : }
    5384             : 
    5385             : /* ---------------------------------------------------------------------
    5386             :  *      FlushDatabaseBuffers
    5387             :  *
    5388             :  *      This function writes all dirty pages of a database out to disk
    5389             :  *      (or more accurately, out to kernel disk buffers), ensuring that the
    5390             :  *      kernel has an up-to-date view of the database.
    5391             :  *
    5392             :  *      Generally, the caller should be holding an appropriate lock to ensure
    5393             :  *      no other backend is active in the target database; otherwise more
    5394             :  *      pages could get dirtied.
    5395             :  *
    5396             :  *      Note we don't worry about flushing any pages of temporary relations.
    5397             :  *      It's assumed these wouldn't be interesting.
    5398             :  * --------------------------------------------------------------------
    5399             :  */
    5400             : void
    5401          10 : FlushDatabaseBuffers(Oid dbid)
    5402             : {
    5403             :     int         i;
    5404             :     BufferDesc *bufHdr;
    5405             : 
    5406        1290 :     for (i = 0; i < NBuffers; i++)
    5407             :     {
    5408             :         uint32      buf_state;
    5409             : 
    5410        1280 :         bufHdr = GetBufferDescriptor(i);
    5411             : 
    5412             :         /*
    5413             :          * As in DropRelationBuffers, an unlocked precheck should be safe and
    5414             :          * saves some cycles.
    5415             :          */
    5416        1280 :         if (bufHdr->tag.dbOid != dbid)
    5417         892 :             continue;
    5418             : 
    5419             :         /* Make sure we can handle the pin */
    5420         388 :         ReservePrivateRefCountEntry();
    5421         388 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    5422             : 
    5423         388 :         buf_state = LockBufHdr(bufHdr);
    5424         388 :         if (bufHdr->tag.dbOid == dbid &&
    5425         388 :             (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
    5426             :         {
    5427         154 :             PinBuffer_Locked(bufHdr);
    5428         154 :             FlushUnlockedBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    5429         154 :             UnpinBuffer(bufHdr);
    5430             :         }
    5431             :         else
    5432         234 :             UnlockBufHdr(bufHdr);
    5433             :     }
    5434          10 : }
    5435             : 
    5436             : /*
    5437             :  * Flush a previously, shared or exclusively, locked and pinned buffer to the
    5438             :  * OS.
    5439             :  */
    5440             : void
    5441         158 : FlushOneBuffer(Buffer buffer)
    5442             : {
    5443             :     BufferDesc *bufHdr;
    5444             : 
    5445             :     /* currently not needed, but no fundamental reason not to support */
    5446             :     Assert(!BufferIsLocal(buffer));
    5447             : 
    5448             :     Assert(BufferIsPinned(buffer));
    5449             : 
    5450         158 :     bufHdr = GetBufferDescriptor(buffer - 1);
    5451             : 
    5452             :     Assert(BufferIsLockedByMe(buffer));
    5453             : 
    5454         158 :     FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    5455         158 : }
    5456             : 
    5457             : /*
    5458             :  * ReleaseBuffer -- release the pin on a buffer
    5459             :  */
    5460             : void
    5461   126037612 : ReleaseBuffer(Buffer buffer)
    5462             : {
    5463   126037612 :     if (!BufferIsValid(buffer))
    5464           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    5465             : 
    5466   126037612 :     if (BufferIsLocal(buffer))
    5467     3220550 :         UnpinLocalBuffer(buffer);
    5468             :     else
    5469   122817062 :         UnpinBuffer(GetBufferDescriptor(buffer - 1));
    5470   126037612 : }
    5471             : 
    5472             : /*
    5473             :  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
    5474             :  *
    5475             :  * This is just a shorthand for a common combination.
    5476             :  */
    5477             : void
    5478    37683148 : UnlockReleaseBuffer(Buffer buffer)
    5479             : {
    5480    37683148 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5481    37683148 :     ReleaseBuffer(buffer);
    5482    37683148 : }
    5483             : 
    5484             : /*
    5485             :  * IncrBufferRefCount
    5486             :  *      Increment the pin count on a buffer that we have *already* pinned
    5487             :  *      at least once.
    5488             :  *
    5489             :  *      This function cannot be used on a buffer we do not have pinned,
    5490             :  *      because it doesn't change the shared buffer state.
    5491             :  */
    5492             : void
    5493    23505764 : IncrBufferRefCount(Buffer buffer)
    5494             : {
    5495             :     Assert(BufferIsPinned(buffer));
    5496    23505764 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    5497    23505764 :     if (BufferIsLocal(buffer))
    5498      709390 :         LocalRefCount[-buffer - 1]++;
    5499             :     else
    5500             :     {
    5501             :         PrivateRefCountEntry *ref;
    5502             : 
    5503    22796374 :         ref = GetPrivateRefCountEntry(buffer, true);
    5504             :         Assert(ref != NULL);
    5505    22796374 :         ref->data.refcount++;
    5506             :     }
    5507    23505764 :     ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
    5508    23505764 : }
    5509             : 
    5510             : /*
    5511             :  * MarkBufferDirtyHint
    5512             :  *
    5513             :  *  Mark a buffer dirty for non-critical changes.
    5514             :  *
    5515             :  * This is essentially the same as MarkBufferDirty, except:
    5516             :  *
    5517             :  * 1. The caller does not write WAL; so if checksums are enabled, we may need
    5518             :  *    to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
    5519             :  * 2. The caller might have only share-lock instead of exclusive-lock on the
    5520             :  *    buffer's content lock.
    5521             :  * 3. This function does not guarantee that the buffer is always marked dirty
    5522             :  *    (due to a race condition), so it cannot be used for important changes.
    5523             :  */
    5524             : void
    5525    20278326 : MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
    5526             : {
    5527             :     BufferDesc *bufHdr;
    5528    20278326 :     Page        page = BufferGetPage(buffer);
    5529             : 
    5530    20278326 :     if (!BufferIsValid(buffer))
    5531           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    5532             : 
    5533    20278326 :     if (BufferIsLocal(buffer))
    5534             :     {
    5535     1270308 :         MarkLocalBufferDirty(buffer);
    5536     1270308 :         return;
    5537             :     }
    5538             : 
    5539    19008018 :     bufHdr = GetBufferDescriptor(buffer - 1);
    5540             : 
    5541             :     Assert(GetPrivateRefCount(buffer) > 0);
    5542             :     /* here, either share or exclusive lock is OK */
    5543             :     Assert(BufferIsLockedByMe(buffer));
    5544             : 
    5545             :     /*
    5546             :      * This routine might get called many times on the same page, if we are
    5547             :      * making the first scan after commit of an xact that added/deleted many
    5548             :      * tuples. So, be as quick as we can if the buffer is already dirty.  We
    5549             :      * do this by not acquiring spinlock if it looks like the status bits are
    5550             :      * already set.  Since we make this test unlocked, there's a chance we
    5551             :      * might fail to notice that the flags have just been cleared, and failed
    5552             :      * to reset them, due to memory-ordering issues.  But since this function
    5553             :      * is only intended to be used in cases where failing to write out the
    5554             :      * data would be harmless anyway, it doesn't really matter.
    5555             :      */
    5556    19008018 :     if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
    5557             :         (BM_DIRTY | BM_JUST_DIRTIED))
    5558             :     {
    5559     1859196 :         XLogRecPtr  lsn = InvalidXLogRecPtr;
    5560     1859196 :         bool        dirtied = false;
    5561     1859196 :         bool        delayChkptFlags = false;
    5562             :         uint32      buf_state;
    5563             : 
    5564             :         /*
    5565             :          * If we need to protect hint bit updates from torn writes, WAL-log a
    5566             :          * full page image of the page. This full page image is only necessary
    5567             :          * if the hint bit update is the first change to the page since the
    5568             :          * last checkpoint.
    5569             :          *
    5570             :          * We don't check full_page_writes here because that logic is included
    5571             :          * when we call XLogInsert() since the value changes dynamically.
    5572             :          */
    5573     3716190 :         if (XLogHintBitIsNeeded() &&
    5574     1856994 :             (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
    5575             :         {
    5576             :             /*
    5577             :              * If we must not write WAL, due to a relfilelocator-specific
    5578             :              * condition or being in recovery, don't dirty the page.  We can
    5579             :              * set the hint, just not dirty the page as a result so the hint
    5580             :              * is lost when we evict the page or shutdown.
    5581             :              *
    5582             :              * See src/backend/storage/page/README for longer discussion.
    5583             :              */
    5584     1980336 :             if (RecoveryInProgress() ||
    5585      123406 :                 RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
    5586     1736020 :                 return;
    5587             : 
    5588             :             /*
    5589             :              * If the block is already dirty because we either made a change
    5590             :              * or set a hint already, then we don't need to write a full page
    5591             :              * image.  Note that aggressive cleaning of blocks dirtied by hint
    5592             :              * bit setting would increase the call rate. Bulk setting of hint
    5593             :              * bits would reduce the call rate...
    5594             :              *
    5595             :              * We must issue the WAL record before we mark the buffer dirty.
    5596             :              * Otherwise we might write the page before we write the WAL. That
    5597             :              * causes a race condition, since a checkpoint might occur between
    5598             :              * writing the WAL record and marking the buffer dirty. We solve
    5599             :              * that with a kluge, but one that is already in use during
    5600             :              * transaction commit to prevent race conditions. Basically, we
    5601             :              * simply prevent the checkpoint WAL record from being written
    5602             :              * until we have marked the buffer dirty. We don't start the
    5603             :              * checkpoint flush until we have marked dirty, so our checkpoint
    5604             :              * must flush the change to disk successfully or the checkpoint
    5605             :              * never gets written, so crash recovery will fix.
    5606             :              *
    5607             :              * It's possible we may enter here without an xid, so it is
    5608             :              * essential that CreateCheckPoint waits for virtual transactions
    5609             :              * rather than full transactionids.
    5610             :              */
    5611             :             Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
    5612      120910 :             MyProc->delayChkptFlags |= DELAY_CHKPT_START;
    5613      120910 :             delayChkptFlags = true;
    5614      120910 :             lsn = XLogSaveBufferForHint(buffer, buffer_std);
    5615             :         }
    5616             : 
    5617      123176 :         buf_state = LockBufHdr(bufHdr);
    5618             : 
    5619             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    5620             : 
    5621      123176 :         if (!(buf_state & BM_DIRTY))
    5622             :         {
    5623      123068 :             dirtied = true;     /* Means "will be dirtied by this action" */
    5624             : 
    5625             :             /*
    5626             :              * Set the page LSN if we wrote a backup block. We aren't supposed
    5627             :              * to set this when only holding a share lock but as long as we
    5628             :              * serialise it somehow we're OK. We choose to set LSN while
    5629             :              * holding the buffer header lock, which causes any reader of an
    5630             :              * LSN who holds only a share lock to also obtain a buffer header
    5631             :              * lock before using PageGetLSN(), which is enforced in
    5632             :              * BufferGetLSNAtomic().
    5633             :              *
    5634             :              * If checksums are enabled, you might think we should reset the
    5635             :              * checksum here. That will happen when the page is written
    5636             :              * sometime later in this checkpoint cycle.
    5637             :              */
    5638      123068 :             if (XLogRecPtrIsValid(lsn))
    5639       62376 :                 PageSetLSN(page, lsn);
    5640             :         }
    5641             : 
    5642      123176 :         UnlockBufHdrExt(bufHdr, buf_state,
    5643             :                         BM_DIRTY | BM_JUST_DIRTIED,
    5644             :                         0, 0);
    5645             : 
    5646      123176 :         if (delayChkptFlags)
    5647      120910 :             MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
    5648             : 
    5649      123176 :         if (dirtied)
    5650             :         {
    5651      123068 :             pgBufferUsage.shared_blks_dirtied++;
    5652      123068 :             if (VacuumCostActive)
    5653        2386 :                 VacuumCostBalance += VacuumCostPageDirty;
    5654             :         }
    5655             :     }
    5656             : }
    5657             : 
    5658             : /*
    5659             :  * Release buffer content locks for shared buffers.
    5660             :  *
    5661             :  * Used to clean up after errors.
    5662             :  *
    5663             :  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
    5664             :  * of releasing buffer content locks per se; the only thing we need to deal
    5665             :  * with here is clearing any PIN_COUNT request that was in progress.
    5666             :  */
    5667             : void
    5668      106658 : UnlockBuffers(void)
    5669             : {
    5670      106658 :     BufferDesc *buf = PinCountWaitBuf;
    5671             : 
    5672      106658 :     if (buf)
    5673             :     {
    5674             :         uint32      buf_state;
    5675           0 :         uint32      unset_bits = 0;
    5676             : 
    5677           0 :         buf_state = LockBufHdr(buf);
    5678             : 
    5679             :         /*
    5680             :          * Don't complain if flag bit not set; it could have been reset but we
    5681             :          * got a cancel/die interrupt before getting the signal.
    5682             :          */
    5683           0 :         if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
    5684           0 :             buf->wait_backend_pgprocno == MyProcNumber)
    5685           0 :             unset_bits = BM_PIN_COUNT_WAITER;
    5686             : 
    5687           0 :         UnlockBufHdrExt(buf, buf_state,
    5688             :                         0, unset_bits,
    5689             :                         0);
    5690             : 
    5691           0 :         PinCountWaitBuf = NULL;
    5692             :     }
    5693      106658 : }
    5694             : 
    5695             : /*
    5696             :  * Acquire or release the content_lock for the buffer.
    5697             :  */
    5698             : void
    5699   347028814 : LockBuffer(Buffer buffer, BufferLockMode mode)
    5700             : {
    5701             :     BufferDesc *buf;
    5702             : 
    5703             :     Assert(BufferIsPinned(buffer));
    5704   347028814 :     if (BufferIsLocal(buffer))
    5705    19800546 :         return;                 /* local buffers need no lock */
    5706             : 
    5707   327228268 :     buf = GetBufferDescriptor(buffer - 1);
    5708             : 
    5709   327228268 :     if (mode == BUFFER_LOCK_UNLOCK)
    5710   164957868 :         LWLockRelease(BufferDescriptorGetContentLock(buf));
    5711   162270400 :     else if (mode == BUFFER_LOCK_SHARE)
    5712   114386624 :         LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
    5713    47883776 :     else if (mode == BUFFER_LOCK_EXCLUSIVE)
    5714    47883776 :         LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
    5715             :     else
    5716           0 :         elog(ERROR, "unrecognized buffer lock mode: %d", mode);
    5717             : }
    5718             : 
    5719             : /*
    5720             :  * Acquire the content_lock for the buffer, but only if we don't have to wait.
    5721             :  *
    5722             :  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
    5723             :  */
    5724             : bool
    5725     2817946 : ConditionalLockBuffer(Buffer buffer)
    5726             : {
    5727             :     BufferDesc *buf;
    5728             : 
    5729             :     Assert(BufferIsPinned(buffer));
    5730     2817946 :     if (BufferIsLocal(buffer))
    5731      129410 :         return true;            /* act as though we got it */
    5732             : 
    5733     2688536 :     buf = GetBufferDescriptor(buffer - 1);
    5734             : 
    5735     2688536 :     return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
    5736             :                                     LW_EXCLUSIVE);
    5737             : }
    5738             : 
    5739             : /*
    5740             :  * Verify that this backend is pinning the buffer exactly once.
    5741             :  *
    5742             :  * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
    5743             :  * holds a pin on the buffer.  We do not care whether some other backend does.
    5744             :  */
    5745             : void
    5746     4813736 : CheckBufferIsPinnedOnce(Buffer buffer)
    5747             : {
    5748     4813736 :     if (BufferIsLocal(buffer))
    5749             :     {
    5750        1578 :         if (LocalRefCount[-buffer - 1] != 1)
    5751           0 :             elog(ERROR, "incorrect local pin count: %d",
    5752             :                  LocalRefCount[-buffer - 1]);
    5753             :     }
    5754             :     else
    5755             :     {
    5756     4812158 :         if (GetPrivateRefCount(buffer) != 1)
    5757           0 :             elog(ERROR, "incorrect local pin count: %d",
    5758             :                  GetPrivateRefCount(buffer));
    5759             :     }
    5760     4813736 : }
    5761             : 
    5762             : /*
    5763             :  * LockBufferForCleanup - lock a buffer in preparation for deleting items
    5764             :  *
    5765             :  * Items may be deleted from a disk page only when the caller (a) holds an
    5766             :  * exclusive lock on the buffer and (b) has observed that no other backend
    5767             :  * holds a pin on the buffer.  If there is a pin, then the other backend
    5768             :  * might have a pointer into the buffer (for example, a heapscan reference
    5769             :  * to an item --- see README for more details).  It's OK if a pin is added
    5770             :  * after the cleanup starts, however; the newly-arrived backend will be
    5771             :  * unable to look at the page until we release the exclusive lock.
    5772             :  *
    5773             :  * To implement this protocol, a would-be deleter must pin the buffer and
    5774             :  * then call LockBufferForCleanup().  LockBufferForCleanup() is similar to
    5775             :  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
    5776             :  * it has successfully observed pin count = 1.
    5777             :  */
    5778             : void
    5779       41308 : LockBufferForCleanup(Buffer buffer)
    5780             : {
    5781             :     BufferDesc *bufHdr;
    5782       41308 :     TimestampTz waitStart = 0;
    5783       41308 :     bool        waiting = false;
    5784       41308 :     bool        logged_recovery_conflict = false;
    5785             : 
    5786             :     Assert(BufferIsPinned(buffer));
    5787             :     Assert(PinCountWaitBuf == NULL);
    5788             : 
    5789       41308 :     CheckBufferIsPinnedOnce(buffer);
    5790             : 
    5791             :     /*
    5792             :      * We do not yet need to be worried about in-progress AIOs holding a pin,
    5793             :      * as we, so far, only support doing reads via AIO and this function can
    5794             :      * only be called once the buffer is valid (i.e. no read can be in
    5795             :      * flight).
    5796             :      */
    5797             : 
    5798             :     /* Nobody else to wait for */
    5799       41308 :     if (BufferIsLocal(buffer))
    5800          32 :         return;
    5801             : 
    5802       41276 :     bufHdr = GetBufferDescriptor(buffer - 1);
    5803             : 
    5804             :     for (;;)
    5805         178 :     {
    5806             :         uint32      buf_state;
    5807       41454 :         uint32      unset_bits = 0;
    5808             : 
    5809             :         /* Try to acquire lock */
    5810       41454 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5811       41454 :         buf_state = LockBufHdr(bufHdr);
    5812             : 
    5813             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    5814       41454 :         if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    5815             :         {
    5816             :             /* Successfully acquired exclusive lock with pincount 1 */
    5817       41276 :             UnlockBufHdr(bufHdr);
    5818             : 
    5819             :             /*
    5820             :              * Emit the log message if recovery conflict on buffer pin was
    5821             :              * resolved but the startup process waited longer than
    5822             :              * deadlock_timeout for it.
    5823             :              */
    5824       41276 :             if (logged_recovery_conflict)
    5825           4 :                 LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
    5826             :                                     waitStart, GetCurrentTimestamp(),
    5827             :                                     NULL, false);
    5828             : 
    5829       41276 :             if (waiting)
    5830             :             {
    5831             :                 /* reset ps display to remove the suffix if we added one */
    5832           4 :                 set_ps_display_remove_suffix();
    5833           4 :                 waiting = false;
    5834             :             }
    5835       41276 :             return;
    5836             :         }
    5837             :         /* Failed, so mark myself as waiting for pincount 1 */
    5838         178 :         if (buf_state & BM_PIN_COUNT_WAITER)
    5839             :         {
    5840           0 :             UnlockBufHdr(bufHdr);
    5841           0 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5842           0 :             elog(ERROR, "multiple backends attempting to wait for pincount 1");
    5843             :         }
    5844         178 :         bufHdr->wait_backend_pgprocno = MyProcNumber;
    5845         178 :         PinCountWaitBuf = bufHdr;
    5846         178 :         UnlockBufHdrExt(bufHdr, buf_state,
    5847             :                         BM_PIN_COUNT_WAITER, 0,
    5848             :                         0);
    5849         178 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5850             : 
    5851             :         /* Wait to be signaled by UnpinBuffer() */
    5852         178 :         if (InHotStandby)
    5853             :         {
    5854          18 :             if (!waiting)
    5855             :             {
    5856             :                 /* adjust the process title to indicate that it's waiting */
    5857           4 :                 set_ps_display_suffix("waiting");
    5858           4 :                 waiting = true;
    5859             :             }
    5860             : 
    5861             :             /*
    5862             :              * Emit the log message if the startup process is waiting longer
    5863             :              * than deadlock_timeout for recovery conflict on buffer pin.
    5864             :              *
    5865             :              * Skip this if first time through because the startup process has
    5866             :              * not started waiting yet in this case. So, the wait start
    5867             :              * timestamp is set after this logic.
    5868             :              */
    5869          18 :             if (waitStart != 0 && !logged_recovery_conflict)
    5870             :             {
    5871           6 :                 TimestampTz now = GetCurrentTimestamp();
    5872             : 
    5873           6 :                 if (TimestampDifferenceExceeds(waitStart, now,
    5874             :                                                DeadlockTimeout))
    5875             :                 {
    5876           4 :                     LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
    5877             :                                         waitStart, now, NULL, true);
    5878           4 :                     logged_recovery_conflict = true;
    5879             :                 }
    5880             :             }
    5881             : 
    5882             :             /*
    5883             :              * Set the wait start timestamp if logging is enabled and first
    5884             :              * time through.
    5885             :              */
    5886          18 :             if (log_recovery_conflict_waits && waitStart == 0)
    5887           4 :                 waitStart = GetCurrentTimestamp();
    5888             : 
    5889             :             /* Publish the bufid that Startup process waits on */
    5890          18 :             SetStartupBufferPinWaitBufId(buffer - 1);
    5891             :             /* Set alarm and then wait to be signaled by UnpinBuffer() */
    5892          18 :             ResolveRecoveryConflictWithBufferPin();
    5893             :             /* Reset the published bufid */
    5894          18 :             SetStartupBufferPinWaitBufId(-1);
    5895             :         }
    5896             :         else
    5897         160 :             ProcWaitForSignal(WAIT_EVENT_BUFFER_CLEANUP);
    5898             : 
    5899             :         /*
    5900             :          * Remove flag marking us as waiter. Normally this will not be set
    5901             :          * anymore, but ProcWaitForSignal() can return for other signals as
    5902             :          * well.  We take care to only reset the flag if we're the waiter, as
    5903             :          * theoretically another backend could have started waiting. That's
    5904             :          * impossible with the current usages due to table level locking, but
    5905             :          * better be safe.
    5906             :          */
    5907         178 :         buf_state = LockBufHdr(bufHdr);
    5908         178 :         if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
    5909          16 :             bufHdr->wait_backend_pgprocno == MyProcNumber)
    5910          16 :             unset_bits |= BM_PIN_COUNT_WAITER;
    5911             : 
    5912         178 :         UnlockBufHdrExt(bufHdr, buf_state,
    5913             :                         0, unset_bits,
    5914             :                         0);
    5915             : 
    5916         178 :         PinCountWaitBuf = NULL;
    5917             :         /* Loop back and try again */
    5918             :     }
    5919             : }
    5920             : 
    5921             : /*
    5922             :  * Check called from ProcessRecoveryConflictInterrupts() when Startup process
    5923             :  * requests cancellation of all pin holders that are blocking it.
    5924             :  */
    5925             : bool
    5926           6 : HoldingBufferPinThatDelaysRecovery(void)
    5927             : {
    5928           6 :     int         bufid = GetStartupBufferPinWaitBufId();
    5929             : 
    5930             :     /*
    5931             :      * If we get woken slowly then it's possible that the Startup process was
    5932             :      * already woken by other backends before we got here. Also possible that
    5933             :      * we get here by multiple interrupts or interrupts at inappropriate
    5934             :      * times, so make sure we do nothing if the bufid is not set.
    5935             :      */
    5936           6 :     if (bufid < 0)
    5937           2 :         return false;
    5938             : 
    5939           4 :     if (GetPrivateRefCount(bufid + 1) > 0)
    5940           4 :         return true;
    5941             : 
    5942           0 :     return false;
    5943             : }
    5944             : 
    5945             : /*
    5946             :  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
    5947             :  *
    5948             :  * We won't loop, but just check once to see if the pin count is OK.  If
    5949             :  * not, return false with no lock held.
    5950             :  */
    5951             : bool
    5952      922364 : ConditionalLockBufferForCleanup(Buffer buffer)
    5953             : {
    5954             :     BufferDesc *bufHdr;
    5955             :     uint32      buf_state,
    5956             :                 refcount;
    5957             : 
    5958             :     Assert(BufferIsValid(buffer));
    5959             : 
    5960             :     /* see AIO related comment in LockBufferForCleanup() */
    5961             : 
    5962      922364 :     if (BufferIsLocal(buffer))
    5963             :     {
    5964        1608 :         refcount = LocalRefCount[-buffer - 1];
    5965             :         /* There should be exactly one pin */
    5966             :         Assert(refcount > 0);
    5967        1608 :         if (refcount != 1)
    5968          42 :             return false;
    5969             :         /* Nobody else to wait for */
    5970        1566 :         return true;
    5971             :     }
    5972             : 
    5973             :     /* There should be exactly one local pin */
    5974      920756 :     refcount = GetPrivateRefCount(buffer);
    5975             :     Assert(refcount);
    5976      920756 :     if (refcount != 1)
    5977         512 :         return false;
    5978             : 
    5979             :     /* Try to acquire lock */
    5980      920244 :     if (!ConditionalLockBuffer(buffer))
    5981         102 :         return false;
    5982             : 
    5983      920142 :     bufHdr = GetBufferDescriptor(buffer - 1);
    5984      920142 :     buf_state = LockBufHdr(bufHdr);
    5985      920142 :     refcount = BUF_STATE_GET_REFCOUNT(buf_state);
    5986             : 
    5987             :     Assert(refcount > 0);
    5988      920142 :     if (refcount == 1)
    5989             :     {
    5990             :         /* Successfully acquired exclusive lock with pincount 1 */
    5991      919364 :         UnlockBufHdr(bufHdr);
    5992      919364 :         return true;
    5993             :     }
    5994             : 
    5995             :     /* Failed, so release the lock */
    5996         778 :     UnlockBufHdr(bufHdr);
    5997         778 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5998         778 :     return false;
    5999             : }
    6000             : 
    6001             : /*
    6002             :  * IsBufferCleanupOK - as above, but we already have the lock
    6003             :  *
    6004             :  * Check whether it's OK to perform cleanup on a buffer we've already
    6005             :  * locked.  If we observe that the pin count is 1, our exclusive lock
    6006             :  * happens to be a cleanup lock, and we can proceed with anything that
    6007             :  * would have been allowable had we sought a cleanup lock originally.
    6008             :  */
    6009             : bool
    6010        4050 : IsBufferCleanupOK(Buffer buffer)
    6011             : {
    6012             :     BufferDesc *bufHdr;
    6013             :     uint32      buf_state;
    6014             : 
    6015             :     Assert(BufferIsValid(buffer));
    6016             : 
    6017             :     /* see AIO related comment in LockBufferForCleanup() */
    6018             : 
    6019        4050 :     if (BufferIsLocal(buffer))
    6020             :     {
    6021             :         /* There should be exactly one pin */
    6022           0 :         if (LocalRefCount[-buffer - 1] != 1)
    6023           0 :             return false;
    6024             :         /* Nobody else to wait for */
    6025           0 :         return true;
    6026             :     }
    6027             : 
    6028             :     /* There should be exactly one local pin */
    6029        4050 :     if (GetPrivateRefCount(buffer) != 1)
    6030           0 :         return false;
    6031             : 
    6032        4050 :     bufHdr = GetBufferDescriptor(buffer - 1);
    6033             : 
    6034             :     /* caller must hold exclusive lock on buffer */
    6035             :     Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
    6036             : 
    6037        4050 :     buf_state = LockBufHdr(bufHdr);
    6038             : 
    6039             :     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    6040        4050 :     if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
    6041             :     {
    6042             :         /* pincount is OK. */
    6043        4050 :         UnlockBufHdr(bufHdr);
    6044        4050 :         return true;
    6045             :     }
    6046             : 
    6047           0 :     UnlockBufHdr(bufHdr);
    6048           0 :     return false;
    6049             : }
    6050             : 
    6051             : 
    6052             : /*
    6053             :  *  Functions for buffer I/O handling
    6054             :  *
    6055             :  *  Also note that these are used only for shared buffers, not local ones.
    6056             :  */
    6057             : 
    6058             : /*
    6059             :  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
    6060             :  */
    6061             : static void
    6062       17280 : WaitIO(BufferDesc *buf)
    6063             : {
    6064       17280 :     ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
    6065             : 
    6066       17280 :     ConditionVariablePrepareToSleep(cv);
    6067             :     for (;;)
    6068       17162 :     {
    6069             :         uint32      buf_state;
    6070             :         PgAioWaitRef iow;
    6071             : 
    6072             :         /*
    6073             :          * It may not be necessary to acquire the spinlock to check the flag
    6074             :          * here, but since this test is essential for correctness, we'd better
    6075             :          * play it safe.
    6076             :          */
    6077       34442 :         buf_state = LockBufHdr(buf);
    6078             : 
    6079             :         /*
    6080             :          * Copy the wait reference while holding the spinlock. This protects
    6081             :          * against a concurrent TerminateBufferIO() in another backend from
    6082             :          * clearing the wref while it's being read.
    6083             :          */
    6084       34442 :         iow = buf->io_wref;
    6085       34442 :         UnlockBufHdr(buf);
    6086             : 
    6087             :         /* no IO in progress, we don't need to wait */
    6088       34442 :         if (!(buf_state & BM_IO_IN_PROGRESS))
    6089       17280 :             break;
    6090             : 
    6091             :         /*
    6092             :          * The buffer has asynchronous IO in progress, wait for it to
    6093             :          * complete.
    6094             :          */
    6095       17162 :         if (pgaio_wref_valid(&iow))
    6096             :         {
    6097       14814 :             pgaio_wref_wait(&iow);
    6098             : 
    6099             :             /*
    6100             :              * The AIO subsystem internally uses condition variables and thus
    6101             :              * might remove this backend from the BufferDesc's CV. While that
    6102             :              * wouldn't cause a correctness issue (the first CV sleep just
    6103             :              * immediately returns if not already registered), it seems worth
    6104             :              * avoiding unnecessary loop iterations, given that we take care
    6105             :              * to do so at the start of the function.
    6106             :              */
    6107       14814 :             ConditionVariablePrepareToSleep(cv);
    6108       14814 :             continue;
    6109             :         }
    6110             : 
    6111             :         /* wait on BufferDesc->cv, e.g. for concurrent synchronous IO */
    6112        2348 :         ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
    6113             :     }
    6114       17280 :     ConditionVariableCancelSleep();
    6115       17280 : }
    6116             : 
    6117             : /*
    6118             :  * StartBufferIO: begin I/O on this buffer
    6119             :  *  (Assumptions)
    6120             :  *  My process is executing no IO on this buffer
    6121             :  *  The buffer is Pinned
    6122             :  *
    6123             :  * In some scenarios multiple backends could attempt the same I/O operation
    6124             :  * concurrently.  If someone else has already started I/O on this buffer then
    6125             :  * we will wait for completion of the IO using WaitIO().
    6126             :  *
    6127             :  * Input operations are only attempted on buffers that are not BM_VALID,
    6128             :  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
    6129             :  * so we can always tell if the work is already done.
    6130             :  *
    6131             :  * Returns true if we successfully marked the buffer as I/O busy,
    6132             :  * false if someone else already did the work.
    6133             :  *
    6134             :  * If nowait is true, then we don't wait for an I/O to be finished by another
    6135             :  * backend.  In that case, false indicates either that the I/O was already
    6136             :  * finished, or is still in progress.  This is useful for callers that want to
    6137             :  * find out if they can perform the I/O as part of a larger operation, without
    6138             :  * waiting for the answer or distinguishing the reasons why not.
    6139             :  */
    6140             : bool
    6141     5079080 : StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
    6142             : {
    6143             :     uint32      buf_state;
    6144             : 
    6145     5079080 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    6146             : 
    6147             :     for (;;)
    6148             :     {
    6149     5096360 :         buf_state = LockBufHdr(buf);
    6150             : 
    6151     5096360 :         if (!(buf_state & BM_IO_IN_PROGRESS))
    6152     5079072 :             break;
    6153       17288 :         UnlockBufHdr(buf);
    6154       17288 :         if (nowait)
    6155           8 :             return false;
    6156       17280 :         WaitIO(buf);
    6157             :     }
    6158             : 
    6159             :     /* Once we get here, there is definitely no I/O active on this buffer */
    6160             : 
    6161             :     /* Check if someone else already did the I/O */
    6162     5079072 :     if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
    6163             :     {
    6164       18046 :         UnlockBufHdr(buf);
    6165       18046 :         return false;
    6166             :     }
    6167             : 
    6168     5061026 :     UnlockBufHdrExt(buf, buf_state,
    6169             :                     BM_IO_IN_PROGRESS, 0,
    6170             :                     0);
    6171             : 
    6172     5061026 :     ResourceOwnerRememberBufferIO(CurrentResourceOwner,
    6173             :                                   BufferDescriptorGetBuffer(buf));
    6174             : 
    6175     5061026 :     return true;
    6176             : }
    6177             : 
    6178             : /*
    6179             :  * TerminateBufferIO: release a buffer we were doing I/O on
    6180             :  *  (Assumptions)
    6181             :  *  My process is executing IO for the buffer
    6182             :  *  BM_IO_IN_PROGRESS bit is set for the buffer
    6183             :  *  The buffer is Pinned
    6184             :  *
    6185             :  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
    6186             :  * buffer's BM_DIRTY flag.  This is appropriate when terminating a
    6187             :  * successful write.  The check on BM_JUST_DIRTIED is necessary to avoid
    6188             :  * marking the buffer clean if it was re-dirtied while we were writing.
    6189             :  *
    6190             :  * set_flag_bits gets ORed into the buffer's flags.  It must include
    6191             :  * BM_IO_ERROR in a failure case.  For successful completion it could
    6192             :  * be 0, or BM_VALID if we just finished reading in the page.
    6193             :  *
    6194             :  * If forget_owner is true, we release the buffer I/O from the current
    6195             :  * resource owner. (forget_owner=false is used when the resource owner itself
    6196             :  * is being released)
    6197             :  */
    6198             : void
    6199     4783034 : TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
    6200             :                   bool forget_owner, bool release_aio)
    6201             : {
    6202             :     uint32      buf_state;
    6203     4783034 :     uint32      unset_flag_bits = 0;
    6204     4783034 :     int         refcount_change = 0;
    6205             : 
    6206     4783034 :     buf_state = LockBufHdr(buf);
    6207             : 
    6208             :     Assert(buf_state & BM_IO_IN_PROGRESS);
    6209     4783034 :     unset_flag_bits |= BM_IO_IN_PROGRESS;
    6210             : 
    6211             :     /* Clear earlier errors, if this IO failed, it'll be marked again */
    6212     4783034 :     unset_flag_bits |= BM_IO_ERROR;
    6213             : 
    6214     4783034 :     if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
    6215     1139586 :         unset_flag_bits |= BM_DIRTY | BM_CHECKPOINT_NEEDED;
    6216             : 
    6217     4783034 :     if (release_aio)
    6218             :     {
    6219             :         /* release ownership by the AIO subsystem */
    6220             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
    6221     2624670 :         refcount_change = -1;
    6222     2624670 :         pgaio_wref_clear(&buf->io_wref);
    6223             :     }
    6224             : 
    6225     4783034 :     buf_state = UnlockBufHdrExt(buf, buf_state,
    6226             :                                 set_flag_bits, unset_flag_bits,
    6227             :                                 refcount_change);
    6228             : 
    6229     4783034 :     if (forget_owner)
    6230     2158322 :         ResourceOwnerForgetBufferIO(CurrentResourceOwner,
    6231             :                                     BufferDescriptorGetBuffer(buf));
    6232             : 
    6233     4783034 :     ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
    6234             : 
    6235             :     /*
    6236             :      * Support LockBufferForCleanup()
    6237             :      *
    6238             :      * We may have just released the last pin other than the waiter's. In most
    6239             :      * cases, this backend holds another pin on the buffer. But, if, for
    6240             :      * example, this backend is completing an IO issued by another backend, it
    6241             :      * may be time to wake the waiter.
    6242             :      */
    6243     4783034 :     if (release_aio && (buf_state & BM_PIN_COUNT_WAITER))
    6244           0 :         WakePinCountWaiter(buf);
    6245     4783034 : }
    6246             : 
    6247             : /*
    6248             :  * AbortBufferIO: Clean up active buffer I/O after an error.
    6249             :  *
    6250             :  *  All LWLocks we might have held have been released,
    6251             :  *  but we haven't yet released buffer pins, so the buffer is still pinned.
    6252             :  *
    6253             :  *  If I/O was in progress, we always set BM_IO_ERROR, even though it's
    6254             :  *  possible the error condition wasn't related to the I/O.
    6255             :  *
    6256             :  *  Note: this does not remove the buffer I/O from the resource owner.
    6257             :  *  That's correct when we're releasing the whole resource owner, but
    6258             :  *  beware if you use this in other contexts.
    6259             :  */
    6260             : static void
    6261          30 : AbortBufferIO(Buffer buffer)
    6262             : {
    6263          30 :     BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
    6264             :     uint32      buf_state;
    6265             : 
    6266          30 :     buf_state = LockBufHdr(buf_hdr);
    6267             :     Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
    6268             : 
    6269          30 :     if (!(buf_state & BM_VALID))
    6270             :     {
    6271             :         Assert(!(buf_state & BM_DIRTY));
    6272          30 :         UnlockBufHdr(buf_hdr);
    6273             :     }
    6274             :     else
    6275             :     {
    6276             :         Assert(buf_state & BM_DIRTY);
    6277           0 :         UnlockBufHdr(buf_hdr);
    6278             : 
    6279             :         /* Issue notice if this is not the first failure... */
    6280           0 :         if (buf_state & BM_IO_ERROR)
    6281             :         {
    6282             :             /* Buffer is pinned, so we can read tag without spinlock */
    6283           0 :             ereport(WARNING,
    6284             :                     (errcode(ERRCODE_IO_ERROR),
    6285             :                      errmsg("could not write block %u of %s",
    6286             :                             buf_hdr->tag.blockNum,
    6287             :                             relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
    6288             :                                         BufTagGetForkNum(&buf_hdr->tag)).str),
    6289             :                      errdetail("Multiple failures --- write error might be permanent.")));
    6290             :         }
    6291             :     }
    6292             : 
    6293          30 :     TerminateBufferIO(buf_hdr, false, BM_IO_ERROR, false, false);
    6294          30 : }
    6295             : 
    6296             : /*
    6297             :  * Error context callback for errors occurring during shared buffer writes.
    6298             :  */
    6299             : static void
    6300          82 : shared_buffer_write_error_callback(void *arg)
    6301             : {
    6302          82 :     BufferDesc *bufHdr = (BufferDesc *) arg;
    6303             : 
    6304             :     /* Buffer is pinned, so we can read the tag without locking the spinlock */
    6305          82 :     if (bufHdr != NULL)
    6306         164 :         errcontext("writing block %u of relation \"%s\"",
    6307             :                    bufHdr->tag.blockNum,
    6308          82 :                    relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
    6309             :                                BufTagGetForkNum(&bufHdr->tag)).str);
    6310          82 : }
    6311             : 
    6312             : /*
    6313             :  * Error context callback for errors occurring during local buffer writes.
    6314             :  */
    6315             : static void
    6316           0 : local_buffer_write_error_callback(void *arg)
    6317             : {
    6318           0 :     BufferDesc *bufHdr = (BufferDesc *) arg;
    6319             : 
    6320           0 :     if (bufHdr != NULL)
    6321           0 :         errcontext("writing block %u of relation \"%s\"",
    6322             :                    bufHdr->tag.blockNum,
    6323           0 :                    relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
    6324             :                                   MyProcNumber,
    6325             :                                   BufTagGetForkNum(&bufHdr->tag)).str);
    6326           0 : }
    6327             : 
    6328             : /*
    6329             :  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
    6330             :  */
    6331             : static int
    6332    19421204 : rlocator_comparator(const void *p1, const void *p2)
    6333             : {
    6334    19421204 :     RelFileLocator n1 = *(const RelFileLocator *) p1;
    6335    19421204 :     RelFileLocator n2 = *(const RelFileLocator *) p2;
    6336             : 
    6337    19421204 :     if (n1.relNumber < n2.relNumber)
    6338    19358192 :         return -1;
    6339       63012 :     else if (n1.relNumber > n2.relNumber)
    6340       60068 :         return 1;
    6341             : 
    6342        2944 :     if (n1.dbOid < n2.dbOid)
    6343           0 :         return -1;
    6344        2944 :     else if (n1.dbOid > n2.dbOid)
    6345           0 :         return 1;
    6346             : 
    6347        2944 :     if (n1.spcOid < n2.spcOid)
    6348           0 :         return -1;
    6349        2944 :     else if (n1.spcOid > n2.spcOid)
    6350           0 :         return 1;
    6351             :     else
    6352        2944 :         return 0;
    6353             : }
    6354             : 
    6355             : /*
    6356             :  * Lock buffer header - set BM_LOCKED in buffer state.
    6357             :  */
    6358             : uint32
    6359    64732732 : LockBufHdr(BufferDesc *desc)
    6360             : {
    6361             :     uint32      old_buf_state;
    6362             : 
    6363             :     Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
    6364             : 
    6365             :     while (true)
    6366             :     {
    6367             :         /*
    6368             :          * Always try once to acquire the lock directly, without setting up
    6369             :          * the spin-delay infrastructure. The work necessary for that shows up
    6370             :          * in profiles and is rarely necessary.
    6371             :          */
    6372    64737964 :         old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
    6373    64737964 :         if (likely(!(old_buf_state & BM_LOCKED)))
    6374    64732732 :             break;              /* got lock */
    6375             : 
    6376             :         /* and then spin without atomic operations until lock is released */
    6377             :         {
    6378             :             SpinDelayStatus delayStatus;
    6379             : 
    6380        5232 :             init_local_spin_delay(&delayStatus);
    6381             : 
    6382       14642 :             while (old_buf_state & BM_LOCKED)
    6383             :             {
    6384        9410 :                 perform_spin_delay(&delayStatus);
    6385        9410 :                 old_buf_state = pg_atomic_read_u32(&desc->state);
    6386             :             }
    6387        5232 :             finish_spin_delay(&delayStatus);
    6388             :         }
    6389             : 
    6390             :         /*
    6391             :          * Retry. The lock might obviously already be re-acquired by the time
    6392             :          * we're attempting to get it again.
    6393             :          */
    6394             :     }
    6395             : 
    6396    64732732 :     return old_buf_state | BM_LOCKED;
    6397             : }
    6398             : 
    6399             : /*
    6400             :  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
    6401             :  * state at that point.
    6402             :  *
    6403             :  * Obviously the buffer could be locked by the time the value is returned, so
    6404             :  * this is primarily useful in CAS style loops.
    6405             :  */
    6406             : pg_noinline uint32
    6407        1386 : WaitBufHdrUnlocked(BufferDesc *buf)
    6408             : {
    6409             :     SpinDelayStatus delayStatus;
    6410             :     uint32      buf_state;
    6411             : 
    6412        1386 :     init_local_spin_delay(&delayStatus);
    6413             : 
    6414        1386 :     buf_state = pg_atomic_read_u32(&buf->state);
    6415             : 
    6416       19900 :     while (buf_state & BM_LOCKED)
    6417             :     {
    6418       18514 :         perform_spin_delay(&delayStatus);
    6419       18514 :         buf_state = pg_atomic_read_u32(&buf->state);
    6420             :     }
    6421             : 
    6422        1386 :     finish_spin_delay(&delayStatus);
    6423             : 
    6424        1386 :     return buf_state;
    6425             : }
    6426             : 
    6427             : /*
    6428             :  * BufferTag comparator.
    6429             :  */
    6430             : static inline int
    6431           0 : buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
    6432             : {
    6433             :     int         ret;
    6434             :     RelFileLocator rlocatora;
    6435             :     RelFileLocator rlocatorb;
    6436             : 
    6437           0 :     rlocatora = BufTagGetRelFileLocator(ba);
    6438           0 :     rlocatorb = BufTagGetRelFileLocator(bb);
    6439             : 
    6440           0 :     ret = rlocator_comparator(&rlocatora, &rlocatorb);
    6441             : 
    6442           0 :     if (ret != 0)
    6443           0 :         return ret;
    6444             : 
    6445           0 :     if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
    6446           0 :         return -1;
    6447           0 :     if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
    6448           0 :         return 1;
    6449             : 
    6450           0 :     if (ba->blockNum < bb->blockNum)
    6451           0 :         return -1;
    6452           0 :     if (ba->blockNum > bb->blockNum)
    6453           0 :         return 1;
    6454             : 
    6455           0 :     return 0;
    6456             : }
    6457             : 
    6458             : /*
    6459             :  * Comparator determining the writeout order in a checkpoint.
    6460             :  *
    6461             :  * It is important that tablespaces are compared first, the logic balancing
    6462             :  * writes between tablespaces relies on it.
    6463             :  */
    6464             : static inline int
    6465     6084816 : ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
    6466             : {
    6467             :     /* compare tablespace */
    6468     6084816 :     if (a->tsId < b->tsId)
    6469       18594 :         return -1;
    6470     6066222 :     else if (a->tsId > b->tsId)
    6471       52930 :         return 1;
    6472             :     /* compare relation */
    6473     6013292 :     if (a->relNumber < b->relNumber)
    6474     1655716 :         return -1;
    6475     4357576 :     else if (a->relNumber > b->relNumber)
    6476     1672762 :         return 1;
    6477             :     /* compare fork */
    6478     2684814 :     else if (a->forkNum < b->forkNum)
    6479      116074 :         return -1;
    6480     2568740 :     else if (a->forkNum > b->forkNum)
    6481      115700 :         return 1;
    6482             :     /* compare block number */
    6483     2453040 :     else if (a->blockNum < b->blockNum)
    6484     1202446 :         return -1;
    6485     1250594 :     else if (a->blockNum > b->blockNum)
    6486     1177460 :         return 1;
    6487             :     /* equal page IDs are unlikely, but not impossible */
    6488       73134 :     return 0;
    6489             : }
    6490             : 
    6491             : /*
    6492             :  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
    6493             :  * progress.
    6494             :  */
    6495             : static int
    6496      492120 : ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
    6497             : {
    6498      492120 :     CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a);
    6499      492120 :     CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b);
    6500             : 
    6501             :     /* we want a min-heap, so return 1 for the a < b */
    6502      492120 :     if (sa->progress < sb->progress)
    6503      441656 :         return 1;
    6504       50464 :     else if (sa->progress == sb->progress)
    6505        1652 :         return 0;
    6506             :     else
    6507       48812 :         return -1;
    6508             : }
    6509             : 
    6510             : /*
    6511             :  * Initialize a writeback context, discarding potential previous state.
    6512             :  *
    6513             :  * *max_pending is a pointer instead of an immediate value, so the coalesce
    6514             :  * limits can easily changed by the GUC mechanism, and so calling code does
    6515             :  * not have to check the current configuration. A value of 0 means that no
    6516             :  * writeback control will be performed.
    6517             :  */
    6518             : void
    6519        5574 : WritebackContextInit(WritebackContext *context, int *max_pending)
    6520             : {
    6521             :     Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
    6522             : 
    6523        5574 :     context->max_pending = max_pending;
    6524        5574 :     context->nr_pending = 0;
    6525        5574 : }
    6526             : 
    6527             : /*
    6528             :  * Add buffer to list of pending writeback requests.
    6529             :  */
    6530             : void
    6531     1132658 : ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
    6532             :                               BufferTag *tag)
    6533             : {
    6534             :     PendingWriteback *pending;
    6535             : 
    6536             :     /*
    6537             :      * As pg_flush_data() doesn't do anything with fsync disabled, there's no
    6538             :      * point in tracking in that case.
    6539             :      */
    6540     1132658 :     if (io_direct_flags & IO_DIRECT_DATA ||
    6541     1131608 :         !enableFsync)
    6542     1132658 :         return;
    6543             : 
    6544             :     /*
    6545             :      * Add buffer to the pending writeback array, unless writeback control is
    6546             :      * disabled.
    6547             :      */
    6548           0 :     if (*wb_context->max_pending > 0)
    6549             :     {
    6550             :         Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
    6551             : 
    6552           0 :         pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
    6553             : 
    6554           0 :         pending->tag = *tag;
    6555             :     }
    6556             : 
    6557             :     /*
    6558             :      * Perform pending flushes if the writeback limit is exceeded. This
    6559             :      * includes the case where previously an item has been added, but control
    6560             :      * is now disabled.
    6561             :      */
    6562           0 :     if (wb_context->nr_pending >= *wb_context->max_pending)
    6563           0 :         IssuePendingWritebacks(wb_context, io_context);
    6564             : }
    6565             : 
    6566             : #define ST_SORT sort_pending_writebacks
    6567             : #define ST_ELEMENT_TYPE PendingWriteback
    6568             : #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
    6569             : #define ST_SCOPE static
    6570             : #define ST_DEFINE
    6571             : #include "lib/sort_template.h"
    6572             : 
    6573             : /*
    6574             :  * Issue all pending writeback requests, previously scheduled with
    6575             :  * ScheduleBufferTagForWriteback, to the OS.
    6576             :  *
    6577             :  * Because this is only used to improve the OSs IO scheduling we try to never
    6578             :  * error out - it's just a hint.
    6579             :  */
    6580             : void
    6581        2174 : IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
    6582             : {
    6583             :     instr_time  io_start;
    6584             :     int         i;
    6585             : 
    6586        2174 :     if (wb_context->nr_pending == 0)
    6587        2174 :         return;
    6588             : 
    6589             :     /*
    6590             :      * Executing the writes in-order can make them a lot faster, and allows to
    6591             :      * merge writeback requests to consecutive blocks into larger writebacks.
    6592             :      */
    6593           0 :     sort_pending_writebacks(wb_context->pending_writebacks,
    6594           0 :                             wb_context->nr_pending);
    6595             : 
    6596           0 :     io_start = pgstat_prepare_io_time(track_io_timing);
    6597             : 
    6598             :     /*
    6599             :      * Coalesce neighbouring writes, but nothing else. For that we iterate
    6600             :      * through the, now sorted, array of pending flushes, and look forward to
    6601             :      * find all neighbouring (or identical) writes.
    6602             :      */
    6603           0 :     for (i = 0; i < wb_context->nr_pending; i++)
    6604             :     {
    6605             :         PendingWriteback *cur;
    6606             :         PendingWriteback *next;
    6607             :         SMgrRelation reln;
    6608             :         int         ahead;
    6609             :         BufferTag   tag;
    6610             :         RelFileLocator currlocator;
    6611           0 :         Size        nblocks = 1;
    6612             : 
    6613           0 :         cur = &wb_context->pending_writebacks[i];
    6614           0 :         tag = cur->tag;
    6615           0 :         currlocator = BufTagGetRelFileLocator(&tag);
    6616             : 
    6617             :         /*
    6618             :          * Peek ahead, into following writeback requests, to see if they can
    6619             :          * be combined with the current one.
    6620             :          */
    6621           0 :         for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
    6622             :         {
    6623             : 
    6624           0 :             next = &wb_context->pending_writebacks[i + ahead + 1];
    6625             : 
    6626             :             /* different file, stop */
    6627           0 :             if (!RelFileLocatorEquals(currlocator,
    6628           0 :                                       BufTagGetRelFileLocator(&next->tag)) ||
    6629           0 :                 BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
    6630             :                 break;
    6631             : 
    6632             :             /* ok, block queued twice, skip */
    6633           0 :             if (cur->tag.blockNum == next->tag.blockNum)
    6634           0 :                 continue;
    6635             : 
    6636             :             /* only merge consecutive writes */
    6637           0 :             if (cur->tag.blockNum + 1 != next->tag.blockNum)
    6638           0 :                 break;
    6639             : 
    6640           0 :             nblocks++;
    6641           0 :             cur = next;
    6642             :         }
    6643             : 
    6644           0 :         i += ahead;
    6645             : 
    6646             :         /* and finally tell the kernel to write the data to storage */
    6647           0 :         reln = smgropen(currlocator, INVALID_PROC_NUMBER);
    6648           0 :         smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
    6649             :     }
    6650             : 
    6651             :     /*
    6652             :      * Assume that writeback requests are only issued for buffers containing
    6653             :      * blocks of permanent relations.
    6654             :      */
    6655           0 :     pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
    6656           0 :                             IOOP_WRITEBACK, io_start, wb_context->nr_pending, 0);
    6657             : 
    6658           0 :     wb_context->nr_pending = 0;
    6659             : }
    6660             : 
    6661             : /* ResourceOwner callbacks */
    6662             : 
    6663             : static void
    6664          30 : ResOwnerReleaseBufferIO(Datum res)
    6665             : {
    6666          30 :     Buffer      buffer = DatumGetInt32(res);
    6667             : 
    6668          30 :     AbortBufferIO(buffer);
    6669          30 : }
    6670             : 
    6671             : static char *
    6672           0 : ResOwnerPrintBufferIO(Datum res)
    6673             : {
    6674           0 :     Buffer      buffer = DatumGetInt32(res);
    6675             : 
    6676           0 :     return psprintf("lost track of buffer IO on buffer %d", buffer);
    6677             : }
    6678             : 
    6679             : static void
    6680       15196 : ResOwnerReleaseBufferPin(Datum res)
    6681             : {
    6682       15196 :     Buffer      buffer = DatumGetInt32(res);
    6683             : 
    6684             :     /* Like ReleaseBuffer, but don't call ResourceOwnerForgetBuffer */
    6685       15196 :     if (!BufferIsValid(buffer))
    6686           0 :         elog(ERROR, "bad buffer ID: %d", buffer);
    6687             : 
    6688       15196 :     if (BufferIsLocal(buffer))
    6689        6066 :         UnpinLocalBufferNoOwner(buffer);
    6690             :     else
    6691        9130 :         UnpinBufferNoOwner(GetBufferDescriptor(buffer - 1));
    6692       15196 : }
    6693             : 
    6694             : static char *
    6695           0 : ResOwnerPrintBufferPin(Datum res)
    6696             : {
    6697           0 :     return DebugPrintBufferRefcount(DatumGetInt32(res));
    6698             : }
    6699             : 
    6700             : /*
    6701             :  * Helper function to evict unpinned buffer whose buffer header lock is
    6702             :  * already acquired.
    6703             :  */
    6704             : static bool
    6705        4282 : EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
    6706             : {
    6707             :     uint32      buf_state;
    6708             :     bool        result;
    6709             : 
    6710        4282 :     *buffer_flushed = false;
    6711             : 
    6712        4282 :     buf_state = pg_atomic_read_u32(&(desc->state));
    6713             :     Assert(buf_state & BM_LOCKED);
    6714             : 
    6715        4282 :     if ((buf_state & BM_VALID) == 0)
    6716             :     {
    6717           0 :         UnlockBufHdr(desc);
    6718           0 :         return false;
    6719             :     }
    6720             : 
    6721             :     /* Check that it's not pinned already. */
    6722        4282 :     if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
    6723             :     {
    6724           0 :         UnlockBufHdr(desc);
    6725           0 :         return false;
    6726             :     }
    6727             : 
    6728        4282 :     PinBuffer_Locked(desc);     /* releases spinlock */
    6729             : 
    6730             :     /* If it was dirty, try to clean it once. */
    6731        4282 :     if (buf_state & BM_DIRTY)
    6732             :     {
    6733        1944 :         FlushUnlockedBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
    6734        1944 :         *buffer_flushed = true;
    6735             :     }
    6736             : 
    6737             :     /* This will return false if it becomes dirty or someone else pins it. */
    6738        4282 :     result = InvalidateVictimBuffer(desc);
    6739             : 
    6740        4282 :     UnpinBuffer(desc);
    6741             : 
    6742        4282 :     return result;
    6743             : }
    6744             : 
    6745             : /*
    6746             :  * Try to evict the current block in a shared buffer.
    6747             :  *
    6748             :  * This function is intended for testing/development use only!
    6749             :  *
    6750             :  * To succeed, the buffer must not be pinned on entry, so if the caller had a
    6751             :  * particular block in mind, it might already have been replaced by some other
    6752             :  * block by the time this function runs.  It's also unpinned on return, so the
    6753             :  * buffer might be occupied again by the time control is returned, potentially
    6754             :  * even by the same block.  This inherent raciness without other interlocking
    6755             :  * makes the function unsuitable for non-testing usage.
    6756             :  *
    6757             :  * *buffer_flushed is set to true if the buffer was dirty and has been
    6758             :  * flushed, false otherwise.  However, *buffer_flushed=true does not
    6759             :  * necessarily mean that we flushed the buffer, it could have been flushed by
    6760             :  * someone else.
    6761             :  *
    6762             :  * Returns true if the buffer was valid and it has now been made invalid.
    6763             :  * Returns false if it wasn't valid, if it couldn't be evicted due to a pin,
    6764             :  * or if the buffer becomes dirty again while we're trying to write it out.
    6765             :  */
    6766             : bool
    6767         280 : EvictUnpinnedBuffer(Buffer buf, bool *buffer_flushed)
    6768             : {
    6769             :     BufferDesc *desc;
    6770             : 
    6771             :     Assert(BufferIsValid(buf) && !BufferIsLocal(buf));
    6772             : 
    6773             :     /* Make sure we can pin the buffer. */
    6774         280 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    6775         280 :     ReservePrivateRefCountEntry();
    6776             : 
    6777         280 :     desc = GetBufferDescriptor(buf - 1);
    6778         280 :     LockBufHdr(desc);
    6779             : 
    6780         280 :     return EvictUnpinnedBufferInternal(desc, buffer_flushed);
    6781             : }
    6782             : 
    6783             : /*
    6784             :  * Try to evict all the shared buffers.
    6785             :  *
    6786             :  * This function is intended for testing/development use only! See
    6787             :  * EvictUnpinnedBuffer().
    6788             :  *
    6789             :  * The buffers_* parameters are mandatory and indicate the total count of
    6790             :  * buffers that:
    6791             :  * - buffers_evicted - were evicted
    6792             :  * - buffers_flushed - were flushed
    6793             :  * - buffers_skipped - could not be evicted
    6794             :  */
    6795             : void
    6796           2 : EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed,
    6797             :                         int32 *buffers_skipped)
    6798             : {
    6799           2 :     *buffers_evicted = 0;
    6800           2 :     *buffers_skipped = 0;
    6801           2 :     *buffers_flushed = 0;
    6802             : 
    6803       32770 :     for (int buf = 1; buf <= NBuffers; buf++)
    6804             :     {
    6805       32768 :         BufferDesc *desc = GetBufferDescriptor(buf - 1);
    6806             :         uint32      buf_state;
    6807             :         bool        buffer_flushed;
    6808             : 
    6809       32768 :         CHECK_FOR_INTERRUPTS();
    6810             : 
    6811       32768 :         buf_state = pg_atomic_read_u32(&desc->state);
    6812       32768 :         if (!(buf_state & BM_VALID))
    6813       28766 :             continue;
    6814             : 
    6815        4002 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    6816        4002 :         ReservePrivateRefCountEntry();
    6817             : 
    6818        4002 :         LockBufHdr(desc);
    6819             : 
    6820        4002 :         if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
    6821        4002 :             (*buffers_evicted)++;
    6822             :         else
    6823           0 :             (*buffers_skipped)++;
    6824             : 
    6825        4002 :         if (buffer_flushed)
    6826        1906 :             (*buffers_flushed)++;
    6827             :     }
    6828           2 : }
    6829             : 
    6830             : /*
    6831             :  * Try to evict all the shared buffers containing provided relation's pages.
    6832             :  *
    6833             :  * This function is intended for testing/development use only! See
    6834             :  * EvictUnpinnedBuffer().
    6835             :  *
    6836             :  * The caller must hold at least AccessShareLock on the relation to prevent
    6837             :  * the relation from being dropped.
    6838             :  *
    6839             :  * The buffers_* parameters are mandatory and indicate the total count of
    6840             :  * buffers that:
    6841             :  * - buffers_evicted - were evicted
    6842             :  * - buffers_flushed - were flushed
    6843             :  * - buffers_skipped - could not be evicted
    6844             :  */
    6845             : void
    6846           2 : EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted,
    6847             :                         int32 *buffers_flushed, int32 *buffers_skipped)
    6848             : {
    6849             :     Assert(!RelationUsesLocalBuffers(rel));
    6850             : 
    6851           2 :     *buffers_skipped = 0;
    6852           2 :     *buffers_evicted = 0;
    6853           2 :     *buffers_flushed = 0;
    6854             : 
    6855       32770 :     for (int buf = 1; buf <= NBuffers; buf++)
    6856             :     {
    6857       32768 :         BufferDesc *desc = GetBufferDescriptor(buf - 1);
    6858       32768 :         uint32      buf_state = pg_atomic_read_u32(&(desc->state));
    6859             :         bool        buffer_flushed;
    6860             : 
    6861       32768 :         CHECK_FOR_INTERRUPTS();
    6862             : 
    6863             :         /* An unlocked precheck should be safe and saves some cycles. */
    6864       32768 :         if ((buf_state & BM_VALID) == 0 ||
    6865          54 :             !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
    6866       32768 :             continue;
    6867             : 
    6868             :         /* Make sure we can pin the buffer. */
    6869           0 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    6870           0 :         ReservePrivateRefCountEntry();
    6871             : 
    6872           0 :         buf_state = LockBufHdr(desc);
    6873             : 
    6874             :         /* recheck, could have changed without the lock */
    6875           0 :         if ((buf_state & BM_VALID) == 0 ||
    6876           0 :             !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
    6877             :         {
    6878           0 :             UnlockBufHdr(desc);
    6879           0 :             continue;
    6880             :         }
    6881             : 
    6882           0 :         if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
    6883           0 :             (*buffers_evicted)++;
    6884             :         else
    6885           0 :             (*buffers_skipped)++;
    6886             : 
    6887           0 :         if (buffer_flushed)
    6888           0 :             (*buffers_flushed)++;
    6889             :     }
    6890           2 : }
    6891             : 
    6892             : /*
    6893             :  * Helper function to mark unpinned buffer dirty whose buffer header lock is
    6894             :  * already acquired.
    6895             :  */
    6896             : static bool
    6897          72 : MarkDirtyUnpinnedBufferInternal(Buffer buf, BufferDesc *desc,
    6898             :                                 bool *buffer_already_dirty)
    6899             : {
    6900             :     uint32      buf_state;
    6901          72 :     bool        result = false;
    6902             : 
    6903          72 :     *buffer_already_dirty = false;
    6904             : 
    6905          72 :     buf_state = pg_atomic_read_u32(&(desc->state));
    6906             :     Assert(buf_state & BM_LOCKED);
    6907             : 
    6908          72 :     if ((buf_state & BM_VALID) == 0)
    6909             :     {
    6910           2 :         UnlockBufHdr(desc);
    6911           2 :         return false;
    6912             :     }
    6913             : 
    6914             :     /* Check that it's not pinned already. */
    6915          70 :     if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
    6916             :     {
    6917           0 :         UnlockBufHdr(desc);
    6918           0 :         return false;
    6919             :     }
    6920             : 
    6921             :     /* Pin the buffer and then release the buffer spinlock */
    6922          70 :     PinBuffer_Locked(desc);
    6923             : 
    6924             :     /* If it was not already dirty, mark it as dirty. */
    6925          70 :     if (!(buf_state & BM_DIRTY))
    6926             :     {
    6927          34 :         LWLockAcquire(BufferDescriptorGetContentLock(desc), LW_EXCLUSIVE);
    6928          34 :         MarkBufferDirty(buf);
    6929          34 :         result = true;
    6930          34 :         LWLockRelease(BufferDescriptorGetContentLock(desc));
    6931             :     }
    6932             :     else
    6933          36 :         *buffer_already_dirty = true;
    6934             : 
    6935          70 :     UnpinBuffer(desc);
    6936             : 
    6937          70 :     return result;
    6938             : }
    6939             : 
    6940             : /*
    6941             :  * Try to mark the provided shared buffer as dirty.
    6942             :  *
    6943             :  * This function is intended for testing/development use only!
    6944             :  *
    6945             :  * Same as EvictUnpinnedBuffer() but with MarkBufferDirty() call inside.
    6946             :  *
    6947             :  * The buffer_already_dirty parameter is mandatory and indicate if the buffer
    6948             :  * could not be dirtied because it is already dirty.
    6949             :  *
    6950             :  * Returns true if the buffer has successfully been marked as dirty.
    6951             :  */
    6952             : bool
    6953           2 : MarkDirtyUnpinnedBuffer(Buffer buf, bool *buffer_already_dirty)
    6954             : {
    6955             :     BufferDesc *desc;
    6956           2 :     bool        buffer_dirtied = false;
    6957             : 
    6958             :     Assert(!BufferIsLocal(buf));
    6959             : 
    6960             :     /* Make sure we can pin the buffer. */
    6961           2 :     ResourceOwnerEnlarge(CurrentResourceOwner);
    6962           2 :     ReservePrivateRefCountEntry();
    6963             : 
    6964           2 :     desc = GetBufferDescriptor(buf - 1);
    6965           2 :     LockBufHdr(desc);
    6966             : 
    6967           2 :     buffer_dirtied = MarkDirtyUnpinnedBufferInternal(buf, desc, buffer_already_dirty);
    6968             :     /* Both can not be true at the same time */
    6969             :     Assert(!(buffer_dirtied && *buffer_already_dirty));
    6970             : 
    6971           2 :     return buffer_dirtied;
    6972             : }
    6973             : 
    6974             : /*
    6975             :  * Try to mark all the shared buffers containing provided relation's pages as
    6976             :  * dirty.
    6977             :  *
    6978             :  * This function is intended for testing/development use only! See
    6979             :  * MarkDirtyUnpinnedBuffer().
    6980             :  *
    6981             :  * The buffers_* parameters are mandatory and indicate the total count of
    6982             :  * buffers that:
    6983             :  * - buffers_dirtied - were dirtied
    6984             :  * - buffers_already_dirty - were already dirty
    6985             :  * - buffers_skipped - could not be dirtied because of a reason different
    6986             :  * than a buffer being already dirty.
    6987             :  */
    6988             : void
    6989           2 : MarkDirtyRelUnpinnedBuffers(Relation rel,
    6990             :                             int32 *buffers_dirtied,
    6991             :                             int32 *buffers_already_dirty,
    6992             :                             int32 *buffers_skipped)
    6993             : {
    6994             :     Assert(!RelationUsesLocalBuffers(rel));
    6995             : 
    6996           2 :     *buffers_dirtied = 0;
    6997           2 :     *buffers_already_dirty = 0;
    6998           2 :     *buffers_skipped = 0;
    6999             : 
    7000       32770 :     for (int buf = 1; buf <= NBuffers; buf++)
    7001             :     {
    7002       32768 :         BufferDesc *desc = GetBufferDescriptor(buf - 1);
    7003       32768 :         uint32      buf_state = pg_atomic_read_u32(&(desc->state));
    7004             :         bool        buffer_already_dirty;
    7005             : 
    7006       32768 :         CHECK_FOR_INTERRUPTS();
    7007             : 
    7008             :         /* An unlocked precheck should be safe and saves some cycles. */
    7009       32768 :         if ((buf_state & BM_VALID) == 0 ||
    7010          54 :             !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
    7011       32768 :             continue;
    7012             : 
    7013             :         /* Make sure we can pin the buffer. */
    7014           0 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    7015           0 :         ReservePrivateRefCountEntry();
    7016             : 
    7017           0 :         buf_state = LockBufHdr(desc);
    7018             : 
    7019             :         /* recheck, could have changed without the lock */
    7020           0 :         if ((buf_state & BM_VALID) == 0 ||
    7021           0 :             !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
    7022             :         {
    7023           0 :             UnlockBufHdr(desc);
    7024           0 :             continue;
    7025             :         }
    7026             : 
    7027           0 :         if (MarkDirtyUnpinnedBufferInternal(buf, desc, &buffer_already_dirty))
    7028           0 :             (*buffers_dirtied)++;
    7029           0 :         else if (buffer_already_dirty)
    7030           0 :             (*buffers_already_dirty)++;
    7031             :         else
    7032           0 :             (*buffers_skipped)++;
    7033             :     }
    7034           2 : }
    7035             : 
    7036             : /*
    7037             :  * Try to mark all the shared buffers as dirty.
    7038             :  *
    7039             :  * This function is intended for testing/development use only! See
    7040             :  * MarkDirtyUnpinnedBuffer().
    7041             :  *
    7042             :  * See MarkDirtyRelUnpinnedBuffers() above for details about the buffers_*
    7043             :  * parameters.
    7044             :  */
    7045             : void
    7046           2 : MarkDirtyAllUnpinnedBuffers(int32 *buffers_dirtied,
    7047             :                             int32 *buffers_already_dirty,
    7048             :                             int32 *buffers_skipped)
    7049             : {
    7050           2 :     *buffers_dirtied = 0;
    7051           2 :     *buffers_already_dirty = 0;
    7052           2 :     *buffers_skipped = 0;
    7053             : 
    7054       32770 :     for (int buf = 1; buf <= NBuffers; buf++)
    7055             :     {
    7056       32768 :         BufferDesc *desc = GetBufferDescriptor(buf - 1);
    7057             :         uint32      buf_state;
    7058             :         bool        buffer_already_dirty;
    7059             : 
    7060       32768 :         CHECK_FOR_INTERRUPTS();
    7061             : 
    7062       32768 :         buf_state = pg_atomic_read_u32(&desc->state);
    7063       32768 :         if (!(buf_state & BM_VALID))
    7064       32698 :             continue;
    7065             : 
    7066          70 :         ResourceOwnerEnlarge(CurrentResourceOwner);
    7067          70 :         ReservePrivateRefCountEntry();
    7068             : 
    7069          70 :         LockBufHdr(desc);
    7070             : 
    7071          70 :         if (MarkDirtyUnpinnedBufferInternal(buf, desc, &buffer_already_dirty))
    7072          34 :             (*buffers_dirtied)++;
    7073          36 :         else if (buffer_already_dirty)
    7074          36 :             (*buffers_already_dirty)++;
    7075             :         else
    7076           0 :             (*buffers_skipped)++;
    7077             :     }
    7078           2 : }
    7079             : 
    7080             : /*
    7081             :  * Generic implementation of the AIO handle staging callback for readv/writev
    7082             :  * on local/shared buffers.
    7083             :  *
    7084             :  * Each readv/writev can target multiple buffers. The buffers have already
    7085             :  * been registered with the IO handle.
    7086             :  *
    7087             :  * To make the IO ready for execution ("staging"), we need to ensure that the
    7088             :  * targeted buffers are in an appropriate state while the IO is ongoing. For
    7089             :  * that the AIO subsystem needs to have its own buffer pin, otherwise an error
    7090             :  * in this backend could lead to this backend's buffer pin being released as
    7091             :  * part of error handling, which in turn could lead to the buffer being
    7092             :  * replaced while IO is ongoing.
    7093             :  */
    7094             : static pg_attribute_always_inline void
    7095     2576290 : buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
    7096             : {
    7097             :     uint64     *io_data;
    7098             :     uint8       handle_data_len;
    7099             :     PgAioWaitRef io_ref;
    7100     2576290 :     BufferTag   first PG_USED_FOR_ASSERTS_ONLY = {0};
    7101             : 
    7102     2576290 :     io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
    7103             : 
    7104     2576290 :     pgaio_io_get_wref(ioh, &io_ref);
    7105             : 
    7106             :     /* iterate over all buffers affected by the vectored readv/writev */
    7107     5495772 :     for (int i = 0; i < handle_data_len; i++)
    7108             :     {
    7109     2919482 :         Buffer      buffer = (Buffer) io_data[i];
    7110     2919482 :         BufferDesc *buf_hdr = is_temp ?
    7111       16820 :             GetLocalBufferDescriptor(-buffer - 1)
    7112     2919482 :             : GetBufferDescriptor(buffer - 1);
    7113             :         uint32      buf_state;
    7114             : 
    7115             :         /*
    7116             :          * Check that all the buffers are actually ones that could conceivably
    7117             :          * be done in one IO, i.e. are sequential. This is the last
    7118             :          * buffer-aware code before IO is actually executed and confusion
    7119             :          * about which buffers are targeted by IO can be hard to debug, making
    7120             :          * it worth doing extra-paranoid checks.
    7121             :          */
    7122     2919482 :         if (i == 0)
    7123     2576290 :             first = buf_hdr->tag;
    7124             :         else
    7125             :         {
    7126             :             Assert(buf_hdr->tag.relNumber == first.relNumber);
    7127             :             Assert(buf_hdr->tag.blockNum == first.blockNum + i);
    7128             :         }
    7129             : 
    7130     2919482 :         if (is_temp)
    7131       16820 :             buf_state = pg_atomic_read_u32(&buf_hdr->state);
    7132             :         else
    7133     2902662 :             buf_state = LockBufHdr(buf_hdr);
    7134             : 
    7135             :         /* verify the buffer is in the expected state */
    7136             :         Assert(buf_state & BM_TAG_VALID);
    7137             :         if (is_write)
    7138             :         {
    7139             :             Assert(buf_state & BM_VALID);
    7140             :             Assert(buf_state & BM_DIRTY);
    7141             :         }
    7142             :         else
    7143             :         {
    7144             :             Assert(!(buf_state & BM_VALID));
    7145             :             Assert(!(buf_state & BM_DIRTY));
    7146             :         }
    7147             : 
    7148             :         /* temp buffers don't use BM_IO_IN_PROGRESS */
    7149     2919482 :         if (!is_temp)
    7150             :             Assert(buf_state & BM_IO_IN_PROGRESS);
    7151             : 
    7152             :         Assert(BUF_STATE_GET_REFCOUNT(buf_state) >= 1);
    7153             : 
    7154             :         /*
    7155             :          * Reflect that the buffer is now owned by the AIO subsystem.
    7156             :          *
    7157             :          * For local buffers: This can't be done just via LocalRefCount, as
    7158             :          * one might initially think, as this backend could error out while
    7159             :          * AIO is still in progress, releasing all the pins by the backend
    7160             :          * itself.
    7161             :          *
    7162             :          * This pin is released again in TerminateBufferIO().
    7163             :          */
    7164     2919482 :         buf_hdr->io_wref = io_ref;
    7165             : 
    7166     2919482 :         if (is_temp)
    7167             :         {
    7168       16820 :             buf_state += BUF_REFCOUNT_ONE;
    7169       16820 :             pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
    7170             :         }
    7171             :         else
    7172     2902662 :             UnlockBufHdrExt(buf_hdr, buf_state, 0, 0, 1);
    7173             : 
    7174             :         /*
    7175             :          * Ensure the content lock that prevents buffer modifications while
    7176             :          * the buffer is being written out is not released early due to an
    7177             :          * error.
    7178             :          */
    7179     2919482 :         if (is_write && !is_temp)
    7180             :         {
    7181             :             LWLock     *content_lock;
    7182             : 
    7183           0 :             content_lock = BufferDescriptorGetContentLock(buf_hdr);
    7184             : 
    7185             :             Assert(LWLockHeldByMe(content_lock));
    7186             : 
    7187             :             /*
    7188             :              * Lock is now owned by AIO subsystem.
    7189             :              */
    7190           0 :             LWLockDisown(content_lock);
    7191             :         }
    7192             : 
    7193             :         /*
    7194             :          * Stop tracking this buffer via the resowner - the AIO system now
    7195             :          * keeps track.
    7196             :          */
    7197     2919482 :         if (!is_temp)
    7198     2902662 :             ResourceOwnerForgetBufferIO(CurrentResourceOwner, buffer);
    7199             :     }
    7200     2576290 : }
    7201             : 
    7202             : /*
    7203             :  * Decode readv errors as encoded by buffer_readv_encode_error().
    7204             :  */
    7205             : static inline void
    7206         698 : buffer_readv_decode_error(PgAioResult result,
    7207             :                           bool *zeroed_any,
    7208             :                           bool *ignored_any,
    7209             :                           uint8 *zeroed_or_error_count,
    7210             :                           uint8 *checkfail_count,
    7211             :                           uint8 *first_off)
    7212             : {
    7213         698 :     uint32      rem_error = result.error_data;
    7214             : 
    7215             :     /* see static asserts in buffer_readv_encode_error */
    7216             : #define READV_COUNT_BITS    7
    7217             : #define READV_COUNT_MASK    ((1 << READV_COUNT_BITS) - 1)
    7218             : 
    7219         698 :     *zeroed_any = rem_error & 1;
    7220         698 :     rem_error >>= 1;
    7221             : 
    7222         698 :     *ignored_any = rem_error & 1;
    7223         698 :     rem_error >>= 1;
    7224             : 
    7225         698 :     *zeroed_or_error_count = rem_error & READV_COUNT_MASK;
    7226         698 :     rem_error >>= READV_COUNT_BITS;
    7227             : 
    7228         698 :     *checkfail_count = rem_error & READV_COUNT_MASK;
    7229         698 :     rem_error >>= READV_COUNT_BITS;
    7230             : 
    7231         698 :     *first_off = rem_error & READV_COUNT_MASK;
    7232         698 :     rem_error >>= READV_COUNT_BITS;
    7233         698 : }
    7234             : 
    7235             : /*
    7236             :  * Helper to encode errors for buffer_readv_complete()
    7237             :  *
    7238             :  * Errors are encoded as follows:
    7239             :  * - bit 0 indicates whether any page was zeroed (1) or not (0)
    7240             :  * - bit 1 indicates whether any checksum failure was ignored (1) or not (0)
    7241             :  * - next READV_COUNT_BITS bits indicate the number of errored or zeroed pages
    7242             :  * - next READV_COUNT_BITS bits indicate the number of checksum failures
    7243             :  * - next READV_COUNT_BITS bits indicate the first offset of the first page
    7244             :  *   that was errored or zeroed or, if no errors/zeroes, the first ignored
    7245             :  *   checksum
    7246             :  */
    7247             : static inline void
    7248         384 : buffer_readv_encode_error(PgAioResult *result,
    7249             :                           bool is_temp,
    7250             :                           bool zeroed_any,
    7251             :                           bool ignored_any,
    7252             :                           uint8 error_count,
    7253             :                           uint8 zeroed_count,
    7254             :                           uint8 checkfail_count,
    7255             :                           uint8 first_error_off,
    7256             :                           uint8 first_zeroed_off,
    7257             :                           uint8 first_ignored_off)
    7258             : {
    7259             : 
    7260         384 :     uint8       shift = 0;
    7261         384 :     uint8       zeroed_or_error_count =
    7262             :         error_count > 0 ? error_count : zeroed_count;
    7263             :     uint8       first_off;
    7264             : 
    7265             :     StaticAssertDecl(PG_IOV_MAX <= 1 << READV_COUNT_BITS,
    7266             :                      "PG_IOV_MAX is bigger than reserved space for error data");
    7267             :     StaticAssertDecl((1 + 1 + 3 * READV_COUNT_BITS) <= PGAIO_RESULT_ERROR_BITS,
    7268             :                      "PGAIO_RESULT_ERROR_BITS is insufficient for buffer_readv");
    7269             : 
    7270             :     /*
    7271             :      * We only have space to encode one offset - but luckily that's good
    7272             :      * enough. If there is an error, the error is the interesting offset, same
    7273             :      * with a zeroed buffer vs an ignored buffer.
    7274             :      */
    7275         384 :     if (error_count > 0)
    7276         188 :         first_off = first_error_off;
    7277         196 :     else if (zeroed_count > 0)
    7278         160 :         first_off = first_zeroed_off;
    7279             :     else
    7280          36 :         first_off = first_ignored_off;
    7281             : 
    7282             :     Assert(!zeroed_any || error_count == 0);
    7283             : 
    7284         384 :     result->error_data = 0;
    7285             : 
    7286         384 :     result->error_data |= zeroed_any << shift;
    7287         384 :     shift += 1;
    7288             : 
    7289         384 :     result->error_data |= ignored_any << shift;
    7290         384 :     shift += 1;
    7291             : 
    7292         384 :     result->error_data |= ((uint32) zeroed_or_error_count) << shift;
    7293         384 :     shift += READV_COUNT_BITS;
    7294             : 
    7295         384 :     result->error_data |= ((uint32) checkfail_count) << shift;
    7296         384 :     shift += READV_COUNT_BITS;
    7297             : 
    7298         384 :     result->error_data |= ((uint32) first_off) << shift;
    7299         384 :     shift += READV_COUNT_BITS;
    7300             : 
    7301         384 :     result->id = is_temp ? PGAIO_HCB_LOCAL_BUFFER_READV :
    7302             :         PGAIO_HCB_SHARED_BUFFER_READV;
    7303             : 
    7304         384 :     if (error_count > 0)
    7305         188 :         result->status = PGAIO_RS_ERROR;
    7306             :     else
    7307         196 :         result->status = PGAIO_RS_WARNING;
    7308             : 
    7309             :     /*
    7310             :      * The encoding is complicated enough to warrant cross-checking it against
    7311             :      * the decode function.
    7312             :      */
    7313             : #ifdef USE_ASSERT_CHECKING
    7314             :     {
    7315             :         bool        zeroed_any_2,
    7316             :                     ignored_any_2;
    7317             :         uint8       zeroed_or_error_count_2,
    7318             :                     checkfail_count_2,
    7319             :                     first_off_2;
    7320             : 
    7321             :         buffer_readv_decode_error(*result,
    7322             :                                   &zeroed_any_2, &ignored_any_2,
    7323             :                                   &zeroed_or_error_count_2,
    7324             :                                   &checkfail_count_2,
    7325             :                                   &first_off_2);
    7326             :         Assert(zeroed_any == zeroed_any_2);
    7327             :         Assert(ignored_any == ignored_any_2);
    7328             :         Assert(zeroed_or_error_count == zeroed_or_error_count_2);
    7329             :         Assert(checkfail_count == checkfail_count_2);
    7330             :         Assert(first_off == first_off_2);
    7331             :     }
    7332             : #endif
    7333             : 
    7334             : #undef READV_COUNT_BITS
    7335             : #undef READV_COUNT_MASK
    7336         384 : }
    7337             : 
    7338             : /*
    7339             :  * Helper for AIO readv completion callbacks, supporting both shared and temp
    7340             :  * buffers. Gets called once for each buffer in a multi-page read.
    7341             :  */
    7342             : static pg_attribute_always_inline void
    7343     2641490 : buffer_readv_complete_one(PgAioTargetData *td, uint8 buf_off, Buffer buffer,
    7344             :                           uint8 flags, bool failed, bool is_temp,
    7345             :                           bool *buffer_invalid,
    7346             :                           bool *failed_checksum,
    7347             :                           bool *ignored_checksum,
    7348             :                           bool *zeroed_buffer)
    7349             : {
    7350     2641490 :     BufferDesc *buf_hdr = is_temp ?
    7351       16820 :         GetLocalBufferDescriptor(-buffer - 1)
    7352     2641490 :         : GetBufferDescriptor(buffer - 1);
    7353     2641490 :     BufferTag   tag = buf_hdr->tag;
    7354     2641490 :     char       *bufdata = BufferGetBlock(buffer);
    7355             :     uint32      set_flag_bits;
    7356             :     int         piv_flags;
    7357             : 
    7358             :     /* check that the buffer is in the expected state for a read */
    7359             : #ifdef USE_ASSERT_CHECKING
    7360             :     {
    7361             :         uint32      buf_state = pg_atomic_read_u32(&buf_hdr->state);
    7362             : 
    7363             :         Assert(buf_state & BM_TAG_VALID);
    7364             :         Assert(!(buf_state & BM_VALID));
    7365             :         /* temp buffers don't use BM_IO_IN_PROGRESS */
    7366             :         if (!is_temp)
    7367             :             Assert(buf_state & BM_IO_IN_PROGRESS);
    7368             :         Assert(!(buf_state & BM_DIRTY));
    7369             :     }
    7370             : #endif
    7371             : 
    7372     2641490 :     *buffer_invalid = false;
    7373     2641490 :     *failed_checksum = false;
    7374     2641490 :     *ignored_checksum = false;
    7375     2641490 :     *zeroed_buffer = false;
    7376             : 
    7377             :     /*
    7378             :      * We ask PageIsVerified() to only log the message about checksum errors,
    7379             :      * as the completion might be run in any backend (or IO workers). We will
    7380             :      * report checksum errors in buffer_readv_report().
    7381             :      */
    7382     2641490 :     piv_flags = PIV_LOG_LOG;
    7383             : 
    7384             :     /* the local zero_damaged_pages may differ from the definer's */
    7385     2641490 :     if (flags & READ_BUFFERS_IGNORE_CHECKSUM_FAILURES)
    7386          76 :         piv_flags |= PIV_IGNORE_CHECKSUM_FAILURE;
    7387             : 
    7388             :     /* Check for garbage data. */
    7389     2641490 :     if (!failed)
    7390             :     {
    7391             :         /*
    7392             :          * If the buffer is not currently pinned by this backend, e.g. because
    7393             :          * we're completing this IO after an error, the buffer data will have
    7394             :          * been marked as inaccessible when the buffer was unpinned. The AIO
    7395             :          * subsystem holds a pin, but that doesn't prevent the buffer from
    7396             :          * having been marked as inaccessible. The completion might also be
    7397             :          * executed in a different process.
    7398             :          */
    7399             : #ifdef USE_VALGRIND
    7400             :         if (!BufferIsPinned(buffer))
    7401             :             VALGRIND_MAKE_MEM_DEFINED(bufdata, BLCKSZ);
    7402             : #endif
    7403             : 
    7404     2641432 :         if (!PageIsVerified((Page) bufdata, tag.blockNum, piv_flags,
    7405             :                             failed_checksum))
    7406             :         {
    7407         192 :             if (flags & READ_BUFFERS_ZERO_ON_ERROR)
    7408             :             {
    7409          92 :                 memset(bufdata, 0, BLCKSZ);
    7410          92 :                 *zeroed_buffer = true;
    7411             :             }
    7412             :             else
    7413             :             {
    7414         100 :                 *buffer_invalid = true;
    7415             :                 /* mark buffer as having failed */
    7416         100 :                 failed = true;
    7417             :             }
    7418             :         }
    7419     2641240 :         else if (*failed_checksum)
    7420          24 :             *ignored_checksum = true;
    7421             : 
    7422             :         /* undo what we did above */
    7423             : #ifdef USE_VALGRIND
    7424             :         if (!BufferIsPinned(buffer))
    7425             :             VALGRIND_MAKE_MEM_NOACCESS(bufdata, BLCKSZ);
    7426             : #endif
    7427             : 
    7428             :         /*
    7429             :          * Immediately log a message about the invalid page, but only to the
    7430             :          * server log. The reason to do so immediately is that this may be
    7431             :          * executed in a different backend than the one that originated the
    7432             :          * request. The reason to do so immediately is that the originator
    7433             :          * might not process the query result immediately (because it is busy
    7434             :          * doing another part of query processing) or at all (e.g. if it was
    7435             :          * cancelled or errored out due to another IO also failing). The
    7436             :          * definer of the IO will emit an ERROR or WARNING when processing the
    7437             :          * IO's results
    7438             :          *
    7439             :          * To avoid duplicating the code to emit these log messages, we reuse
    7440             :          * buffer_readv_report().
    7441             :          */
    7442     2641432 :         if (*buffer_invalid || *failed_checksum || *zeroed_buffer)
    7443             :         {
    7444         216 :             PgAioResult result_one = {0};
    7445             : 
    7446         216 :             buffer_readv_encode_error(&result_one, is_temp,
    7447         216 :                                       *zeroed_buffer,
    7448         216 :                                       *ignored_checksum,
    7449         216 :                                       *buffer_invalid,
    7450         216 :                                       *zeroed_buffer ? 1 : 0,
    7451         216 :                                       *failed_checksum ? 1 : 0,
    7452             :                                       buf_off, buf_off, buf_off);
    7453         216 :             pgaio_result_report(result_one, td, LOG_SERVER_ONLY);
    7454             :         }
    7455             :     }
    7456             : 
    7457             :     /* Terminate I/O and set BM_VALID. */
    7458     2641490 :     set_flag_bits = failed ? BM_IO_ERROR : BM_VALID;
    7459     2641490 :     if (is_temp)
    7460       16820 :         TerminateLocalBufferIO(buf_hdr, false, set_flag_bits, true);
    7461             :     else
    7462     2624670 :         TerminateBufferIO(buf_hdr, false, set_flag_bits, false, true);
    7463             : 
    7464             :     /*
    7465             :      * Call the BUFFER_READ_DONE tracepoint in the callback, even though the
    7466             :      * callback may not be executed in the same backend that called
    7467             :      * BUFFER_READ_START. The alternative would be to defer calling the
    7468             :      * tracepoint to a later point (e.g. the local completion callback for
    7469             :      * shared buffer reads), which seems even less helpful.
    7470             :      */
    7471             :     TRACE_POSTGRESQL_BUFFER_READ_DONE(tag.forkNum,
    7472             :                                       tag.blockNum,
    7473             :                                       tag.spcOid,
    7474             :                                       tag.dbOid,
    7475             :                                       tag.relNumber,
    7476             :                                       is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
    7477             :                                       false);
    7478     2641490 : }
    7479             : 
    7480             : /*
    7481             :  * Perform completion handling of a single AIO read. This read may cover
    7482             :  * multiple blocks / buffers.
    7483             :  *
    7484             :  * Shared between shared and local buffers, to reduce code duplication.
    7485             :  */
    7486             : static pg_attribute_always_inline PgAioResult
    7487     2366116 : buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
    7488             :                       uint8 cb_data, bool is_temp)
    7489             : {
    7490     2366116 :     PgAioResult result = prior_result;
    7491     2366116 :     PgAioTargetData *td = pgaio_io_get_target_data(ioh);
    7492     2366116 :     uint8       first_error_off = 0;
    7493     2366116 :     uint8       first_zeroed_off = 0;
    7494     2366116 :     uint8       first_ignored_off = 0;
    7495     2366116 :     uint8       error_count = 0;
    7496     2366116 :     uint8       zeroed_count = 0;
    7497     2366116 :     uint8       ignored_count = 0;
    7498     2366116 :     uint8       checkfail_count = 0;
    7499             :     uint64     *io_data;
    7500             :     uint8       handle_data_len;
    7501             : 
    7502             :     if (is_temp)
    7503             :     {
    7504             :         Assert(td->smgr.is_temp);
    7505             :         Assert(pgaio_io_get_owner(ioh) == MyProcNumber);
    7506             :     }
    7507             :     else
    7508             :         Assert(!td->smgr.is_temp);
    7509             : 
    7510             :     /*
    7511             :      * Iterate over all the buffers affected by this IO and call the
    7512             :      * per-buffer completion function for each buffer.
    7513             :      */
    7514     2366116 :     io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
    7515     5007606 :     for (uint8 buf_off = 0; buf_off < handle_data_len; buf_off++)
    7516             :     {
    7517     2641490 :         Buffer      buf = io_data[buf_off];
    7518             :         bool        failed;
    7519     2641490 :         bool        failed_verification = false;
    7520     2641490 :         bool        failed_checksum = false;
    7521     2641490 :         bool        zeroed_buffer = false;
    7522     2641490 :         bool        ignored_checksum = false;
    7523             : 
    7524             :         Assert(BufferIsValid(buf));
    7525             : 
    7526             :         /*
    7527             :          * If the entire I/O failed on a lower-level, each buffer needs to be
    7528             :          * marked as failed. In case of a partial read, the first few buffers
    7529             :          * may be ok.
    7530             :          */
    7531     2641490 :         failed =
    7532     2641490 :             prior_result.status == PGAIO_RS_ERROR
    7533     2641490 :             || prior_result.result <= buf_off;
    7534             : 
    7535     2641490 :         buffer_readv_complete_one(td, buf_off, buf, cb_data, failed, is_temp,
    7536             :                                   &failed_verification,
    7537             :                                   &failed_checksum,
    7538             :                                   &ignored_checksum,
    7539             :                                   &zeroed_buffer);
    7540             : 
    7541             :         /*
    7542             :          * Track information about the number of different kinds of error
    7543             :          * conditions across all pages, as there can be multiple pages failing
    7544             :          * verification as part of one IO.
    7545             :          */
    7546     2641490 :         if (failed_verification && !zeroed_buffer && error_count++ == 0)
    7547          88 :             first_error_off = buf_off;
    7548     2641490 :         if (zeroed_buffer && zeroed_count++ == 0)
    7549          68 :             first_zeroed_off = buf_off;
    7550     2641490 :         if (ignored_checksum && ignored_count++ == 0)
    7551          20 :             first_ignored_off = buf_off;
    7552     2641490 :         if (failed_checksum)
    7553          64 :             checkfail_count++;
    7554             :     }
    7555             : 
    7556             :     /*
    7557             :      * If the smgr read succeeded [partially] and page verification failed for
    7558             :      * some of the pages, adjust the IO's result state appropriately.
    7559             :      */
    7560     2366116 :     if (prior_result.status != PGAIO_RS_ERROR &&
    7561     2366010 :         (error_count > 0 || ignored_count > 0 || zeroed_count > 0))
    7562             :     {
    7563         168 :         buffer_readv_encode_error(&result, is_temp,
    7564             :                                   zeroed_count > 0, ignored_count > 0,
    7565             :                                   error_count, zeroed_count, checkfail_count,
    7566             :                                   first_error_off, first_zeroed_off,
    7567             :                                   first_ignored_off);
    7568         168 :         pgaio_result_report(result, td, DEBUG1);
    7569             :     }
    7570             : 
    7571             :     /*
    7572             :      * For shared relations this reporting is done in
    7573             :      * shared_buffer_readv_complete_local().
    7574             :      */
    7575     2366116 :     if (is_temp && checkfail_count > 0)
    7576           4 :         pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
    7577             :                                               checkfail_count);
    7578             : 
    7579     2366116 :     return result;
    7580             : }
    7581             : 
    7582             : /*
    7583             :  * AIO error reporting callback for aio_shared_buffer_readv_cb and
    7584             :  * aio_local_buffer_readv_cb.
    7585             :  *
    7586             :  * The error is encoded / decoded in buffer_readv_encode_error() /
    7587             :  * buffer_readv_decode_error().
    7588             :  */
    7589             : static void
    7590         544 : buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
    7591             :                     int elevel)
    7592             : {
    7593         544 :     int         nblocks = td->smgr.nblocks;
    7594         544 :     BlockNumber first = td->smgr.blockNum;
    7595         544 :     BlockNumber last = first + nblocks - 1;
    7596         544 :     ProcNumber  errProc =
    7597         544 :         td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER;
    7598             :     RelPathStr  rpath =
    7599         544 :         relpathbackend(td->smgr.rlocator, errProc, td->smgr.forkNum);
    7600             :     bool        zeroed_any,
    7601             :                 ignored_any;
    7602             :     uint8       zeroed_or_error_count,
    7603             :                 checkfail_count,
    7604             :                 first_off;
    7605             :     uint8       affected_count;
    7606             :     const char *msg_one,
    7607             :                *msg_mult,
    7608             :                *det_mult,
    7609             :                *hint_mult;
    7610             : 
    7611         544 :     buffer_readv_decode_error(result, &zeroed_any, &ignored_any,
    7612             :                               &zeroed_or_error_count,
    7613             :                               &checkfail_count,
    7614             :                               &first_off);
    7615             : 
    7616             :     /*
    7617             :      * Treat a read that had both zeroed buffers *and* ignored checksums as a
    7618             :      * special case, it's too irregular to be emitted the same way as the
    7619             :      * other cases.
    7620             :      */
    7621         544 :     if (zeroed_any && ignored_any)
    7622             :     {
    7623             :         Assert(zeroed_any && ignored_any);
    7624             :         Assert(nblocks > 1); /* same block can't be both zeroed and ignored */
    7625             :         Assert(result.status != PGAIO_RS_ERROR);
    7626           8 :         affected_count = zeroed_or_error_count;
    7627             : 
    7628           8 :         ereport(elevel,
    7629             :                 errcode(ERRCODE_DATA_CORRUPTED),
    7630             :                 errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation \"%s\"",
    7631             :                        affected_count, checkfail_count, first, last, rpath.str),
    7632             :                 affected_count > 1 ?
    7633             :                 errdetail("Block %u held the first zeroed page.",
    7634             :                           first + first_off) : 0,
    7635             :                 errhint_plural("See server log for details about the other %d invalid block.",
    7636             :                                "See server log for details about the other %d invalid blocks.",
    7637             :                                affected_count + checkfail_count - 1,
    7638             :                                affected_count + checkfail_count - 1));
    7639           8 :         return;
    7640             :     }
    7641             : 
    7642             :     /*
    7643             :      * The other messages are highly repetitive. To avoid duplicating a long
    7644             :      * and complicated ereport(), gather the translated format strings
    7645             :      * separately and then do one common ereport.
    7646             :      */
    7647         536 :     if (result.status == PGAIO_RS_ERROR)
    7648             :     {
    7649             :         Assert(!zeroed_any);    /* can't have invalid pages when zeroing them */
    7650         272 :         affected_count = zeroed_or_error_count;
    7651         272 :         msg_one = _("invalid page in block %u of relation \"%s\"");
    7652         272 :         msg_mult = _("%u invalid pages among blocks %u..%u of relation \"%s\"");
    7653         272 :         det_mult = _("Block %u held the first invalid page.");
    7654         272 :         hint_mult = _("See server log for the other %u invalid block(s).");
    7655             :     }
    7656         264 :     else if (zeroed_any && !ignored_any)
    7657             :     {
    7658         216 :         affected_count = zeroed_or_error_count;
    7659         216 :         msg_one = _("invalid page in block %u of relation \"%s\"; zeroing out page");
    7660         216 :         msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation \"%s\"");
    7661         216 :         det_mult = _("Block %u held the first zeroed page.");
    7662         216 :         hint_mult = _("See server log for the other %u zeroed block(s).");
    7663             :     }
    7664          48 :     else if (!zeroed_any && ignored_any)
    7665             :     {
    7666          48 :         affected_count = checkfail_count;
    7667          48 :         msg_one = _("ignoring checksum failure in block %u of relation \"%s\"");
    7668          48 :         msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation \"%s\"");
    7669          48 :         det_mult = _("Block %u held the first ignored page.");
    7670          48 :         hint_mult = _("See server log for the other %u ignored block(s).");
    7671             :     }
    7672             :     else
    7673           0 :         pg_unreachable();
    7674             : 
    7675         536 :     ereport(elevel,
    7676             :             errcode(ERRCODE_DATA_CORRUPTED),
    7677             :             affected_count == 1 ?
    7678             :             errmsg_internal(msg_one, first + first_off, rpath.str) :
    7679             :             errmsg_internal(msg_mult, affected_count, first, last, rpath.str),
    7680             :             affected_count > 1 ? errdetail_internal(det_mult, first + first_off) : 0,
    7681             :             affected_count > 1 ? errhint_internal(hint_mult, affected_count - 1) : 0);
    7682             : }
    7683             : 
    7684             : static void
    7685     2572676 : shared_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
    7686             : {
    7687     2572676 :     buffer_stage_common(ioh, false, false);
    7688     2572676 : }
    7689             : 
    7690             : static PgAioResult
    7691     2362502 : shared_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
    7692             :                              uint8 cb_data)
    7693             : {
    7694     2362502 :     return buffer_readv_complete(ioh, prior_result, cb_data, false);
    7695             : }
    7696             : 
    7697             : /*
    7698             :  * We need a backend-local completion callback for shared buffers, to be able
    7699             :  * to report checksum errors correctly. Unfortunately that can only safely
    7700             :  * happen if the reporting backend has previously called
    7701             :  * pgstat_prepare_report_checksum_failure(), which we can only guarantee in
    7702             :  * the backend that started the IO. Hence this callback.
    7703             :  */
    7704             : static PgAioResult
    7705     2572676 : shared_buffer_readv_complete_local(PgAioHandle *ioh, PgAioResult prior_result,
    7706             :                                    uint8 cb_data)
    7707             : {
    7708             :     bool        zeroed_any,
    7709             :                 ignored_any;
    7710             :     uint8       zeroed_or_error_count,
    7711             :                 checkfail_count,
    7712             :                 first_off;
    7713             : 
    7714     2572676 :     if (prior_result.status == PGAIO_RS_OK)
    7715     2572522 :         return prior_result;
    7716             : 
    7717         154 :     buffer_readv_decode_error(prior_result,
    7718             :                               &zeroed_any,
    7719             :                               &ignored_any,
    7720             :                               &zeroed_or_error_count,
    7721             :                               &checkfail_count,
    7722             :                               &first_off);
    7723             : 
    7724         154 :     if (checkfail_count)
    7725             :     {
    7726          48 :         PgAioTargetData *td = pgaio_io_get_target_data(ioh);
    7727             : 
    7728          48 :         pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
    7729             :                                               checkfail_count);
    7730             :     }
    7731             : 
    7732         154 :     return prior_result;
    7733             : }
    7734             : 
    7735             : static void
    7736        3614 : local_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
    7737             : {
    7738        3614 :     buffer_stage_common(ioh, false, true);
    7739        3614 : }
    7740             : 
    7741             : static PgAioResult
    7742        3614 : local_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
    7743             :                             uint8 cb_data)
    7744             : {
    7745        3614 :     return buffer_readv_complete(ioh, prior_result, cb_data, true);
    7746             : }
    7747             : 
    7748             : /* readv callback is passed READ_BUFFERS_* flags as callback data */
    7749             : const PgAioHandleCallbacks aio_shared_buffer_readv_cb = {
    7750             :     .stage = shared_buffer_readv_stage,
    7751             :     .complete_shared = shared_buffer_readv_complete,
    7752             :     /* need a local callback to report checksum failures */
    7753             :     .complete_local = shared_buffer_readv_complete_local,
    7754             :     .report = buffer_readv_report,
    7755             : };
    7756             : 
    7757             : /* readv callback is passed READ_BUFFERS_* flags as callback data */
    7758             : const PgAioHandleCallbacks aio_local_buffer_readv_cb = {
    7759             :     .stage = local_buffer_readv_stage,
    7760             : 
    7761             :     /*
    7762             :      * Note that this, in contrast to the shared_buffers case, uses
    7763             :      * complete_local, as only the issuing backend has access to the required
    7764             :      * datastructures. This is important in case the IO completion may be
    7765             :      * consumed incidentally by another backend.
    7766             :      */
    7767             :     .complete_local = local_buffer_readv_complete,
    7768             :     .report = buffer_readv_report,
    7769             : };

Generated by: LCOV version 1.16