LCOV - code coverage report
Current view: top level - src/backend/access/heap - heapam.c (source / functions)
Test: PostgreSQL 19devel        Date: 2026-02-02 02:17:09
Lines:     2509 / 2735 hit   (91.7 %)
Functions: 82 / 82 hit       (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * heapam.c
       4             :  *    heap access method code
       5             :  *
       6             :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/heap/heapam.c
      12             :  *
      13             :  *
      14             :  * INTERFACE ROUTINES
      15             :  *      heap_beginscan  - begin relation scan
      16             :  *      heap_rescan     - restart a relation scan
      17             :  *      heap_endscan    - end relation scan
      18             :  *      heap_getnext    - retrieve next tuple in scan
      19             :  *      heap_fetch      - retrieve tuple with given tid
      20             :  *      heap_insert     - insert tuple into a relation
      21             :  *      heap_multi_insert - insert multiple tuples into a relation
      22             :  *      heap_delete     - delete a tuple from a relation
      23             :  *      heap_update     - replace a tuple in a relation with another tuple
      24             :  *
      25             :  * NOTES
      26             :  *    This file contains the heap_ routines which implement
      27             :  *    the POSTGRES heap access method used for all POSTGRES
      28             :  *    relations.
      29             :  *
      30             :  *-------------------------------------------------------------------------
      31             :  */
      32             : #include "postgres.h"
      33             : 
      34             : #include "access/heapam.h"
      35             : #include "access/heaptoast.h"
      36             : #include "access/hio.h"
      37             : #include "access/multixact.h"
      38             : #include "access/subtrans.h"
      39             : #include "access/syncscan.h"
      40             : #include "access/valid.h"
      41             : #include "access/visibilitymap.h"
      42             : #include "access/xloginsert.h"
      43             : #include "catalog/pg_database.h"
      44             : #include "catalog/pg_database_d.h"
      45             : #include "commands/vacuum.h"
      46             : #include "pgstat.h"
      47             : #include "port/pg_bitutils.h"
      48             : #include "storage/lmgr.h"
      49             : #include "storage/predicate.h"
      50             : #include "storage/procarray.h"
      51             : #include "utils/datum.h"
      52             : #include "utils/injection_point.h"
      53             : #include "utils/inval.h"
      54             : #include "utils/spccache.h"
      55             : #include "utils/syscache.h"
      56             : 
      57             : 
      58             : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
      59             :                                      TransactionId xid, CommandId cid, int options);
      60             : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
      61             :                                   Buffer newbuf, HeapTuple oldtup,
      62             :                                   HeapTuple newtup, HeapTuple old_key_tuple,
      63             :                                   bool all_visible_cleared, bool new_all_visible_cleared);
      64             : #ifdef USE_ASSERT_CHECKING
      65             : static void check_lock_if_inplace_updateable_rel(Relation relation,
      66             :                                                  const ItemPointerData *otid,
      67             :                                                  HeapTuple newtup);
      68             : static void check_inplace_rel_lock(HeapTuple oldtup);
      69             : #endif
      70             : static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
      71             :                                            Bitmapset *interesting_cols,
      72             :                                            Bitmapset *external_cols,
      73             :                                            HeapTuple oldtup, HeapTuple newtup,
      74             :                                            bool *has_external);
      75             : static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
      76             :                                  LockTupleMode mode, LockWaitPolicy wait_policy,
      77             :                                  bool *have_tuple_lock);
      78             : static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
      79             :                                                    BlockNumber block,
      80             :                                                    ScanDirection dir);
      81             : static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
      82             :                                                         ScanDirection dir);
      83             : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
      84             :                                       uint16 old_infomask2, TransactionId add_to_xmax,
      85             :                                       LockTupleMode mode, bool is_update,
      86             :                                       TransactionId *result_xmax, uint16 *result_infomask,
      87             :                                       uint16 *result_infomask2);
      88             : static TM_Result heap_lock_updated_tuple(Relation rel,
      89             :                                          uint16 prior_infomask,
      90             :                                          TransactionId prior_raw_xmax,
      91             :                                          const ItemPointerData *prior_ctid,
      92             :                                          TransactionId xid,
      93             :                                          LockTupleMode mode);
      94             : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
      95             :                                    uint16 *new_infomask2);
      96             : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
      97             :                                              uint16 t_infomask);
      98             : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
      99             :                                     LockTupleMode lockmode, bool *current_is_member);
     100             : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
     101             :                             Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
     102             :                             int *remaining);
     103             : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
     104             :                                        uint16 infomask, Relation rel, int *remaining,
     105             :                                        bool logLockFailure);
     106             : static void index_delete_sort(TM_IndexDeleteOp *delstate);
     107             : static int  bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
     108             : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
     109             : static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
     110             :                                         bool *copy);
     111             : 
     112             : 
     113             : /*
     114             :  * This table lists the heavyweight lock mode that corresponds to each tuple
     115             :  * lock mode, as well as one or two corresponding MultiXactStatus values:
     116             :  * .lockstatus to merely lock tuples, and .updstatus to update them.  The
     117             :  * latter is set to -1 if the corresponding tuple lock mode does not allow
     118             :  * updating tuples -- see get_mxact_status_for_lock().
     119             :  *
     120             :  * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
     121             :  *
     122             :  * Don't look at lockstatus/updstatus directly!  Use get_mxact_status_for_lock
     123             :  * instead.
     124             :  */
     125             : static const struct
     126             : {
     127             :     LOCKMODE    hwlock;
     128             :     int         lockstatus;
     129             :     int         updstatus;
     130             : }           tupleLockExtraInfo[] =
     131             : 
     132             : {
     133             :     [LockTupleKeyShare] = {
     134             :         .hwlock = AccessShareLock,
     135             :         .lockstatus = MultiXactStatusForKeyShare,
     136             :         /* KeyShare does not allow updating tuples */
     137             :         .updstatus = -1
     138             :     },
     139             :     [LockTupleShare] = {
     140             :         .hwlock = RowShareLock,
     141             :         .lockstatus = MultiXactStatusForShare,
     142             :         /* Share does not allow updating tuples */
     143             :         .updstatus = -1
     144             :     },
     145             :     [LockTupleNoKeyExclusive] = {
     146             :         .hwlock = ExclusiveLock,
     147             :         .lockstatus = MultiXactStatusForNoKeyUpdate,
     148             :         .updstatus = MultiXactStatusNoKeyUpdate
     149             :     },
     150             :     [LockTupleExclusive] = {
     151             :         .hwlock = AccessExclusiveLock,
     152             :         .lockstatus = MultiXactStatusForUpdate,
     153             :         .updstatus = MultiXactStatusUpdate
     154             :     }
     155             : };
     156             : 
     157             : /* Get the LOCKMODE for a given MultiXactStatus */
     158             : #define LOCKMODE_from_mxstatus(status) \
     159             :             (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
     160             : 
     161             : /*
     162             :  * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
     163             :  * This is more readable than having every caller translate it to lock.h's
     164             :  * LOCKMODE.
     165             :  */
     166             : #define LockTupleTuplock(rel, tup, mode) \
     167             :     LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     168             : #define UnlockTupleTuplock(rel, tup, mode) \
     169             :     UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     170             : #define ConditionalLockTupleTuplock(rel, tup, mode, log) \
     171             :     ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
     172             : 
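
The table and macros above use a common C idiom: a constant lookup table indexed directly by an enum, filled in with C99 designated initializers so each slot stays attached to its enum value even if the enum is reordered, and wrapped in small macros so callers never index the table by hand. A minimal standalone sketch of that idiom follows; the enum and lock names are invented for illustration and are not PostgreSQL's definitions.

#include <stdio.h>

/* Hypothetical tuple-lock and heavyweight-lock modes, for illustration only. */
typedef enum { TUPLE_KEYSHARE, TUPLE_SHARE, TUPLE_NOKEY_EXCL, TUPLE_EXCL } TupleLockMode;
typedef enum { HW_ACCESS_SHARE, HW_ROW_SHARE, HW_EXCLUSIVE, HW_ACCESS_EXCLUSIVE } HwLockMode;

/* Designated initializers keep each entry tied to its enum value. */
static const HwLockMode tuple_to_hwlock[] = {
    [TUPLE_KEYSHARE]   = HW_ACCESS_SHARE,
    [TUPLE_SHARE]      = HW_ROW_SHARE,
    [TUPLE_NOKEY_EXCL] = HW_EXCLUSIVE,
    [TUPLE_EXCL]       = HW_ACCESS_EXCLUSIVE,
};

/* Accessor macro, analogous in spirit to LOCKMODE_from_mxstatus(). */
#define HWLOCK_FROM_TUPLELOCK(mode) (tuple_to_hwlock[(mode)])

int
main(void)
{
    printf("exclusive tuple lock -> heavyweight mode %d\n",
           HWLOCK_FROM_TUPLELOCK(TUPLE_EXCL));
    return 0;
}
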
     173             : #ifdef USE_PREFETCH
     174             : /*
     175             :  * heap_index_delete_tuples and index_delete_prefetch_buffer use this
     176             :  * structure to coordinate prefetching activity
     177             :  */
     178             : typedef struct
     179             : {
     180             :     BlockNumber cur_hblkno;
     181             :     int         next_item;
     182             :     int         ndeltids;
     183             :     TM_IndexDelete *deltids;
     184             : } IndexDeletePrefetchState;
     185             : #endif
     186             : 
     187             : /* heap_index_delete_tuples bottom-up index deletion costing constants */
     188             : #define BOTTOMUP_MAX_NBLOCKS            6
     189             : #define BOTTOMUP_TOLERANCE_NBLOCKS      3
     190             : 
     191             : /*
     192             :  * heap_index_delete_tuples uses this when determining which heap blocks it
     193             :  * must visit to help its bottom-up index deletion caller
     194             :  */
     195             : typedef struct IndexDeleteCounts
     196             : {
     197             :     int16       npromisingtids; /* Number of "promising" TIDs in group */
     198             :     int16       ntids;          /* Number of TIDs in group */
     199             :     int16       ifirsttid;      /* Offset to group's first deltid */
     200             : } IndexDeleteCounts;
     201             : 
     202             : /*
      203             :  * This table gives the tuple lock strength value that corresponds to each
      204             :  * MultiXactStatus value.
     205             :  */
     206             : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
     207             : {
     208             :     LockTupleKeyShare,          /* ForKeyShare */
     209             :     LockTupleShare,             /* ForShare */
     210             :     LockTupleNoKeyExclusive,    /* ForNoKeyUpdate */
     211             :     LockTupleExclusive,         /* ForUpdate */
     212             :     LockTupleNoKeyExclusive,    /* NoKeyUpdate */
     213             :     LockTupleExclusive          /* Update */
     214             : };
     215             : 
     216             : /* Get the LockTupleMode for a given MultiXactStatus */
     217             : #define TUPLOCK_from_mxstatus(status) \
     218             :             (MultiXactStatusLock[(status)])
     219             : 
     220             : /*
     221             :  * Check that we have a valid snapshot if we might need TOAST access.
     222             :  */
     223             : static inline void
     224    21193444 : AssertHasSnapshotForToast(Relation rel)
     225             : {
     226             : #ifdef USE_ASSERT_CHECKING
     227             : 
     228             :     /* bootstrap mode in particular breaks this rule */
     229             :     if (!IsNormalProcessingMode())
     230             :         return;
     231             : 
     232             :     /* if the relation doesn't have a TOAST table, we are good */
     233             :     if (!OidIsValid(rel->rd_rel->reltoastrelid))
     234             :         return;
     235             : 
     236             :     Assert(HaveRegisteredOrActiveSnapshot());
     237             : 
     238             : #endif                          /* USE_ASSERT_CHECKING */
     239    21193444 : }
     240             : 
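
AssertHasSnapshotForToast() is a debug-only check: its body is compiled away entirely unless USE_ASSERT_CHECKING is defined, so release builds pay nothing for it. A self-contained sketch of the same guard pattern follows; the function and the condition it checks are hypothetical, and only the USE_ASSERT_CHECKING symbol is taken from the code above.

#include <assert.h>
#include <stdio.h>

/* Debug-only precondition check, a no-op unless USE_ASSERT_CHECKING is defined. */
static inline void
assert_nonempty(const char *s)
{
    (void) s;                   /* avoid unused-parameter warnings in release builds */
#ifdef USE_ASSERT_CHECKING
    assert(s != NULL && s[0] != '\0');
#endif
}

int
main(void)
{
    assert_nonempty("heap");
    puts("precondition held (or the check was compiled out)");
    return 0;
}
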
     241             : /* ----------------------------------------------------------------
     242             :  *                       heap support routines
     243             :  * ----------------------------------------------------------------
     244             :  */
     245             : 
     246             : /*
     247             :  * Streaming read API callback for parallel sequential scans. Returns the next
     248             :  * block the caller wants from the read stream or InvalidBlockNumber when done.
     249             :  */
     250             : static BlockNumber
     251      203416 : heap_scan_stream_read_next_parallel(ReadStream *stream,
     252             :                                     void *callback_private_data,
     253             :                                     void *per_buffer_data)
     254             : {
     255      203416 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     256             : 
     257             :     Assert(ScanDirectionIsForward(scan->rs_dir));
     258             :     Assert(scan->rs_base.rs_parallel);
     259             : 
     260      203416 :     if (unlikely(!scan->rs_inited))
     261             :     {
     262             :         /* parallel scan */
     263        3292 :         table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
     264        3292 :                                                  scan->rs_parallelworkerdata,
     265        3292 :                                                  (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
     266             :                                                  scan->rs_startblock,
     267             :                                                  scan->rs_numblocks);
     268             : 
     269             :         /* may return InvalidBlockNumber if there are no more blocks */
     270        6584 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     271        3292 :                                                                     scan->rs_parallelworkerdata,
     272        3292 :                                                                     (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
     273        3292 :         scan->rs_inited = true;
     274             :     }
     275             :     else
     276             :     {
     277      200124 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     278      200124 :                                                                     scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
     279      200124 :                                                                     scan->rs_base.rs_parallel);
     280             :     }
     281             : 
     282      203416 :     return scan->rs_prefetch_block;
     283             : }
     284             : 
     285             : /*
     286             :  * Streaming read API callback for serial sequential and TID range scans.
     287             :  * Returns the next block the caller wants from the read stream or
     288             :  * InvalidBlockNumber when done.
     289             :  */
     290             : static BlockNumber
     291     7734508 : heap_scan_stream_read_next_serial(ReadStream *stream,
     292             :                                   void *callback_private_data,
     293             :                                   void *per_buffer_data)
     294             : {
     295     7734508 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     296             : 
     297     7734508 :     if (unlikely(!scan->rs_inited))
     298             :     {
     299     1983004 :         scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
     300     1983004 :         scan->rs_inited = true;
     301             :     }
     302             :     else
     303     5751504 :         scan->rs_prefetch_block = heapgettup_advance_block(scan,
     304             :                                                            scan->rs_prefetch_block,
     305             :                                                            scan->rs_dir);
     306             : 
     307     7734508 :     return scan->rs_prefetch_block;
     308             : }
     309             : 
     310             : /*
     311             :  * Read stream API callback for bitmap heap scans.
     312             :  * Returns the next block the caller wants from the read stream or
     313             :  * InvalidBlockNumber when done.
     314             :  */
     315             : static BlockNumber
     316      434088 : bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
     317             :                             void *per_buffer_data)
     318             : {
     319      434088 :     TBMIterateResult *tbmres = per_buffer_data;
     320      434088 :     BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
     321      434088 :     HeapScanDesc hscan = (HeapScanDesc) bscan;
     322      434088 :     TableScanDesc sscan = &hscan->rs_base;
     323             : 
     324             :     for (;;)
     325             :     {
     326      434088 :         CHECK_FOR_INTERRUPTS();
     327             : 
     328             :         /* no more entries in the bitmap */
     329      434088 :         if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
     330       26346 :             return InvalidBlockNumber;
     331             : 
     332             :         /*
     333             :          * Ignore any claimed entries past what we think is the end of the
     334             :          * relation. It may have been extended after the start of our scan (we
     335             :          * only hold an AccessShareLock, and it could be inserts from this
     336             :          * backend).  We don't take this optimization in SERIALIZABLE
     337             :          * isolation though, as we need to examine all invisible tuples
     338             :          * reachable by the index.
     339             :          */
     340      407742 :         if (!IsolationIsSerializable() &&
     341      407524 :             tbmres->blockno >= hscan->rs_nblocks)
     342           0 :             continue;
     343             : 
     344      407742 :         return tbmres->blockno;
     345             :     }
     346             : 
     347             :     /* not reachable */
     348             :     Assert(false);
     349             : }
     350             : 
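
The three callbacks above all implement the same pull-style contract: the read stream repeatedly asks its callback for the next block number to read ahead, and the callback answers InvalidBlockNumber once the scan is exhausted. A stripped-down, standalone sketch of that contract follows; the types and names are generic and are not the actual ReadStream API.

#include <inttypes.h>
#include <stdio.h>

#define INVALID_BLOCK UINT32_MAX

typedef uint32_t BlockNo;

/* Callback contract: return the next block to read, or INVALID_BLOCK when done. */
typedef BlockNo (*next_block_cb) (void *private_data);

/* A toy "stream" that simply pulls block numbers until the callback says stop. */
static void
drain_stream(next_block_cb cb, void *private_data)
{
    BlockNo     blk;

    while ((blk = cb(private_data)) != INVALID_BLOCK)
        printf("would read and pin block %" PRIu32 "\n", blk);
}

/* A serial-scan-like callback: walk nblocks once, then report exhaustion. */
struct serial_state
{
    BlockNo     next;
    BlockNo     nblocks;
};

static BlockNo
serial_next(void *private_data)
{
    struct serial_state *st = private_data;

    if (st->next >= st->nblocks)
        return INVALID_BLOCK;
    return st->next++;
}

int
main(void)
{
    struct serial_state st = {.next = 0, .nblocks = 4};

    drain_stream(serial_next, &st);
    return 0;
}
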
     351             : /* ----------------
     352             :  *      initscan - scan code common to heap_beginscan and heap_rescan
     353             :  * ----------------
     354             :  */
     355             : static void
     356     2032882 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
     357             : {
     358     2032882 :     ParallelBlockTableScanDesc bpscan = NULL;
     359             :     bool        allow_strat;
     360             :     bool        allow_sync;
     361             : 
     362             :     /*
     363             :      * Determine the number of blocks we have to scan.
     364             :      *
     365             :      * It is sufficient to do this once at scan start, since any tuples added
     366             :      * while the scan is in progress will be invisible to my snapshot anyway.
     367             :      * (That is not true when using a non-MVCC snapshot.  However, we couldn't
     368             :      * guarantee to return tuples added after scan start anyway, since they
     369             :      * might go into pages we already scanned.  To guarantee consistent
     370             :      * results for a non-MVCC snapshot, the caller must hold some higher-level
     371             :      * lock that ensures the interesting tuple(s) won't change.)
     372             :      */
     373     2032882 :     if (scan->rs_base.rs_parallel != NULL)
     374             :     {
     375        4470 :         bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     376        4470 :         scan->rs_nblocks = bpscan->phs_nblocks;
     377             :     }
     378             :     else
     379     2028412 :         scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
     380             : 
     381             :     /*
     382             :      * If the table is large relative to NBuffers, use a bulk-read access
     383             :      * strategy and enable synchronized scanning (see syncscan.c).  Although
     384             :      * the thresholds for these features could be different, we make them the
     385             :      * same so that there are only two behaviors to tune rather than four.
     386             :      * (However, some callers need to be able to disable one or both of these
     387             :      * behaviors, independently of the size of the table; also there is a GUC
     388             :      * variable that can disable synchronized scanning.)
     389             :      *
     390             :      * Note that table_block_parallelscan_initialize has a very similar test;
     391             :      * if you change this, consider changing that one, too.
     392             :      */
     393     2032878 :     if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
     394     2018224 :         scan->rs_nblocks > NBuffers / 4)
     395             :     {
     396       28118 :         allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
     397       28118 :         allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
     398             :     }
     399             :     else
     400     2004760 :         allow_strat = allow_sync = false;
     401             : 
     402     2032878 :     if (allow_strat)
     403             :     {
     404             :         /* During a rescan, keep the previous strategy object. */
     405       25454 :         if (scan->rs_strategy == NULL)
     406       25088 :             scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
     407             :     }
     408             :     else
     409             :     {
     410     2007424 :         if (scan->rs_strategy != NULL)
     411           0 :             FreeAccessStrategy(scan->rs_strategy);
     412     2007424 :         scan->rs_strategy = NULL;
     413             :     }
     414             : 
     415     2032878 :     if (scan->rs_base.rs_parallel != NULL)
     416             :     {
     417             :         /* For parallel scan, believe whatever ParallelTableScanDesc says. */
     418        4470 :         if (scan->rs_base.rs_parallel->phs_syncscan)
     419           4 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     420             :         else
     421        4466 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     422             : 
     423             :         /*
     424             :          * If not rescanning, initialize the startblock.  Finding the actual
     425             :          * start location is done in table_block_parallelscan_startblock_init,
     426             :          * based on whether an alternative start location has been set with
     427             :          * heap_setscanlimits, or using the syncscan location, when syncscan
     428             :          * is enabled.
     429             :          */
     430        4470 :         if (!keep_startblock)
     431        4242 :             scan->rs_startblock = InvalidBlockNumber;
     432             :     }
     433             :     else
     434             :     {
     435     2028408 :         if (keep_startblock)
     436             :         {
     437             :             /*
     438             :              * When rescanning, we want to keep the previous startblock
     439             :              * setting, so that rewinding a cursor doesn't generate surprising
     440             :              * results.  Reset the active syncscan setting, though.
     441             :              */
     442     1246526 :             if (allow_sync && synchronize_seqscans)
     443         100 :                 scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     444             :             else
     445     1246426 :                 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     446             :         }
     447      781882 :         else if (allow_sync && synchronize_seqscans)
     448             :         {
     449         144 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     450         144 :             scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
     451             :         }
     452             :         else
     453             :         {
     454      781738 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     455      781738 :             scan->rs_startblock = 0;
     456             :         }
     457             :     }
     458             : 
     459     2032878 :     scan->rs_numblocks = InvalidBlockNumber;
     460     2032878 :     scan->rs_inited = false;
     461     2032878 :     scan->rs_ctup.t_data = NULL;
     462     2032878 :     ItemPointerSetInvalid(&scan->rs_ctup.t_self);
     463     2032878 :     scan->rs_cbuf = InvalidBuffer;
     464     2032878 :     scan->rs_cblock = InvalidBlockNumber;
     465     2032878 :     scan->rs_ntuples = 0;
     466     2032878 :     scan->rs_cindex = 0;
     467             : 
     468             :     /*
     469             :      * Initialize to ForwardScanDirection because it is most common and
     470             :      * because heap scans go forward before going backward (e.g. CURSORs).
     471             :      */
     472     2032878 :     scan->rs_dir = ForwardScanDirection;
     473     2032878 :     scan->rs_prefetch_block = InvalidBlockNumber;
     474             : 
     475             :     /* page-at-a-time fields are always invalid when not rs_inited */
     476             : 
     477             :     /*
     478             :      * copy the scan key, if appropriate
     479             :      */
     480     2032878 :     if (key != NULL && scan->rs_base.rs_nkeys > 0)
     481      450836 :         memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
     482             : 
     483             :     /*
     484             :      * Currently, we only have a stats counter for sequential heap scans (but
      485             :      * e.g. for bitmap scans the underlying bitmap index scans will be counted,
     486             :      * and for sample scans we update stats for tuple fetches).
     487             :      */
     488     2032878 :     if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
     489     1985534 :         pgstat_count_heap_scan(scan->rs_base.rs_rd);
     490     2032878 : }
     491             : 
     492             : /*
     493             :  * heap_setscanlimits - restrict range of a heapscan
     494             :  *
     495             :  * startBlk is the page to start at
     496             :  * numBlks is number of pages to scan (InvalidBlockNumber means "all")
     497             :  */
     498             : void
     499        5716 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
     500             : {
     501        5716 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     502             : 
     503             :     Assert(!scan->rs_inited);    /* else too late to change */
     504             :     /* else rs_startblock is significant */
     505             :     Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
     506             : 
     507             :     /* Check startBlk is valid (but allow case of zero blocks...) */
     508             :     Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
     509             : 
     510        5716 :     scan->rs_startblock = startBlk;
     511        5716 :     scan->rs_numblocks = numBlks;
     512        5716 : }
     513             : 
     514             : /*
     515             :  * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
     516             :  * multiple times, with constant arguments for all_visible,
     517             :  * check_serializable.
     518             :  */
     519             : pg_attribute_always_inline
     520             : static int
     521     5730760 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
     522             :                     Page page, Buffer buffer,
     523             :                     BlockNumber block, int lines,
     524             :                     bool all_visible, bool check_serializable)
     525             : {
     526     5730760 :     Oid         relid = RelationGetRelid(scan->rs_base.rs_rd);
     527     5730760 :     int         ntup = 0;
     528     5730760 :     int         nvis = 0;
     529             :     BatchMVCCState batchmvcc;
     530             : 
     531             :     /* page at a time should have been disabled otherwise */
     532             :     Assert(IsMVCCSnapshot(snapshot));
     533             : 
     534             :     /* first find all tuples on the page */
     535   289765424 :     for (OffsetNumber lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
     536             :     {
     537   284034664 :         ItemId      lpp = PageGetItemId(page, lineoff);
     538             :         HeapTuple   tup;
     539             : 
     540   284034664 :         if (unlikely(!ItemIdIsNormal(lpp)))
     541    63813276 :             continue;
     542             : 
     543             :         /*
     544             :          * If the page is not all-visible or we need to check serializability,
     545             :          * maintain enough state to be able to refind the tuple efficiently,
      546             :          * without first having to re-fetch the item and, through it, the
      547             :          * tuple.
     548             :          */
     549   220221388 :         if (!all_visible || check_serializable)
     550             :         {
     551   134369774 :             tup = &batchmvcc.tuples[ntup];
     552             : 
     553   134369774 :             tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
     554   134369774 :             tup->t_len = ItemIdGetLength(lpp);
     555   134369774 :             tup->t_tableOid = relid;
     556   134369774 :             ItemPointerSet(&(tup->t_self), block, lineoff);
     557             :         }
     558             : 
     559             :         /*
      560             :          * If the page is all visible, these fields won't otherwise be
      561             :          * populated in the loop below.
     562             :          */
     563   220221388 :         if (all_visible)
     564             :         {
     565    85851614 :             if (check_serializable)
     566             :             {
     567           0 :                 batchmvcc.visible[ntup] = true;
     568             :             }
     569    85851614 :             scan->rs_vistuples[ntup] = lineoff;
     570             :         }
     571             : 
     572   220221388 :         ntup++;
     573             :     }
     574             : 
     575             :     Assert(ntup <= MaxHeapTuplesPerPage);
     576             : 
     577             :     /*
      578             :      * Unless the page is all visible, test visibility for all tuples in one go.
     579             :      * That is considerably more efficient than calling
     580             :      * HeapTupleSatisfiesMVCC() one-by-one.
     581             :      */
     582     5730760 :     if (all_visible)
     583     2025226 :         nvis = ntup;
     584             :     else
     585     3705534 :         nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
     586             :                                            ntup,
     587             :                                            &batchmvcc,
     588     3705534 :                                            scan->rs_vistuples);
     589             : 
     590             :     /*
      591             :      * So far we don't have a batch API for testing serializability, so check
      592             :      * the tuples one-by-one.
     593             :      */
     594     5730760 :     if (check_serializable)
     595             :     {
     596        4090 :         for (int i = 0; i < ntup; i++)
     597             :         {
     598        2842 :             HeapCheckForSerializableConflictOut(batchmvcc.visible[i],
     599             :                                                 scan->rs_base.rs_rd,
     600             :                                                 &batchmvcc.tuples[i],
     601             :                                                 buffer, snapshot);
     602             :         }
     603             :     }
     604             : 
     605     5730744 :     return nvis;
     606             : }
     607             : 
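
HeapTupleSatisfiesMVCCBatch(), as called above, follows a batch-filter convention: test the whole batch in one call, write the positions of the entries that passed into a caller-supplied array (rs_vistuples here), and return how many passed. A self-contained sketch of that calling convention follows, using a made-up "visibility" rule rather than real MVCC checks.

#include <stdio.h>

/* Batch-test "visibility" (here simply: the value is non-negative), writing
 * the offsets of passing entries to vis_offsets[] and returning the count.
 * This mirrors only the calling convention, not PostgreSQL's MVCC logic. */
static int
satisfies_batch(const int *vals, int n, int *vis_offsets)
{
    int         nvis = 0;

    for (int i = 0; i < n; i++)
    {
        if (vals[i] >= 0)
            vis_offsets[nvis++] = i;
    }
    return nvis;
}

int
main(void)
{
    int         vals[] = {5, -1, 0, 9};
    int         vis[4];
    int         nvis = satisfies_batch(vals, 4, vis);

    for (int i = 0; i < nvis; i++)
        printf("visible entry at offset %d\n", vis[i]);
    return 0;
}
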
     608             : /*
     609             :  * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
     610             :  *
      611             :  * Preparation currently consists of 1. pruning the scan's rs_cbuf page, and 2.
      612             :  * filling the rs_vistuples[] array with the OffsetNumbers of visible tuples.
     613             :  */
     614             : void
     615     5730760 : heap_prepare_pagescan(TableScanDesc sscan)
     616             : {
     617     5730760 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     618     5730760 :     Buffer      buffer = scan->rs_cbuf;
     619     5730760 :     BlockNumber block = scan->rs_cblock;
     620             :     Snapshot    snapshot;
     621             :     Page        page;
     622             :     int         lines;
     623             :     bool        all_visible;
     624             :     bool        check_serializable;
     625             : 
     626             :     Assert(BufferGetBlockNumber(buffer) == block);
     627             : 
     628             :     /* ensure we're not accidentally being used when not in pagemode */
     629             :     Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
     630     5730760 :     snapshot = scan->rs_base.rs_snapshot;
     631             : 
     632             :     /*
     633             :      * Prune and repair fragmentation for the whole page, if possible.
     634             :      */
     635     5730760 :     heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
     636             : 
     637             :     /*
     638             :      * We must hold share lock on the buffer content while examining tuple
     639             :      * visibility.  Afterwards, however, the tuples we have found to be
     640             :      * visible are guaranteed good as long as we hold the buffer pin.
     641             :      */
     642     5730760 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
     643             : 
     644     5730760 :     page = BufferGetPage(buffer);
     645     5730760 :     lines = PageGetMaxOffsetNumber(page);
     646             : 
     647             :     /*
     648             :      * If the all-visible flag indicates that all tuples on the page are
     649             :      * visible to everyone, we can skip the per-tuple visibility tests.
     650             :      *
     651             :      * Note: In hot standby, a tuple that's already visible to all
     652             :      * transactions on the primary might still be invisible to a read-only
     653             :      * transaction in the standby. We partly handle this problem by tracking
     654             :      * the minimum xmin of visible tuples as the cut-off XID while marking a
     655             :      * page all-visible on the primary and WAL log that along with the
     656             :      * visibility map SET operation. In hot standby, we wait for (or abort)
      657             :      * all transactions that might potentially not see one or more tuples on
     658             :      * the page. That's how index-only scans work fine in hot standby. A
     659             :      * crucial difference between index-only scans and heap scans is that the
      660             :      * index-only scan completely relies on the visibility map whereas heap
     661             :      * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
     662             :      * the page-level flag can be trusted in the same way, because it might
     663             :      * get propagated somehow without being explicitly WAL-logged, e.g. via a
     664             :      * full page write. Until we can prove that beyond doubt, let's check each
     665             :      * tuple for visibility the hard way.
     666             :      */
     667     5730760 :     all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
     668             :     check_serializable =
     669     5730760 :         CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
     670             : 
     671             :     /*
     672             :      * We call page_collect_tuples() with constant arguments, to get the
     673             :      * compiler to constant fold the constant arguments. Separate calls with
     674             :      * constant arguments, rather than variables, are needed on several
     675             :      * compilers to actually perform constant folding.
     676             :      */
     677     5730760 :     if (likely(all_visible))
     678             :     {
     679     2025226 :         if (likely(!check_serializable))
     680     2025226 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     681             :                                                    block, lines, true, false);
     682             :         else
     683           0 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     684             :                                                    block, lines, true, true);
     685             :     }
     686             :     else
     687             :     {
     688     3705534 :         if (likely(!check_serializable))
     689     3704270 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     690             :                                                    block, lines, false, false);
     691             :         else
     692        1264 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     693             :                                                    block, lines, false, true);
     694             :     }
     695             : 
     696     5730744 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     697     5730744 : }
     698             : 
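
The constant-argument arrangement described in the comments above is a deliberate specialization trick: page_collect_tuples() is pg_attribute_always_inline, and heap_prepare_pagescan() calls it from four separate call sites with literal true/false arguments, so the compiler can constant-fold the all_visible and check_serializable branches and emit a specialized loop for each combination. A standalone sketch of the same pattern follows, using plain static inline and hypothetical flags.

#include <stdbool.h>
#include <stdio.h>

/* Helper whose inner branches depend on two flags.  When every call site
 * passes literal constants, the compiler can fold the branches away and emit
 * a specialized copy per call site.  Hypothetical example, not PostgreSQL code. */
static inline int
collect(const int *vals, int n, bool only_positive, bool count_zeroes)
{
    int         kept = 0;

    for (int i = 0; i < n; i++)
    {
        if (only_positive && vals[i] <= 0)
            continue;
        if (!count_zeroes && vals[i] == 0)
            continue;
        kept++;
    }
    return kept;
}

int
main(void)
{
    int         vals[] = {3, 0, -2, 7};

    /* separate calls with constant arguments, mirroring the pattern above */
    printf("%d %d\n",
           collect(vals, 4, true, false),
           collect(vals, 4, false, true));
    return 0;
}
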
     699             : /*
     700             :  * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
     701             :  *
     702             :  * Read the next block of the scan relation from the read stream and save it
     703             :  * in the scan descriptor.  It is already pinned.
     704             :  */
     705             : static inline void
     706     7561116 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
     707             : {
     708             :     Assert(scan->rs_read_stream);
     709             : 
     710             :     /* release previous scan buffer, if any */
     711     7561116 :     if (BufferIsValid(scan->rs_cbuf))
     712             :     {
     713     5574818 :         ReleaseBuffer(scan->rs_cbuf);
     714     5574818 :         scan->rs_cbuf = InvalidBuffer;
     715             :     }
     716             : 
     717             :     /*
     718             :      * Be sure to check for interrupts at least once per page.  Checks at
     719             :      * higher code levels won't be able to stop a seqscan that encounters many
     720             :      * pages' worth of consecutive dead tuples.
     721             :      */
     722     7561116 :     CHECK_FOR_INTERRUPTS();
     723             : 
     724             :     /*
     725             :      * If the scan direction is changing, reset the prefetch block to the
     726             :      * current block. Otherwise, we will incorrectly prefetch the blocks
     727             :      * between the prefetch block and the current block again before
     728             :      * prefetching blocks in the new, correct scan direction.
     729             :      */
     730     7561114 :     if (unlikely(scan->rs_dir != dir))
     731             :     {
     732         154 :         scan->rs_prefetch_block = scan->rs_cblock;
     733         154 :         read_stream_reset(scan->rs_read_stream);
     734             :     }
     735             : 
     736     7561114 :     scan->rs_dir = dir;
     737             : 
     738     7561114 :     scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
     739     7561064 :     if (BufferIsValid(scan->rs_cbuf))
     740     5907476 :         scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
     741     7561064 : }
     742             : 
     743             : /*
     744             :  * heapgettup_initial_block - return the first BlockNumber to scan
     745             :  *
     746             :  * Returns InvalidBlockNumber when there are no blocks to scan.  This can
     747             :  * occur with empty tables and in parallel scans when parallel workers get all
      748             :  * of the pages before we get a chance to claim our first page.
     749             :  */
     750             : static pg_noinline BlockNumber
     751     1983004 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
     752             : {
     753             :     Assert(!scan->rs_inited);
     754             :     Assert(scan->rs_base.rs_parallel == NULL);
     755             : 
     756             :     /* When there are no pages to scan, return InvalidBlockNumber */
     757     1983004 :     if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     758     1009246 :         return InvalidBlockNumber;
     759             : 
     760      973758 :     if (ScanDirectionIsForward(dir))
     761             :     {
     762      973694 :         return scan->rs_startblock;
     763             :     }
     764             :     else
     765             :     {
     766             :         /*
     767             :          * Disable reporting to syncscan logic in a backwards scan; it's not
     768             :          * very likely anyone else is doing the same thing at the same time,
     769             :          * and much more likely that we'll just bollix things for forward
     770             :          * scanners.
     771             :          */
     772          64 :         scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     773             : 
     774             :         /*
     775             :          * Start from last page of the scan.  Ensure we take into account
     776             :          * rs_numblocks if it's been adjusted by heap_setscanlimits().
     777             :          */
     778          64 :         if (scan->rs_numblocks != InvalidBlockNumber)
     779           6 :             return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
     780             : 
     781          58 :         if (scan->rs_startblock > 0)
     782           0 :             return scan->rs_startblock - 1;
     783             : 
     784          58 :         return scan->rs_nblocks - 1;
     785             :     }
     786             : }
     787             : 
     788             : 
     789             : /*
     790             :  * heapgettup_start_page - helper function for heapgettup()
     791             :  *
     792             :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     793             :  * to the number of tuples on this page.  Also set *lineoff to the first
      794             :  * offset to scan: forward scans get the first offset and backward scans get
      795             :  * the final offset on the page.
     796             :  */
     797             : static Page
     798      185270 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     799             :                       OffsetNumber *lineoff)
     800             : {
     801             :     Page        page;
     802             : 
     803             :     Assert(scan->rs_inited);
     804             :     Assert(BufferIsValid(scan->rs_cbuf));
     805             : 
     806             :     /* Caller is responsible for ensuring buffer is locked if needed */
     807      185270 :     page = BufferGetPage(scan->rs_cbuf);
     808             : 
     809      185270 :     *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
     810             : 
     811      185270 :     if (ScanDirectionIsForward(dir))
     812      185270 :         *lineoff = FirstOffsetNumber;
     813             :     else
     814           0 :         *lineoff = (OffsetNumber) (*linesleft);
     815             : 
     816             :     /* lineoff now references the physically previous or next tid */
     817      185270 :     return page;
     818             : }
     819             : 
     820             : 
     821             : /*
     822             :  * heapgettup_continue_page - helper function for heapgettup()
     823             :  *
     824             :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     825             :  * to the number of tuples left to scan on this page.  Also set *lineoff to
     826             :  * the next offset to scan according to the ScanDirection in 'dir'.
     827             :  */
     828             : static inline Page
     829    14965950 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     830             :                          OffsetNumber *lineoff)
     831             : {
     832             :     Page        page;
     833             : 
     834             :     Assert(scan->rs_inited);
     835             :     Assert(BufferIsValid(scan->rs_cbuf));
     836             : 
     837             :     /* Caller is responsible for ensuring buffer is locked if needed */
     838    14965950 :     page = BufferGetPage(scan->rs_cbuf);
     839             : 
     840    14965950 :     if (ScanDirectionIsForward(dir))
     841             :     {
     842    14965950 :         *lineoff = OffsetNumberNext(scan->rs_coffset);
     843    14965950 :         *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
     844             :     }
     845             :     else
     846             :     {
     847             :         /*
      848             :          * The previously returned tuple may have been vacuumed since the
     849             :          * previous scan when we use a non-MVCC snapshot, so we must
     850             :          * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
     851             :          */
     852           0 :         *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
     853           0 :         *linesleft = *lineoff;
     854             :     }
     855             : 
     856             :     /* lineoff now references the physically previous or next tid */
     857    14965950 :     return page;
     858             : }
     859             : 
     860             : /*
     861             :  * heapgettup_advance_block - helper for heap_fetch_next_buffer()
     862             :  *
     863             :  * Given the current block number, the scan direction, and various information
     864             :  * contained in the scan descriptor, calculate the BlockNumber to scan next
     865             :  * and return it.  If there are no further blocks to scan, return
     866             :  * InvalidBlockNumber to indicate this fact to the caller.
     867             :  *
     868             :  * This should not be called to determine the initial block number -- only for
     869             :  * subsequent blocks.
     870             :  *
     871             :  * This also adjusts rs_numblocks when a limit has been imposed by
     872             :  * heap_setscanlimits().
     873             :  */
     874             : static inline BlockNumber
     875     5751504 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
     876             : {
     877             :     Assert(scan->rs_base.rs_parallel == NULL);
     878             : 
     879     5751504 :     if (likely(ScanDirectionIsForward(dir)))
     880             :     {
     881     5751386 :         block++;
     882             : 
     883             :         /* wrap back to the start of the heap */
     884     5751386 :         if (block >= scan->rs_nblocks)
     885      767878 :             block = 0;
     886             : 
     887             :         /*
     888             :          * Report our new scan position for synchronization purposes. We don't
     889             :          * do that when moving backwards, however. That would just mess up any
     890             :          * other forward-moving scanners.
     891             :          *
     892             :          * Note: we do this before checking for end of scan so that the final
     893             :          * state of the position hint is back at the start of the rel.  That's
     894             :          * not strictly necessary, but otherwise when you run the same query
     895             :          * multiple times the starting position would shift a little bit
     896             :          * backwards on every invocation, which is confusing. We don't
     897             :          * guarantee any specific ordering in general, though.
     898             :          */
     899     5751386 :         if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
     900       22530 :             ss_report_location(scan->rs_base.rs_rd, block);
     901             : 
     902             :         /* we're done if we're back at where we started */
     903     5751386 :         if (block == scan->rs_startblock)
     904      767796 :             return InvalidBlockNumber;
     905             : 
     906             :         /* check if the limit imposed by heap_setscanlimits() is met */
     907     4983590 :         if (scan->rs_numblocks != InvalidBlockNumber)
     908             :         {
     909        4968 :             if (--scan->rs_numblocks == 0)
     910        3092 :                 return InvalidBlockNumber;
     911             :         }
     912             : 
     913     4980498 :         return block;
     914             :     }
     915             :     else
     916             :     {
     917             :         /* we're done if the last block is the start position */
     918         118 :         if (block == scan->rs_startblock)
     919         118 :             return InvalidBlockNumber;
     920             : 
     921             :         /* check if the limit imposed by heap_setscanlimits() is met */
     922           0 :         if (scan->rs_numblocks != InvalidBlockNumber)
     923             :         {
     924           0 :             if (--scan->rs_numblocks == 0)
     925           0 :                 return InvalidBlockNumber;
     926             :         }
     927             : 
     928             :         /* wrap to the end of the heap when the last page was page 0 */
     929           0 :         if (block == 0)
     930           0 :             block = scan->rs_nblocks;
     931             : 
     932           0 :         block--;
     933             : 
     934           0 :         return block;
     935             :     }
     936             : }
     937             : 
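
The forward branch of heapgettup_advance_block() walks the relation circularly: it wraps past the last block back to block 0 and stops once it arrives back at the (possibly syncscan-chosen) start block. A standalone sketch of that wrap-around walk follows, ignoring the heap_setscanlimits() and syncscan-reporting bookkeeping.

#include <inttypes.h>
#include <stdio.h>

#define INVALID_BLOCK UINT32_MAX

/* Advance one block forward with wraparound; return INVALID_BLOCK once the
 * walk is back at the block it started from.  Generic re-implementation of
 * the forward case above, for illustration only. */
static uint32_t
advance_block(uint32_t block, uint32_t startblock, uint32_t nblocks)
{
    block++;
    if (block >= nblocks)
        block = 0;              /* wrap back to the start of the heap */
    if (block == startblock)
        return INVALID_BLOCK;   /* done: back where the scan began */
    return block;
}

int
main(void)
{
    uint32_t    nblocks = 5;
    uint32_t    startblock = 3; /* e.g. a start block picked by syncscan */
    uint32_t    block = startblock;

    do
    {
        printf("scan block %" PRIu32 "\n", block);
    } while ((block = advance_block(block, startblock, nblocks)) != INVALID_BLOCK);

    return 0;
}
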
     938             : /* ----------------
     939             :  *      heapgettup - fetch next heap tuple
     940             :  *
     941             :  *      Initialize the scan if not already done; then advance to the next
     942             :  *      tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
     943             :  *      or set scan->rs_ctup.t_data = NULL if no more tuples.
     944             :  *
     945             :  * Note: the reason nkeys/key are passed separately, even though they are
     946             :  * kept in the scan descriptor, is that the caller may not want us to check
     947             :  * the scankeys.
     948             :  *
     949             :  * Note: when we fall off the end of the scan in either direction, we
     950             :  * reset rs_inited.  This means that a further request with the same
     951             :  * scan direction will restart the scan, which is a bit odd, but a
     952             :  * request with the opposite scan direction will start a fresh scan
     953             :  * in the proper direction.  The latter is required behavior for cursors,
     954             :  * while the former case is generally undefined behavior in Postgres
     955             :  * so we don't care too much.
     956             :  * ----------------
     957             :  */
     958             : static void
     959    15008008 : heapgettup(HeapScanDesc scan,
     960             :            ScanDirection dir,
     961             :            int nkeys,
     962             :            ScanKey key)
     963             : {
     964    15008008 :     HeapTuple   tuple = &(scan->rs_ctup);
     965             :     Page        page;
     966             :     OffsetNumber lineoff;
     967             :     int         linesleft;
     968             : 
     969    15008008 :     if (likely(scan->rs_inited))
     970             :     {
     971             :         /* continue from previously returned page/tuple */
     972    14965950 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     973    14965950 :         page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
     974    14965950 :         goto continue_page;
     975             :     }
     976             : 
     977             :     /*
     978             :      * advance the scan until we find a qualifying tuple or run out of stuff
     979             :      * to scan
     980             :      */
     981             :     while (true)
     982             :     {
     983      227028 :         heap_fetch_next_buffer(scan, dir);
     984             : 
     985             :         /* did we run out of blocks to scan? */
     986      227028 :         if (!BufferIsValid(scan->rs_cbuf))
     987       41758 :             break;
     988             : 
     989             :         Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
     990             : 
     991      185270 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     992      185270 :         page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
     993    15151220 : continue_page:
     994             : 
     995             :         /*
     996             :          * Only continue scanning the page while we have lines left.
     997             :          *
     998             :          * Note that this protects us from accessing line pointers past
     999             :          * PageGetMaxOffsetNumber(); both for forward scans when we resume the
    1000             :          * table scan, and for when we start scanning a new page.
    1001             :          */
    1002    15237214 :         for (; linesleft > 0; linesleft--, lineoff += dir)
    1003             :         {
    1004             :             bool        visible;
    1005    15052244 :             ItemId      lpp = PageGetItemId(page, lineoff);
    1006             : 
    1007    15052244 :             if (!ItemIdIsNormal(lpp))
    1008       75532 :                 continue;
    1009             : 
    1010    14976712 :             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
    1011    14976712 :             tuple->t_len = ItemIdGetLength(lpp);
    1012    14976712 :             ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
    1013             : 
    1014    14976712 :             visible = HeapTupleSatisfiesVisibility(tuple,
    1015             :                                                    scan->rs_base.rs_snapshot,
    1016             :                                                    scan->rs_cbuf);
    1017             : 
    1018    14976712 :             HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
    1019             :                                                 tuple, scan->rs_cbuf,
    1020             :                                                 scan->rs_base.rs_snapshot);
    1021             : 
    1022             :             /* skip tuples not visible to this snapshot */
    1023    14976712 :             if (!visible)
    1024       10462 :                 continue;
    1025             : 
    1026             :             /* skip any tuples that don't match the scan key */
    1027    14966250 :             if (key != NULL &&
    1028           0 :                 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
    1029             :                              nkeys, key))
    1030           0 :                 continue;
    1031             : 
    1032    14966250 :             LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
    1033    14966250 :             scan->rs_coffset = lineoff;
    1034    14966250 :             return;
    1035             :         }
    1036             : 
    1037             :         /*
    1038             :          * if we get here, it means we've exhausted the items on this page and
    1039             :          * it's time to move to the next.
    1040             :          */
    1041      184970 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
    1042             :     }
    1043             : 
    1044             :     /* end of scan */
    1045       41758 :     if (BufferIsValid(scan->rs_cbuf))
    1046           0 :         ReleaseBuffer(scan->rs_cbuf);
    1047             : 
    1048       41758 :     scan->rs_cbuf = InvalidBuffer;
    1049       41758 :     scan->rs_cblock = InvalidBlockNumber;
    1050       41758 :     scan->rs_prefetch_block = InvalidBlockNumber;
    1051       41758 :     tuple->t_data = NULL;
    1052       41758 :     scan->rs_inited = false;
    1053             : }
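/*
 * Side note on the "lineoff += dir" step above: ScanDirection (see
 * access/sdir.h) is -1 for backward, 0 for no movement and +1 for forward,
 * so a single loop can walk the line pointers in either direction.  A
 * self-contained sketch of the idea, with hypothetical names:
 */
#include <stdio.h>

typedef enum { DIR_BACKWARD = -1, DIR_NONE = 0, DIR_FORWARD = 1 } Dir;

static void
walk_offsets(int first, int last, Dir dir)
{
    /* start at "first" when going forward, at "last" when going backward */
    int     pos = (dir == DIR_FORWARD) ? first : last;
    int     left = last - first + 1;

    for (; left > 0; left--, pos += dir)
        printf("visiting offset %d\n", pos);
}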
    1054             : 
    1055             : /* ----------------
    1056             :  *      heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
    1057             :  *
    1058             :  *      Same API as heapgettup, but used in page-at-a-time mode
    1059             :  *
    1060             :  * The internal logic is much the same as heapgettup's too, but there are some
    1061             :  * differences: we do not take the buffer content lock (that only needs to
    1062             :  * happen inside heap_prepare_pagescan), and we iterate through just the
    1063             :  * tuples listed in rs_vistuples[] rather than all tuples on the page.  Notice
     1064             :  * that lineindex is 0-based, whereas the corresponding loop variable lineoff in
    1065             :  * heapgettup is 1-based.
    1066             :  * ----------------
    1067             :  */
    1068             : static void
    1069    99568590 : heapgettup_pagemode(HeapScanDesc scan,
    1070             :                     ScanDirection dir,
    1071             :                     int nkeys,
    1072             :                     ScanKey key)
    1073             : {
    1074    99568590 :     HeapTuple   tuple = &(scan->rs_ctup);
    1075             :     Page        page;
    1076             :     uint32      lineindex;
    1077             :     uint32      linesleft;
    1078             : 
    1079    99568590 :     if (likely(scan->rs_inited))
    1080             :     {
    1081             :         /* continue from previously returned page/tuple */
    1082    97624350 :         page = BufferGetPage(scan->rs_cbuf);
    1083             : 
    1084    97624350 :         lineindex = scan->rs_cindex + dir;
    1085    97624350 :         if (ScanDirectionIsForward(dir))
    1086    97623692 :             linesleft = scan->rs_ntuples - lineindex;
    1087             :         else
    1088         658 :             linesleft = scan->rs_cindex;
    1089             :         /* lineindex now references the next or previous visible tid */
    1090             : 
    1091    97624350 :         goto continue_page;
    1092             :     }
    1093             : 
    1094             :     /*
    1095             :      * advance the scan until we find a qualifying tuple or run out of stuff
    1096             :      * to scan
    1097             :      */
    1098             :     while (true)
    1099             :     {
    1100     7334088 :         heap_fetch_next_buffer(scan, dir);
    1101             : 
    1102             :         /* did we run out of blocks to scan? */
    1103     7334036 :         if (!BufferIsValid(scan->rs_cbuf))
    1104     1611830 :             break;
    1105             : 
    1106             :         Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
    1107             : 
    1108             :         /* prune the page and determine visible tuple offsets */
    1109     5722206 :         heap_prepare_pagescan((TableScanDesc) scan);
    1110     5722190 :         page = BufferGetPage(scan->rs_cbuf);
    1111     5722190 :         linesleft = scan->rs_ntuples;
    1112     5722190 :         lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
    1113             : 
    1114             :         /* block is the same for all tuples, set it once outside the loop */
    1115     5722190 :         ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
    1116             : 
    1117             :         /* lineindex now references the next or previous visible tid */
    1118   103346540 : continue_page:
    1119             : 
    1120   202085508 :         for (; linesleft > 0; linesleft--, lineindex += dir)
    1121             :         {
    1122             :             ItemId      lpp;
    1123             :             OffsetNumber lineoff;
    1124             : 
    1125             :             Assert(lineindex < scan->rs_ntuples);
    1126   196695660 :             lineoff = scan->rs_vistuples[lineindex];
    1127   196695660 :             lpp = PageGetItemId(page, lineoff);
    1128             :             Assert(ItemIdIsNormal(lpp));
    1129             : 
    1130   196695660 :             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
    1131   196695660 :             tuple->t_len = ItemIdGetLength(lpp);
    1132   196695660 :             ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);
    1133             : 
    1134             :             /* skip any tuples that don't match the scan key */
    1135   196695660 :             if (key != NULL &&
    1136    99505842 :                 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
    1137             :                              nkeys, key))
    1138    98738968 :                 continue;
    1139             : 
    1140    97956692 :             scan->rs_cindex = lineindex;
    1141    97956692 :             return;
    1142             :         }
    1143             :     }
    1144             : 
    1145             :     /* end of scan */
    1146     1611830 :     if (BufferIsValid(scan->rs_cbuf))
    1147           0 :         ReleaseBuffer(scan->rs_cbuf);
    1148     1611830 :     scan->rs_cbuf = InvalidBuffer;
    1149     1611830 :     scan->rs_cblock = InvalidBlockNumber;
    1150     1611830 :     scan->rs_prefetch_block = InvalidBlockNumber;
    1151     1611830 :     tuple->t_data = NULL;
    1152     1611830 :     scan->rs_inited = false;
    1153             : }
    1154             : 
    1155             : 
    1156             : /* ----------------------------------------------------------------
    1157             :  *                   heap access method interface
    1158             :  * ----------------------------------------------------------------
    1159             :  */
    1160             : 
    1161             : 
    1162             : TableScanDesc
    1163      786128 : heap_beginscan(Relation relation, Snapshot snapshot,
    1164             :                int nkeys, ScanKey key,
    1165             :                ParallelTableScanDesc parallel_scan,
    1166             :                uint32 flags)
    1167             : {
    1168             :     HeapScanDesc scan;
    1169             : 
    1170             :     /*
    1171             :      * increment relation ref count while scanning relation
    1172             :      *
    1173             :      * This is just to make really sure the relcache entry won't go away while
    1174             :      * the scan has a pointer to it.  Caller should be holding the rel open
    1175             :      * anyway, so this is redundant in all normal scenarios...
    1176             :      */
    1177      786128 :     RelationIncrementReferenceCount(relation);
    1178             : 
    1179             :     /*
    1180             :      * allocate and initialize scan descriptor
    1181             :      */
    1182      786128 :     if (flags & SO_TYPE_BITMAPSCAN)
    1183             :     {
    1184       22088 :         BitmapHeapScanDesc bscan = palloc_object(BitmapHeapScanDescData);
    1185             : 
    1186             :         /*
    1187             :          * Bitmap Heap scans do not have any fields that a normal Heap Scan
     1188             :          * does not have, so no special initialization is required here.
    1189             :          */
    1190       22088 :         scan = (HeapScanDesc) bscan;
    1191             :     }
    1192             :     else
    1193      764040 :         scan = (HeapScanDesc) palloc_object(HeapScanDescData);
    1194             : 
    1195      786128 :     scan->rs_base.rs_rd = relation;
    1196      786128 :     scan->rs_base.rs_snapshot = snapshot;
    1197      786128 :     scan->rs_base.rs_nkeys = nkeys;
    1198      786128 :     scan->rs_base.rs_flags = flags;
    1199      786128 :     scan->rs_base.rs_parallel = parallel_scan;
    1200      786128 :     scan->rs_strategy = NULL;    /* set in initscan */
    1201      786128 :     scan->rs_cbuf = InvalidBuffer;
    1202             : 
    1203             :     /*
     1204             :      * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
    1205             :      */
    1206      786128 :     if (!(snapshot && IsMVCCSnapshot(snapshot)))
    1207       59858 :         scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1208             : 
    1209             :     /* Check that a historic snapshot is not used for non-catalog tables */
    1210      786128 :     if (snapshot &&
    1211      768328 :         IsHistoricMVCCSnapshot(snapshot) &&
    1212        1316 :         !RelationIsAccessibleInLogicalDecoding(relation))
    1213             :     {
    1214           0 :         ereport(ERROR,
    1215             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    1216             :                  errmsg("cannot query non-catalog table \"%s\" during logical decoding",
    1217             :                         RelationGetRelationName(relation))));
    1218             :     }
    1219             : 
    1220             :     /*
    1221             :      * For seqscan and sample scans in a serializable transaction, acquire a
    1222             :      * predicate lock on the entire relation. This is required not only to
    1223             :      * lock all the matching tuples, but also to conflict with new insertions
    1224             :      * into the table. In an indexscan, we take page locks on the index pages
    1225             :      * covering the range specified in the scan qual, but in a heap scan there
    1226             :      * is nothing more fine-grained to lock. A bitmap scan is a different
     1227             :      * story: there we have already scanned the index and locked the index
     1228             :      * pages covering the predicate. But in that case we still have to lock
     1229             :      * any matching heap tuples. For a sample scan we could optimize the
     1230             :      * locking to at least page-level granularity, but we'd need to add per-tuple
    1231             :      * locking for that.
    1232             :      */
    1233      786128 :     if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    1234             :     {
    1235             :         /*
    1236             :          * Ensure a missing snapshot is noticed reliably, even if the
    1237             :          * isolation mode means predicate locking isn't performed (and
    1238             :          * therefore the snapshot isn't used here).
    1239             :          */
    1240             :         Assert(snapshot);
    1241      743474 :         PredicateLockRelation(relation, snapshot);
    1242             :     }
    1243             : 
    1244             :     /* we only need to set this up once */
    1245      786128 :     scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
    1246             : 
    1247             :     /*
    1248             :      * Allocate memory to keep track of page allocation for parallel workers
    1249             :      * when doing a parallel scan.
    1250             :      */
    1251      786128 :     if (parallel_scan != NULL)
    1252        4242 :         scan->rs_parallelworkerdata = palloc_object(ParallelBlockTableScanWorkerData);
    1253             :     else
    1254      781886 :         scan->rs_parallelworkerdata = NULL;
    1255             : 
    1256             :     /*
    1257             :      * we do this here instead of in initscan() because heap_rescan also calls
    1258             :      * initscan() and we don't want to allocate memory again
    1259             :      */
    1260      786128 :     if (nkeys > 0)
    1261      450836 :         scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
    1262             :     else
    1263      335292 :         scan->rs_base.rs_key = NULL;
    1264             : 
    1265      786128 :     initscan(scan, key, false);
    1266             : 
    1267      786124 :     scan->rs_read_stream = NULL;
    1268             : 
    1269             :     /*
    1270             :      * Set up a read stream for sequential scans and TID range scans. This
    1271             :      * should be done after initscan() because initscan() allocates the
    1272             :      * BufferAccessStrategy object passed to the read stream API.
    1273             :      */
    1274      786124 :     if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
    1275       42800 :         scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
    1276      745304 :     {
    1277             :         ReadStreamBlockNumberCB cb;
    1278             : 
    1279      745304 :         if (scan->rs_base.rs_parallel)
    1280        4242 :             cb = heap_scan_stream_read_next_parallel;
    1281             :         else
    1282      741062 :             cb = heap_scan_stream_read_next_serial;
    1283             : 
    1284             :         /* ---
    1285             :          * It is safe to use batchmode as the only locks taken by `cb`
    1286             :          * are never taken while waiting for IO:
    1287             :          * - SyncScanLock is used in the non-parallel case
    1288             :          * - in the parallel case, only spinlocks and atomics are used
    1289             :          * ---
    1290             :          */
    1291      745304 :         scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
    1292             :                                                           READ_STREAM_USE_BATCHING,
    1293             :                                                           scan->rs_strategy,
    1294             :                                                           scan->rs_base.rs_rd,
    1295             :                                                           MAIN_FORKNUM,
    1296             :                                                           cb,
    1297             :                                                           scan,
    1298             :                                                           0);
    1299             :     }
    1300       40820 :     else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
    1301             :     {
    1302       22088 :         scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
    1303             :                                                           READ_STREAM_USE_BATCHING,
    1304             :                                                           scan->rs_strategy,
    1305             :                                                           scan->rs_base.rs_rd,
    1306             :                                                           MAIN_FORKNUM,
    1307             :                                                           bitmapheap_stream_read_next,
    1308             :                                                           scan,
    1309             :                                                           sizeof(TBMIterateResult));
    1310             :     }
    1311             : 
    1312             : 
    1313      786124 :     return (TableScanDesc) scan;
    1314             : }
    1315             : 
    1316             : void
    1317     1246754 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
    1318             :             bool allow_strat, bool allow_sync, bool allow_pagemode)
    1319             : {
    1320     1246754 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1321             : 
    1322     1246754 :     if (set_params)
    1323             :     {
    1324          30 :         if (allow_strat)
    1325          30 :             scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
    1326             :         else
    1327           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
    1328             : 
    1329          30 :         if (allow_sync)
    1330          12 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
    1331             :         else
    1332          18 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
    1333             : 
    1334          30 :         if (allow_pagemode && scan->rs_base.rs_snapshot &&
    1335          30 :             IsMVCCSnapshot(scan->rs_base.rs_snapshot))
    1336          30 :             scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
    1337             :         else
    1338           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1339             :     }
    1340             : 
    1341             :     /*
    1342             :      * unpin scan buffers
    1343             :      */
    1344     1246754 :     if (BufferIsValid(scan->rs_cbuf))
    1345             :     {
    1346        3246 :         ReleaseBuffer(scan->rs_cbuf);
    1347        3246 :         scan->rs_cbuf = InvalidBuffer;
    1348             :     }
    1349             : 
    1350             :     /*
    1351             :      * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
    1352             :      * additional data vs a normal HeapScan
    1353             :      */
    1354             : 
    1355             :     /*
    1356             :      * The read stream is reset on rescan. This must be done before
    1357             :      * initscan(), as some state referred to by read_stream_reset() is reset
    1358             :      * in initscan().
    1359             :      */
    1360     1246754 :     if (scan->rs_read_stream)
    1361     1246718 :         read_stream_reset(scan->rs_read_stream);
    1362             : 
    1363             :     /*
    1364             :      * reinitialize scan descriptor
    1365             :      */
    1366     1246754 :     initscan(scan, key, true);
    1367     1246754 : }
    1368             : 
    1369             : void
    1370      781390 : heap_endscan(TableScanDesc sscan)
    1371             : {
    1372      781390 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1373             : 
    1374             :     /* Note: no locking manipulations needed */
    1375             : 
    1376             :     /*
    1377             :      * unpin scan buffers
    1378             :      */
    1379      781390 :     if (BufferIsValid(scan->rs_cbuf))
    1380      326150 :         ReleaseBuffer(scan->rs_cbuf);
    1381             : 
    1382             :     /*
    1383             :      * Must free the read stream before freeing the BufferAccessStrategy.
    1384             :      */
    1385      781390 :     if (scan->rs_read_stream)
    1386      762764 :         read_stream_end(scan->rs_read_stream);
    1387             : 
    1388             :     /*
    1389             :      * decrement relation reference count and free scan descriptor storage
    1390             :      */
    1391      781390 :     RelationDecrementReferenceCount(scan->rs_base.rs_rd);
    1392             : 
    1393      781390 :     if (scan->rs_base.rs_key)
    1394      450782 :         pfree(scan->rs_base.rs_key);
    1395             : 
    1396      781390 :     if (scan->rs_strategy != NULL)
    1397       25070 :         FreeAccessStrategy(scan->rs_strategy);
    1398             : 
    1399      781390 :     if (scan->rs_parallelworkerdata != NULL)
    1400        4242 :         pfree(scan->rs_parallelworkerdata);
    1401             : 
    1402      781390 :     if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
    1403       77814 :         UnregisterSnapshot(scan->rs_base.rs_snapshot);
    1404             : 
    1405      781390 :     pfree(scan);
    1406      781390 : }
    1407             : 
    1408             : HeapTuple
    1409    19366694 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
    1410             : {
    1411    19366694 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1412             : 
    1413             :     /*
    1414             :      * This is still widely used directly, without going through table AM, so
    1415             :      * add a safety check.  It's possible we should, at a later point,
    1416             :      * downgrade this to an assert. The reason for checking the AM routine,
     1417             :      * rather than the AM oid, is that this allows us to write regression tests
    1418             :      * that create another AM reusing the heap handler.
    1419             :      */
    1420    19366694 :     if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
    1421           0 :         ereport(ERROR,
    1422             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1423             :                  errmsg_internal("only heap AM is supported")));
    1424             : 
    1425             :     /* Note: no locking manipulations needed */
    1426             : 
    1427    19366694 :     if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
    1428     5335432 :         heapgettup_pagemode(scan, direction,
    1429     5335432 :                             scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1430             :     else
    1431    14031262 :         heapgettup(scan, direction,
    1432    14031262 :                    scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1433             : 
    1434    19366692 :     if (scan->rs_ctup.t_data == NULL)
    1435      131452 :         return NULL;
    1436             : 
    1437             :     /*
    1438             :      * if we get here it means we have a new current scan tuple, so point to
    1439             :      * the proper return buffer and return the tuple.
    1440             :      */
    1441             : 
    1442    19235240 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1443             : 
    1444    19235240 :     return &scan->rs_ctup;
    1445             : }
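/*
 * A minimal caller-side sketch of the scan interface above (illustration
 * only; many callers instead go through the table AM wrappers).  "rel" is
 * assumed to be an already-opened heap relation and "snapshot" a snapshot
 * the caller owns.
 */
static void
scan_all_tuples(Relation rel, Snapshot snapshot)
{
    TableScanDesc scan;
    HeapTuple   tuple;

    scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
                          SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                          SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points into a pinned buffer; use it before the next call */
    }

    heap_endscan(scan);
}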
    1446             : 
    1447             : bool
    1448    95198446 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
    1449             : {
    1450    95198446 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1451             : 
    1452             :     /* Note: no locking manipulations needed */
    1453             : 
    1454    95198446 :     if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1455    94221700 :         heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1456             :     else
    1457      976746 :         heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1458             : 
    1459    95198396 :     if (scan->rs_ctup.t_data == NULL)
    1460             :     {
    1461     1521928 :         ExecClearTuple(slot);
    1462     1521928 :         return false;
    1463             :     }
    1464             : 
    1465             :     /*
    1466             :      * if we get here it means we have a new current scan tuple, so point to
    1467             :      * the proper return buffer and return the tuple.
    1468             :      */
    1469             : 
    1470    93676468 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1471             : 
    1472    93676468 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
    1473             :                              scan->rs_cbuf);
    1474    93676468 :     return true;
    1475             : }
    1476             : 
    1477             : void
    1478        2070 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
    1479             :                   ItemPointer maxtid)
    1480             : {
    1481        2070 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1482             :     BlockNumber startBlk;
    1483             :     BlockNumber numBlks;
    1484             :     ItemPointerData highestItem;
    1485             :     ItemPointerData lowestItem;
    1486             : 
    1487             :     /*
    1488             :      * For relations without any pages, we can simply leave the TID range
    1489             :      * unset.  There will be no tuples to scan, therefore no tuples outside
    1490             :      * the given TID range.
    1491             :      */
    1492        2070 :     if (scan->rs_nblocks == 0)
    1493          48 :         return;
    1494             : 
    1495             :     /*
    1496             :      * Set up some ItemPointers which point to the first and last possible
    1497             :      * tuples in the heap.
    1498             :      */
    1499        2058 :     ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
    1500        2058 :     ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
    1501             : 
    1502             :     /*
    1503             :      * If the given maximum TID is below the highest possible TID in the
    1504             :      * relation, then restrict the range to that, otherwise we scan to the end
    1505             :      * of the relation.
    1506             :      */
    1507        2058 :     if (ItemPointerCompare(maxtid, &highestItem) < 0)
    1508         260 :         ItemPointerCopy(maxtid, &highestItem);
    1509             : 
    1510             :     /*
    1511             :      * If the given minimum TID is above the lowest possible TID in the
    1512             :      * relation, then restrict the range to only scan for TIDs above that.
    1513             :      */
    1514        2058 :     if (ItemPointerCompare(mintid, &lowestItem) > 0)
    1515        1822 :         ItemPointerCopy(mintid, &lowestItem);
    1516             : 
    1517             :     /*
     1518             :      * Check for an empty range and protect against would-be negative results
    1519             :      * from the numBlks calculation below.
    1520             :      */
    1521        2058 :     if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
    1522             :     {
    1523             :         /* Set an empty range of blocks to scan */
    1524          36 :         heap_setscanlimits(sscan, 0, 0);
    1525          36 :         return;
    1526             :     }
    1527             : 
    1528             :     /*
    1529             :      * Calculate the first block and the number of blocks we must scan. We
    1530             :      * could be more aggressive here and perform some more validation to try
    1531             :      * and further narrow the scope of blocks to scan by checking if the
    1532             :      * lowestItem has an offset above MaxOffsetNumber.  In this case, we could
    1533             :      * advance startBlk by one.  Likewise, if highestItem has an offset of 0
    1534             :      * we could scan one fewer blocks.  However, such an optimization does not
    1535             :      * seem worth troubling over, currently.
    1536             :      */
    1537        2022 :     startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
    1538             : 
    1539        2022 :     numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
    1540        2022 :         ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
    1541             : 
    1542             :     /* Set the start block and number of blocks to scan */
    1543        2022 :     heap_setscanlimits(sscan, startBlk, numBlks);
    1544             : 
    1545             :     /* Finally, set the TID range in sscan */
    1546        2022 :     ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
    1547        2022 :     ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
    1548             : }
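/*
 * Worked example of the clamping above (illustration only): with
 * rs_nblocks = 10, mintid = (2,5) and maxtid = (7,1), lowestItem becomes
 * (2,5) and highestItem becomes (7,1), so startBlk = 2 and
 * numBlks = 7 - 2 + 1 = 6.  Blocks 2..7 are scanned, and the per-tuple
 * filtering in heap_getnextslot_tidrange() below discards TIDs before
 * (2,5) on block 2 and after (7,1) on block 7.
 */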
    1549             : 
    1550             : bool
    1551       11272 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
    1552             :                           TupleTableSlot *slot)
    1553             : {
    1554       11272 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1555       11272 :     ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
    1556       11272 :     ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
    1557             : 
    1558             :     /* Note: no locking manipulations needed */
    1559             :     for (;;)
    1560             :     {
    1561       11458 :         if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1562       11458 :             heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1563             :         else
    1564           0 :             heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1565             : 
    1566       11442 :         if (scan->rs_ctup.t_data == NULL)
    1567             :         {
    1568         208 :             ExecClearTuple(slot);
    1569         208 :             return false;
    1570             :         }
    1571             : 
    1572             :         /*
    1573             :          * heap_set_tidrange will have used heap_setscanlimits to limit the
    1574             :          * range of pages we scan to only ones that can contain the TID range
    1575             :          * we're scanning for.  Here we must filter out any tuples from these
    1576             :          * pages that are outside of that range.
    1577             :          */
    1578       11234 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
    1579             :         {
    1580         186 :             ExecClearTuple(slot);
    1581             : 
    1582             :             /*
    1583             :              * When scanning backwards, the TIDs will be in descending order.
    1584             :              * Future tuples in this direction will be lower still, so we can
    1585             :              * just return false to indicate there will be no more tuples.
    1586             :              */
    1587         186 :             if (ScanDirectionIsBackward(direction))
    1588           0 :                 return false;
    1589             : 
    1590         186 :             continue;
    1591             :         }
    1592             : 
    1593             :         /*
    1594             :          * Likewise for the final page, we must filter out TIDs greater than
    1595             :          * maxtid.
    1596             :          */
    1597       11048 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
    1598             :         {
    1599         112 :             ExecClearTuple(slot);
    1600             : 
    1601             :             /*
    1602             :              * When scanning forward, the TIDs will be in ascending order.
    1603             :              * Future tuples in this direction will be higher still, so we can
    1604             :              * just return false to indicate there will be no more tuples.
    1605             :              */
    1606         112 :             if (ScanDirectionIsForward(direction))
    1607         112 :                 return false;
    1608           0 :             continue;
    1609             :         }
    1610             : 
    1611       10936 :         break;
    1612             :     }
    1613             : 
    1614             :     /*
    1615             :      * if we get here it means we have a new current scan tuple, so point to
    1616             :      * the proper return buffer and return the tuple.
    1617             :      */
    1618       10936 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1619             : 
    1620       10936 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
    1621       10936 :     return true;
    1622             : }
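/*
 * A caller-side sketch of a TID range scan (illustration only; the executor
 * normally reaches this through the table AM layer).  "rel" is assumed to
 * be an open heap relation, "snapshot" a snapshot the caller owns, and
 * "slot" a buffer-heap-tuple slot built for rel's tuple descriptor.
 */
static void
scan_tid_range(Relation rel, Snapshot snapshot, TupleTableSlot *slot,
               ItemPointer mintid, ItemPointer maxtid)
{
    TableScanDesc scan;

    scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
                          SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE);

    heap_set_tidrange(scan, mintid, maxtid);

    while (heap_getnextslot_tidrange(scan, ForwardScanDirection, slot))
    {
        /* slot now holds a tuple whose TID lies within [mintid, maxtid] */
    }

    heap_endscan(scan);
}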
    1623             : 
    1624             : /*
    1625             :  *  heap_fetch      - retrieve tuple with given tid
    1626             :  *
    1627             :  * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
    1628             :  * the tuple, fill in the remaining fields of *tuple, and check the tuple
    1629             :  * against the specified snapshot.
    1630             :  *
    1631             :  * If successful (tuple found and passes snapshot time qual), then *userbuf
    1632             :  * is set to the buffer holding the tuple and true is returned.  The caller
    1633             :  * must unpin the buffer when done with the tuple.
    1634             :  *
    1635             :  * If the tuple is not found (ie, item number references a deleted slot),
    1636             :  * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
    1637             :  * and false is returned.
    1638             :  *
    1639             :  * If the tuple is found but fails the time qual check, then the behavior
    1640             :  * depends on the keep_buf parameter.  If keep_buf is false, the results
    1641             :  * are the same as for the tuple-not-found case.  If keep_buf is true,
    1642             :  * then tuple->t_data and *userbuf are returned as for the success case,
    1643             :  * and again the caller must unpin the buffer; but false is returned.
    1644             :  *
    1645             :  * heap_fetch does not follow HOT chains: only the exact TID requested will
    1646             :  * be fetched.
    1647             :  *
    1648             :  * It is somewhat inconsistent that we ereport() on invalid block number but
    1649             :  * return false on invalid item number.  There are a couple of reasons though.
    1650             :  * One is that the caller can relatively easily check the block number for
    1651             :  * validity, but cannot check the item number without reading the page
    1652             :  * himself.  Another is that when we are following a t_ctid link, we can be
     1653             :  * themselves.  Another is that when we are following a t_ctid link, we can be
    1654             :  * truncate off the destination page without having killed the referencing
    1655             :  * tuple first), but the item number might well not be good.
    1656             :  */
    1657             : bool
    1658      362588 : heap_fetch(Relation relation,
    1659             :            Snapshot snapshot,
    1660             :            HeapTuple tuple,
    1661             :            Buffer *userbuf,
    1662             :            bool keep_buf)
    1663             : {
    1664      362588 :     ItemPointer tid = &(tuple->t_self);
    1665             :     ItemId      lp;
    1666             :     Buffer      buffer;
    1667             :     Page        page;
    1668             :     OffsetNumber offnum;
    1669             :     bool        valid;
    1670             : 
    1671             :     /*
    1672             :      * Fetch and pin the appropriate page of the relation.
    1673             :      */
    1674      362588 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    1675             : 
    1676             :     /*
    1677             :      * Need share lock on buffer to examine tuple commit status.
    1678             :      */
    1679      362572 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1680      362572 :     page = BufferGetPage(buffer);
    1681             : 
    1682             :     /*
     1683             :      * We'd better check for an out-of-range offnum, in case VACUUM has removed
     1684             :      * items since the TID was obtained.
    1685             :      */
    1686      362572 :     offnum = ItemPointerGetOffsetNumber(tid);
    1687      362572 :     if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1688             :     {
    1689           6 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1690           6 :         ReleaseBuffer(buffer);
    1691           6 :         *userbuf = InvalidBuffer;
    1692           6 :         tuple->t_data = NULL;
    1693           6 :         return false;
    1694             :     }
    1695             : 
    1696             :     /*
    1697             :      * get the item line pointer corresponding to the requested tid
    1698             :      */
    1699      362566 :     lp = PageGetItemId(page, offnum);
    1700             : 
    1701             :     /*
    1702             :      * Must check for deleted tuple.
    1703             :      */
    1704      362566 :     if (!ItemIdIsNormal(lp))
    1705             :     {
    1706         700 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1707         700 :         ReleaseBuffer(buffer);
    1708         700 :         *userbuf = InvalidBuffer;
    1709         700 :         tuple->t_data = NULL;
    1710         700 :         return false;
    1711             :     }
    1712             : 
    1713             :     /*
    1714             :      * fill in *tuple fields
    1715             :      */
    1716      361866 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1717      361866 :     tuple->t_len = ItemIdGetLength(lp);
    1718      361866 :     tuple->t_tableOid = RelationGetRelid(relation);
    1719             : 
    1720             :     /*
    1721             :      * check tuple visibility, then release lock
    1722             :      */
    1723      361866 :     valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
    1724             : 
    1725      361866 :     if (valid)
    1726      361760 :         PredicateLockTID(relation, &(tuple->t_self), snapshot,
    1727      361760 :                          HeapTupleHeaderGetXmin(tuple->t_data));
    1728             : 
    1729      361866 :     HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
    1730             : 
    1731      361866 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1732             : 
    1733      361866 :     if (valid)
    1734             :     {
    1735             :         /*
    1736             :          * All checks passed, so return the tuple as valid. Caller is now
    1737             :          * responsible for releasing the buffer.
    1738             :          */
    1739      361760 :         *userbuf = buffer;
    1740             : 
    1741      361760 :         return true;
    1742             :     }
    1743             : 
    1744             :     /* Tuple failed time qual, but maybe caller wants to see it anyway. */
    1745         106 :     if (keep_buf)
    1746          68 :         *userbuf = buffer;
    1747             :     else
    1748             :     {
    1749          38 :         ReleaseBuffer(buffer);
    1750          38 :         *userbuf = InvalidBuffer;
    1751          38 :         tuple->t_data = NULL;
    1752             :     }
    1753             : 
    1754         106 :     return false;
    1755             : }
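/*
 * A minimal caller-side sketch of heap_fetch() (illustration only): fetch
 * the tuple at a known TID and release the buffer pin afterwards.  "rel",
 * "snapshot" and "tid" are assumed to be supplied by the caller.
 */
static bool
fetch_one(Relation rel, Snapshot snapshot, ItemPointer tid)
{
    HeapTupleData tuple;
    Buffer      buf;

    tuple.t_self = *tid;
    if (!heap_fetch(rel, snapshot, &tuple, &buf, false))
        return false;           /* not found, or fails the snapshot check */

    /* ... examine tuple.t_data while the buffer pin is held ... */

    ReleaseBuffer(buf);
    return true;
}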
    1756             : 
    1757             : /*
    1758             :  *  heap_hot_search_buffer  - search HOT chain for tuple satisfying snapshot
    1759             :  *
    1760             :  * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
    1761             :  * of a HOT chain), and buffer is the buffer holding this tuple.  We search
    1762             :  * for the first chain member satisfying the given snapshot.  If one is
    1763             :  * found, we update *tid to reference that tuple's offset number, and
    1764             :  * return true.  If no match, return false without modifying *tid.
    1765             :  *
    1766             :  * heapTuple is a caller-supplied buffer.  When a match is found, we return
    1767             :  * the tuple here, in addition to updating *tid.  If no match is found, the
    1768             :  * contents of this buffer on return are undefined.
    1769             :  *
    1770             :  * If all_dead is not NULL, we check non-visible tuples to see if they are
    1771             :  * globally dead; *all_dead is set true if all members of the HOT chain
    1772             :  * are vacuumable, false if not.
    1773             :  *
    1774             :  * Unlike heap_fetch, the caller must already have pin and (at least) share
    1775             :  * lock on the buffer; it is still pinned/locked at exit.
    1776             :  */
    1777             : bool
    1778    45266994 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
    1779             :                        Snapshot snapshot, HeapTuple heapTuple,
    1780             :                        bool *all_dead, bool first_call)
    1781             : {
    1782    45266994 :     Page        page = BufferGetPage(buffer);
    1783    45266994 :     TransactionId prev_xmax = InvalidTransactionId;
    1784             :     BlockNumber blkno;
    1785             :     OffsetNumber offnum;
    1786             :     bool        at_chain_start;
    1787             :     bool        valid;
    1788             :     bool        skip;
    1789    45266994 :     GlobalVisState *vistest = NULL;
    1790             : 
    1791             :     /* If this is not the first call, previous call returned a (live!) tuple */
    1792    45266994 :     if (all_dead)
    1793    38589262 :         *all_dead = first_call;
    1794             : 
    1795    45266994 :     blkno = ItemPointerGetBlockNumber(tid);
    1796    45266994 :     offnum = ItemPointerGetOffsetNumber(tid);
    1797    45266994 :     at_chain_start = first_call;
    1798    45266994 :     skip = !first_call;
    1799             : 
    1800             :     /* XXX: we should assert that a snapshot is pushed or registered */
    1801             :     Assert(TransactionIdIsValid(RecentXmin));
    1802             :     Assert(BufferGetBlockNumber(buffer) == blkno);
    1803             : 
    1804             :     /* Scan through possible multiple members of HOT-chain */
    1805             :     for (;;)
    1806     3078190 :     {
    1807             :         ItemId      lp;
    1808             : 
    1809             :         /* check for bogus TID */
    1810    48345184 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1811             :             break;
    1812             : 
    1813    48345184 :         lp = PageGetItemId(page, offnum);
    1814             : 
    1815             :         /* check for unused, dead, or redirected items */
    1816    48345184 :         if (!ItemIdIsNormal(lp))
    1817             :         {
    1818             :             /* We should only see a redirect at start of chain */
    1819     1700066 :             if (ItemIdIsRedirected(lp) && at_chain_start)
    1820             :             {
    1821             :                 /* Follow the redirect */
    1822      957428 :                 offnum = ItemIdGetRedirect(lp);
    1823      957428 :                 at_chain_start = false;
    1824      957428 :                 continue;
    1825             :             }
    1826             :             /* else must be end of chain */
    1827      742638 :             break;
    1828             :         }
    1829             : 
    1830             :         /*
    1831             :          * Update heapTuple to point to the element of the HOT chain we're
    1832             :          * currently investigating. Having t_self set correctly is important
    1833             :          * because the SSI checks and the *Satisfies routine for historical
    1834             :          * MVCC snapshots need the correct tid to decide about the visibility.
    1835             :          */
    1836    46645118 :         heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1837    46645118 :         heapTuple->t_len = ItemIdGetLength(lp);
    1838    46645118 :         heapTuple->t_tableOid = RelationGetRelid(relation);
    1839    46645118 :         ItemPointerSet(&heapTuple->t_self, blkno, offnum);
    1840             : 
    1841             :         /*
    1842             :          * Shouldn't see a HEAP_ONLY tuple at chain start.
    1843             :          */
    1844    46645118 :         if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
    1845           0 :             break;
    1846             : 
    1847             :         /*
    1848             :          * The xmin should match the previous xmax value, else chain is
    1849             :          * broken.
    1850             :          */
    1851    48765880 :         if (TransactionIdIsValid(prev_xmax) &&
    1852     2120762 :             !TransactionIdEquals(prev_xmax,
    1853             :                                  HeapTupleHeaderGetXmin(heapTuple->t_data)))
    1854           0 :             break;
    1855             : 
    1856             :         /*
    1857             :          * When first_call is true (and thus, skip is initially false) we'll
    1858             :          * return the first tuple we find.  But on later passes, heapTuple
    1859             :          * will initially be pointing to the tuple we returned last time.
    1860             :          * Returning it again would be incorrect (and would loop forever), so
    1861             :          * we skip it and return the next match we find.
    1862             :          */
    1863    46645118 :         if (!skip)
    1864             :         {
    1865             :             /* If it's visible per the snapshot, we must return it */
    1866    46471158 :             valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
    1867    46471158 :             HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
    1868             :                                                 buffer, snapshot);
    1869             : 
    1870    46471148 :             if (valid)
    1871             :             {
    1872    32040538 :                 ItemPointerSetOffsetNumber(tid, offnum);
    1873    32040538 :                 PredicateLockTID(relation, &heapTuple->t_self, snapshot,
    1874    32040538 :                                  HeapTupleHeaderGetXmin(heapTuple->t_data));
    1875    32040538 :                 if (all_dead)
    1876    25948712 :                     *all_dead = false;
    1877    32040538 :                 return true;
    1878             :             }
    1879             :         }
    1880    14604570 :         skip = false;
    1881             : 
    1882             :         /*
    1883             :          * If we can't see it, maybe no one else can either.  At caller
    1884             :          * request, check whether all chain members are dead to all
    1885             :          * transactions.
    1886             :          *
    1887             :          * Note: if you change the criterion here for what is "dead", fix the
    1888             :          * planner's get_actual_variable_range() function to match.
    1889             :          */
    1890    14604570 :         if (all_dead && *all_dead)
    1891             :         {
    1892    12971386 :             if (!vistest)
    1893    12729522 :                 vistest = GlobalVisTestFor(relation);
    1894             : 
    1895    12971386 :             if (!HeapTupleIsSurelyDead(heapTuple, vistest))
    1896    12261482 :                 *all_dead = false;
    1897             :         }
    1898             : 
    1899             :         /*
    1900             :          * Check to see if HOT chain continues past this tuple; if so fetch
    1901             :          * the next offnum and loop around.
    1902             :          */
    1903    14604570 :         if (HeapTupleIsHotUpdated(heapTuple))
    1904             :         {
    1905             :             Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
    1906             :                    blkno);
    1907     2120762 :             offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
    1908     2120762 :             at_chain_start = false;
    1909     2120762 :             prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
    1910             :         }
    1911             :         else
    1912    12483808 :             break;              /* end of chain */
    1913             :     }
    1914             : 
    1915    13226446 :     return false;
    1916             : }
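/*
 * A caller-side sketch of heap_hot_search_buffer() (illustration only,
 * loosely modeled on how index fetches use it): the caller must already
 * hold a pin and at least share lock on the buffer containing the root of
 * the HOT chain.
 */
static bool
fetch_via_hot_chain(Relation rel, Snapshot snapshot, ItemPointer tid)
{
    Buffer      buf;
    HeapTupleData heapTuple;
    bool        all_dead;
    bool        found;

    buf = ReadBuffer(rel, ItemPointerGetBlockNumber(tid));
    LockBuffer(buf, BUFFER_LOCK_SHARE);

    /* on success, *tid is updated to the matching chain member's offset */
    found = heap_hot_search_buffer(tid, rel, buf, snapshot, &heapTuple,
                                   &all_dead, true);

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    ReleaseBuffer(buf);

    return found;
}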
    1917             : 
    1918             : /*
    1919             :  *  heap_get_latest_tid -  get the latest tid of a specified tuple
    1920             :  *
    1921             :  * Actually, this gets the latest version that is visible according to the
    1922             :  * scan's snapshot.  Create a scan using SnapshotDirty to get the very latest,
    1923             :  * possibly uncommitted version.
    1924             :  *
    1925             :  * *tid is both an input and an output parameter: it is updated to
    1926             :  * show the latest version of the row.  Note that it will not be changed
    1927             :  * if no version of the row passes the snapshot test.
    1928             :  */
    1929             : void
    1930         300 : heap_get_latest_tid(TableScanDesc sscan,
    1931             :                     ItemPointer tid)
    1932             : {
    1933         300 :     Relation    relation = sscan->rs_rd;
    1934         300 :     Snapshot    snapshot = sscan->rs_snapshot;
    1935             :     ItemPointerData ctid;
    1936             :     TransactionId priorXmax;
    1937             : 
    1938             :     /*
    1939             :      * table_tuple_get_latest_tid() verified that the passed in tid is valid.
     1940             :      * Assume, however, that t_ctid links are valid; there shouldn't be invalid
     1941             :      * ones in the table.
    1942             :      */
    1943             :     Assert(ItemPointerIsValid(tid));
    1944             : 
    1945             :     /*
    1946             :      * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
    1947             :      * need to examine, and *tid is the TID we will return if ctid turns out
    1948             :      * to be bogus.
    1949             :      *
    1950             :      * Note that we will loop until we reach the end of the t_ctid chain.
    1951             :      * Depending on the snapshot passed, there might be at most one visible
    1952             :      * version of the row, but we don't try to optimize for that.
    1953             :      */
    1954         300 :     ctid = *tid;
    1955         300 :     priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    1956             :     for (;;)
    1957          90 :     {
    1958             :         Buffer      buffer;
    1959             :         Page        page;
    1960             :         OffsetNumber offnum;
    1961             :         ItemId      lp;
    1962             :         HeapTupleData tp;
    1963             :         bool        valid;
    1964             : 
    1965             :         /*
    1966             :          * Read, pin, and lock the page.
    1967             :          */
    1968         390 :         buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
    1969         390 :         LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1970         390 :         page = BufferGetPage(buffer);
    1971             : 
    1972             :         /*
    1973             :          * Check for bogus item number.  This is not treated as an error
    1974             :          * condition because it can happen while following a t_ctid link. We
    1975             :          * just assume that the prior tid is OK and return it unchanged.
    1976             :          */
    1977         390 :         offnum = ItemPointerGetOffsetNumber(&ctid);
    1978         390 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1979             :         {
    1980           0 :             UnlockReleaseBuffer(buffer);
    1981           0 :             break;
    1982             :         }
    1983         390 :         lp = PageGetItemId(page, offnum);
    1984         390 :         if (!ItemIdIsNormal(lp))
    1985             :         {
    1986           0 :             UnlockReleaseBuffer(buffer);
    1987           0 :             break;
    1988             :         }
    1989             : 
    1990             :         /* OK to access the tuple */
    1991         390 :         tp.t_self = ctid;
    1992         390 :         tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1993         390 :         tp.t_len = ItemIdGetLength(lp);
    1994         390 :         tp.t_tableOid = RelationGetRelid(relation);
    1995             : 
    1996             :         /*
    1997             :          * After following a t_ctid link, we might arrive at an unrelated
    1998             :          * tuple.  Check for XMIN match.
    1999             :          */
    2000         480 :         if (TransactionIdIsValid(priorXmax) &&
    2001          90 :             !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
    2002             :         {
    2003           0 :             UnlockReleaseBuffer(buffer);
    2004           0 :             break;
    2005             :         }
    2006             : 
    2007             :         /*
    2008             :          * Check tuple visibility; if visible, set it as the new result
    2009             :          * candidate.
    2010             :          */
    2011         390 :         valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
    2012         390 :         HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
    2013         390 :         if (valid)
    2014         276 :             *tid = ctid;
    2015             : 
    2016             :         /*
    2017             :          * If there's a valid t_ctid link, follow it, else we're done.
    2018             :          */
    2019         552 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    2020         276 :             HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
    2021         228 :             HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
    2022         114 :             ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    2023             :         {
    2024         300 :             UnlockReleaseBuffer(buffer);
    2025         300 :             break;
    2026             :         }
    2027             : 
    2028          90 :         ctid = tp.t_data->t_ctid;
    2029          90 :         priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    2030          90 :         UnlockReleaseBuffer(buffer);
    2031             :     }                           /* end of loop */
    2032         300 : }
    2033             : 
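A minimal caller sketch (not part of heapam.c; "rel" and the helper name are assumptions, and a snapshot is assumed to have been pushed as active): callers normally reach this through table_tuple_get_latest_tid(), and *tid is overwritten only when a newer visible version is found.

    /* Sketch: resolve a known-valid TID to the latest visible row version. */
    static void
    resolve_latest_tid(Relation rel, ItemPointer tid)
    {
        TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

        heap_get_latest_tid(scan, tid); /* updates *tid in place, or leaves it */
        table_endscan(scan);
    }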
    2034             : 
    2035             : /*
    2036             :  * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
    2037             :  *
    2038             :  * This is called after we have waited for the XMAX transaction to terminate.
    2039             :  * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
    2040             :  * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
    2041             :  * hint bit if possible --- but beware that that may not yet be possible,
    2042             :  * if the transaction committed asynchronously.
    2043             :  *
    2044             :  * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
    2045             :  * even if it commits.
    2046             :  *
    2047             :  * Hence callers should look only at XMAX_INVALID.
    2048             :  *
    2049             :  * Note this is not allowed for tuples whose xmax is a multixact.
    2050             :  */
    2051             : static void
    2052         440 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
    2053             : {
    2054             :     Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
    2055             :     Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
    2056             : 
    2057         440 :     if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
    2058             :     {
    2059         786 :         if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
    2060         348 :             TransactionIdDidCommit(xid))
    2061         294 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
    2062             :                                  xid);
    2063             :         else
    2064         144 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
    2065             :                                  InvalidTransactionId);
    2066             :     }
    2067         440 : }
    2068             : 
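A simplified sketch of the caller pattern described above (assumptions: "xwait" is the raw xmax we just finished waiting on, and the exclusive buffer lock has been reacquired); per the comment, only HEAP_XMAX_INVALID is meaningful afterwards.

    XactLockTableWait(xwait, relation, &tp.t_self, XLTW_Delete);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    UpdateXmaxHintBits(tp.t_data, buffer, xwait);
    if (tp.t_data->t_infomask & HEAP_XMAX_INVALID)
    {
        /* xmax aborted, or was only a locker: the tuple was not deleted */
    }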
    2069             : 
    2070             : /*
    2071             :  * GetBulkInsertState - prepare status object for a bulk insert
    2072             :  */
    2073             : BulkInsertState
    2074        5468 : GetBulkInsertState(void)
    2075             : {
    2076             :     BulkInsertState bistate;
    2077             : 
    2078        5468 :     bistate = (BulkInsertState) palloc_object(BulkInsertStateData);
    2079        5468 :     bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
    2080        5468 :     bistate->current_buf = InvalidBuffer;
    2081        5468 :     bistate->next_free = InvalidBlockNumber;
    2082        5468 :     bistate->last_free = InvalidBlockNumber;
    2083        5468 :     bistate->already_extended_by = 0;
    2084        5468 :     return bistate;
    2085             : }
    2086             : 
    2087             : /*
    2088             :  * FreeBulkInsertState - clean up after finishing a bulk insert
    2089             :  */
    2090             : void
    2091        5178 : FreeBulkInsertState(BulkInsertState bistate)
    2092             : {
    2093        5178 :     if (bistate->current_buf != InvalidBuffer)
    2094        4020 :         ReleaseBuffer(bistate->current_buf);
    2095        5178 :     FreeAccessStrategy(bistate->strategy);
    2096        5178 :     pfree(bistate);
    2097        5178 : }
    2098             : 
    2099             : /*
    2100             :  * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
    2101             :  */
    2102             : void
    2103      161516 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
    2104             : {
    2105      161516 :     if (bistate->current_buf != InvalidBuffer)
    2106       60042 :         ReleaseBuffer(bistate->current_buf);
    2107      161516 :     bistate->current_buf = InvalidBuffer;
    2108             : 
    2109             :     /*
    2110             :      * Despite the name, we also reset bulk relation extension state.
    2111             :      * Otherwise we can end up erroring out due to looking for free space in
    2112             :      * ->next_free of one partition, even though ->next_free was set when
    2113             :      * extending another partition. It could obviously also be bad for
    2114             :      * efficiency to look at existing blocks at offsets from another
    2115             :      * partition, even if we don't error out.
    2116             :      */
    2117      161516 :     bistate->next_free = InvalidBlockNumber;
    2118      161516 :     bistate->last_free = InvalidBlockNumber;
    2119      161516 : }
    2120             : 
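A minimal lifecycle sketch (assumptions: "rel" is already opened and locked by the caller, and "tuples"/"ntuples" were formed elsewhere); the state object keeps the current target buffer pinned and reuses a BAS_BULKWRITE strategy across the whole loop.

    BulkInsertState bistate = GetBulkInsertState();
    CommandId   cid = GetCurrentCommandId(true);

    for (int i = 0; i < ntuples; i++)
        heap_insert(rel, tuples[i], cid, 0, bistate);

    /* when switching to a different partition, also drop the pin and the
     * relation-extension state: ReleaseBulkInsertStatePin(bistate); */

    FreeBulkInsertState(bistate);   /* releases the held pin and the strategy */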
    2121             : 
    2122             : /*
    2123             :  *  heap_insert     - insert tuple into a heap
    2124             :  *
    2125             :  * The new tuple is stamped with current transaction ID and the specified
    2126             :  * command ID.
    2127             :  *
    2128             :  * See table_tuple_insert for comments about most of the input flags, except
    2129             :  * that this routine directly takes a tuple rather than a slot.
    2130             :  *
    2131             :  * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
    2132             :  * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
    2133             :  * implement table_tuple_insert_speculative().
    2134             :  *
    2135             :  * On return the header fields of *tup are updated to match the stored tuple;
    2136             :  * in particular tup->t_self receives the actual TID where the tuple was
    2137             :  * stored.  But note that any toasting of fields within the tuple data is NOT
    2138             :  * reflected into *tup.
    2139             :  */
    2140             : void
    2141    16769074 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    2142             :             int options, BulkInsertState bistate)
    2143             : {
    2144    16769074 :     TransactionId xid = GetCurrentTransactionId();
    2145             :     HeapTuple   heaptup;
    2146             :     Buffer      buffer;
    2147    16769058 :     Buffer      vmbuffer = InvalidBuffer;
    2148    16769058 :     bool        all_visible_cleared = false;
    2149             : 
    2150             :     /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    2151             :     Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
    2152             :            RelationGetNumberOfAttributes(relation));
    2153             : 
    2154    16769058 :     AssertHasSnapshotForToast(relation);
    2155             : 
    2156             :     /*
    2157             :      * Fill in tuple header fields and toast the tuple if necessary.
    2158             :      *
    2159             :      * Note: below this point, heaptup is the data we actually intend to store
    2160             :      * into the relation; tup is the caller's original untoasted data.
    2161             :      */
    2162    16769058 :     heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
    2163             : 
    2164             :     /*
    2165             :      * Find buffer to insert this tuple into.  If the page is all visible,
    2166             :      * this will also pin the requisite visibility map page.
    2167             :      */
    2168    16769058 :     buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
    2169             :                                        InvalidBuffer, options, bistate,
    2170             :                                        &vmbuffer, NULL,
    2171             :                                        0);
    2172             : 
    2173             :     /*
    2174             :      * We're about to do the actual insert -- but check for conflict first, to
    2175             :      * avoid possibly having to roll back work we've just done.
    2176             :      *
    2177             :      * This is safe without a recheck as long as there is no possibility of
    2178             :      * another process scanning the page between this check and the insert
    2179             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    2180             :      * continuously held from this point until the tuple insert is visible).
    2181             :      *
    2182             :      * For a heap insert, we only need to check for table-level SSI locks. Our
    2183             :      * new tuple can't possibly conflict with existing tuple locks, and heap
    2184             :      * page locks are only consolidated versions of tuple locks; they do not
    2185             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2186             :      * buffer when making the call, which makes for a faster check.
    2187             :      */
    2188    16769058 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2189             : 
    2190             :     /* NO EREPORT(ERROR) from here till changes are logged */
    2191    16769034 :     START_CRIT_SECTION();
    2192             : 
    2193    16769034 :     RelationPutHeapTuple(relation, buffer, heaptup,
    2194    16769034 :                          (options & HEAP_INSERT_SPECULATIVE) != 0);
    2195             : 
    2196    16769034 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    2197             :     {
    2198       14872 :         all_visible_cleared = true;
    2199       14872 :         PageClearAllVisible(BufferGetPage(buffer));
    2200       14872 :         visibilitymap_clear(relation,
    2201       14872 :                             ItemPointerGetBlockNumber(&(heaptup->t_self)),
    2202             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    2203             :     }
    2204             : 
    2205             :     /*
    2206             :      * XXX Should we set PageSetPrunable on this page ?
    2207             :      *
    2208             :      * The inserting transaction may eventually abort thus making this tuple
    2209             :      * DEAD and hence available for pruning. Though we don't want to optimize
    2210             :      * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
    2211             :      * aborted tuple will never be pruned until next vacuum is triggered.
    2212             :      *
    2213             :      * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
    2214             :      */
    2215             : 
    2216    16769034 :     MarkBufferDirty(buffer);
    2217             : 
    2218             :     /* XLOG stuff */
    2219    16769034 :     if (RelationNeedsWAL(relation))
    2220             :     {
    2221             :         xl_heap_insert xlrec;
    2222             :         xl_heap_header xlhdr;
    2223             :         XLogRecPtr  recptr;
    2224    14989640 :         Page        page = BufferGetPage(buffer);
    2225    14989640 :         uint8       info = XLOG_HEAP_INSERT;
    2226    14989640 :         int         bufflags = 0;
    2227             : 
    2228             :         /*
    2229             :          * If this is a catalog, we need to transmit combo CIDs to properly
    2230             :          * decode, so log that as well.
    2231             :          */
    2232    14989640 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    2233        6868 :             log_heap_new_cid(relation, heaptup);
    2234             : 
    2235             :         /*
    2236             :          * If this is the single and first tuple on page, we can reinit the
    2237             :          * page instead of restoring the whole thing.  Set flag, and hide
    2238             :          * buffer references from XLogInsert.
    2239             :          */
    2240    15182912 :         if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
    2241      193272 :             PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    2242             :         {
    2243      191526 :             info |= XLOG_HEAP_INIT_PAGE;
    2244      191526 :             bufflags |= REGBUF_WILL_INIT;
    2245             :         }
    2246             : 
    2247    14989640 :         xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2248    14989640 :         xlrec.flags = 0;
    2249    14989640 :         if (all_visible_cleared)
    2250       14866 :             xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
    2251    14989640 :         if (options & HEAP_INSERT_SPECULATIVE)
    2252        4164 :             xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
    2253             :         Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
    2254             : 
    2255             :         /*
    2256             :          * For logical decoding, we need the tuple even if we're doing a full
    2257             :          * page write, so make sure it's included even if we take a full-page
    2258             :          * image. (XXX We could alternatively store a pointer into the FPW).
    2259             :          */
    2260    14989640 :         if (RelationIsLogicallyLogged(relation) &&
    2261      501026 :             !(options & HEAP_INSERT_NO_LOGICAL))
    2262             :         {
    2263      500972 :             xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2264      500972 :             bufflags |= REGBUF_KEEP_DATA;
    2265             : 
    2266      500972 :             if (IsToastRelation(relation))
    2267        3572 :                 xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
    2268             :         }
    2269             : 
    2270    14989640 :         XLogBeginInsert();
    2271    14989640 :         XLogRegisterData(&xlrec, SizeOfHeapInsert);
    2272             : 
    2273    14989640 :         xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
    2274    14989640 :         xlhdr.t_infomask = heaptup->t_data->t_infomask;
    2275    14989640 :         xlhdr.t_hoff = heaptup->t_data->t_hoff;
    2276             : 
    2277             :         /*
    2278             :          * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
    2279             :          * write the whole page to the xlog, we don't need to store
    2280             :          * xl_heap_header in the xlog.
    2281             :          */
    2282    14989640 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2283    14989640 :         XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
    2284             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    2285    14989640 :         XLogRegisterBufData(0,
    2286    14989640 :                             (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2287    14989640 :                             heaptup->t_len - SizeofHeapTupleHeader);
    2288             : 
    2289             :         /* filtering by origin on a row level is much more efficient */
    2290    14989640 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2291             : 
    2292    14989640 :         recptr = XLogInsert(RM_HEAP_ID, info);
    2293             : 
    2294    14989640 :         PageSetLSN(page, recptr);
    2295             :     }
    2296             : 
    2297    16769034 :     END_CRIT_SECTION();
    2298             : 
    2299    16769034 :     UnlockReleaseBuffer(buffer);
    2300    16769034 :     if (vmbuffer != InvalidBuffer)
    2301       15430 :         ReleaseBuffer(vmbuffer);
    2302             : 
    2303             :     /*
    2304             :      * If tuple is cacheable, mark it for invalidation from the caches in case
    2305             :      * we abort.  Note it is OK to do this after releasing the buffer, because
    2306             :      * the heaptup data structure is all in local memory, not in the shared
    2307             :      * buffer.
    2308             :      */
    2309    16769034 :     CacheInvalidateHeapTuple(relation, heaptup, NULL);
    2310             : 
    2311             :     /* Note: speculative insertions are counted too, even if aborted later */
    2312    16769034 :     pgstat_count_heap_insert(relation, 1);
    2313             : 
    2314             :     /*
    2315             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    2316             :      * back to the caller's image, too.
    2317             :      */
    2318    16769034 :     if (heaptup != tup)
    2319             :     {
    2320       36884 :         tup->t_self = heaptup->t_self;
    2321       36884 :         heap_freetuple(heaptup);
    2322             :     }
    2323    16769034 : }
    2324             : 
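A minimal direct-caller sketch (assumptions: "rel" is open and locked, and "values"/"isnull" match its tuple descriptor); note that heap_insert() fills tup->t_self but does not create any index entries.

    HeapTuple   tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);

    heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);
    /* tup->t_self now holds the assigned TID; index insertion is up to the caller */
    heap_freetuple(tup);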
    2325             : /*
    2326             :  * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
    2327             :  * tuple header fields and toasts the tuple if necessary.  Returns a toasted
    2328             :  * version of the tuple if it was toasted, or the original tuple if not. Note
    2329             :  * that in any case, the header fields are also set in the original tuple.
    2330             :  */
    2331             : static HeapTuple
    2332    19781884 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
    2333             :                     CommandId cid, int options)
    2334             : {
    2335             :     /*
    2336             :      * To allow parallel inserts, we need to ensure that they are safe to be
    2337             :      * performed in workers. We have the infrastructure to allow parallel
    2338             :      * inserts in general except for the cases where inserts generate a new
    2339             :      * CommandId (eg. inserts into a table having a foreign key column).
    2340             :      */
    2341    19781884 :     if (IsParallelWorker())
    2342           0 :         ereport(ERROR,
    2343             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2344             :                  errmsg("cannot insert tuples in a parallel worker")));
    2345             : 
    2346    19781884 :     tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    2347    19781884 :     tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    2348    19781884 :     tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
    2349    19781884 :     HeapTupleHeaderSetXmin(tup->t_data, xid);
    2350    19781884 :     if (options & HEAP_INSERT_FROZEN)
    2351      204176 :         HeapTupleHeaderSetXminFrozen(tup->t_data);
    2352             : 
    2353    19781884 :     HeapTupleHeaderSetCmin(tup->t_data, cid);
    2354    19781884 :     HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
    2355    19781884 :     tup->t_tableOid = RelationGetRelid(relation);
    2356             : 
    2357             :     /*
    2358             :      * If the new tuple is too big for storage or contains already toasted
    2359             :      * out-of-line attributes from some other relation, invoke the toaster.
    2360             :      */
    2361    19781884 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    2362       62848 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    2363             :     {
    2364             :         /* toast table entries should never be recursively toasted */
    2365             :         Assert(!HeapTupleHasExternal(tup));
    2366       62752 :         return tup;
    2367             :     }
    2368    19719132 :     else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    2369       36988 :         return heap_toast_insert_or_update(relation, tup, NULL, options);
    2370             :     else
    2371    19682144 :         return tup;
    2372             : }
    2373             : 
    2374             : /*
    2375             :  * Helper for heap_multi_insert() that computes the number of entire pages
    2376             :  * that inserting the remaining heaptuples requires. Used to determine how
    2377             :  * much the relation needs to be extended by.
    2378             :  */
    2379             : static int
    2380      761652 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
    2381             : {
    2382      761652 :     size_t      page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2383      761652 :     int         npages = 1;
    2384             : 
    2385     4964594 :     for (int i = done; i < ntuples; i++)
    2386             :     {
    2387     4202942 :         size_t      tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
    2388             : 
    2389     4202942 :         if (page_avail < tup_sz)
    2390             :         {
    2391       31200 :             npages++;
    2392       31200 :             page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2393             :         }
    2394     4202942 :         page_avail -= tup_sz;
    2395             :     }
    2396             : 
    2397      761652 :     return npages;
    2398             : }
    2399             : 
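As a worked example of the computation above, assuming the default 8192-byte BLCKSZ, the 24-byte SizeOfPageHeaderData, and no fillfactor reservation (saveFreeSpace = 0): each page offers 8168 bytes, so for ten remaining tuples of 820 bytes apiece (4-byte line pointer plus MAXALIGN'd data) the ninth leaves only 788 bytes, the tenth no longer fits, and the helper returns npages = 2.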
    2400             : /*
    2401             :  *  heap_multi_insert   - insert multiple tuples into a heap
    2402             :  *
    2403             :  * This is like heap_insert(), but inserts multiple tuples in one operation.
    2404             :  * That's faster than calling heap_insert() in a loop, because when multiple
    2405             :  * tuples can be inserted on a single page, we can write just a single WAL
    2406             :  * record covering all of them, and only need to lock/unlock the page once.
    2407             :  *
    2408             :  * Note: this leaks memory into the current memory context. You can create a
    2409             :  * temporary context before calling this, if that's a problem.
    2410             :  */
    2411             : void
    2412      748104 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    2413             :                   CommandId cid, int options, BulkInsertState bistate)
    2414             : {
    2415      748104 :     TransactionId xid = GetCurrentTransactionId();
    2416             :     HeapTuple  *heaptuples;
    2417             :     int         i;
    2418             :     int         ndone;
    2419             :     PGAlignedBlock scratch;
    2420             :     Page        page;
    2421      748104 :     Buffer      vmbuffer = InvalidBuffer;
    2422             :     bool        needwal;
    2423             :     Size        saveFreeSpace;
    2424      748104 :     bool        need_tuple_data = RelationIsLogicallyLogged(relation);
    2425      748104 :     bool        need_cids = RelationIsAccessibleInLogicalDecoding(relation);
    2426      748104 :     bool        starting_with_empty_page = false;
    2427      748104 :     int         npages = 0;
    2428      748104 :     int         npages_used = 0;
    2429             : 
    2430             :     /* currently not needed (thus unsupported) for heap_multi_insert() */
    2431             :     Assert(!(options & HEAP_INSERT_NO_LOGICAL));
    2432             : 
    2433      748104 :     AssertHasSnapshotForToast(relation);
    2434             : 
    2435      748104 :     needwal = RelationNeedsWAL(relation);
    2436      748104 :     saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
    2437             :                                                    HEAP_DEFAULT_FILLFACTOR);
    2438             : 
    2439             :     /* Toast and set header data in all the slots */
    2440      748104 :     heaptuples = palloc(ntuples * sizeof(HeapTuple));
    2441     3760930 :     for (i = 0; i < ntuples; i++)
    2442             :     {
    2443             :         HeapTuple   tuple;
    2444             : 
    2445     3012826 :         tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
    2446     3012826 :         slots[i]->tts_tableOid = RelationGetRelid(relation);
    2447     3012826 :         tuple->t_tableOid = slots[i]->tts_tableOid;
    2448     3012826 :         heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
    2449             :                                             options);
    2450             :     }
    2451             : 
    2452             :     /*
    2453             :      * We're about to do the actual inserts -- but check for conflict first,
    2454             :      * to minimize the possibility of having to roll back work we've just
    2455             :      * done.
    2456             :      *
    2457             :      * A check here does not definitively prevent a serialization anomaly;
    2458             :      * that check MUST be done at least past the point of acquiring an
    2459             :      * exclusive buffer content lock on every buffer that will be affected,
    2460             :      * and MAY be done after all inserts are reflected in the buffers and
    2461             :      * those locks are released; otherwise there is a race condition.  Since
    2462             :      * multiple buffers can be locked and unlocked in the loop below, and it
    2463             :      * would not be feasible to identify and lock all of those buffers before
    2464             :      * the loop, we must do a final check at the end.
    2465             :      *
    2466             :      * The check here could be omitted with no loss of correctness; it is
    2467             :      * present strictly as an optimization.
    2468             :      *
    2469             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2470             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2471             :      * page locks are only consolidated versions of tuple locks; they do not
    2472             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2473             :      * buffer when making the call, which makes for a faster check.
    2474             :      */
    2475      748104 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2476             : 
    2477      748104 :     ndone = 0;
    2478     1526166 :     while (ndone < ntuples)
    2479             :     {
    2480             :         Buffer      buffer;
    2481      778062 :         bool        all_visible_cleared = false;
    2482      778062 :         bool        all_frozen_set = false;
    2483             :         int         nthispage;
    2484             : 
    2485      778062 :         CHECK_FOR_INTERRUPTS();
    2486             : 
    2487             :         /*
    2488             :          * Compute number of pages needed to fit the to-be-inserted tuples in
    2489             :          * the worst case.  This will be used to determine how much to extend
    2490             :          * the relation by in RelationGetBufferForTuple(), if needed.  If we
    2491             :          * filled a prior page from scratch, we can just update our last
    2492             :          * computation, but if we started with a partially filled page,
    2493             :          * recompute from scratch, the number of potentially required pages
    2494             :          * recompute from scratch, since the number of potentially required pages
    2495             :          * etc.
    2496             :          */
    2497      778062 :         if (ndone == 0 || !starting_with_empty_page)
    2498             :         {
    2499      761652 :             npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
    2500             :                                              saveFreeSpace);
    2501      761652 :             npages_used = 0;
    2502             :         }
    2503             :         else
    2504       16410 :             npages_used++;
    2505             : 
    2506             :         /*
    2507             :          * Find buffer where at least the next tuple will fit.  If the page is
    2508             :          * all-visible, this will also pin the requisite visibility map page.
    2509             :          *
    2510             :          * Also pin visibility map page if COPY FREEZE inserts tuples into an
    2511             :          * empty page. See all_frozen_set below.
    2512             :          */
    2513      778062 :         buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
    2514             :                                            InvalidBuffer, options, bistate,
    2515             :                                            &vmbuffer, NULL,
    2516             :                                            npages - npages_used);
    2517      778062 :         page = BufferGetPage(buffer);
    2518             : 
    2519      778062 :         starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
    2520             : 
    2521      778062 :         if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
    2522             :         {
    2523        3322 :             all_frozen_set = true;
    2524             :             /* Lock the vmbuffer before entering the critical section */
    2525        3322 :             LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
    2526             :         }
    2527             : 
    2528             :         /* NO EREPORT(ERROR) from here till changes are logged */
    2529      778062 :         START_CRIT_SECTION();
    2530             : 
    2531             :         /*
    2532             :          * RelationGetBufferForTuple has ensured that the first tuple fits.
    2533             :          * Put that on the page, and then as many other tuples as fit.
    2534             :          */
    2535      778062 :         RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
    2536             : 
    2537             :         /*
    2538             :          * For logical decoding we need combo CIDs to properly decode the
    2539             :          * catalog.
    2540             :          */
    2541      778062 :         if (needwal && need_cids)
    2542       10124 :             log_heap_new_cid(relation, heaptuples[ndone]);
    2543             : 
    2544     3012826 :         for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
    2545             :         {
    2546     2264722 :             HeapTuple   heaptup = heaptuples[ndone + nthispage];
    2547             : 
    2548     2264722 :             if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
    2549       29958 :                 break;
    2550             : 
    2551     2234764 :             RelationPutHeapTuple(relation, buffer, heaptup, false);
    2552             : 
    2553             :             /*
    2554             :              * For logical decoding we need combo CIDs to properly decode the
    2555             :              * catalog.
    2556             :              */
    2557     2234764 :             if (needwal && need_cids)
    2558        9478 :                 log_heap_new_cid(relation, heaptup);
    2559             :         }
    2560             : 
    2561             :         /*
    2562             :          * If the page is all visible, need to clear that, unless we're only
    2563             :          * going to add further frozen rows to it.
    2564             :          *
    2565             :          * If we're only adding already frozen rows to a previously empty
    2566             :          * page, mark it as all-frozen and update the visibility map. We're
    2567             :          * already holding a pin on the vmbuffer.
    2568             :          */
    2569      778062 :         if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
    2570             :         {
    2571        5876 :             all_visible_cleared = true;
    2572        5876 :             PageClearAllVisible(page);
    2573        5876 :             visibilitymap_clear(relation,
    2574             :                                 BufferGetBlockNumber(buffer),
    2575             :                                 vmbuffer, VISIBILITYMAP_VALID_BITS);
    2576             :         }
    2577      772186 :         else if (all_frozen_set)
    2578             :         {
    2579        3322 :             PageSetAllVisible(page);
    2580        3322 :             visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
    2581             :                                      vmbuffer,
    2582             :                                      VISIBILITYMAP_ALL_VISIBLE |
    2583             :                                      VISIBILITYMAP_ALL_FROZEN,
    2584             :                                      relation->rd_locator);
    2585             :         }
    2586             : 
    2587             :         /*
    2588             :          * XXX Should we set PageSetPrunable on this page ? See heap_insert()
    2589             :          */
    2590             : 
    2591      778062 :         MarkBufferDirty(buffer);
    2592             : 
    2593             :         /* XLOG stuff */
    2594      778062 :         if (needwal)
    2595             :         {
    2596             :             XLogRecPtr  recptr;
    2597             :             xl_heap_multi_insert *xlrec;
    2598      770410 :             uint8       info = XLOG_HEAP2_MULTI_INSERT;
    2599             :             char       *tupledata;
    2600             :             int         totaldatalen;
    2601      770410 :             char       *scratchptr = scratch.data;
    2602             :             bool        init;
    2603      770410 :             int         bufflags = 0;
    2604             : 
    2605             :             /*
    2606             :              * If the page was previously empty, we can reinit the page
    2607             :              * instead of restoring the whole thing.
    2608             :              */
    2609      770410 :             init = starting_with_empty_page;
    2610             : 
    2611             :             /* allocate xl_heap_multi_insert struct from the scratch area */
    2612      770410 :             xlrec = (xl_heap_multi_insert *) scratchptr;
    2613      770410 :             scratchptr += SizeOfHeapMultiInsert;
    2614             : 
    2615             :             /*
    2616             :              * Allocate the offsets array, unless we're reinitializing the
    2617             :              * page; in that case the tuples are stored in order starting at
    2618             :              * FirstOffsetNumber and we don't need to store the offsets
    2619             :              * explicitly.
    2620             :              */
    2621      770410 :             if (!init)
    2622      743454 :                 scratchptr += nthispage * sizeof(OffsetNumber);
    2623             : 
    2624             :             /* the rest of the scratch space is used for tuple data */
    2625      770410 :             tupledata = scratchptr;
    2626             : 
    2627             :             /* check that the mutually exclusive flags are not both set */
    2628             :             Assert(!(all_visible_cleared && all_frozen_set));
    2629             : 
    2630      770410 :             xlrec->flags = 0;
    2631      770410 :             if (all_visible_cleared)
    2632        5876 :                 xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
    2633             : 
    2634             :             /*
    2635             :              * We don't have to worry about including a conflict xid in the
    2636             :              * WAL record, as HEAP_INSERT_FROZEN intentionally violates
    2637             :              * visibility rules.
    2638             :              */
    2639      770410 :             if (all_frozen_set)
    2640          34 :                 xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
    2641             : 
    2642      770410 :             xlrec->ntuples = nthispage;
    2643             : 
    2644             :             /*
    2645             :              * Write out an xl_multi_insert_tuple and the tuple data itself
    2646             :              * for each tuple.
    2647             :              */
    2648     3372420 :             for (i = 0; i < nthispage; i++)
    2649             :             {
    2650     2602010 :                 HeapTuple   heaptup = heaptuples[ndone + i];
    2651             :                 xl_multi_insert_tuple *tuphdr;
    2652             :                 int         datalen;
    2653             : 
    2654     2602010 :                 if (!init)
    2655     1547178 :                     xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2656             :                 /* xl_multi_insert_tuple needs two-byte alignment. */
    2657     2602010 :                 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
    2658     2602010 :                 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
    2659             : 
    2660     2602010 :                 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
    2661     2602010 :                 tuphdr->t_infomask = heaptup->t_data->t_infomask;
    2662     2602010 :                 tuphdr->t_hoff = heaptup->t_data->t_hoff;
    2663             : 
    2664             :                 /* write bitmap [+ padding] [+ oid] + data */
    2665     2602010 :                 datalen = heaptup->t_len - SizeofHeapTupleHeader;
    2666     2602010 :                 memcpy(scratchptr,
    2667     2602010 :                        (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2668             :                        datalen);
    2669     2602010 :                 tuphdr->datalen = datalen;
    2670     2602010 :                 scratchptr += datalen;
    2671             :             }
    2672      770410 :             totaldatalen = scratchptr - tupledata;
    2673             :             Assert((scratchptr - scratch.data) < BLCKSZ);
    2674             : 
    2675      770410 :             if (need_tuple_data)
    2676         144 :                 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2677             : 
    2678             :             /*
    2679             :              * Signal that this is the last xl_heap_multi_insert record
    2680             :              * emitted by this call to heap_multi_insert(). Needed for logical
    2681             :              * decoding so it knows when to cleanup temporary data.
    2682             :              */
    2683      770410 :             if (ndone + nthispage == ntuples)
    2684      747270 :                 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
    2685             : 
    2686      770410 :             if (init)
    2687             :             {
    2688       26956 :                 info |= XLOG_HEAP_INIT_PAGE;
    2689       26956 :                 bufflags |= REGBUF_WILL_INIT;
    2690             :             }
    2691             : 
    2692             :             /*
    2693             :              * If we're doing logical decoding, include the new tuple data
    2694             :              * even if we take a full-page image of the page.
    2695             :              */
    2696      770410 :             if (need_tuple_data)
    2697         144 :                 bufflags |= REGBUF_KEEP_DATA;
    2698             : 
    2699      770410 :             XLogBeginInsert();
    2700      770410 :             XLogRegisterData(xlrec, tupledata - scratch.data);
    2701      770410 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2702      770410 :             if (all_frozen_set)
    2703          34 :                 XLogRegisterBuffer(1, vmbuffer, 0);
    2704             : 
    2705      770410 :             XLogRegisterBufData(0, tupledata, totaldatalen);
    2706             : 
    2707             :             /* filtering by origin on a row level is much more efficient */
    2708      770410 :             XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2709             : 
    2710      770410 :             recptr = XLogInsert(RM_HEAP2_ID, info);
    2711             : 
    2712      770410 :             PageSetLSN(page, recptr);
    2713      770410 :             if (all_frozen_set)
    2714             :             {
    2715             :                 Assert(BufferIsDirty(vmbuffer));
    2716          34 :                 PageSetLSN(BufferGetPage(vmbuffer), recptr);
    2717             :             }
    2718             :         }
    2719             : 
    2720      778062 :         END_CRIT_SECTION();
    2721             : 
    2722      778062 :         if (all_frozen_set)
    2723        3322 :             LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
    2724             : 
    2725      778062 :         UnlockReleaseBuffer(buffer);
    2726      778062 :         ndone += nthispage;
    2727             : 
    2728             :         /*
    2729             :          * NB: Only release vmbuffer after inserting all tuples - it's fairly
    2730             :          * likely that we'll insert into subsequent heap pages that are likely
    2731             :          * to use the same vm page.
    2732             :          */
    2733             :     }
    2734             : 
    2735             :     /* We're done with inserting all tuples, so release the last vmbuffer. */
    2736      748104 :     if (vmbuffer != InvalidBuffer)
    2737        6078 :         ReleaseBuffer(vmbuffer);
    2738             : 
    2739             :     /*
    2740             :      * We're done with the actual inserts.  Check for conflicts again, to
    2741             :      * ensure that all rw-conflicts in to these inserts are detected.  Without
    2742             :      * this final check, a sequential scan of the heap may have locked the
    2743             :      * table after the "before" check, missing one opportunity to detect the
    2744             :      * conflict, and then scanned the table before the new tuples were there,
    2745             :      * missing the other chance to detect the conflict.
    2746             :      *
    2747             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2748             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2749             :      * page locks are only consolidated versions of tuple locks; they do not
    2750             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2751             :      * buffer when making the call.
    2752             :      */
    2753      748104 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2754             : 
    2755             :     /*
    2756             :      * If tuples are cacheable, mark them for invalidation from the caches in
    2757             :      * case we abort.  Note it is OK to do this after releasing the buffer,
    2758             :      * because the heaptuples data structure is all in local memory, not in
    2759             :      * the shared buffer.
    2760             :      */
    2761      748104 :     if (IsCatalogRelation(relation))
    2762             :     {
    2763     2553810 :         for (i = 0; i < ntuples; i++)
    2764     1808154 :             CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
    2765             :     }
    2766             : 
    2767             :     /* copy t_self fields back to the caller's slots */
    2768     3760930 :     for (i = 0; i < ntuples; i++)
    2769     3012826 :         slots[i]->tts_tid = heaptuples[i]->t_self;
    2770             : 
    2771      748104 :     pgstat_count_heap_insert(relation, ntuples);
    2772      748104 : }
    2773             : 
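A calling-convention sketch (assumptions: "rel" is open and locked, "tupdesc" is RelationGetDescr(rel), and tuple0/tuple1 are heap tuples formed by the caller); real callers such as COPY manage their slots differently, so this only shows the shape of the call.

    TupleTableSlot *slots[2];

    slots[0] = MakeSingleTupleTableSlot(tupdesc, &TTSOpsHeapTuple);
    slots[1] = MakeSingleTupleTableSlot(tupdesc, &TTSOpsHeapTuple);
    ExecStoreHeapTuple(tuple0, slots[0], false);
    ExecStoreHeapTuple(tuple1, slots[1], false);

    heap_multi_insert(rel, slots, 2, GetCurrentCommandId(true), 0, NULL);
    /* slots[i]->tts_tid now carries the TID assigned to each inserted tuple */

    ExecDropSingleTupleTableSlot(slots[0]);
    ExecDropSingleTupleTableSlot(slots[1]);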
    2774             : /*
    2775             :  *  simple_heap_insert - insert a tuple
    2776             :  *
    2777             :  * Currently, this routine differs from heap_insert only in supplying
    2778             :  * a default command ID and not allowing access to the speedup options.
    2779             :  *
    2780             :  * This should be used rather than using heap_insert directly in most places
    2781             :  * where we are modifying system catalogs.
    2782             :  */
    2783             : void
    2784     1856018 : simple_heap_insert(Relation relation, HeapTuple tup)
    2785             : {
    2786     1856018 :     heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
    2787     1856018 : }
    2788             : 
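In practice most catalog updates reach this through CatalogTupleInsert(), which wraps simple_heap_insert() and then inserts the corresponding index entries; a short sketch (assuming "catrel" is an already-opened catalog relation and values/isnull match its descriptor):

    HeapTuple   tup = heap_form_tuple(RelationGetDescr(catrel), values, isnull);

    CatalogTupleInsert(catrel, tup);  /* simple_heap_insert + catalog index inserts */
    heap_freetuple(tup);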
    2789             : /*
    2790             :  * Given infomask/infomask2, compute the bits that must be saved in the
    2791             :  * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
    2792             :  * xl_heap_lock_updated WAL records.
    2793             :  *
    2794             :  * See fix_infomask_from_infobits.
    2795             :  */
    2796             : static uint8
    2797     4128262 : compute_infobits(uint16 infomask, uint16 infomask2)
    2798             : {
    2799             :     return
    2800     4128262 :         ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
    2801     4128262 :         ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
    2802     4128262 :         ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
    2803             :     /* note we ignore HEAP_XMAX_SHR_LOCK here */
    2804     8256524 :         ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
    2805             :         ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
    2806     4128262 :          XLHL_KEYS_UPDATED : 0);
    2807             : }
    2808             : 
    2809             : /*
    2810             :  * Given two versions of the same t_infomask for a tuple, compare them and
    2811             :  * return whether the relevant status for a tuple Xmax has changed.  This is
    2812             :  * used after a buffer lock has been released and reacquired: we want to ensure
    2813             :  * that the tuple state continues to be the same it was when we previously
    2814             :  * examined it.
    2815             :  *
    2816             :  * Note the Xmax field itself must be compared separately.
    2817             :  */
    2818             : static inline bool
    2819       10758 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
    2820             : {
    2821       10758 :     const uint16 interesting =
    2822             :         HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
    2823             : 
    2824       10758 :     if ((new_infomask & interesting) != (old_infomask & interesting))
    2825          32 :         return true;
    2826             : 
    2827       10726 :     return false;
    2828             : }
    2829             : 
    2830             : /*
    2831             :  *  heap_delete - delete a tuple
    2832             :  *
    2833             :  * See table_tuple_delete() for an explanation of the parameters, except that
    2834             :  * this routine directly takes a tuple rather than a slot.
    2835             :  *
    2836             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    2837             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    2838             :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    2839             :  * generated by another transaction).
    2840             :  */
    2841             : TM_Result
    2842     3052000 : heap_delete(Relation relation, const ItemPointerData *tid,
    2843             :             CommandId cid, Snapshot crosscheck, bool wait,
    2844             :             TM_FailureData *tmfd, bool changingPart)
    2845             : {
    2846             :     TM_Result   result;
    2847     3052000 :     TransactionId xid = GetCurrentTransactionId();
    2848             :     ItemId      lp;
    2849             :     HeapTupleData tp;
    2850             :     Page        page;
    2851             :     BlockNumber block;
    2852             :     Buffer      buffer;
    2853     3052000 :     Buffer      vmbuffer = InvalidBuffer;
    2854             :     TransactionId new_xmax;
    2855             :     uint16      new_infomask,
    2856             :                 new_infomask2;
    2857     3052000 :     bool        have_tuple_lock = false;
    2858             :     bool        iscombo;
    2859     3052000 :     bool        all_visible_cleared = false;
    2860     3052000 :     HeapTuple   old_key_tuple = NULL;   /* replica identity of the tuple */
    2861     3052000 :     bool        old_key_copied = false;
    2862             : 
    2863             :     Assert(ItemPointerIsValid(tid));
    2864             : 
    2865     3052000 :     AssertHasSnapshotForToast(relation);
    2866             : 
    2867             :     /*
    2868             :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    2869             :      * Other workers might need that combo CID for visibility checks, and we
    2870             :      * have no provision for broadcasting it to them.
    2871             :      */
    2872     3052000 :     if (IsInParallelMode())
    2873           0 :         ereport(ERROR,
    2874             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2875             :                  errmsg("cannot delete tuples during a parallel operation")));
    2876             : 
    2877     3052000 :     block = ItemPointerGetBlockNumber(tid);
    2878     3052000 :     buffer = ReadBuffer(relation, block);
    2879     3052000 :     page = BufferGetPage(buffer);
    2880             : 
    2881             :     /*
    2882             :      * Before locking the buffer, pin the visibility map page if it appears to
    2883             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    2884             :      * in the middle of changing this, so we'll need to recheck after we have
    2885             :      * the lock.
    2886             :      */
    2887     3052000 :     if (PageIsAllVisible(page))
    2888         476 :         visibilitymap_pin(relation, block, &vmbuffer);
    2889             : 
    2890     3052000 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2891             : 
    2892     3052000 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    2893             :     Assert(ItemIdIsNormal(lp));
    2894             : 
    2895     3052000 :     tp.t_tableOid = RelationGetRelid(relation);
    2896     3052000 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    2897     3052000 :     tp.t_len = ItemIdGetLength(lp);
    2898     3052000 :     tp.t_self = *tid;
    2899             : 
    2900           2 : l1:
    2901             : 
    2902             :     /*
    2903             :      * If we didn't pin the visibility map page and the page has become all
    2904             :      * visible while we were busy locking the buffer, we'll have to unlock and
    2905             :      * re-lock, to avoid holding the buffer lock across an I/O.  That's a bit
    2906             :      * unfortunate, but hopefully shouldn't happen often.
    2907             :      */
    2908     3052002 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    2909             :     {
    2910           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2911           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    2912           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2913             :     }
    2914             : 
    2915     3052002 :     result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
    2916             : 
    2917     3052002 :     if (result == TM_Invisible)
    2918             :     {
    2919           0 :         UnlockReleaseBuffer(buffer);
    2920           0 :         ereport(ERROR,
    2921             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    2922             :                  errmsg("attempted to delete invisible tuple")));
    2923             :     }
    2924     3052002 :     else if (result == TM_BeingModified && wait)
    2925             :     {
    2926             :         TransactionId xwait;
    2927             :         uint16      infomask;
    2928             : 
    2929             :         /* must copy state data before unlocking buffer */
    2930       81124 :         xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
    2931       81124 :         infomask = tp.t_data->t_infomask;
    2932             : 
    2933             :         /*
    2934             :          * Sleep until concurrent transaction ends -- except when there's a
    2935             :          * single locker and it's our own transaction.  Note we don't care
    2936             :          * which lock mode the locker has, because we need the strongest one.
    2937             :          *
    2938             :          * Before sleeping, we need to acquire tuple lock to establish our
    2939             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    2940             :          * release us when we are next-in-line for the tuple.
    2941             :          *
    2942             :          * If we are forced to "start over" below, we keep the tuple lock;
    2943             :          * this arranges that we stay at the head of the line while rechecking
    2944             :          * tuple state.
    2945             :          */
    2946       81124 :         if (infomask & HEAP_XMAX_IS_MULTI)
    2947             :         {
    2948          16 :             bool        current_is_member = false;
    2949             : 
    2950          16 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    2951             :                                         LockTupleExclusive, &current_is_member))
    2952             :             {
    2953          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2954             : 
    2955             :                 /*
    2956             :                  * Acquire the lock, if necessary (but skip it when we're
    2957             :                  * requesting a lock and already have one; avoids deadlock).
    2958             :                  */
    2959          16 :                 if (!current_is_member)
    2960          12 :                     heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2961             :                                          LockWaitBlock, &have_tuple_lock);
    2962             : 
    2963             :                 /* wait for multixact */
    2964          16 :                 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
    2965             :                                 relation, &(tp.t_self), XLTW_Delete,
    2966             :                                 NULL);
    2967          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2968             : 
    2969             :                 /*
    2970             :                  * If xwait had just locked the tuple then some other xact
    2971             :                  * could update this tuple before we get to this point.  Check
    2972             :                  * for xmax change, and start over if so.
    2973             :                  *
    2974             :                  * We also must start over if we didn't pin the VM page, and
    2975             :                  * the page has become all visible.
    2976             :                  */
    2977          32 :                 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    2978          32 :                     xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2979          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2980             :                                          xwait))
    2981           0 :                     goto l1;
    2982             :             }
    2983             : 
    2984             :             /*
    2985             :              * You might think the multixact is necessarily done here, but not
    2986             :              * so: it could have surviving members, namely our own xact or
    2987             :              * other subxacts of this backend.  It is legal for us to delete
    2988             :              * the tuple in either case, however (the latter case is
    2989             :              * essentially a situation of upgrading our former shared lock to
    2990             :              * exclusive).  We don't bother changing the on-disk hint bits
    2991             :              * since we are about to overwrite the xmax altogether.
    2992             :              */
    2993             :         }
    2994       81108 :         else if (!TransactionIdIsCurrentTransactionId(xwait))
    2995             :         {
    2996             :             /*
    2997             :              * Wait for regular transaction to end; but first, acquire tuple
    2998             :              * lock.
    2999             :              */
    3000         104 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3001         104 :             heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    3002             :                                  LockWaitBlock, &have_tuple_lock);
    3003         104 :             XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
    3004          96 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3005             : 
    3006             :             /*
    3007             :              * xwait is done, but if xwait had just locked the tuple then some
    3008             :              * other xact could update this tuple before we get to this point.
    3009             :              * Check for xmax change, and start over if so.
    3010             :              *
    3011             :              * We also must start over if we didn't pin the VM page, and the
    3012             :              * page has become all visible.
    3013             :              */
    3014         192 :             if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    3015         190 :                 xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    3016          94 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    3017             :                                      xwait))
    3018           2 :                 goto l1;
    3019             : 
    3020             :             /* Otherwise check if it committed or aborted */
    3021          94 :             UpdateXmaxHintBits(tp.t_data, buffer, xwait);
    3022             :         }
    3023             : 
    3024             :         /*
    3025             :          * We may overwrite if previous xmax aborted, or if it committed but
    3026             :          * only locked the tuple without updating it.
    3027             :          */
    3028      162188 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3029       81136 :             HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
    3030          62 :             HeapTupleHeaderIsOnlyLocked(tp.t_data))
    3031       81060 :             result = TM_Ok;
    3032          54 :         else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    3033          46 :             result = TM_Updated;
    3034             :         else
    3035           8 :             result = TM_Deleted;
    3036             :     }
    3037             : 
    3038             :     /* sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
    3039             :     if (result != TM_Ok)
    3040             :     {
    3041             :         Assert(result == TM_SelfModified ||
    3042             :                result == TM_Updated ||
    3043             :                result == TM_Deleted ||
    3044             :                result == TM_BeingModified);
    3045             :         Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
    3046             :         Assert(result != TM_Updated ||
    3047             :                !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
    3048             :     }
    3049             : 
    3050     3051992 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3051             :     {
    3052             :         /* Perform additional check for transaction-snapshot mode RI updates */
    3053           2 :         if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
    3054           2 :             result = TM_Updated;
    3055             :     }
    3056             : 
    3057     3051992 :     if (result != TM_Ok)
    3058             :     {
    3059         124 :         tmfd->ctid = tp.t_data->t_ctid;
    3060         124 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    3061         124 :         if (result == TM_SelfModified)
    3062          42 :             tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
    3063             :         else
    3064          82 :             tmfd->cmax = InvalidCommandId;
    3065         124 :         UnlockReleaseBuffer(buffer);
    3066         124 :         if (have_tuple_lock)
    3067          54 :             UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    3068         124 :         if (vmbuffer != InvalidBuffer)
    3069           0 :             ReleaseBuffer(vmbuffer);
    3070         124 :         return result;
    3071             :     }
    3072             : 
    3073             :     /*
    3074             :      * We're about to do the actual delete -- check for conflict first, to
    3075             :      * avoid possibly having to roll back work we've just done.
    3076             :      *
    3077             :      * This is safe without a recheck as long as there is no possibility of
    3078             :      * another process scanning the page between this check and the delete
    3079             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    3080             :      * continuously held from this point until the tuple delete is visible).
    3081             :      */
    3082     3051868 :     CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
    3083             : 
    3084             :     /* replace cid with a combo CID if necessary */
    3085     3051840 :     HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
    3086             : 
    3087             :     /*
    3088             :      * Compute replica identity tuple before entering the critical section so
    3089             :      * we don't PANIC upon a memory allocation failure.
    3090             :      */
    3091     3051840 :     old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
    3092             : 
    3093             :     /*
    3094             :      * If this is the first possibly-multixact-able operation in the current
    3095             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    3096             :      * certain that the transaction will never become a member of any older
    3097             :      * MultiXactIds than that.  (We have to do this even if we end up just
    3098             :      * using our own TransactionId below, since some other backend could
    3099             :      * incorporate our XID into a MultiXact immediately afterwards.)
    3100             :      */
    3101     3051840 :     MultiXactIdSetOldestMember();
    3102             : 
    3103     3051840 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
    3104     3051840 :                               tp.t_data->t_infomask, tp.t_data->t_infomask2,
    3105             :                               xid, LockTupleExclusive, true,
    3106             :                               &new_xmax, &new_infomask, &new_infomask2);
    3107             : 
    3108     3051840 :     START_CRIT_SECTION();
    3109             : 
    3110             :     /*
    3111             :      * If this transaction commits, the tuple will become DEAD sooner or
    3112             :      * later.  Set flag that this page is a candidate for pruning once our xid
    3113             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    3114             :      * the subsequent page pruning will be a no-op and the hint will be
    3115             :      * cleared.
    3116             :      */
    3117     3051840 :     PageSetPrunable(page, xid);
    3118             : 
    3119     3051840 :     if (PageIsAllVisible(page))
    3120             :     {
    3121         476 :         all_visible_cleared = true;
    3122         476 :         PageClearAllVisible(page);
    3123         476 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    3124             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    3125             :     }
    3126             : 
    3127             :     /* store transaction information of xact deleting the tuple */
    3128     3051840 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3129     3051840 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3130     3051840 :     tp.t_data->t_infomask |= new_infomask;
    3131     3051840 :     tp.t_data->t_infomask2 |= new_infomask2;
    3132     3051840 :     HeapTupleHeaderClearHotUpdated(tp.t_data);
    3133     3051840 :     HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
    3134     3051840 :     HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    3135             :     /* Make sure there is no forward chain link in t_ctid */
    3136     3051840 :     tp.t_data->t_ctid = tp.t_self;
    3137             : 
    3138             :     /* Signal that this is actually a move into another partition */
    3139     3051840 :     if (changingPart)
    3140         986 :         HeapTupleHeaderSetMovedPartitions(tp.t_data);
    3141             : 
    3142     3051840 :     MarkBufferDirty(buffer);
    3143             : 
    3144             :     /*
    3145             :      * XLOG stuff
    3146             :      *
    3147             :      * NB: heap_abort_speculative() uses the same xlog record and replay
    3148             :      * routines.
    3149             :      */
    3150     3051840 :     if (RelationNeedsWAL(relation))
    3151             :     {
    3152             :         xl_heap_delete xlrec;
    3153             :         xl_heap_header xlhdr;
    3154             :         XLogRecPtr  recptr;
    3155             : 
    3156             :         /*
    3157             :          * For logical decode we need combo CIDs to properly decode the
    3158             :          * catalog
    3159             :          */
    3160     2926652 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    3161       12584 :             log_heap_new_cid(relation, &tp);
    3162             : 
    3163     2926652 :         xlrec.flags = 0;
    3164     2926652 :         if (all_visible_cleared)
    3165         476 :             xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
    3166     2926652 :         if (changingPart)
    3167         986 :             xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
    3168     5853304 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    3169     2926652 :                                               tp.t_data->t_infomask2);
    3170     2926652 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    3171     2926652 :         xlrec.xmax = new_xmax;
    3172             : 
    3173     2926652 :         if (old_key_tuple != NULL)
    3174             :         {
    3175       94036 :             if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    3176         264 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
    3177             :             else
    3178       93772 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
    3179             :         }
    3180             : 
    3181     2926652 :         XLogBeginInsert();
    3182     2926652 :         XLogRegisterData(&xlrec, SizeOfHeapDelete);
    3183             : 
    3184     2926652 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3185             : 
    3186             :         /*
    3187             :          * Log replica identity of the deleted tuple if there is one
    3188             :          */
    3189     2926652 :         if (old_key_tuple != NULL)
    3190             :         {
    3191       94036 :             xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    3192       94036 :             xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
    3193       94036 :             xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
    3194             : 
    3195       94036 :             XLogRegisterData(&xlhdr, SizeOfHeapHeader);
    3196       94036 :             XLogRegisterData((char *) old_key_tuple->t_data
    3197             :                              + SizeofHeapTupleHeader,
    3198       94036 :                              old_key_tuple->t_len
    3199             :                              - SizeofHeapTupleHeader);
    3200             :         }
    3201             : 
    3202             :         /* filtering by origin on a row level is much more efficient */
    3203     2926652 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    3204             : 
    3205     2926652 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    3206             : 
    3207     2926652 :         PageSetLSN(page, recptr);
    3208             :     }
    3209             : 
    3210     3051840 :     END_CRIT_SECTION();
    3211             : 
    3212     3051840 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3213             : 
    3214     3051840 :     if (vmbuffer != InvalidBuffer)
    3215         476 :         ReleaseBuffer(vmbuffer);
    3216             : 
    3217             :     /*
    3218             :      * If the tuple has toasted out-of-line attributes, we need to delete
    3219             :      * those items too.  We have to do this before releasing the buffer
    3220             :      * because we need to look at the contents of the tuple, but it's OK to
    3221             :      * release the content lock on the buffer first.
    3222             :      */
    3223     3051840 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3224        5152 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3225             :     {
    3226             :         /* toast table entries should never be recursively toasted */
    3227             :         Assert(!HeapTupleHasExternal(&tp));
    3228             :     }
    3229     3046708 :     else if (HeapTupleHasExternal(&tp))
    3230         588 :         heap_toast_delete(relation, &tp, false);
    3231             : 
    3232             :     /*
    3233             :      * Mark tuple for invalidation from system caches at next command
    3234             :      * boundary. We have to do this before releasing the buffer because we
    3235             :      * need to look at the contents of the tuple.
    3236             :      */
    3237     3051840 :     CacheInvalidateHeapTuple(relation, &tp, NULL);
    3238             : 
    3239             :     /* Now we can release the buffer */
    3240     3051840 :     ReleaseBuffer(buffer);
    3241             : 
    3242             :     /*
    3243             :      * Release the lmgr tuple lock, if we had it.
    3244             :      */
    3245     3051840 :     if (have_tuple_lock)
    3246          52 :         UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    3247             : 
    3248     3051840 :     pgstat_count_heap_delete(relation);
    3249             : 
    3250     3051840 :     if (old_key_tuple != NULL && old_key_copied)
    3251       93774 :         heap_freetuple(old_key_tuple);
    3252             : 
    3253     3051840 :     return TM_Ok;
    3254             : }
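                     : 
                     : /*
                     :  * Illustrative sketch, not actual in-core code: a hypothetical direct
                     :  * caller of heap_delete() that tolerates concurrent updates could inspect
                     :  * the returned TM_Result and, on TM_Updated, follow tmfd.ctid to the newer
                     :  * tuple version before retrying.  In-core executor code reaches this
                     :  * function through table_tuple_delete() and EvalPlanQual handling instead.
                     :  * Here "rel" is assumed to be an open heap relation and "tid" a tuple TID
                     :  * supplied by the caller.
                     :  *
                     :  *     TM_FailureData tmfd;
                     :  *     TM_Result   res;
                     :  *
                     :  *     res = heap_delete(rel, &tid, GetCurrentCommandId(true),
                     :  *                       InvalidSnapshot, true, &tmfd, false);
                     :  *     if (res == TM_Updated)
                     :  *         tid = tmfd.ctid;
                     :  *     else if (res != TM_Ok && res != TM_SelfModified)
                     :  *         elog(ERROR, "unexpected heap_delete result: %u", res);
                     :  */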
    3255             : 
    3256             : /*
    3257             :  *  simple_heap_delete - delete a tuple
    3258             :  *
    3259             :  * This routine may be used to delete a tuple when concurrent updates of
    3260             :  * the target tuple are not expected (for example, because we have a lock
    3261             :  * on the relation associated with the tuple).  Any failure is reported
    3262             :  * via ereport().
    3263             :  */
    3264             : void
    3265     1320046 : simple_heap_delete(Relation relation, const ItemPointerData *tid)
    3266             : {
    3267             :     TM_Result   result;
    3268             :     TM_FailureData tmfd;
    3269             : 
    3270     1320046 :     result = heap_delete(relation, tid,
    3271             :                          GetCurrentCommandId(true), InvalidSnapshot,
    3272             :                          true /* wait for commit */ ,
    3273             :                          &tmfd, false /* changingPart */ );
    3274     1320046 :     switch (result)
    3275             :     {
    3276           0 :         case TM_SelfModified:
    3277             :             /* Tuple was already updated in current command? */
    3278           0 :             elog(ERROR, "tuple already updated by self");
    3279             :             break;
    3280             : 
    3281     1320046 :         case TM_Ok:
    3282             :             /* done successfully */
    3283     1320046 :             break;
    3284             : 
    3285           0 :         case TM_Updated:
    3286           0 :             elog(ERROR, "tuple concurrently updated");
    3287             :             break;
    3288             : 
    3289           0 :         case TM_Deleted:
    3290           0 :             elog(ERROR, "tuple concurrently deleted");
    3291             :             break;
    3292             : 
    3293           0 :         default:
    3294           0 :             elog(ERROR, "unrecognized heap_delete status: %u", result);
    3295             :             break;
    3296             :     }
    3297     1320046 : }
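                     : 
                     : /*
                     :  * Illustrative sketch, assuming a hypothetical catalog: simple_heap_delete()
                     :  * is the kind of primitive that catalog maintenance code drives from a
                     :  * system table scan, deleting each matching tuple.  In-core callers normally
                     :  * go through CatalogTupleDelete(), which wraps this function.  The names
                     :  * pg_foo, Anum_pg_foo_fooid and FooOidIndexId below are placeholders; "rel"
                     :  * is the open catalog relation and "fooid" the key being removed.
                     :  *
                     :  *     ScanKeyData key;
                     :  *     SysScanDesc scan;
                     :  *     HeapTuple   tuple;
                     :  *
                     :  *     ScanKeyInit(&key, Anum_pg_foo_fooid,
                     :  *                 BTEqualStrategyNumber, F_OIDEQ,
                     :  *                 ObjectIdGetDatum(fooid));
                     :  *     scan = systable_beginscan(rel, FooOidIndexId, true, NULL, 1, &key);
                     :  *     while (HeapTupleIsValid(tuple = systable_getnext(scan)))
                     :  *         simple_heap_delete(rel, &tuple->t_self);
                     :  *     systable_endscan(scan);
                     :  */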
    3298             : 
    3299             : /*
    3300             :  *  heap_update - replace a tuple
    3301             :  *
    3302             :  * See table_tuple_update() for an explanation of the parameters, except that
    3303             :  * this routine directly takes a tuple rather than a slot.
    3304             :  *
    3305             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    3306             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    3307             :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    3308             :  * generated by another transaction).
    3309             :  */
    3310             : TM_Result
    3311      624282 : heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
    3312             :             CommandId cid, Snapshot crosscheck, bool wait,
    3313             :             TM_FailureData *tmfd, LockTupleMode *lockmode,
    3314             :             TU_UpdateIndexes *update_indexes)
    3315             : {
    3316             :     TM_Result   result;
    3317      624282 :     TransactionId xid = GetCurrentTransactionId();
    3318             :     Bitmapset  *hot_attrs;
    3319             :     Bitmapset  *sum_attrs;
    3320             :     Bitmapset  *key_attrs;
    3321             :     Bitmapset  *id_attrs;
    3322             :     Bitmapset  *interesting_attrs;
    3323             :     Bitmapset  *modified_attrs;
    3324             :     ItemId      lp;
    3325             :     HeapTupleData oldtup;
    3326             :     HeapTuple   heaptup;
    3327      624282 :     HeapTuple   old_key_tuple = NULL;
    3328      624282 :     bool        old_key_copied = false;
    3329             :     Page        page;
    3330             :     BlockNumber block;
    3331             :     MultiXactStatus mxact_status;
    3332             :     Buffer      buffer,
    3333             :                 newbuf,
    3334      624282 :                 vmbuffer = InvalidBuffer,
    3335      624282 :                 vmbuffer_new = InvalidBuffer;
    3336             :     bool        need_toast;
    3337             :     Size        newtupsize,
    3338             :                 pagefree;
    3339      624282 :     bool        have_tuple_lock = false;
    3340             :     bool        iscombo;
    3341      624282 :     bool        use_hot_update = false;
    3342      624282 :     bool        summarized_update = false;
    3343             :     bool        key_intact;
    3344      624282 :     bool        all_visible_cleared = false;
    3345      624282 :     bool        all_visible_cleared_new = false;
    3346             :     bool        checked_lockers;
    3347             :     bool        locker_remains;
    3348      624282 :     bool        id_has_external = false;
    3349             :     TransactionId xmax_new_tuple,
    3350             :                 xmax_old_tuple;
    3351             :     uint16      infomask_old_tuple,
    3352             :                 infomask2_old_tuple,
    3353             :                 infomask_new_tuple,
    3354             :                 infomask2_new_tuple;
    3355             : 
    3356             :     Assert(ItemPointerIsValid(otid));
    3357             : 
    3358             :     /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    3359             :     Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
    3360             :            RelationGetNumberOfAttributes(relation));
    3361             : 
    3362      624282 :     AssertHasSnapshotForToast(relation);
    3363             : 
    3364             :     /*
    3365             :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    3366             :      * Other workers might need that combo CID for visibility checks, and we
    3367             :      * have no provision for broadcasting it to them.
    3368             :      */
    3369      624282 :     if (IsInParallelMode())
    3370           0 :         ereport(ERROR,
    3371             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    3372             :                  errmsg("cannot update tuples during a parallel operation")));
    3373             : 
    3374             : #ifdef USE_ASSERT_CHECKING
    3375             :     check_lock_if_inplace_updateable_rel(relation, otid, newtup);
    3376             : #endif
    3377             : 
    3378             :     /*
    3379             :      * Fetch the list of attributes to be checked for various operations.
    3380             :      *
    3381             :      * For HOT considerations, this is wasted effort if we fail to update or
    3382             :      * have to put the new tuple on a different page.  But we must compute the
    3383             :      * list before obtaining buffer lock --- in the worst case, if we are
    3384             :      * doing an update on one of the relevant system catalogs, we could
    3385             :      * deadlock if we try to fetch the list later.  In any case, the relcache
    3386             :      * caches the data so this is usually pretty cheap.
    3387             :      *
    3388             :      * We also need columns used by the replica identity and columns that are
    3389             :      * considered the "key" of rows in the table.
    3390             :      *
    3391             :      * Note that we get copies of each bitmap, so we need not worry about
    3392             :      * relcache flush happening midway through.
    3393             :      */
    3394      624282 :     hot_attrs = RelationGetIndexAttrBitmap(relation,
    3395             :                                            INDEX_ATTR_BITMAP_HOT_BLOCKING);
    3396      624282 :     sum_attrs = RelationGetIndexAttrBitmap(relation,
    3397             :                                            INDEX_ATTR_BITMAP_SUMMARIZED);
    3398      624282 :     key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
    3399      624282 :     id_attrs = RelationGetIndexAttrBitmap(relation,
    3400             :                                           INDEX_ATTR_BITMAP_IDENTITY_KEY);
    3401      624282 :     interesting_attrs = NULL;
    3402      624282 :     interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
    3403      624282 :     interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
    3404      624282 :     interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
    3405      624282 :     interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
    3406             : 
    3407      624282 :     block = ItemPointerGetBlockNumber(otid);
    3408      624282 :     INJECTION_POINT("heap_update-before-pin", NULL);
    3409      624282 :     buffer = ReadBuffer(relation, block);
    3410      624282 :     page = BufferGetPage(buffer);
    3411             : 
    3412             :     /*
    3413             :      * Before locking the buffer, pin the visibility map page if it appears to
    3414             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    3415             :      * in the middle of changing this, so we'll need to recheck after we have
    3416             :      * the lock.
    3417             :      */
    3418      624282 :     if (PageIsAllVisible(page))
    3419        3058 :         visibilitymap_pin(relation, block, &vmbuffer);
    3420             : 
    3421      624282 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3422             : 
    3423      624282 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
    3424             : 
    3425             :     /*
    3426             :      * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
    3427             :      * we see LP_NORMAL here.  When the otid origin is a syscache, we may have
    3428             :      * neither a pin nor a snapshot.  Hence, we may see other LP_ states, each
    3429             :      * of which indicates concurrent pruning.
    3430             :      *
    3431             :      * Failing with TM_Updated would be most accurate.  However, unlike other
    3432             :      * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
    3433             :      * LP_DEAD cases.  While the distinction between TM_Updated and TM_Deleted
    3434             :      * does matter to SQL statements UPDATE and MERGE, those SQL statements
    3435             :      * hold a snapshot that ensures LP_NORMAL.  Hence, the choice between
    3436             :      * TM_Updated and TM_Deleted affects only the wording of error messages.
    3437             :      * Settle on TM_Deleted, for two reasons.  First, it avoids complicating
    3438             :      * the specification of when tmfd->ctid is valid.  Second, it creates
    3439             :      * error log evidence that we took this branch.
    3440             :      *
    3441             :      * Since it's possible to see LP_UNUSED at otid, it's also possible to see
    3442             :      * LP_NORMAL for a tuple that replaced LP_UNUSED.  If it's a tuple for an
    3443             :      * unrelated row, we'll fail with "duplicate key value violates unique".
    3444             :      * XXX if otid is the live, newer version of the newtup row, we'll discard
    3445             :      * changes originating in versions of this catalog row after the version
    3446             :      * the caller got from syscache.  See syscache-update-pruned.spec.
    3447             :      */
    3448      624282 :     if (!ItemIdIsNormal(lp))
    3449             :     {
    3450             :         Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
    3451             : 
    3452           2 :         UnlockReleaseBuffer(buffer);
    3453             :         Assert(!have_tuple_lock);
    3454           2 :         if (vmbuffer != InvalidBuffer)
    3455           2 :             ReleaseBuffer(vmbuffer);
    3456           2 :         tmfd->ctid = *otid;
    3457           2 :         tmfd->xmax = InvalidTransactionId;
    3458           2 :         tmfd->cmax = InvalidCommandId;
    3459           2 :         *update_indexes = TU_None;
    3460             : 
    3461           2 :         bms_free(hot_attrs);
    3462           2 :         bms_free(sum_attrs);
    3463           2 :         bms_free(key_attrs);
    3464           2 :         bms_free(id_attrs);
    3465             :         /* modified_attrs not yet initialized */
    3466           2 :         bms_free(interesting_attrs);
    3467           2 :         return TM_Deleted;
    3468             :     }
    3469             : 
    3470             :     /*
    3471             :      * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
    3472             :      * properly.
    3473             :      */
    3474      624280 :     oldtup.t_tableOid = RelationGetRelid(relation);
    3475      624280 :     oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    3476      624280 :     oldtup.t_len = ItemIdGetLength(lp);
    3477      624280 :     oldtup.t_self = *otid;
    3478             : 
    3479             :     /* the new tuple is ready, except for this: */
    3480      624280 :     newtup->t_tableOid = RelationGetRelid(relation);
    3481             : 
    3482             :     /*
    3483             :      * Determine columns modified by the update.  Additionally, identify
    3484             :      * whether any of the unmodified replica identity key attributes in the
    3485             :      * old tuple is externally stored or not.  This is required because for
    3486             :      * such attributes the flattened value won't be WAL logged as part of the
    3487             :      * new tuple so we must include it as part of the old_key_tuple.  See
    3488             :      * ExtractReplicaIdentity.
    3489             :      */
    3490      624280 :     modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
    3491             :                                               id_attrs, &oldtup,
    3492             :                                               newtup, &id_has_external);
    3493             : 
    3494             :     /*
    3495             :      * If we're not updating any "key" column, we can grab a weaker lock type.
    3496             :      * This allows for more concurrency when we are running simultaneously
    3497             :      * with foreign key checks.
    3498             :      *
    3499             :      * Note that if a column gets detoasted while executing the update, but
    3500             :      * the value ends up being the same, this test will fail and we will use
    3501             :      * the stronger lock.  This is acceptable; the important case to optimize
    3502             :      * is updates that don't manipulate key columns, not those that
    3503             :      * serendipitously arrive at the same key values.
    3504             :      */
    3505      624280 :     if (!bms_overlap(modified_attrs, key_attrs))
    3506             :     {
    3507      615656 :         *lockmode = LockTupleNoKeyExclusive;
    3508      615656 :         mxact_status = MultiXactStatusNoKeyUpdate;
    3509      615656 :         key_intact = true;
    3510             : 
    3511             :         /*
    3512             :          * If this is the first possibly-multixact-able operation in the
    3513             :          * current transaction, set my per-backend OldestMemberMXactId
    3514             :          * setting. We can be certain that the transaction will never become a
    3515             :          * member of any older MultiXactIds than that.  (We have to do this
    3516             :          * even if we end up just using our own TransactionId below, since
    3517             :          * some other backend could incorporate our XID into a MultiXact
    3518             :          * immediately afterwards.)
    3519             :          */
    3520      615656 :         MultiXactIdSetOldestMember();
    3521             :     }
    3522             :     else
    3523             :     {
    3524        8624 :         *lockmode = LockTupleExclusive;
    3525        8624 :         mxact_status = MultiXactStatusUpdate;
    3526        8624 :         key_intact = false;
    3527             :     }
    3528             : 
    3529             :     /*
    3530             :      * Note: beyond this point, use oldtup not otid to refer to old tuple.
    3531             :      * otid may very well point at newtup->t_self, which we will overwrite
    3532             :      * with the new tuple's location, so there's great risk of confusion if we
    3533             :      * use otid anymore.
    3534             :      */
    3535             : 
    3536      624280 : l2:
    3537      624282 :     checked_lockers = false;
    3538      624282 :     locker_remains = false;
    3539      624282 :     result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
    3540             : 
    3541             :     /* see below about the "no wait" case */
    3542             :     Assert(result != TM_BeingModified || wait);
    3543             : 
    3544      624282 :     if (result == TM_Invisible)
    3545             :     {
    3546           0 :         UnlockReleaseBuffer(buffer);
    3547           0 :         ereport(ERROR,
    3548             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    3549             :                  errmsg("attempted to update invisible tuple")));
    3550             :     }
    3551      624282 :     else if (result == TM_BeingModified && wait)
    3552             :     {
    3553             :         TransactionId xwait;
    3554             :         uint16      infomask;
    3555       72214 :         bool        can_continue = false;
    3556             : 
    3557             :         /*
    3558             :          * XXX note that we don't consider the "no wait" case here.  This
    3559             :          * isn't a problem currently because no caller uses that case, but it
    3560             :          * should be fixed if such a caller is introduced.  It wasn't a
    3561             :          * problem previously because this code would always wait, but now
    3562             :          * that some tuple locks do not conflict with one of the lock modes we
    3563             :          * use, it is possible that this case is interesting to handle
    3564             :          * specially.
    3565             :          *
    3566             :          * This may cause failures with third-party code that calls
    3567             :          * heap_update directly.
    3568             :          */
    3569             : 
    3570             :         /* must copy state data before unlocking buffer */
    3571       72214 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3572       72214 :         infomask = oldtup.t_data->t_infomask;
    3573             : 
    3574             :         /*
    3575             :          * Now we have to do something about the existing locker.  If it's a
    3576             :          * multi, sleep on it; we might be awakened before it is completely
    3577             :          * gone (or even not sleep at all in some cases); we need to preserve
    3578             :          * it as locker, unless it is gone completely.
    3579             :          *
    3580             :          * If it's not a multi, we need to check for sleeping conditions
    3581             :          * before actually going to sleep.  If the update doesn't conflict
    3582             :          * with the locks, we just continue without sleeping (but making sure
    3583             :          * it is preserved).
    3584             :          *
    3585             :          * Before sleeping, we need to acquire tuple lock to establish our
    3586             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    3587             :          * release us when we are next-in-line for the tuple.  Note we must
    3588             :          * not acquire the tuple lock until we're sure we're going to sleep;
    3589             :          * otherwise we're open for race conditions with other transactions
    3590             :          * holding the tuple lock which sleep on us.
    3591             :          *
    3592             :          * If we are forced to "start over" below, we keep the tuple lock;
    3593             :          * this arranges that we stay at the head of the line while rechecking
    3594             :          * tuple state.
    3595             :          */
    3596       72214 :         if (infomask & HEAP_XMAX_IS_MULTI)
    3597             :         {
    3598             :             TransactionId update_xact;
    3599             :             int         remain;
    3600         358 :             bool        current_is_member = false;
    3601             : 
    3602         358 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    3603             :                                         *lockmode, &current_is_member))
    3604             :             {
    3605          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3606             : 
    3607             :                 /*
    3608             :                  * Acquire the lock, if necessary (but skip it when we're
    3609             :                  * requesting a lock and already have one; avoids deadlock).
    3610             :                  */
    3611          16 :                 if (!current_is_member)
    3612           0 :                     heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3613             :                                          LockWaitBlock, &have_tuple_lock);
    3614             : 
    3615             :                 /* wait for multixact */
    3616          16 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    3617             :                                 relation, &oldtup.t_self, XLTW_Update,
    3618             :                                 &remain);
    3619          16 :                 checked_lockers = true;
    3620          16 :                 locker_remains = remain != 0;
    3621          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3622             : 
    3623             :                 /*
    3624             :                  * If xwait had just locked the tuple then some other xact
    3625             :                  * could update this tuple before we get to this point.  Check
    3626             :                  * for xmax change, and start over if so.
    3627             :                  */
    3628          16 :                 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
    3629          16 :                                           infomask) ||
    3630          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3631             :                                          xwait))
    3632           0 :                     goto l2;
    3633             :             }
    3634             : 
    3635             :             /*
    3636             :              * Note that the multixact may not be done by now.  It could have
    3637             :              * surviving members; our own xact or other subxacts of this
    3638             :              * backend, and also any other concurrent transaction that locked
    3639             :              * the tuple with LockTupleKeyShare if we only got
    3640             :              * LockTupleNoKeyExclusive.  If this is the case, we have to be
    3641             :              * careful to mark the updated tuple with the surviving members in
    3642             :              * Xmax.
    3643             :              *
    3644             :              * Note that there could have been another update in the
    3645             :              * MultiXact. In that case, we need to check whether it committed
    3646             :              * or aborted. If it aborted we are safe to update it again;
    3647             :              * otherwise there is an update conflict, and we have to return
    3648             :              * TableTuple{Deleted, Updated} below.
    3649             :              *
    3650             :              * In the LockTupleExclusive case, we still need to preserve the
    3651             :              * surviving members: those would include the tuple locks we had
    3652             :              * before this one, which are important to keep in case this
    3653             :              * subxact aborts.
    3654             :              */
    3655         358 :             if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
    3656          16 :                 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
    3657             :             else
    3658         342 :                 update_xact = InvalidTransactionId;
    3659             : 
    3660             :             /*
    3661             :              * There was no UPDATE in the MultiXact; or it aborted. No
    3662             :              * TransactionIdIsInProgress() call needed here, since we called
    3663             :              * MultiXactIdWait() above.
    3664             :              */
    3665         374 :             if (!TransactionIdIsValid(update_xact) ||
    3666          16 :                 TransactionIdDidAbort(update_xact))
    3667         344 :                 can_continue = true;
    3668             :         }
    3669       71856 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    3670             :         {
    3671             :             /*
    3672             :              * The only locker is ourselves; we can avoid grabbing the tuple
    3673             :              * lock here, but must preserve our locking information.
    3674             :              */
    3675       71642 :             checked_lockers = true;
    3676       71642 :             locker_remains = true;
    3677       71642 :             can_continue = true;
    3678             :         }
    3679         214 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
    3680             :         {
    3681             :             /*
    3682             :              * If it's just a key-share locker, and we're not changing the key
    3683             :              * columns, we don't need to wait for it to end; but we need to
    3684             :              * preserve it as locker.
    3685             :              */
    3686          58 :             checked_lockers = true;
    3687          58 :             locker_remains = true;
    3688          58 :             can_continue = true;
    3689             :         }
    3690             :         else
    3691             :         {
    3692             :             /*
    3693             :              * Wait for regular transaction to end; but first, acquire tuple
    3694             :              * lock.
    3695             :              */
    3696         156 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3697         156 :             heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3698             :                                  LockWaitBlock, &have_tuple_lock);
    3699         156 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    3700             :                               XLTW_Update);
    3701         156 :             checked_lockers = true;
    3702         156 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3703             : 
    3704             :             /*
    3705             :              * xwait is done, but if xwait had just locked the tuple then some
    3706             :              * other xact could update this tuple before we get to this point.
    3707             :              * Check for xmax change, and start over if so.
    3708             :              */
    3709         310 :             if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
    3710         154 :                 !TransactionIdEquals(xwait,
    3711             :                                      HeapTupleHeaderGetRawXmax(oldtup.t_data)))
    3712           2 :                 goto l2;
    3713             : 
    3714             :             /* Otherwise check if it committed or aborted */
    3715         154 :             UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
    3716         154 :             if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
    3717          44 :                 can_continue = true;
    3718             :         }
    3719             : 
    3720       72212 :         if (can_continue)
    3721       72088 :             result = TM_Ok;
    3722         124 :         else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
    3723         114 :             result = TM_Updated;
    3724             :         else
    3725          10 :             result = TM_Deleted;
    3726             :     }
    3727             : 
    3728             :     /* Sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
    3729             :     if (result != TM_Ok)
    3730             :     {
    3731             :         Assert(result == TM_SelfModified ||
    3732             :                result == TM_Updated ||
    3733             :                result == TM_Deleted ||
    3734             :                result == TM_BeingModified);
    3735             :         Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
    3736             :         Assert(result != TM_Updated ||
    3737             :                !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
    3738             :     }
    3739             : 
    3740      624280 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3741             :     {
    3742             :         /* Perform additional check for transaction-snapshot mode RI updates */
    3743           2 :         if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
    3744           2 :             result = TM_Updated;
    3745             :     }
    3746             : 
    3747      624280 :     if (result != TM_Ok)
    3748             :     {
    3749         320 :         tmfd->ctid = oldtup.t_data->t_ctid;
    3750         320 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
    3751         320 :         if (result == TM_SelfModified)
    3752         104 :             tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
    3753             :         else
    3754         216 :             tmfd->cmax = InvalidCommandId;
    3755         320 :         UnlockReleaseBuffer(buffer);
    3756         320 :         if (have_tuple_lock)
    3757         110 :             UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    3758         320 :         if (vmbuffer != InvalidBuffer)
    3759           0 :             ReleaseBuffer(vmbuffer);
    3760         320 :         *update_indexes = TU_None;
    3761             : 
    3762         320 :         bms_free(hot_attrs);
    3763         320 :         bms_free(sum_attrs);
    3764         320 :         bms_free(key_attrs);
    3765         320 :         bms_free(id_attrs);
    3766         320 :         bms_free(modified_attrs);
    3767         320 :         bms_free(interesting_attrs);
    3768         320 :         return result;
    3769             :     }
    3770             : 
    3771             :     /*
    3772             :      * If we didn't pin the visibility map page and the page has become all
    3773             :      * visible while we were busy locking the buffer, or during some
    3774             :      * subsequent window during which we had it unlocked, we'll have to unlock
    3775             :      * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
    3776             :      * bit unfortunate, especially since we'll now have to recheck whether the
    3777             :      * tuple has been locked or updated under us, but hopefully it won't
    3778             :      * happen very often.
    3779             :      */
    3780      623960 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    3781             :     {
    3782           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3783           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    3784           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3785           0 :         goto l2;
    3786             :     }
    3787             : 
    3788             :     /* Fill in transaction status data */
    3789             : 
    3790             :     /*
    3791             :      * If the tuple we're updating is locked, we need to preserve the locking
    3792             :      * info in the old tuple's Xmax.  Prepare a new Xmax value for this.
    3793             :      */
    3794      623960 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3795      623960 :                               oldtup.t_data->t_infomask,
    3796      623960 :                               oldtup.t_data->t_infomask2,
    3797             :                               xid, *lockmode, true,
    3798             :                               &xmax_old_tuple, &infomask_old_tuple,
    3799             :                               &infomask2_old_tuple);
    3800             : 
    3801             :     /*
    3802             :      * And also prepare an Xmax value for the new copy of the tuple.  If there
    3803             :      * was no xmax previously, or there was one but all lockers are now gone,
    3804             :      * then use InvalidTransactionId; otherwise, get the xmax from the old
    3805             :      * tuple.  (In rare cases that might also be InvalidTransactionId and yet
    3806             :      * not have the HEAP_XMAX_INVALID bit set; that's fine.)
    3807             :      */
    3808      696004 :     if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3809      144088 :         HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
    3810       71702 :         (checked_lockers && !locker_remains))
    3811      551916 :         xmax_new_tuple = InvalidTransactionId;
    3812             :     else
    3813       72044 :         xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3814             : 
    3815      623960 :     if (!TransactionIdIsValid(xmax_new_tuple))
    3816             :     {
    3817      551916 :         infomask_new_tuple = HEAP_XMAX_INVALID;
    3818      551916 :         infomask2_new_tuple = 0;
    3819             :     }
    3820             :     else
    3821             :     {
    3822             :         /*
    3823             :          * If we found a valid Xmax for the new tuple, then the infomask bits
    3824             :          * to use on the new tuple depend on what was there on the old one.
    3825             :          * Note that since we're doing an update, the only possibility is that
    3826             :          * the lockers had FOR KEY SHARE lock.
    3827             :          */
    3828       72044 :         if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
    3829             :         {
    3830         344 :             GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
    3831             :                                    &infomask2_new_tuple);
    3832             :         }
    3833             :         else
    3834             :         {
    3835       71700 :             infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
    3836       71700 :             infomask2_new_tuple = 0;
    3837             :         }
    3838             :     }
    3839             : 
    3840             :     /*
    3841             :      * Prepare the new tuple with the appropriate initial values of Xmin and
    3842             :      * Xmax, as well as initial infomask bits as computed above.
    3843             :      */
    3844      623960 :     newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    3845      623960 :     newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    3846      623960 :     HeapTupleHeaderSetXmin(newtup->t_data, xid);
    3847      623960 :     HeapTupleHeaderSetCmin(newtup->t_data, cid);
    3848      623960 :     newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
    3849      623960 :     newtup->t_data->t_infomask2 |= infomask2_new_tuple;
    3850      623960 :     HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
    3851             : 
    3852             :     /*
    3853             :      * Replace cid with a combo CID if necessary.  Note that we already put
    3854             :      * the plain cid into the new tuple.
    3855             :      */
    3856      623960 :     HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
    3857             : 
    3858             :     /*
    3859             :      * If the toaster needs to be activated, OR if the new tuple will not fit
    3860             :      * on the same page as the old, then we need to release the content lock
    3861             :      * (but not the pin!) on the old tuple's buffer while we are off doing
    3862             :      * TOAST and/or table-file-extension work.  We must mark the old tuple to
    3863             :      * show that it's locked, else other processes may try to update it
    3864             :      * themselves.
    3865             :      *
    3866             :      * We need to invoke the toaster if there are already any out-of-line
    3867             :      * toasted values present, or if the new tuple is over-threshold.
    3868             :      */
    3869      623960 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3870           0 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3871             :     {
    3872             :         /* toast table entries should never be recursively toasted */
    3873             :         Assert(!HeapTupleHasExternal(&oldtup));
    3874             :         Assert(!HeapTupleHasExternal(newtup));
    3875           0 :         need_toast = false;
    3876             :     }
    3877             :     else
    3878     1871128 :         need_toast = (HeapTupleHasExternal(&oldtup) ||
    3879     1247168 :                       HeapTupleHasExternal(newtup) ||
    3880      623160 :                       newtup->t_len > TOAST_TUPLE_THRESHOLD);
    3881             : 
    3882      623960 :     pagefree = PageGetHeapFreeSpace(page);
    3883             : 
    3884      623960 :     newtupsize = MAXALIGN(newtup->t_len);
    3885             : 
    3886      623960 :     if (need_toast || newtupsize > pagefree)
    3887      303772 :     {
    3888             :         TransactionId xmax_lock_old_tuple;
    3889             :         uint16      infomask_lock_old_tuple,
    3890             :                     infomask2_lock_old_tuple;
    3891      303772 :         bool        cleared_all_frozen = false;
    3892             : 
    3893             :         /*
    3894             :          * To prevent concurrent sessions from updating the tuple, we have to
    3895             :          * temporarily mark it locked, while we release the page-level lock.
    3896             :          *
     3897             :          * To satisfy the rule that any xid potentially appearing in a buffer
     3898             :          * written out to disk must first be covered by WAL, we unfortunately
     3899             :          * have to WAL-log this temporary modification.  We can reuse
     3900             :          * xl_heap_lock for this purpose.  If we crash or error out before
     3901             :          * following through with the actual update, xmax will be that of an
     3902             :          * aborted transaction, allowing other sessions to proceed.
    3903             :          */
    3904             : 
    3905             :         /*
    3906             :          * Compute xmax / infomask appropriate for locking the tuple. This has
    3907             :          * to be done separately from the combo that's going to be used for
    3908             :          * updating, because the potentially created multixact would otherwise
    3909             :          * be wrong.
    3910             :          */
    3911      303772 :         compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3912      303772 :                                   oldtup.t_data->t_infomask,
    3913      303772 :                                   oldtup.t_data->t_infomask2,
    3914             :                                   xid, *lockmode, false,
    3915             :                                   &xmax_lock_old_tuple, &infomask_lock_old_tuple,
    3916             :                                   &infomask2_lock_old_tuple);
    3917             : 
    3918             :         Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
    3919             : 
    3920      303772 :         START_CRIT_SECTION();
    3921             : 
    3922             :         /* Clear obsolete visibility flags ... */
    3923      303772 :         oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3924      303772 :         oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3925      303772 :         HeapTupleClearHotUpdated(&oldtup);
    3926             :         /* ... and store info about transaction updating this tuple */
    3927             :         Assert(TransactionIdIsValid(xmax_lock_old_tuple));
    3928      303772 :         HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
    3929      303772 :         oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
    3930      303772 :         oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
    3931      303772 :         HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3932             : 
    3933             :         /* temporarily make it look not-updated, but locked */
    3934      303772 :         oldtup.t_data->t_ctid = oldtup.t_self;
    3935             : 
    3936             :         /*
    3937             :          * Clear all-frozen bit on visibility map if needed. We could
    3938             :          * immediately reset ALL_VISIBLE, but given that the WAL logging
     3939             :          * overhead would be unchanged, that does not necessarily seem
     3940             :          * worthwhile.
    3941             :          */
    3942      305620 :         if (PageIsAllVisible(page) &&
    3943        1848 :             visibilitymap_clear(relation, block, vmbuffer,
    3944             :                                 VISIBILITYMAP_ALL_FROZEN))
    3945        1550 :             cleared_all_frozen = true;
    3946             : 
    3947      303772 :         MarkBufferDirty(buffer);
    3948             : 
    3949      303772 :         if (RelationNeedsWAL(relation))
    3950             :         {
    3951             :             xl_heap_lock xlrec;
    3952             :             XLogRecPtr  recptr;
    3953             : 
    3954      283518 :             XLogBeginInsert();
    3955      283518 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3956             : 
    3957      283518 :             xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
    3958      283518 :             xlrec.xmax = xmax_lock_old_tuple;
    3959      567036 :             xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
    3960      283518 :                                                   oldtup.t_data->t_infomask2);
    3961      283518 :             xlrec.flags =
    3962      283518 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    3963      283518 :             XLogRegisterData(&xlrec, SizeOfHeapLock);
    3964      283518 :             recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    3965      283518 :             PageSetLSN(page, recptr);
    3966             :         }
    3967             : 
    3968      303772 :         END_CRIT_SECTION();
    3969             : 
    3970      303772 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3971             : 
    3972             :         /*
    3973             :          * Let the toaster do its thing, if needed.
    3974             :          *
    3975             :          * Note: below this point, heaptup is the data we actually intend to
    3976             :          * store into the relation; newtup is the caller's original untoasted
    3977             :          * data.
    3978             :          */
    3979      303772 :         if (need_toast)
    3980             :         {
    3981             :             /* Note we always use WAL and FSM during updates */
    3982        3346 :             heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
    3983        3346 :             newtupsize = MAXALIGN(heaptup->t_len);
    3984             :         }
    3985             :         else
    3986      300426 :             heaptup = newtup;
    3987             : 
    3988             :         /*
    3989             :          * Now, do we need a new page for the tuple, or not?  This is a bit
    3990             :          * tricky since someone else could have added tuples to the page while
    3991             :          * we weren't looking.  We have to recheck the available space after
    3992             :          * reacquiring the buffer lock.  But don't bother to do that if the
    3993             :          * former amount of free space is still not enough; it's unlikely
    3994             :          * there's more free now than before.
    3995             :          *
    3996             :          * What's more, if we need to get a new page, we will need to acquire
    3997             :          * buffer locks on both old and new pages.  To avoid deadlock against
    3998             :          * some other backend trying to get the same two locks in the other
    3999             :          * order, we must be consistent about the order we get the locks in.
    4000             :          * We use the rule "lock the lower-numbered page of the relation
    4001             :          * first".  To implement this, we must do RelationGetBufferForTuple
    4002             :          * while not holding the lock on the old page, and we must rely on it
    4003             :          * to get the locks on both pages in the correct order.
    4004             :          *
    4005             :          * Another consideration is that we need visibility map page pin(s) if
    4006             :          * we will have to clear the all-visible flag on either page.  If we
    4007             :          * call RelationGetBufferForTuple, we rely on it to acquire any such
    4008             :          * pins; but if we don't, we have to handle that here.  Hence we need
    4009             :          * a loop.
    4010             :          */
    4011             :         for (;;)
    4012             :         {
    4013      303772 :             if (newtupsize > pagefree)
    4014             :             {
    4015             :                 /* It doesn't fit, must use RelationGetBufferForTuple. */
    4016      302636 :                 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
    4017             :                                                    buffer, 0, NULL,
    4018             :                                                    &vmbuffer_new, &vmbuffer,
    4019             :                                                    0);
    4020             :                 /* We're all done. */
    4021      302636 :                 break;
    4022             :             }
    4023             :             /* Acquire VM page pin if needed and we don't have it. */
    4024        1136 :             if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    4025           0 :                 visibilitymap_pin(relation, block, &vmbuffer);
    4026             :             /* Re-acquire the lock on the old tuple's page. */
    4027        1136 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    4028             :             /* Re-check using the up-to-date free space */
    4029        1136 :             pagefree = PageGetHeapFreeSpace(page);
    4030        1136 :             if (newtupsize > pagefree ||
    4031        1136 :                 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
    4032             :             {
    4033             :                 /*
    4034             :                  * Rats, it doesn't fit anymore, or somebody just now set the
    4035             :                  * all-visible flag.  We must now unlock and loop to avoid
    4036             :                  * deadlock.  Fortunately, this path should seldom be taken.
    4037             :                  */
    4038           0 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4039             :             }
    4040             :             else
    4041             :             {
    4042             :                 /* We're all done. */
    4043        1136 :                 newbuf = buffer;
    4044        1136 :                 break;
    4045             :             }
    4046             :         }
    4047             :     }
    4048             :     else
    4049             :     {
    4050             :         /* No TOAST work needed, and it'll fit on same page */
    4051      320188 :         newbuf = buffer;
    4052      320188 :         heaptup = newtup;
    4053             :     }
    4054             : 
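The deadlock-avoidance rule described in the comment above ("lock the lower-numbered page of the relation first") is delegated to RelationGetBufferForTuple when a new page is needed.  A minimal, hypothetical sketch of the rule itself, using only buffer-manager calls that already appear in this file; LockTwoBuffersInOrder is an invented name, not a PostgreSQL function:

    #include "postgres.h"
    #include "storage/bufmgr.h"

    /*
     * Illustrative only: acquire exclusive content locks on two heap pages in
     * block-number order, so two backends that need the same pair of pages
     * cannot each hold one lock while waiting for the other.
     */
    static void
    LockTwoBuffersInOrder(Buffer a, Buffer b)
    {
        if (BufferGetBlockNumber(a) < BufferGetBlockNumber(b))
        {
            LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
        }
        else
        {
            LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
        }
    }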
    4055             :     /*
    4056             :      * We're about to do the actual update -- check for conflict first, to
    4057             :      * avoid possibly having to roll back work we've just done.
    4058             :      *
    4059             :      * This is safe without a recheck as long as there is no possibility of
    4060             :      * another process scanning the pages between this check and the update
    4061             :      * being visible to the scan (i.e., exclusive buffer content lock(s) are
    4062             :      * continuously held from this point until the tuple update is visible).
    4063             :      *
    4064             :      * For the new tuple the only check needed is at the relation level, but
    4065             :      * since both tuples are in the same relation and the check for oldtup
    4066             :      * will include checking the relation level, there is no benefit to a
    4067             :      * separate check for the new tuple.
    4068             :      */
    4069      623960 :     CheckForSerializableConflictIn(relation, &oldtup.t_self,
    4070             :                                    BufferGetBlockNumber(buffer));
    4071             : 
    4072             :     /*
    4073             :      * At this point newbuf and buffer are both pinned and locked, and newbuf
    4074             :      * has enough space for the new tuple.  If they are the same buffer, only
    4075             :      * one pin is held.
    4076             :      */
    4077             : 
    4078      623936 :     if (newbuf == buffer)
    4079             :     {
    4080             :         /*
    4081             :          * Since the new tuple is going into the same page, we might be able
    4082             :          * to do a HOT update.  Check if any of the index columns have been
    4083             :          * changed.
    4084             :          */
    4085      321300 :         if (!bms_overlap(modified_attrs, hot_attrs))
    4086             :         {
    4087      296648 :             use_hot_update = true;
    4088             : 
    4089             :             /*
    4090             :              * If none of the columns that are used in hot-blocking indexes
    4091             :              * were updated, we can apply HOT, but we do still need to check
    4092             :              * if we need to update the summarizing indexes, and update those
    4093             :              * indexes if the columns were updated, or we may fail to detect
    4094             :              * e.g. value bound changes in BRIN minmax indexes.
    4095             :              */
    4096      296648 :             if (bms_overlap(modified_attrs, sum_attrs))
    4097        3282 :                 summarized_update = true;
    4098             :         }
    4099             :     }
    4100             :     else
    4101             :     {
    4102             :         /* Set a hint that the old page could use prune/defrag */
    4103      302636 :         PageSetFull(page);
    4104             :     }
    4105             : 
    4106             :     /*
    4107             :      * Compute replica identity tuple before entering the critical section so
    4108             :      * we don't PANIC upon a memory allocation failure.
    4109             :      * ExtractReplicaIdentity() will return NULL if nothing needs to be
    4110             :      * logged.  Pass old key required as true only if the replica identity key
    4111             :      * columns are modified or it has external data.
    4112             :      */
    4113      623936 :     old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
    4114      623936 :                                            bms_overlap(modified_attrs, id_attrs) ||
    4115             :                                            id_has_external,
    4116             :                                            &old_key_copied);
    4117             : 
    4118             :     /* NO EREPORT(ERROR) from here till changes are logged */
    4119      623936 :     START_CRIT_SECTION();
    4120             : 
    4121             :     /*
    4122             :      * If this transaction commits, the old tuple will become DEAD sooner or
    4123             :      * later.  Set flag that this page is a candidate for pruning once our xid
    4124             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    4125             :      * the subsequent page pruning will be a no-op and the hint will be
    4126             :      * cleared.
    4127             :      *
    4128             :      * XXX Should we set hint on newbuf as well?  If the transaction aborts,
    4129             :      * there would be a prunable tuple in the newbuf; but for now we choose
    4130             :      * not to optimize for aborts.  Note that heap_xlog_update must be kept in
    4131             :      * sync if this decision changes.
    4132             :      */
    4133      623936 :     PageSetPrunable(page, xid);
    4134             : 
    4135      623936 :     if (use_hot_update)
    4136             :     {
    4137             :         /* Mark the old tuple as HOT-updated */
    4138      296648 :         HeapTupleSetHotUpdated(&oldtup);
    4139             :         /* And mark the new tuple as heap-only */
    4140      296648 :         HeapTupleSetHeapOnly(heaptup);
    4141             :         /* Mark the caller's copy too, in case different from heaptup */
    4142      296648 :         HeapTupleSetHeapOnly(newtup);
    4143             :     }
    4144             :     else
    4145             :     {
    4146             :         /* Make sure tuples are correctly marked as not-HOT */
    4147      327288 :         HeapTupleClearHotUpdated(&oldtup);
    4148      327288 :         HeapTupleClearHeapOnly(heaptup);
    4149      327288 :         HeapTupleClearHeapOnly(newtup);
    4150             :     }
    4151             : 
    4152      623936 :     RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
    4153             : 
    4154             : 
    4155             :     /* Clear obsolete visibility flags, possibly set by ourselves above... */
    4156      623936 :     oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    4157      623936 :     oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    4158             :     /* ... and store info about transaction updating this tuple */
    4159             :     Assert(TransactionIdIsValid(xmax_old_tuple));
    4160      623936 :     HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
    4161      623936 :     oldtup.t_data->t_infomask |= infomask_old_tuple;
    4162      623936 :     oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
    4163      623936 :     HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    4164             : 
    4165             :     /* record address of new tuple in t_ctid of old one */
    4166      623936 :     oldtup.t_data->t_ctid = heaptup->t_self;
    4167             : 
    4168             :     /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
    4169      623936 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    4170             :     {
    4171        3056 :         all_visible_cleared = true;
    4172        3056 :         PageClearAllVisible(BufferGetPage(buffer));
    4173        3056 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    4174             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    4175             :     }
    4176      623936 :     if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
    4177             :     {
    4178        1616 :         all_visible_cleared_new = true;
    4179        1616 :         PageClearAllVisible(BufferGetPage(newbuf));
    4180        1616 :         visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
    4181             :                             vmbuffer_new, VISIBILITYMAP_VALID_BITS);
    4182             :     }
    4183             : 
    4184      623936 :     if (newbuf != buffer)
    4185      302636 :         MarkBufferDirty(newbuf);
    4186      623936 :     MarkBufferDirty(buffer);
    4187             : 
    4188             :     /* XLOG stuff */
    4189      623936 :     if (RelationNeedsWAL(relation))
    4190             :     {
    4191             :         XLogRecPtr  recptr;
    4192             : 
    4193             :         /*
    4194             :          * For logical decoding we need combo CIDs to properly decode the
    4195             :          * catalog.
    4196             :          */
    4197      601206 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    4198             :         {
    4199        5134 :             log_heap_new_cid(relation, &oldtup);
    4200        5134 :             log_heap_new_cid(relation, heaptup);
    4201             :         }
    4202             : 
    4203      601206 :         recptr = log_heap_update(relation, buffer,
    4204             :                                  newbuf, &oldtup, heaptup,
    4205             :                                  old_key_tuple,
    4206             :                                  all_visible_cleared,
    4207             :                                  all_visible_cleared_new);
    4208      601206 :         if (newbuf != buffer)
    4209             :         {
    4210      282394 :             PageSetLSN(BufferGetPage(newbuf), recptr);
    4211             :         }
    4212      601206 :         PageSetLSN(BufferGetPage(buffer), recptr);
    4213             :     }
    4214             : 
    4215      623936 :     END_CRIT_SECTION();
    4216             : 
    4217      623936 :     if (newbuf != buffer)
    4218      302636 :         LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    4219      623936 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4220             : 
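For orientation, the WAL-logged page modification above follows the standard critical-section pattern; a hedged skeleton, not additional heapam.c code (the record type and registered data are placeholders):

    START_CRIT_SECTION();       /* any ERROR from here on is promoted to PANIC */

    /* ... modify the page in the exclusively-locked "buffer" ... */
    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(relation))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
        /* XLogRegisterData(...) for the fixed-size part of the record */
        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);    /* record type illustrative */
        PageSetLSN(BufferGetPage(buffer), recptr);
    }

    END_CRIT_SECTION();
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);     /* release the content lock afterwards */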
    4221             :     /*
    4222             :      * Mark old tuple for invalidation from system caches at next command
    4223             :      * boundary, and mark the new tuple for invalidation in case we abort. We
    4224             :      * have to do this before releasing the buffer because oldtup is in the
    4225             :      * buffer.  (heaptup is all in local memory, but it's necessary to process
    4226             :      * both tuple versions in one call to inval.c so we can avoid redundant
    4227             :      * sinval messages.)
    4228             :      */
    4229      623936 :     CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
    4230             : 
    4231             :     /* Now we can release the buffer(s) */
    4232      623936 :     if (newbuf != buffer)
    4233      302636 :         ReleaseBuffer(newbuf);
    4234      623936 :     ReleaseBuffer(buffer);
    4235      623936 :     if (BufferIsValid(vmbuffer_new))
    4236        1616 :         ReleaseBuffer(vmbuffer_new);
    4237      623936 :     if (BufferIsValid(vmbuffer))
    4238        3056 :         ReleaseBuffer(vmbuffer);
    4239             : 
    4240             :     /*
    4241             :      * Release the lmgr tuple lock, if we had it.
    4242             :      */
    4243      623936 :     if (have_tuple_lock)
    4244          44 :         UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    4245             : 
    4246      623936 :     pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
    4247             : 
    4248             :     /*
    4249             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    4250             :      * back to the caller's image, too.
    4251             :      */
    4252      623936 :     if (heaptup != newtup)
    4253             :     {
    4254        3250 :         newtup->t_self = heaptup->t_self;
    4255        3250 :         heap_freetuple(heaptup);
    4256             :     }
    4257             : 
    4258             :     /*
    4259             :      * If it is a HOT update, the update may still need to update summarized
    4260             :      * indexes, lest we fail to update those summaries and get incorrect
    4261             :      * results (for example, minmax bounds of the block may change with this
    4262             :      * update).
    4263             :      */
    4264      623936 :     if (use_hot_update)
    4265             :     {
    4266      296648 :         if (summarized_update)
    4267        3282 :             *update_indexes = TU_Summarizing;
    4268             :         else
    4269      293366 :             *update_indexes = TU_None;
    4270             :     }
    4271             :     else
    4272      327288 :         *update_indexes = TU_All;
    4273             : 
    4274      623936 :     if (old_key_tuple != NULL && old_key_copied)
    4275         168 :         heap_freetuple(old_key_tuple);
    4276             : 
    4277      623936 :     bms_free(hot_attrs);
    4278      623936 :     bms_free(sum_attrs);
    4279      623936 :     bms_free(key_attrs);
    4280      623936 :     bms_free(id_attrs);
    4281      623936 :     bms_free(modified_attrs);
    4282      623936 :     bms_free(interesting_attrs);
    4283             : 
    4284      623936 :     return TM_Ok;
    4285             : }
    4286             : 
    4287             : #ifdef USE_ASSERT_CHECKING
    4288             : /*
    4289             :  * Confirm adequate lock held during heap_update(), per rules from
    4290             :  * README.tuplock section "Locking to write inplace-updated tables".
    4291             :  */
    4292             : static void
    4293             : check_lock_if_inplace_updateable_rel(Relation relation,
    4294             :                                      const ItemPointerData *otid,
    4295             :                                      HeapTuple newtup)
    4296             : {
    4297             :     /* LOCKTAG_TUPLE acceptable for any catalog */
    4298             :     switch (RelationGetRelid(relation))
    4299             :     {
    4300             :         case RelationRelationId:
    4301             :         case DatabaseRelationId:
    4302             :             {
    4303             :                 LOCKTAG     tuptag;
    4304             : 
    4305             :                 SET_LOCKTAG_TUPLE(tuptag,
    4306             :                                   relation->rd_lockInfo.lockRelId.dbId,
    4307             :                                   relation->rd_lockInfo.lockRelId.relId,
    4308             :                                   ItemPointerGetBlockNumber(otid),
    4309             :                                   ItemPointerGetOffsetNumber(otid));
    4310             :                 if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
    4311             :                     return;
    4312             :             }
    4313             :             break;
    4314             :         default:
    4315             :             Assert(!IsInplaceUpdateRelation(relation));
    4316             :             return;
    4317             :     }
    4318             : 
    4319             :     switch (RelationGetRelid(relation))
    4320             :     {
    4321             :         case RelationRelationId:
    4322             :             {
    4323             :                 /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
    4324             :                 Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
    4325             :                 Oid         relid = classForm->oid;
    4326             :                 Oid         dbid;
    4327             :                 LOCKTAG     tag;
    4328             : 
    4329             :                 if (IsSharedRelation(relid))
    4330             :                     dbid = InvalidOid;
    4331             :                 else
    4332             :                     dbid = MyDatabaseId;
    4333             : 
    4334             :                 if (classForm->relkind == RELKIND_INDEX)
    4335             :                 {
    4336             :                     Relation    irel = index_open(relid, AccessShareLock);
    4337             : 
    4338             :                     SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
    4339             :                     index_close(irel, AccessShareLock);
    4340             :                 }
    4341             :                 else
    4342             :                     SET_LOCKTAG_RELATION(tag, dbid, relid);
    4343             : 
    4344             :                 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
    4345             :                     !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
    4346             :                     elog(WARNING,
    4347             :                          "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
    4348             :                          NameStr(classForm->relname),
    4349             :                          relid,
    4350             :                          classForm->relkind,
    4351             :                          ItemPointerGetBlockNumber(otid),
    4352             :                          ItemPointerGetOffsetNumber(otid));
    4353             :             }
    4354             :             break;
    4355             :         case DatabaseRelationId:
    4356             :             {
    4357             :                 /* LOCKTAG_TUPLE required */
    4358             :                 Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
    4359             : 
    4360             :                 elog(WARNING,
    4361             :                      "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
    4362             :                      NameStr(dbForm->datname),
    4363             :                      dbForm->oid,
    4364             :                      ItemPointerGetBlockNumber(otid),
    4365             :                      ItemPointerGetOffsetNumber(otid));
    4366             :             }
    4367             :             break;
    4368             :     }
    4369             : }
    4370             : 
    4371             : /*
    4372             :  * Confirm adequate relation lock held, per rules from README.tuplock section
    4373             :  * "Locking to write inplace-updated tables".
    4374             :  */
    4375             : static void
    4376             : check_inplace_rel_lock(HeapTuple oldtup)
    4377             : {
    4378             :     Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
    4379             :     Oid         relid = classForm->oid;
    4380             :     Oid         dbid;
    4381             :     LOCKTAG     tag;
    4382             : 
    4383             :     if (IsSharedRelation(relid))
    4384             :         dbid = InvalidOid;
    4385             :     else
    4386             :         dbid = MyDatabaseId;
    4387             : 
    4388             :     if (classForm->relkind == RELKIND_INDEX)
    4389             :     {
    4390             :         Relation    irel = index_open(relid, AccessShareLock);
    4391             : 
    4392             :         SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
    4393             :         index_close(irel, AccessShareLock);
    4394             :     }
    4395             :     else
    4396             :         SET_LOCKTAG_RELATION(tag, dbid, relid);
    4397             : 
    4398             :     if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
    4399             :         elog(WARNING,
    4400             :              "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
    4401             :              NameStr(classForm->relname),
    4402             :              relid,
    4403             :              classForm->relkind,
    4404             :              ItemPointerGetBlockNumber(&oldtup->t_self),
    4405             :              ItemPointerGetOffsetNumber(&oldtup->t_self));
    4406             : }
    4407             : #endif
    4408             : 
    4409             : /*
    4410             :  * Check if the specified attribute's values are the same.  Subroutine for
    4411             :  * HeapDetermineColumnsInfo.
    4412             :  */
    4413             : static bool
    4414     1531728 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
    4415             :                  bool isnull1, bool isnull2)
    4416             : {
    4417             :     /*
    4418             :      * If one value is NULL and other is not, then they are certainly not
    4419             :      * equal
    4420             :      */
    4421     1531728 :     if (isnull1 != isnull2)
    4422          90 :         return false;
    4423             : 
    4424             :     /*
    4425             :      * If both are NULL, they can be considered equal.
    4426             :      */
    4427     1531638 :     if (isnull1)
    4428        9982 :         return true;
    4429             : 
    4430             :     /*
    4431             :      * We do simple binary comparison of the two datums.  This may be overly
    4432             :      * strict because there can be multiple binary representations for the
    4433             :      * same logical value.  But we should be OK as long as there are no false
    4434             :      * positives.  Using a type-specific equality operator is messy because
    4435             :      * there could be multiple notions of equality in different operator
    4436             :      * classes; furthermore, we cannot safely invoke user-defined functions
    4437             :      * while holding exclusive buffer lock.
    4438             :      */
    4439     1521656 :     if (attrnum <= 0)
    4440             :     {
    4441             :         /* The only allowed system columns are OIDs, so do this */
    4442           0 :         return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
    4443             :     }
    4444             :     else
    4445             :     {
    4446             :         CompactAttribute *att;
    4447             : 
    4448             :         Assert(attrnum <= tupdesc->natts);
    4449     1521656 :         att = TupleDescCompactAttr(tupdesc, attrnum - 1);
    4450     1521656 :         return datumIsEqual(value1, value2, att->attbyval, att->attlen);
    4451             :     }
    4452             : }
    4453             : 
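The binary comparison that heap_attr_equals() relies on is datumIsEqual(), which compares pass-by-value datums directly and pass-by-reference datums byte by byte.  A small, hypothetical illustration (int4_binary_equal is an invented helper, not part of PostgreSQL):

    #include "postgres.h"
    #include "utils/datum.h"

    /*
     * Illustrative only: for a pass-by-value type such as int4, datumIsEqual()
     * just compares the Datum values; for varlena types it compares the raw
     * bytes, so logically equal values with different binary representations
     * are (safely) reported as not equal.
     */
    static bool
    int4_binary_equal(int32 a, int32 b)
    {
        return datumIsEqual(Int32GetDatum(a), Int32GetDatum(b),
                            true /* typbyval */ , sizeof(int32));
    }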
    4454             : /*
    4455             :  * Check which columns are being updated.
    4456             :  *
    4457             :  * Given an updated tuple, determine (and return into the output bitmapset),
    4458             :  * from those listed as interesting, the set of columns that changed.
    4459             :  *
    4460             :  * has_external indicates if any of the unmodified attributes (from those
    4461             :  * listed as interesting) of the old tuple is a member of external_cols and is
    4462             :  * stored externally.
    4463             :  */
    4464             : static Bitmapset *
    4465      624280 : HeapDetermineColumnsInfo(Relation relation,
    4466             :                          Bitmapset *interesting_cols,
    4467             :                          Bitmapset *external_cols,
    4468             :                          HeapTuple oldtup, HeapTuple newtup,
    4469             :                          bool *has_external)
    4470             : {
    4471             :     int         attidx;
    4472      624280 :     Bitmapset  *modified = NULL;
    4473      624280 :     TupleDesc   tupdesc = RelationGetDescr(relation);
    4474             : 
    4475      624280 :     attidx = -1;
    4476     2156008 :     while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
    4477             :     {
    4478             :         /* attidx is zero-based, attrnum is the normal attribute number */
    4479     1531728 :         AttrNumber  attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
    4480             :         Datum       value1,
    4481             :                     value2;
    4482             :         bool        isnull1,
    4483             :                     isnull2;
    4484             : 
    4485             :         /*
    4486             :          * If it's a whole-tuple reference, say "not equal".  It's not really
    4487             :          * worth supporting this case, since it could only succeed after a
    4488             :          * no-op update, which is hardly a case worth optimizing for.
    4489             :          */
    4490     1531728 :         if (attrnum == 0)
    4491             :         {
    4492           0 :             modified = bms_add_member(modified, attidx);
    4493     1468084 :             continue;
    4494             :         }
    4495             : 
    4496             :         /*
    4497             :          * Likewise, automatically say "not equal" for any system attribute
    4498             :          * other than tableOID; we cannot expect these to be consistent in a
    4499             :          * HOT chain, or even to be set correctly yet in the new tuple.
    4500             :          */
    4501     1531728 :         if (attrnum < 0)
    4502             :         {
    4503           0 :             if (attrnum != TableOidAttributeNumber)
    4504             :             {
    4505           0 :                 modified = bms_add_member(modified, attidx);
    4506           0 :                 continue;
    4507             :             }
    4508             :         }
    4509             : 
    4510             :         /*
    4511             :          * Extract the corresponding values.  XXX this is pretty inefficient
    4512             :          * if there are many indexed columns.  Should we do a single
    4513             :          * heap_deform_tuple call on each tuple, instead?   But that doesn't
    4514             :          * work for system columns ...
    4515             :          */
    4516     1531728 :         value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
    4517     1531728 :         value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
    4518             : 
    4519     1531728 :         if (!heap_attr_equals(tupdesc, attrnum, value1,
    4520             :                               value2, isnull1, isnull2))
    4521             :         {
    4522       54486 :             modified = bms_add_member(modified, attidx);
    4523       54486 :             continue;
    4524             :         }
    4525             : 
    4526             :         /*
    4527             :          * No need to check attributes that can't be stored externally. Note
    4528             :          * that system attributes can't be stored externally.
    4529             :          */
    4530     1477242 :         if (attrnum < 0 || isnull1 ||
    4531     1467260 :             TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
    4532     1413598 :             continue;
    4533             : 
    4534             :         /*
    4535             :          * Check if the old tuple's attribute is stored externally and is a
    4536             :          * member of external_cols.
    4537             :          */
    4538       63654 :         if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
    4539          10 :             bms_is_member(attidx, external_cols))
    4540           4 :             *has_external = true;
    4541             :     }
    4542             : 
    4543      624280 :     return modified;
    4544             : }
    4545             : 
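A note on the attidx/attrnum arithmetic above: the attribute bitmapsets used here (interesting_cols, external_cols, and the returned set) store attribute numbers offset by FirstLowInvalidHeapAttributeNumber, so that system attributes, whose attribute numbers are negative, map to non-negative bitmapset members.  A hypothetical helper showing the conversion (add_attr_to_bitmapset is an invented name):

    #include "postgres.h"
    #include "access/attnum.h"
    #include "access/sysattr.h"
    #include "nodes/bitmapset.h"

    /* Illustrative only: record attribute "attno" in an offset bitmapset. */
    static Bitmapset *
    add_attr_to_bitmapset(Bitmapset *cols, AttrNumber attno)
    {
        return bms_add_member(cols, attno - FirstLowInvalidHeapAttributeNumber);
    }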
    4546             : /*
    4547             :  *  simple_heap_update - replace a tuple
    4548             :  *
    4549             :  * This routine may be used to update a tuple when concurrent updates of
    4550             :  * the target tuple are not expected (for example, because we have a lock
    4551             :  * on the relation associated with the tuple).  Any failure is reported
    4552             :  * via ereport().
    4553             :  */
    4554             : void
    4555      234810 : simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
    4556             :                    TU_UpdateIndexes *update_indexes)
    4557             : {
    4558             :     TM_Result   result;
    4559             :     TM_FailureData tmfd;
    4560             :     LockTupleMode lockmode;
    4561             : 
    4562      234810 :     result = heap_update(relation, otid, tup,
    4563             :                          GetCurrentCommandId(true), InvalidSnapshot,
    4564             :                          true /* wait for commit */ ,
    4565             :                          &tmfd, &lockmode, update_indexes);
    4566      234810 :     switch (result)
    4567             :     {
    4568           0 :         case TM_SelfModified:
    4569             :             /* Tuple was already updated in current command? */
    4570           0 :             elog(ERROR, "tuple already updated by self");
    4571             :             break;
    4572             : 
    4573      234808 :         case TM_Ok:
    4574             :             /* done successfully */
    4575      234808 :             break;
    4576             : 
    4577           0 :         case TM_Updated:
    4578           0 :             elog(ERROR, "tuple concurrently updated");
    4579             :             break;
    4580             : 
    4581           2 :         case TM_Deleted:
    4582           2 :             elog(ERROR, "tuple concurrently deleted");
    4583             :             break;
    4584             : 
    4585           0 :         default:
    4586           0 :             elog(ERROR, "unrecognized heap_update status: %u", result);
    4587             :             break;
    4588             :     }
    4589      234808 : }
    4590             : 
    4591             : 
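As a usage illustration for simple_heap_update(): callers build a modified copy of an existing tuple and replace the original, identified by its old TID.  A hedged sketch, not code from heapam.c; in practice most catalog callers go through CatalogTupleUpdate(), which combines this with index maintenance:

    /*
     * Illustrative only: "rel" must already be open and suitably locked, and
     * "newtup" must be a modified copy of an existing tuple in it (e.g. built
     * with heap_modify_tuple), whose t_self still points at the original
     * tuple's location.
     */
    static void
    replace_tuple_example(Relation rel, HeapTuple newtup)
    {
        TU_UpdateIndexes update_indexes;

        simple_heap_update(rel, &newtup->t_self, newtup, &update_indexes);

        /*
         * Unless update_indexes came back as TU_None, the caller must now
         * insert the appropriate index entries for the new tuple version.
         */
    }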
    4592             : /*
    4593             :  * Return the MultiXactStatus corresponding to the given tuple lock mode.
    4594             :  */
    4595             : static MultiXactStatus
    4596      230884 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
    4597             : {
    4598             :     int         retval;
    4599             : 
    4600      230884 :     if (is_update)
    4601         430 :         retval = tupleLockExtraInfo[mode].updstatus;
    4602             :     else
    4603      230454 :         retval = tupleLockExtraInfo[mode].lockstatus;
    4604             : 
    4605      230884 :     if (retval == -1)
    4606           0 :         elog(ERROR, "invalid lock tuple mode %d/%s", mode,
    4607             :              is_update ? "true" : "false");
    4608             : 
    4609      230884 :     return (MultiXactStatus) retval;
    4610             : }
    4611             : 
    4612             : /*
    4613             :  *  heap_lock_tuple - lock a tuple in shared or exclusive mode
    4614             :  *
    4615             :  * Note that this acquires a buffer pin, which the caller must release.
    4616             :  *
    4617             :  * Input parameters:
    4618             :  *  relation: relation containing tuple (caller must hold suitable lock)
    4619             :  *  cid: current command ID (used for visibility test, and stored into
    4620             :  *      tuple's cmax if lock is successful)
    4621             :  *  mode: indicates if shared or exclusive tuple lock is desired
    4622             :  *  wait_policy: what to do if tuple lock is not available
    4623             :  *  follow_updates: if true, follow the update chain to also lock descendant
    4624             :  *      tuples.
    4625             :  *
    4626             :  * Output parameters:
    4627             :  *  *tuple: all fields filled in
    4628             :  *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
    4629             :  *  *tmfd: filled in failure cases (see below)
    4630             :  *
    4631             :  * Function results are the same as the ones for table_tuple_lock().
    4632             :  *
    4633             :  * In the failure cases other than TM_Invisible, the routine fills
    4634             :  * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
    4635             :  * if necessary), and t_cmax (the last only for TM_SelfModified,
    4636             :  * since we cannot obtain cmax from a combo CID generated by another
    4637             :  * transaction).
    4638             :  * See comments for struct TM_FailureData for additional info.
    4639             :  *
    4640             :  * See README.tuplock for a thorough explanation of this mechanism.
    4641             :  */
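Before the definition, a hypothetical caller sketch (not from heapam.c) showing the calling convention described above: the caller supplies the TID in tuple->t_self, and on return the containing buffer is pinned but not locked, so the caller must drop the pin:

    /* Illustrative only; assumes the usual backend headers (access/heapam.h,
     * access/xact.h, storage/bufmgr.h). */
    static TM_Result
    lock_tuple_example(Relation rel, ItemPointer tid)
    {
        HeapTupleData tuple;
        Buffer      buffer;
        TM_FailureData tmfd;
        TM_Result   result;

        tuple.t_self = *tid;
        result = heap_lock_tuple(rel, &tuple,
                                 GetCurrentCommandId(true),
                                 LockTupleExclusive, LockWaitBlock,
                                 false /* follow_updates */ ,
                                 &buffer, &tmfd);
        ReleaseBuffer(buffer);  /* only a pin, not a lock, is held on return */
        return result;
    }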
    4642             : TM_Result
    4643      316814 : heap_lock_tuple(Relation relation, HeapTuple tuple,
    4644             :                 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
    4645             :                 bool follow_updates,
    4646             :                 Buffer *buffer, TM_FailureData *tmfd)
    4647             : {
    4648             :     TM_Result   result;
    4649      316814 :     ItemPointer tid = &(tuple->t_self);
    4650             :     ItemId      lp;
    4651             :     Page        page;
    4652      316814 :     Buffer      vmbuffer = InvalidBuffer;
    4653             :     BlockNumber block;
    4654             :     TransactionId xid,
    4655             :                 xmax;
    4656             :     uint16      old_infomask,
    4657             :                 new_infomask,
    4658             :                 new_infomask2;
    4659      316814 :     bool        first_time = true;
    4660      316814 :     bool        skip_tuple_lock = false;
    4661      316814 :     bool        have_tuple_lock = false;
    4662      316814 :     bool        cleared_all_frozen = false;
    4663             : 
    4664      316814 :     *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    4665      316814 :     block = ItemPointerGetBlockNumber(tid);
    4666             : 
    4667             :     /*
    4668             :      * Before locking the buffer, pin the visibility map page if it appears to
    4669             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    4670             :      * in the middle of changing this, so we'll need to recheck after we have
    4671             :      * the lock.
    4672             :      */
    4673      316814 :     if (PageIsAllVisible(BufferGetPage(*buffer)))
    4674        3332 :         visibilitymap_pin(relation, block, &vmbuffer);
    4675             : 
    4676      316814 :     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4677             : 
    4678      316814 :     page = BufferGetPage(*buffer);
    4679      316814 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    4680             :     Assert(ItemIdIsNormal(lp));
    4681             : 
    4682      316814 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    4683      316814 :     tuple->t_len = ItemIdGetLength(lp);
    4684      316814 :     tuple->t_tableOid = RelationGetRelid(relation);
    4685             : 
    4686          32 : l3:
    4687      316846 :     result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
    4688             : 
    4689      316846 :     if (result == TM_Invisible)
    4690             :     {
    4691             :         /*
    4692             :          * This is possible, but only when locking a tuple for ON CONFLICT
    4693             :          * UPDATE.  We return this value here rather than throwing an error in
    4694             :          * order to give that case the opportunity to throw a more specific
    4695             :          * error.
    4696             :          */
    4697          24 :         result = TM_Invisible;
    4698          24 :         goto out_locked;
    4699             :     }
    4700      316822 :     else if (result == TM_BeingModified ||
    4701      154426 :              result == TM_Updated ||
    4702             :              result == TM_Deleted)
    4703             :     {
    4704             :         TransactionId xwait;
    4705             :         uint16      infomask;
    4706             :         uint16      infomask2;
    4707             :         bool        require_sleep;
    4708             :         ItemPointerData t_ctid;
    4709             : 
    4710             :         /* must copy state data before unlocking buffer */
    4711      162398 :         xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4712      162398 :         infomask = tuple->t_data->t_infomask;
    4713      162398 :         infomask2 = tuple->t_data->t_infomask2;
    4714      162398 :         ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
    4715             : 
    4716      162398 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4717             : 
    4718             :         /*
    4719             :          * If any subtransaction of the current top transaction already holds
    4720             :          * a lock as strong as or stronger than what we're requesting, we
    4721             :          * effectively hold the desired lock already.  We *must* succeed
    4722             :          * without trying to take the tuple lock, else we will deadlock
    4723             :          * against anyone wanting to acquire a stronger lock.
    4724             :          *
    4725             :          * Note we only do this the first time we loop on the HTSU result;
    4726             :          * there is no point in testing in subsequent passes, because
    4727             :          * evidently our own transaction cannot have acquired a new lock after
    4728             :          * the first time we checked.
    4729             :          */
    4730      162398 :         if (first_time)
    4731             :         {
    4732      162376 :             first_time = false;
    4733             : 
    4734      162376 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4735             :             {
    4736             :                 int         i;
    4737             :                 int         nmembers;
    4738             :                 MultiXactMember *members;
    4739             : 
    4740             :                 /*
    4741             :                  * We don't need to allow old multixacts here; if that had
    4742             :                  * been the case, HeapTupleSatisfiesUpdate would have returned
     4743             :                  * TM_Ok and we wouldn't be here.
    4744             :                  */
    4745             :                 nmembers =
    4746      146596 :                     GetMultiXactIdMembers(xwait, &members, false,
    4747      146596 :                                           HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    4748             : 
    4749     2845334 :                 for (i = 0; i < nmembers; i++)
    4750             :                 {
    4751             :                     /* only consider members of our own transaction */
    4752     2698766 :                     if (!TransactionIdIsCurrentTransactionId(members[i].xid))
    4753     2698668 :                         continue;
    4754             : 
    4755          98 :                     if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
    4756             :                     {
    4757          28 :                         pfree(members);
    4758          28 :                         result = TM_Ok;
    4759          28 :                         goto out_unlocked;
    4760             :                     }
    4761             :                     else
    4762             :                     {
    4763             :                         /*
    4764             :                          * Disable acquisition of the heavyweight tuple lock.
    4765             :                          * Otherwise, when promoting a weaker lock, we might
    4766             :                          * deadlock with another locker that has acquired the
    4767             :                          * heavyweight tuple lock and is waiting for our
    4768             :                          * transaction to finish.
    4769             :                          *
    4770             :                          * Note that in this case we still need to wait for
    4771             :                          * the multixact if required, to avoid acquiring
    4772             :                          * conflicting locks.
    4773             :                          */
    4774          70 :                         skip_tuple_lock = true;
    4775             :                     }
    4776             :                 }
    4777             : 
    4778      146568 :                 if (members)
    4779      146568 :                     pfree(members);
    4780             :             }
    4781       15780 :             else if (TransactionIdIsCurrentTransactionId(xwait))
    4782             :             {
    4783       13142 :                 switch (mode)
    4784             :                 {
    4785         348 :                     case LockTupleKeyShare:
    4786             :                         Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
    4787             :                                HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4788             :                                HEAP_XMAX_IS_EXCL_LOCKED(infomask));
    4789         348 :                         result = TM_Ok;
    4790         348 :                         goto out_unlocked;
    4791          40 :                     case LockTupleShare:
    4792          52 :                         if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4793          12 :                             HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4794             :                         {
    4795          28 :                             result = TM_Ok;
    4796          28 :                             goto out_unlocked;
    4797             :                         }
    4798          12 :                         break;
    4799         144 :                     case LockTupleNoKeyExclusive:
    4800         144 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4801             :                         {
    4802         120 :                             result = TM_Ok;
    4803         120 :                             goto out_unlocked;
    4804             :                         }
    4805          24 :                         break;
    4806       12610 :                     case LockTupleExclusive:
    4807       12610 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
    4808        2530 :                             infomask2 & HEAP_KEYS_UPDATED)
    4809             :                         {
    4810        2488 :                             result = TM_Ok;
    4811        2488 :                             goto out_unlocked;
    4812             :                         }
    4813       10122 :                         break;
    4814             :                 }
    4815             :             }
    4816             :         }
    4817             : 
    4818             :         /*
    4819             :          * Initially assume that we will have to wait for the locking
    4820             :          * transaction(s) to finish.  We check various cases below in which
    4821             :          * this can be turned off.
    4822             :          */
    4823      159386 :         require_sleep = true;
    4824      159386 :         if (mode == LockTupleKeyShare)
    4825             :         {
    4826             :             /*
    4827             :              * If we're requesting KeyShare, and there's no update present, we
    4828             :              * don't need to wait.  Even if there is an update, we can still
    4829             :              * continue if the key hasn't been modified.
    4830             :              *
    4831             :              * However, if there are updates, we need to walk the update chain
    4832             :              * to mark future versions of the row as locked, too.  That way,
    4833             :              * if somebody deletes that future version, we're protected
    4834             :              * against the key going away.  This locking of future versions
    4835             :              * could block momentarily, if a concurrent transaction is
    4836             :              * deleting a key; or it could return a value to the effect that
    4837             :              * the transaction deleting the key has already committed.  So we
    4838             :              * do this before re-locking the buffer; otherwise this would be
    4839             :              * prone to deadlocks.
    4840             :              *
    4841             :              * Note that the TID we're locking was grabbed before we unlocked
    4842             :              * the buffer.  For it to change while we're not looking, the
    4843             :              * other properties we're testing for below after re-locking the
    4844             :              * buffer would also change, in which case we would restart this
    4845             :              * loop above.
    4846             :              */
    4847      147726 :             if (!(infomask2 & HEAP_KEYS_UPDATED))
    4848             :             {
    4849             :                 bool        updated;
    4850             : 
    4851      147640 :                 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
    4852             : 
    4853             :                 /*
    4854             :                  * If there are updates, follow the update chain; bail out if
    4855             :                  * that cannot be done.
    4856             :                  */
    4857      147640 :                 if (follow_updates && updated &&
    4858        4338 :                     !ItemPointerEquals(&tuple->t_self, &t_ctid))
    4859             :                 {
    4860             :                     TM_Result   res;
    4861             : 
    4862        4338 :                     res = heap_lock_updated_tuple(relation,
    4863             :                                                   infomask, xwait, &t_ctid,
    4864             :                                                   GetCurrentTransactionId(),
    4865             :                                                   mode);
    4866        4338 :                     if (res != TM_Ok)
    4867             :                     {
    4868          12 :                         result = res;
    4869             :                         /* recovery code expects to have buffer lock held */
    4870          12 :                         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4871         392 :                         goto failed;
    4872             :                     }
    4873             :                 }
    4874             : 
    4875      147628 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4876             : 
    4877             :                 /*
    4878             :                  * Make sure it's still an appropriate lock, else start over.
    4879             :                  * Also, if it wasn't updated before we released the lock, but
    4880             :                  * is updated now, we start over too; the reason is that we
    4881             :                  * now need to follow the update chain to lock the new
    4882             :                  * versions.
    4883             :                  */
    4884      147628 :                 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
    4885        4302 :                     ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
    4886        4302 :                      !updated))
    4887          32 :                     goto l3;
    4888             : 
    4889             :                 /* Things look okay, so we can skip sleeping */
    4890      147628 :                 require_sleep = false;
    4891             : 
    4892             :                 /*
    4893             :                  * Note we allow Xmax to change here; other updaters/lockers
    4894             :                  * could have modified it before we grabbed the buffer lock.
    4895             :                  * However, this is not a problem, because with the recheck we
    4896             :                  * just did we ensure that they still don't conflict with the
    4897             :                  * lock we want.
    4898             :                  */
    4899             :             }
    4900             :         }
    4901       11660 :         else if (mode == LockTupleShare)
    4902             :         {
    4903             :             /*
    4904             :              * If we're requesting Share, we can similarly avoid sleeping if
    4905             :              * there's no update and no exclusive lock present.
    4906             :              */
    4907         886 :             if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    4908         886 :                 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4909             :             {
    4910         874 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4911             : 
    4912             :                 /*
    4913             :                  * Make sure it's still an appropriate lock, else start over.
    4914             :                  * See above about allowing xmax to change.
    4915             :                  */
    4916        1748 :                 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4917         874 :                     HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
    4918           0 :                     goto l3;
    4919         874 :                 require_sleep = false;
    4920             :             }
    4921             :         }
    4922       10774 :         else if (mode == LockTupleNoKeyExclusive)
    4923             :         {
    4924             :             /*
    4925             :              * If we're requesting NoKeyExclusive, we might also be able to
     4926             :              * avoid sleeping; just ensure that there is no conflicting lock
    4927             :              * already acquired.
    4928             :              */
    4929         340 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4930             :             {
    4931          52 :                 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    4932             :                                              mode, NULL))
    4933             :                 {
    4934             :                     /*
    4935             :                      * No conflict, but if the xmax changed under us in the
    4936             :                      * meantime, start over.
    4937             :                      */
    4938          26 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4939          52 :                     if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4940          26 :                         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4941             :                                              xwait))
    4942           0 :                         goto l3;
    4943             : 
    4944             :                     /* otherwise, we're good */
    4945          26 :                     require_sleep = false;
    4946             :                 }
    4947             :             }
    4948         288 :             else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    4949             :             {
    4950          36 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4951             : 
    4952             :                 /* if the xmax changed in the meantime, start over */
    4953          72 :                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4954          36 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4955             :                                          xwait))
    4956           0 :                     goto l3;
    4957             :                 /* otherwise, we're good */
    4958          36 :                 require_sleep = false;
    4959             :             }
    4960             :         }
    4961             : 
    4962             :         /*
    4963             :          * As a check independent from those above, we can also avoid sleeping
    4964             :          * if the current transaction is the sole locker of the tuple.  Note
    4965             :          * that the strength of the lock already held is irrelevant; this is
    4966             :          * not about recording the lock in Xmax (which will be done regardless
    4967             :          * of this optimization, below).  Also, note that the cases where we
    4968             :          * hold a lock stronger than we are requesting are already handled
    4969             :          * above by not doing anything.
    4970             :          *
    4971             :          * Note we only deal with the non-multixact case here; MultiXactIdWait
    4972             :          * is well equipped to deal with this situation on its own.
    4973             :          */
    4974      170104 :         if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
    4975       10730 :             TransactionIdIsCurrentTransactionId(xwait))
    4976             :         {
    4977             :             /* ... but if the xmax changed in the meantime, start over */
    4978       10122 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4979       20244 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4980       10122 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4981             :                                      xwait))
    4982           0 :                 goto l3;
    4983             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
    4984       10122 :             require_sleep = false;
    4985             :         }
    4986             : 
    4987             :         /*
    4988             :          * Time to sleep on the other transaction/multixact, if necessary.
    4989             :          *
    4990             :          * If the other transaction is an update/delete that's already
    4991             :          * committed, then sleeping cannot possibly do any good: if we're
    4992             :          * required to sleep, get out to raise an error instead.
    4993             :          *
    4994             :          * By here, we either have already acquired the buffer exclusive lock,
    4995             :          * or we must wait for the locking transaction or multixact; so below
    4996             :          * we ensure that we grab buffer lock after the sleep.
    4997             :          */
    4998      159374 :         if (require_sleep && (result == TM_Updated || result == TM_Deleted))
    4999             :         {
    5000         304 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5001         304 :             goto failed;
    5002             :         }
    5003      159070 :         else if (require_sleep)
    5004             :         {
    5005             :             /*
    5006             :              * Acquire tuple lock to establish our priority for the tuple, or
    5007             :              * die trying.  LockTuple will release us when we are next-in-line
    5008             :              * for the tuple.  We must do this even if we are share-locking,
    5009             :              * but not if we already have a weaker lock on the tuple.
    5010             :              *
    5011             :              * If we are forced to "start over" below, we keep the tuple lock;
    5012             :              * this arranges that we stay at the head of the line while
    5013             :              * rechecking tuple state.
    5014             :              */
    5015         384 :             if (!skip_tuple_lock &&
    5016         352 :                 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
    5017             :                                       &have_tuple_lock))
    5018             :             {
    5019             :                 /*
    5020             :                  * This can only happen if wait_policy is Skip and the lock
    5021             :                  * couldn't be obtained.
    5022             :                  */
    5023           2 :                 result = TM_WouldBlock;
    5024             :                 /* recovery code expects to have buffer lock held */
    5025           2 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5026           2 :                 goto failed;
    5027             :             }
    5028             : 
    5029         380 :             if (infomask & HEAP_XMAX_IS_MULTI)
    5030             :             {
    5031          80 :                 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
    5032             : 
    5033             :                 /* We only ever lock tuples, never update them */
    5034          80 :                 if (status >= MultiXactStatusNoKeyUpdate)
    5035           0 :                     elog(ERROR, "invalid lock mode in heap_lock_tuple");
    5036             : 
    5037             :                 /* wait for multixact to end, or die trying  */
    5038          80 :                 switch (wait_policy)
    5039             :                 {
    5040          72 :                     case LockWaitBlock:
    5041          72 :                         MultiXactIdWait((MultiXactId) xwait, status, infomask,
    5042          72 :                                         relation, &tuple->t_self, XLTW_Lock, NULL);
    5043          72 :                         break;
    5044           4 :                     case LockWaitSkip:
    5045           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    5046             :                                                         status, infomask, relation,
    5047             :                                                         NULL, false))
    5048             :                         {
    5049           4 :                             result = TM_WouldBlock;
    5050             :                             /* recovery code expects to have buffer lock held */
    5051           4 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5052           4 :                             goto failed;
    5053             :                         }
    5054           0 :                         break;
    5055           4 :                     case LockWaitError:
    5056           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    5057             :                                                         status, infomask, relation,
    5058             :                                                         NULL, log_lock_failures))
    5059           4 :                             ereport(ERROR,
    5060             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5061             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    5062             :                                             RelationGetRelationName(relation))));
    5063             : 
    5064           0 :                         break;
    5065             :                 }
    5066             : 
    5067             :                 /*
    5068             :                  * Of course, the multixact might not be done here: if we're
    5069             :                  * requesting a light lock mode, other transactions with light
    5070             :                  * locks could still be alive, as well as locks owned by our
    5071             :                  * own xact or other subxacts of this backend.  We need to
    5072             :                  * preserve the surviving MultiXact members.  Note that it
    5073             :                  * isn't absolutely necessary in the latter case, but doing so
    5074             :                  * is simpler.
    5075             :                  */
    5076             :             }
    5077             :             else
    5078             :             {
    5079             :                 /* wait for regular transaction to end, or die trying */
    5080         300 :                 switch (wait_policy)
    5081             :                 {
    5082         222 :                     case LockWaitBlock:
    5083         222 :                         XactLockTableWait(xwait, relation, &tuple->t_self,
    5084             :                                           XLTW_Lock);
    5085         222 :                         break;
    5086          66 :                     case LockWaitSkip:
    5087          66 :                         if (!ConditionalXactLockTableWait(xwait, false))
    5088             :                         {
    5089          66 :                             result = TM_WouldBlock;
    5090             :                             /* recovery code expects to have buffer lock held */
    5091          66 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5092          66 :                             goto failed;
    5093             :                         }
    5094           0 :                         break;
    5095          12 :                     case LockWaitError:
    5096          12 :                         if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
    5097          12 :                             ereport(ERROR,
    5098             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5099             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    5100             :                                             RelationGetRelationName(relation))));
    5101           0 :                         break;
    5102             :                 }
    5103             :             }
    5104             : 
    5105             :             /* if there are updates, follow the update chain */
    5106         294 :             if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    5107         118 :                 !ItemPointerEquals(&tuple->t_self, &t_ctid))
    5108             :             {
    5109             :                 TM_Result   res;
    5110             : 
    5111          90 :                 res = heap_lock_updated_tuple(relation,
    5112             :                                               infomask, xwait, &t_ctid,
    5113             :                                               GetCurrentTransactionId(),
    5114             :                                               mode);
    5115          90 :                 if (res != TM_Ok)
    5116             :                 {
    5117           4 :                     result = res;
    5118             :                     /* recovery code expects to have buffer lock held */
    5119           4 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5120           4 :                     goto failed;
    5121             :                 }
    5122             :             }
    5123             : 
    5124         290 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5125             : 
    5126             :             /*
    5127             :              * xwait is done, but if xwait had just locked the tuple then some
    5128             :              * other xact could update this tuple before we get to this point.
    5129             :              * Check for xmax change, and start over if so.
    5130             :              */
    5131         552 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    5132         262 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    5133             :                                      xwait))
    5134          32 :                 goto l3;
    5135             : 
    5136         258 :             if (!(infomask & HEAP_XMAX_IS_MULTI))
    5137             :             {
    5138             :                 /*
    5139             :                  * Otherwise check if it committed or aborted.  Note we cannot
    5140             :                  * be here if the tuple was only locked by somebody who didn't
    5141             :                  * conflict with us; that would have been handled above.  So
    5142             :                  * that transaction must necessarily be gone by now.  But
    5143             :                  * don't check for this in the multixact case, because some
    5144             :                  * locker transactions might still be running.
    5145             :                  */
    5146         192 :                 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
    5147             :             }
    5148             :         }
    5149             : 
    5150             :         /* By here, we're certain that we hold buffer exclusive lock again */
    5151             : 
    5152             :         /*
    5153             :          * We may lock if previous xmax aborted, or if it committed but only
    5154             :          * locked the tuple without updating it; or if we didn't have to wait
    5155             :          * at all for whatever reason.
    5156             :          */
    5157      158944 :         if (!require_sleep ||
    5158         454 :             (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
    5159         360 :             HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    5160         164 :             HeapTupleHeaderIsOnlyLocked(tuple->t_data))
    5161      158792 :             result = TM_Ok;
    5162         152 :         else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
    5163         114 :             result = TM_Updated;
    5164             :         else
    5165          38 :             result = TM_Deleted;
    5166             :     }
    5167             : 
    5168      154424 : failed:
    5169      313760 :     if (result != TM_Ok)
    5170             :     {
    5171             :         Assert(result == TM_SelfModified || result == TM_Updated ||
    5172             :                result == TM_Deleted || result == TM_WouldBlock);
    5173             : 
    5174             :         /*
    5175             :          * When locking a tuple under LockWaitSkip semantics and we fail with
    5176             :          * TM_WouldBlock above, it's possible for concurrent transactions to
    5177             :          * release the lock and set HEAP_XMAX_INVALID in the meantime.  So
    5178             :          * this assert is slightly different from the equivalent one in
    5179             :          * heap_delete and heap_update.
    5180             :          */
    5181             :         Assert((result == TM_WouldBlock) ||
    5182             :                !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
    5183             :         Assert(result != TM_Updated ||
    5184             :                !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
    5185         556 :         tmfd->ctid = tuple->t_data->t_ctid;
    5186         556 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    5187         556 :         if (result == TM_SelfModified)
    5188          12 :             tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
    5189             :         else
    5190         544 :             tmfd->cmax = InvalidCommandId;
    5191         556 :         goto out_locked;
    5192             :     }
    5193             : 
    5194             :     /*
    5195             :      * If we didn't pin the visibility map page and the page has become all
    5196             :      * visible while we were busy locking the buffer, or during some
    5197             :      * subsequent window during which we had it unlocked, we'll have to unlock
    5198             :      * and re-lock, to avoid holding the buffer lock across I/O.  That's a bit
    5199             :      * unfortunate, especially since we'll now have to recheck whether the
    5200             :      * tuple has been locked or updated under us, but hopefully it won't
    5201             :      * happen very often.
    5202             :      */
    5203      313204 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    5204             :     {
    5205           0 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    5206           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    5207           0 :         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5208           0 :         goto l3;
    5209             :     }
    5210             : 
    5211      313204 :     xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
    5212      313204 :     old_infomask = tuple->t_data->t_infomask;
    5213             : 
    5214             :     /*
    5215             :      * If this is the first possibly-multixact-able operation in the current
    5216             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    5217             :      * certain that the transaction will never become a member of any older
    5218             :      * MultiXactIds than that.  (We have to do this even if we end up just
    5219             :      * using our own TransactionId below, since some other backend could
    5220             :      * incorporate our XID into a MultiXact immediately afterwards.)
    5221             :      */
    5222      313204 :     MultiXactIdSetOldestMember();
    5223             : 
    5224             :     /*
    5225             :      * Compute the new xmax and infomask to store into the tuple.  Note we do
    5226             :      * not modify the tuple just yet, because that would leave it in the wrong
    5227             :      * state if multixact.c elogs.
    5228             :      */
    5229      313204 :     compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
    5230             :                               GetCurrentTransactionId(), mode, false,
    5231             :                               &xid, &new_infomask, &new_infomask2);
    5232             : 
    5233      313204 :     START_CRIT_SECTION();
    5234             : 
    5235             :     /*
    5236             :      * Store transaction information of xact locking the tuple.
    5237             :      *
    5238             :      * Note: Cmax is meaningless in this context, so don't set it; this avoids
    5239             :      * possibly generating a useless combo CID.  Moreover, if we're locking a
    5240             :      * previously updated tuple, it's important to preserve the Cmax.
    5241             :      *
    5242             :      * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
    5243             :      * we would break the HOT chain.
    5244             :      */
    5245      313204 :     tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
    5246      313204 :     tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5247      313204 :     tuple->t_data->t_infomask |= new_infomask;
    5248      313204 :     tuple->t_data->t_infomask2 |= new_infomask2;
    5249      313204 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    5250      308910 :         HeapTupleHeaderClearHotUpdated(tuple->t_data);
    5251      313204 :     HeapTupleHeaderSetXmax(tuple->t_data, xid);
    5252             : 
    5253             :     /*
    5254             :      * Make sure there is no forward chain link in t_ctid.  Note that in the
    5255             :      * cases where the tuple has been updated, we must not overwrite t_ctid,
    5256             :      * because it was set by the updater.  Moreover, if the tuple has been
    5257             :      * updated, we need to follow the update chain to lock the new versions of
    5258             :      * the tuple as well.
    5259             :      */
    5260      313204 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    5261      308910 :         tuple->t_data->t_ctid = *tid;
    5262             : 
    5263             :     /* Clear only the all-frozen bit on visibility map if needed */
    5264      316536 :     if (PageIsAllVisible(page) &&
    5265        3332 :         visibilitymap_clear(relation, block, vmbuffer,
    5266             :                             VISIBILITYMAP_ALL_FROZEN))
    5267          28 :         cleared_all_frozen = true;
    5268             : 
    5269             : 
    5270      313204 :     MarkBufferDirty(*buffer);
    5271             : 
    5272             :     /*
    5273             :      * XLOG stuff.  You might think that we don't need an XLOG record because
    5274             :      * there is no state change worth restoring after a crash.  You would be
    5275             :      * wrong however: we have just written either a TransactionId or a
    5276             :      * MultiXactId that may never have been seen on disk before, and we need
    5277             :      * to make sure that there are XLOG entries covering those ID numbers.
    5278             :      * Else the same IDs might be re-used after a crash, which would be
    5279             :      * disastrous if this page made it to disk before the crash.  Essentially
    5280             :      * we have to enforce the WAL log-before-data rule even in this case.
    5281             :      * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
    5282             :      * entries for everything anyway.)
    5283             :      */
    5284      313204 :     if (RelationNeedsWAL(relation))
    5285             :     {
    5286             :         xl_heap_lock xlrec;
    5287             :         XLogRecPtr  recptr;
    5288             : 
    5289      312500 :         XLogBeginInsert();
    5290      312500 :         XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
    5291             : 
    5292      312500 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    5293      312500 :         xlrec.xmax = xid;
    5294      625000 :         xlrec.infobits_set = compute_infobits(new_infomask,
    5295      312500 :                                               tuple->t_data->t_infomask2);
    5296      312500 :         xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    5297      312500 :         XLogRegisterData(&xlrec, SizeOfHeapLock);
    5298             : 
    5299             :         /* we don't decode row locks atm, so no need to log the origin */
    5300             : 
    5301      312500 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    5302             : 
    5303      312500 :         PageSetLSN(page, recptr);
    5304             :     }
    5305             : 
    5306      313204 :     END_CRIT_SECTION();
    5307             : 
    5308      313204 :     result = TM_Ok;
    5309             : 
    5310      313784 : out_locked:
    5311      313784 :     LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    5312             : 
    5313      316796 : out_unlocked:
    5314      316796 :     if (BufferIsValid(vmbuffer))
    5315        3332 :         ReleaseBuffer(vmbuffer);
    5316             : 
    5317             :     /*
    5318             :      * Don't update the visibility map here. Locking a tuple doesn't change
    5319             :      * visibility info.
    5320             :      */
    5321             : 
    5322             :     /*
    5323             :      * Now that we have successfully marked the tuple as locked, we can
    5324             :      * release the lmgr tuple lock, if we had it.
    5325             :      */
    5326      316796 :     if (have_tuple_lock)
    5327         322 :         UnlockTupleTuplock(relation, tid, mode);
    5328             : 
    5329      316796 :     return result;
    5330             : }
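                      : 
                      : /*
                      :  * Editor's sketch, not part of heapam.c: a minimal caller-side use of
                      :  * heap_lock_tuple, assuming the signature declared in access/heapam.h;
                      :  * "rel" and "mytup" are placeholder names, and the bare "false" argument
                      :  * is follow_updates.  The buffer comes back pinned whatever the result,
                      :  * so the caller is expected to release it:
                      :  *
                      :  *     TM_FailureData tmfd;
                      :  *     Buffer      buf;
                      :  *     TM_Result   res;
                      :  *
                      :  *     res = heap_lock_tuple(rel, &mytup, GetCurrentCommandId(true),
                      :  *                           LockTupleExclusive, LockWaitBlock,
                      :  *                           false, &buf, &tmfd);
                      :  *     if (res != TM_Ok)
                      :  *         ... handle the conflict reported in tmfd.ctid / tmfd.xmax ...
                      :  *     ReleaseBuffer(buf);
                      :  */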
    5331             : 
    5332             : /*
    5333             :  * Acquire heavyweight lock on the given tuple, in preparation for acquiring
    5334             :  * its normal, Xmax-based tuple lock.
    5335             :  *
    5336             :  * have_tuple_lock is an input and output parameter: on input, it indicates
    5337             :  * whether the lock has previously been acquired (and this function does
    5338             :  * nothing in that case).  If this function returns success, have_tuple_lock
    5339             :  * has been flipped to true.
    5340             :  *
    5341             :  * Returns false if it was unable to obtain the lock; this can only happen if
    5342             :  * wait_policy is Skip.
    5343             :  */
    5344             : static bool
    5345         624 : heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
    5346             :                      LockWaitPolicy wait_policy, bool *have_tuple_lock)
    5347             : {
    5348         624 :     if (*have_tuple_lock)
    5349          18 :         return true;
    5350             : 
    5351         606 :     switch (wait_policy)
    5352             :     {
    5353         524 :         case LockWaitBlock:
    5354         524 :             LockTupleTuplock(relation, tid, mode);
    5355         524 :             break;
    5356             : 
    5357          68 :         case LockWaitSkip:
    5358          68 :             if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
    5359           2 :                 return false;
    5360          66 :             break;
    5361             : 
    5362          14 :         case LockWaitError:
    5363          14 :             if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
    5364           2 :                 ereport(ERROR,
    5365             :                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5366             :                          errmsg("could not obtain lock on row in relation \"%s\"",
    5367             :                                 RelationGetRelationName(relation))));
    5368          12 :             break;
    5369             :     }
    5370         602 :     *have_tuple_lock = true;
    5371             : 
    5372         602 :     return true;
    5373             : }
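                      : 
                      : /*
                      :  * Editor's sketch, not part of heapam.c: the caller-side pattern used by
                      :  * heap_lock_tuple above.  The flag starts out false, a false return is
                      :  * only possible under LockWaitSkip, and the heavyweight lock is dropped
                      :  * again once the tuple's Xmax has been updated:
                      :  *
                      :  *     bool        have_tuple_lock = false;
                      :  *
                      :  *     if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
                      :  *                               &have_tuple_lock))
                      :  *         result = TM_WouldBlock;        (see the "failed" path above)
                      :  *     ...
                      :  *     if (have_tuple_lock)
                      :  *         UnlockTupleTuplock(relation, tid, mode);
                      :  */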
    5374             : 
    5375             : /*
    5376             :  * Given an original set of Xmax and infomask, and a transaction (identified by
    5377             :  * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
    5378             :  * corresponding infomasks to use on the tuple.
    5379             :  *
    5380             :  * Note that this might have side effects such as creating a new MultiXactId.
    5381             :  *
    5382             :  * Most callers will have called HeapTupleSatisfiesUpdate before this function;
    5383             :  * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
    5384             :  * but it was not running anymore. There is a race condition, which is that the
    5385             :  * MultiXactId may have finished since then, but that uncommon case is handled
    5386             :  * either here, or within MultiXactIdExpand.
    5387             :  *
    5388             :  * There is a similar race condition possible when the old xmax was a regular
    5389             :  * TransactionId.  We test TransactionIdIsInProgress again just to narrow the
    5390             :  * window, but it's still possible to end up creating an unnecessary
    5391             :  * MultiXactId.  Fortunately this is harmless.
    5392             :  */
    5393             : static void
    5394     4297138 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
    5395             :                           uint16 old_infomask2, TransactionId add_to_xmax,
    5396             :                           LockTupleMode mode, bool is_update,
    5397             :                           TransactionId *result_xmax, uint16 *result_infomask,
    5398             :                           uint16 *result_infomask2)
    5399             : {
    5400             :     TransactionId new_xmax;
    5401             :     uint16      new_infomask,
    5402             :                 new_infomask2;
    5403             : 
    5404             :     Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
    5405             : 
    5406      208140 : l5:
    5407     4505278 :     new_infomask = 0;
    5408     4505278 :     new_infomask2 = 0;
    5409     4505278 :     if (old_infomask & HEAP_XMAX_INVALID)
    5410             :     {
    5411             :         /*
    5412             :          * No previous locker; we just insert our own TransactionId.
    5413             :          *
    5414             :          * Note that it's critical that this case be the first one checked,
    5415             :          * because there are several blocks below that come back to this one
    5416             :          * to implement certain optimizations; old_infomask might contain
    5417             :          * other dirty bits in those cases, but we don't really care.
    5418             :          */
    5419     4143882 :         if (is_update)
    5420             :         {
    5421     3675370 :             new_xmax = add_to_xmax;
    5422     3675370 :             if (mode == LockTupleExclusive)
    5423     3124538 :                 new_infomask2 |= HEAP_KEYS_UPDATED;
    5424             :         }
    5425             :         else
    5426             :         {
    5427      468512 :             new_infomask |= HEAP_XMAX_LOCK_ONLY;
    5428      468512 :             switch (mode)
    5429             :             {
    5430        5280 :                 case LockTupleKeyShare:
    5431        5280 :                     new_xmax = add_to_xmax;
    5432        5280 :                     new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
    5433        5280 :                     break;
    5434        1478 :                 case LockTupleShare:
    5435        1478 :                     new_xmax = add_to_xmax;
    5436        1478 :                     new_infomask |= HEAP_XMAX_SHR_LOCK;
    5437        1478 :                     break;
    5438      270174 :                 case LockTupleNoKeyExclusive:
    5439      270174 :                     new_xmax = add_to_xmax;
    5440      270174 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5441      270174 :                     break;
    5442      191580 :                 case LockTupleExclusive:
    5443      191580 :                     new_xmax = add_to_xmax;
    5444      191580 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5445      191580 :                     new_infomask2 |= HEAP_KEYS_UPDATED;
    5446      191580 :                     break;
    5447           0 :                 default:
    5448           0 :                     new_xmax = InvalidTransactionId;    /* silence compiler */
    5449           0 :                     elog(ERROR, "invalid lock mode");
    5450             :             }
    5451             :         }
    5452             :     }
    5453      361396 :     else if (old_infomask & HEAP_XMAX_IS_MULTI)
    5454             :     {
    5455             :         MultiXactStatus new_status;
    5456             : 
    5457             :         /*
    5458             :          * Currently we don't allow XMAX_COMMITTED to be set for multis, so
    5459             :          * cross-check.
    5460             :          */
    5461             :         Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
    5462             : 
    5463             :         /*
    5464             :          * A multixact together with LOCK_ONLY set but neither lock bit set
    5465             :          * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
    5466             :          * anymore.  This check is critical for databases upgraded by
    5467             :          * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
    5468             :          * that such multis are never passed.
    5469             :          */
    5470      151124 :         if (HEAP_LOCKED_UPGRADED(old_infomask))
    5471             :         {
    5472           0 :             old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5473           0 :             old_infomask |= HEAP_XMAX_INVALID;
    5474           0 :             goto l5;
    5475             :         }
    5476             : 
    5477             :         /*
    5478             :          * If the XMAX is already a MultiXactId, then we need to expand it to
    5479             :          * include add_to_xmax; but if all the members were lockers and are
    5480             :          * all gone, we can do away with the IS_MULTI bit and just set
    5481             :          * add_to_xmax as the only locker/updater.  If all lockers are gone
    5482             :          * and we have an updater that aborted, we can also do without a
    5483             :          * multi.
    5484             :          *
    5485             :          * The cost of doing GetMultiXactIdMembers would be paid by
    5486             :          * MultiXactIdExpand if we weren't to do this, so this check is not
    5487             :          * incurring extra work anyhow.
    5488             :          */
    5489      151124 :         if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
    5490             :         {
    5491          48 :             if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
    5492          16 :                 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
    5493             :                                                                 old_infomask)))
    5494             :             {
    5495             :                 /*
    5496             :                  * Reset these bits and restart; otherwise fall through to
    5497             :                  * create a new multi below.
    5498             :                  */
    5499          48 :                 old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5500          48 :                 old_infomask |= HEAP_XMAX_INVALID;
    5501          48 :                 goto l5;
    5502             :             }
    5503             :         }
    5504             : 
    5505      151076 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5506             : 
    5507      151076 :         new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
    5508             :                                      new_status);
    5509      151076 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5510             :     }
    5511      210272 :     else if (old_infomask & HEAP_XMAX_COMMITTED)
    5512             :     {
    5513             :         /*
     5514             :          * It's a committed update, so we need to preserve it as the updater
     5515             :          * of the tuple.
    5516             :          */
    5517             :         MultiXactStatus status;
    5518             :         MultiXactStatus new_status;
    5519             : 
    5520          26 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5521           0 :             status = MultiXactStatusUpdate;
    5522             :         else
    5523          26 :             status = MultiXactStatusNoKeyUpdate;
    5524             : 
    5525          26 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5526             : 
    5527             :         /*
    5528             :          * since it's not running, it's obviously impossible for the old
    5529             :          * updater to be identical to the current one, so we need not check
    5530             :          * for that case as we do in the block above.
    5531             :          */
    5532          26 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5533          26 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5534             :     }
    5535      210246 :     else if (TransactionIdIsInProgress(xmax))
    5536             :     {
    5537             :         /*
    5538             :          * If the XMAX is a valid, in-progress TransactionId, then we need to
    5539             :          * create a new MultiXactId that includes both the old locker or
    5540             :          * updater and our own TransactionId.
    5541             :          */
    5542             :         MultiXactStatus new_status;
    5543             :         MultiXactStatus old_status;
    5544             :         LockTupleMode old_mode;
    5545             : 
    5546      210228 :         if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5547             :         {
    5548      210176 :             if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5549       11354 :                 old_status = MultiXactStatusForKeyShare;
    5550      198822 :             else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5551         866 :                 old_status = MultiXactStatusForShare;
    5552      197956 :             else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5553             :             {
    5554      197956 :                 if (old_infomask2 & HEAP_KEYS_UPDATED)
    5555      185646 :                     old_status = MultiXactStatusForUpdate;
    5556             :                 else
    5557       12310 :                     old_status = MultiXactStatusForNoKeyUpdate;
    5558             :             }
    5559             :             else
    5560             :             {
    5561             :                 /*
    5562             :                  * LOCK_ONLY can be present alone only when a page has been
    5563             :                  * upgraded by pg_upgrade.  But in that case,
    5564             :                  * TransactionIdIsInProgress() should have returned false.  We
    5565             :                  * assume it's no longer locked in this case.
    5566             :                  */
    5567           0 :                 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
    5568           0 :                 old_infomask |= HEAP_XMAX_INVALID;
    5569           0 :                 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
    5570           0 :                 goto l5;
    5571             :             }
    5572             :         }
    5573             :         else
    5574             :         {
    5575             :             /* it's an update, but which kind? */
    5576          52 :             if (old_infomask2 & HEAP_KEYS_UPDATED)
    5577           0 :                 old_status = MultiXactStatusUpdate;
    5578             :             else
    5579          52 :                 old_status = MultiXactStatusNoKeyUpdate;
    5580             :         }
    5581             : 
    5582      210228 :         old_mode = TUPLOCK_from_mxstatus(old_status);
    5583             : 
    5584             :         /*
    5585             :          * If the lock to be acquired is for the same TransactionId as the
    5586             :          * existing lock, there's an optimization possible: consider only the
    5587             :          * strongest of both locks as the only one present, and restart.
    5588             :          */
    5589      210228 :         if (xmax == add_to_xmax)
    5590             :         {
    5591             :             /*
    5592             :              * Note that it's not possible for the original tuple to be
    5593             :              * updated: we wouldn't be here because the tuple would have been
    5594             :              * invisible and we wouldn't try to update it.  As a subtlety,
    5595             :              * this code can also run when traversing an update chain to lock
    5596             :              * future versions of a tuple.  But we wouldn't be here either,
    5597             :              * because the add_to_xmax would be different from the original
    5598             :              * updater.
    5599             :              */
    5600             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5601             : 
    5602             :             /* acquire the strongest of both */
    5603      208076 :             if (mode < old_mode)
    5604      104510 :                 mode = old_mode;
    5605             :             /* mustn't touch is_update */
    5606             : 
    5607      208076 :             old_infomask |= HEAP_XMAX_INVALID;
    5608      208076 :             goto l5;
    5609             :         }
    5610             : 
    5611             :         /* otherwise, just fall back to creating a new multixact */
    5612        2152 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5613        2152 :         new_xmax = MultiXactIdCreate(xmax, old_status,
    5614             :                                      add_to_xmax, new_status);
    5615        2152 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5616             :     }
    5617          28 :     else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
    5618          10 :              TransactionIdDidCommit(xmax))
    5619           2 :     {
    5620             :         /*
     5621             :          * It's a committed update, so we need to preserve it as the updater
     5622             :          * of the tuple.
    5623             :          */
    5624             :         MultiXactStatus status;
    5625             :         MultiXactStatus new_status;
    5626             : 
    5627           2 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5628           0 :             status = MultiXactStatusUpdate;
    5629             :         else
    5630           2 :             status = MultiXactStatusNoKeyUpdate;
    5631             : 
    5632           2 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5633             : 
    5634             :         /*
    5635             :          * since it's not running, it's obviously impossible for the old
    5636             :          * updater to be identical to the current one, so we need not check
    5637             :          * for that case as we do in the block above.
    5638             :          */
    5639           2 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5640           2 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5641             :     }
    5642             :     else
    5643             :     {
    5644             :         /*
    5645             :          * Can get here iff the locking/updating transaction was running when
    5646             :          * the infomask was extracted from the tuple, but finished before
    5647             :          * TransactionIdIsInProgress got to run.  Deal with it as if there was
    5648             :          * no locker at all in the first place.
    5649             :          */
    5650          16 :         old_infomask |= HEAP_XMAX_INVALID;
    5651          16 :         goto l5;
    5652             :     }
    5653             : 
    5654     4297138 :     *result_infomask = new_infomask;
    5655     4297138 :     *result_infomask2 = new_infomask2;
    5656     4297138 :     *result_xmax = new_xmax;
    5657     4297138 : }
    5658             : 
    5659             : /*
    5660             :  * Subroutine for heap_lock_updated_tuple_rec.
    5661             :  *
    5662             :  * Given a hypothetical multixact status held by the transaction identified
    5663             :  * with the given xid, does the current transaction need to wait, fail, or can
    5664             :  * it continue if it wanted to acquire a lock of the given mode?  "needwait"
    5665             :  * is set to true if waiting is necessary; if it can continue, then TM_Ok is
    5666             :  * returned.  If the lock is already held by the current transaction, return
    5667             :  * TM_SelfModified.  In case of a conflict with another transaction, a
    5668             :  * different HeapTupleSatisfiesUpdate return code is returned.
    5669             :  *
    5670             :  * The held status is said to be hypothetical because it might correspond to a
    5671             :  * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
    5672             :  * way for simplicity of API.
    5673             :  */
    5674             : static TM_Result
    5675       77548 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
    5676             :                            LockTupleMode mode, HeapTuple tup,
    5677             :                            bool *needwait)
    5678             : {
    5679             :     MultiXactStatus wantedstatus;
    5680             : 
    5681       77548 :     *needwait = false;
    5682       77548 :     wantedstatus = get_mxact_status_for_lock(mode, false);
    5683             : 
    5684             :     /*
    5685             :      * Note: we *must* check TransactionIdIsInProgress before
    5686             :      * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
    5687             :      * for an explanation.
    5688             :      */
    5689       77548 :     if (TransactionIdIsCurrentTransactionId(xid))
    5690             :     {
    5691             :         /*
    5692             :          * The tuple has already been locked by our own transaction.  This is
    5693             :          * very rare but can happen if multiple transactions are trying to
    5694             :          * lock an ancient version of the same tuple.
    5695             :          */
    5696           0 :         return TM_SelfModified;
    5697             :     }
    5698       77548 :     else if (TransactionIdIsInProgress(xid))
    5699             :     {
    5700             :         /*
    5701             :          * If the locking transaction is running, what we do depends on
    5702             :          * whether the lock modes conflict: if they do, then we must wait for
    5703             :          * it to finish; otherwise we can fall through to lock this tuple
    5704             :          * version without waiting.
    5705             :          */
    5706       73078 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5707       73078 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5708             :         {
    5709          16 :             *needwait = true;
    5710             :         }
    5711             : 
    5712             :         /*
    5713             :          * If we set needwait above, then this value doesn't matter;
    5714             :          * otherwise, this value signals to caller that it's okay to proceed.
    5715             :          */
    5716       73078 :         return TM_Ok;
    5717             :     }
    5718        4470 :     else if (TransactionIdDidAbort(xid))
    5719         412 :         return TM_Ok;
    5720        4058 :     else if (TransactionIdDidCommit(xid))
    5721             :     {
    5722             :         /*
    5723             :          * The other transaction committed.  If it was only a locker, then the
    5724             :          * lock is completely gone now and we can return success; but if it
    5725             :          * was an update, then what we do depends on whether the two lock
    5726             :          * modes conflict.  If they conflict, then we must report error to
    5727             :          * caller. But if they don't, we can fall through to allow the current
    5728             :          * transaction to lock the tuple.
    5729             :          *
    5730             :          * Note: the reason we worry about ISUPDATE here is that as soon as a
    5731             :          * transaction ends, all its locks are gone and meaningless, and thus
    5732             :          * we can ignore them; whereas its updates persist.  In the
    5733             :          * TransactionIdIsInProgress case, above, we don't need to check
    5734             :          * because we know the lock is still "alive" and thus a conflict
    5735             :          * always needs to be checked.
    5736             :          */
    5737        4058 :         if (!ISUPDATE_from_mxstatus(status))
    5738        4040 :             return TM_Ok;
    5739             : 
    5740          18 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5741          18 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5742             :         {
    5743             :             /* bummer */
    5744          16 :             if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
    5745          12 :                 return TM_Updated;
    5746             :             else
    5747           4 :                 return TM_Deleted;
    5748             :         }
    5749             : 
    5750           2 :         return TM_Ok;
    5751             :     }
    5752             : 
    5753             :     /* Not in progress, not aborted, not committed -- must have crashed */
    5754           0 :     return TM_Ok;
    5755             : }
    5756             : 
    5757             : 
    5758             : /*
    5759             :  * Recursive part of heap_lock_updated_tuple
    5760             :  *
    5761             :  * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
    5762             :  * xid with the given mode; if this tuple is updated, recurse to lock the new
    5763             :  * version as well.
    5764             :  */
    5765             : static TM_Result
    5766        4424 : heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax,
    5767             :                             const ItemPointerData *tid, TransactionId xid,
    5768             :                             LockTupleMode mode)
    5769             : {
    5770             :     TM_Result   result;
    5771             :     ItemPointerData tupid;
    5772             :     HeapTupleData mytup;
    5773             :     Buffer      buf;
    5774             :     uint16      new_infomask,
    5775             :                 new_infomask2,
    5776             :                 old_infomask,
    5777             :                 old_infomask2;
    5778             :     TransactionId xmax,
    5779             :                 new_xmax;
    5780        4424 :     bool        cleared_all_frozen = false;
    5781             :     bool        pinned_desired_page;
    5782        4424 :     Buffer      vmbuffer = InvalidBuffer;
    5783             :     BlockNumber block;
    5784             : 
    5785        4424 :     ItemPointerCopy(tid, &tupid);
    5786             : 
    5787             :     for (;;)
    5788             :     {
    5789        4430 :         new_infomask = 0;
    5790        4430 :         new_xmax = InvalidTransactionId;
    5791        4430 :         block = ItemPointerGetBlockNumber(&tupid);
    5792        4430 :         ItemPointerCopy(&tupid, &(mytup.t_self));
    5793             : 
    5794        4430 :         if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
    5795             :         {
    5796             :             /*
    5797             :              * if we fail to find the updated version of the tuple, it's
    5798             :              * because it was vacuumed/pruned away after its creator
    5799             :              * transaction aborted.  So behave as if we got to the end of the
    5800             :              * chain, and there's no further tuple to lock: return success to
    5801             :              * caller.
    5802             :              */
    5803           0 :             result = TM_Ok;
    5804           0 :             goto out_unlocked;
    5805             :         }
    5806             : 
    5807        4430 : l4:
    5808        4446 :         CHECK_FOR_INTERRUPTS();
    5809             : 
    5810             :         /*
    5811             :          * Before locking the buffer, pin the visibility map page if it
    5812             :          * appears to be necessary.  Since we haven't got the lock yet,
    5813             :          * someone else might be in the middle of changing this, so we'll need
    5814             :          * to recheck after we have the lock.
    5815             :          */
    5816        4446 :         if (PageIsAllVisible(BufferGetPage(buf)))
    5817             :         {
    5818           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5819           0 :             pinned_desired_page = true;
    5820             :         }
    5821             :         else
    5822        4446 :             pinned_desired_page = false;
    5823             : 
    5824        4446 :         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5825             : 
    5826             :         /*
    5827             :          * If we didn't pin the visibility map page and the page has become
    5828             :          * all visible while we were busy locking the buffer, we'll have to
    5829             :          * unlock and re-lock, to avoid holding the buffer lock across I/O.
    5830             :          * That's a bit unfortunate, but hopefully shouldn't happen often.
    5831             :          *
    5832             :          * Note: in some paths through this function, we will reach here
    5833             :          * holding a pin on a vm page that may or may not be the one matching
    5834             :          * this page.  If this page isn't all-visible, we won't use the vm
    5835             :          * page, but we hold onto such a pin till the end of the function.
    5836             :          */
    5837        4446 :         if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
    5838             :         {
    5839           0 :             LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5840           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5841           0 :             LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5842             :         }
    5843             : 
    5844             :         /*
    5845             :          * Check the tuple XMIN against prior XMAX, if any.  If we reached the
    5846             :          * end of the chain, we're done, so return success.
    5847             :          */
    5848        8892 :         if (TransactionIdIsValid(priorXmax) &&
    5849        4446 :             !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
    5850             :                                  priorXmax))
    5851             :         {
    5852           4 :             result = TM_Ok;
    5853           4 :             goto out_locked;
    5854             :         }
    5855             : 
    5856             :         /*
    5857             :          * Also check Xmin: if this tuple was created by an aborted
    5858             :          * (sub)transaction, then we already locked the last live one in the
    5859             :          * chain, thus we're done, so return success.
    5860             :          */
    5861        4442 :         if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
    5862             :         {
    5863          48 :             result = TM_Ok;
    5864          48 :             goto out_locked;
    5865             :         }
    5866             : 
    5867        4394 :         old_infomask = mytup.t_data->t_infomask;
    5868        4394 :         old_infomask2 = mytup.t_data->t_infomask2;
    5869        4394 :         xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5870             : 
    5871             :         /*
    5872             :          * If this tuple version has been updated or locked by some concurrent
    5873             :          * transaction(s), what we do depends on whether our lock mode
    5874             :          * conflicts with what those other transactions hold, and also on the
    5875             :          * status of them.
    5876             :          */
    5877        4394 :         if (!(old_infomask & HEAP_XMAX_INVALID))
    5878             :         {
    5879             :             TransactionId rawxmax;
    5880             :             bool        needwait;
    5881             : 
    5882        4276 :             rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5883        4276 :             if (old_infomask & HEAP_XMAX_IS_MULTI)
    5884             :             {
    5885             :                 int         nmembers;
    5886             :                 int         i;
    5887             :                 MultiXactMember *members;
    5888             : 
    5889             :                 /*
    5890             :                  * We don't need a test for pg_upgrade'd tuples: this is only
    5891             :                  * applied to tuples after the first in an update chain.  Said
    5892             :                  * first tuple in the chain may well be locked-in-9.2-and-
    5893             :                  * pg_upgraded, but that one was already locked by our caller,
    5894             :                  * not us; and any subsequent ones cannot be because our
    5895             :                  * caller must necessarily have obtained a snapshot later than
    5896             :                  * the pg_upgrade itself.
    5897             :                  */
    5898             :                 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
    5899             : 
    5900        4218 :                 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
    5901        4218 :                                                  HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5902       81708 :                 for (i = 0; i < nmembers; i++)
    5903             :                 {
    5904       77490 :                     result = test_lockmode_for_conflict(members[i].status,
    5905       77490 :                                                         members[i].xid,
    5906             :                                                         mode,
    5907             :                                                         &mytup,
    5908             :                                                         &needwait);
    5909             : 
    5910             :                     /*
    5911             :                      * If the tuple was already locked by ourselves in a
    5912             :                      * previous iteration of this (say heap_lock_tuple was
    5913             :                      * forced to restart the locking loop because of a change
    5914             :                      * in xmax), then we hold the lock already on this tuple
    5915             :                      * version and we don't need to do anything; and this is
    5916             :                      * not an error condition either.  We just need to skip
    5917             :                      * this tuple and continue locking the next version in the
    5918             :                      * update chain.
    5919             :                      */
    5920       77490 :                     if (result == TM_SelfModified)
    5921             :                     {
    5922           0 :                         pfree(members);
    5923           0 :                         goto next;
    5924             :                     }
    5925             : 
    5926       77490 :                     if (needwait)
    5927             :                     {
    5928           0 :                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5929           0 :                         XactLockTableWait(members[i].xid, rel,
    5930             :                                           &mytup.t_self,
    5931             :                                           XLTW_LockUpdated);
    5932           0 :                         pfree(members);
    5933           0 :                         goto l4;
    5934             :                     }
    5935       77490 :                     if (result != TM_Ok)
    5936             :                     {
    5937           0 :                         pfree(members);
    5938           0 :                         goto out_locked;
    5939             :                     }
    5940             :                 }
    5941        4218 :                 if (members)
    5942        4218 :                     pfree(members);
    5943             :             }
    5944             :             else
    5945             :             {
    5946             :                 MultiXactStatus status;
    5947             : 
    5948             :                 /*
    5949             :                  * For a non-multi Xmax, we first need to compute the
    5950             :                  * corresponding MultiXactStatus by using the infomask bits.
    5951             :                  */
    5952          58 :                 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5953             :                 {
    5954          20 :                     if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5955          20 :                         status = MultiXactStatusForKeyShare;
    5956           0 :                     else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5957           0 :                         status = MultiXactStatusForShare;
    5958           0 :                     else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5959             :                     {
    5960           0 :                         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5961           0 :                             status = MultiXactStatusForUpdate;
    5962             :                         else
    5963           0 :                             status = MultiXactStatusForNoKeyUpdate;
    5964             :                     }
    5965             :                     else
    5966             :                     {
    5967             :                         /*
    5968             :                          * LOCK_ONLY present alone (a pg_upgraded tuple marked
    5969             :                          * as share-locked in the old cluster) shouldn't be
    5970             :                          * seen in the middle of an update chain.
    5971             :                          */
    5972           0 :                         elog(ERROR, "invalid lock status in tuple");
    5973             :                     }
    5974             :                 }
    5975             :                 else
    5976             :                 {
    5977             :                     /* it's an update, but which kind? */
    5978          38 :                     if (old_infomask2 & HEAP_KEYS_UPDATED)
    5979          28 :                         status = MultiXactStatusUpdate;
    5980             :                     else
    5981          10 :                         status = MultiXactStatusNoKeyUpdate;
    5982             :                 }
    5983             : 
    5984          58 :                 result = test_lockmode_for_conflict(status, rawxmax, mode,
    5985             :                                                     &mytup, &needwait);
    5986             : 
    5987             :                 /*
    5988             :                  * If the tuple was already locked by ourselves in a previous
    5989             :                  * iteration of this (say heap_lock_tuple was forced to
    5990             :                  * restart the locking loop because of a change in xmax), then
    5991             :                  * we hold the lock already on this tuple version and we don't
    5992             :                  * need to do anything; and this is not an error condition
    5993             :                  * either.  We just need to skip this tuple and continue
    5994             :                  * locking the next version in the update chain.
    5995             :                  */
    5996          58 :                 if (result == TM_SelfModified)
    5997           0 :                     goto next;
    5998             : 
    5999          58 :                 if (needwait)
    6000             :                 {
    6001          16 :                     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    6002          16 :                     XactLockTableWait(rawxmax, rel, &mytup.t_self,
    6003             :                                       XLTW_LockUpdated);
    6004          16 :                     goto l4;
    6005             :                 }
    6006          42 :                 if (result != TM_Ok)
    6007             :                 {
    6008          16 :                     goto out_locked;
    6009             :                 }
    6010             :             }
    6011             :         }
    6012             : 
    6013             :         /* compute the new Xmax and infomask values for the tuple ... */
    6014        4362 :         compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
    6015             :                                   xid, mode, false,
    6016             :                                   &new_xmax, &new_infomask, &new_infomask2);
    6017             : 
    6018        4362 :         if (PageIsAllVisible(BufferGetPage(buf)) &&
    6019           0 :             visibilitymap_clear(rel, block, vmbuffer,
    6020             :                                 VISIBILITYMAP_ALL_FROZEN))
    6021           0 :             cleared_all_frozen = true;
    6022             : 
    6023        4362 :         START_CRIT_SECTION();
    6024             : 
    6025             :         /* ... and set them */
    6026        4362 :         HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
    6027        4362 :         mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
    6028        4362 :         mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6029        4362 :         mytup.t_data->t_infomask |= new_infomask;
    6030        4362 :         mytup.t_data->t_infomask2 |= new_infomask2;
    6031             : 
    6032        4362 :         MarkBufferDirty(buf);
    6033             : 
    6034             :         /* XLOG stuff */
    6035        4362 :         if (RelationNeedsWAL(rel))
    6036             :         {
    6037             :             xl_heap_lock_updated xlrec;
    6038             :             XLogRecPtr  recptr;
    6039        4362 :             Page        page = BufferGetPage(buf);
    6040             : 
    6041        4362 :             XLogBeginInsert();
    6042        4362 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    6043             : 
    6044        4362 :             xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
    6045        4362 :             xlrec.xmax = new_xmax;
    6046        4362 :             xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
    6047        4362 :             xlrec.flags =
    6048        4362 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    6049             : 
    6050        4362 :             XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
    6051             : 
    6052        4362 :             recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
    6053             : 
    6054        4362 :             PageSetLSN(page, recptr);
    6055             :         }
    6056             : 
    6057        4362 :         END_CRIT_SECTION();
    6058             : 
    6059        4362 : next:
    6060             :         /* if we find the end of update chain, we're done. */
    6061        8724 :         if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
    6062        8724 :             HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
    6063        4370 :             ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
    6064           8 :             HeapTupleHeaderIsOnlyLocked(mytup.t_data))
    6065             :         {
    6066        4356 :             result = TM_Ok;
    6067        4356 :             goto out_locked;
    6068             :         }
    6069             : 
    6070             :         /* tail recursion */
    6071           6 :         priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
    6072           6 :         ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
    6073           6 :         UnlockReleaseBuffer(buf);
    6074             :     }
    6075             : 
    6076             :     result = TM_Ok;
    6077             : 
    6078        4424 : out_locked:
    6079        4424 :     UnlockReleaseBuffer(buf);
    6080             : 
    6081        4424 : out_unlocked:
    6082        4424 :     if (vmbuffer != InvalidBuffer)
    6083           0 :         ReleaseBuffer(vmbuffer);
    6084             : 
    6085        4424 :     return result;
    6086             : }
    6087             : 
    6088             : /*
    6089             :  * heap_lock_updated_tuple
    6090             :  *      Follow update chain when locking an updated tuple, acquiring locks (row
    6091             :  *      marks) on the updated versions.
    6092             :  *
    6093             :  * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
    6094             :  * fields from the initial tuple.  We will lock the tuples starting from the
    6095             :  * one that 'prior_ctid' points to.  Note: This function does not lock the
    6096             :  * initial tuple itself.
    6097             :  *
    6098             :  * This function doesn't check visibility, it just unconditionally marks the
    6099             :  * tuple(s) as locked.  If any tuple in the update chain is being deleted
    6100             :  * concurrently (or updated with the key being modified), sleep until the
    6101             :  * transaction doing it is finished.
    6102             :  *
    6103             :  * Note that we don't acquire heavyweight tuple locks on the tuples we walk
    6104             :  * when we have to wait for other transactions to release them, as opposed to
    6105             :  * what heap_lock_tuple does.  The reason is that having more than one
    6106             :  * transaction walking the chain is probably uncommon enough that risk of
    6107             :  * starvation is not likely: one of the preconditions for being here is that
    6108             :  * the snapshot in use predates the update that created this tuple (because we
    6109             :  * started at an earlier version of the tuple), but at the same time such a
    6110             :  * transaction cannot be using repeatable read or serializable isolation
    6111             :  * levels, because that would lead to a serializability failure.
    6112             :  */
    6113             : static TM_Result
    6114        4428 : heap_lock_updated_tuple(Relation rel,
    6115             :                         uint16 prior_infomask,
    6116             :                         TransactionId prior_raw_xmax,
    6117             :                         const ItemPointerData *prior_ctid,
    6118             :                         TransactionId xid, LockTupleMode mode)
    6119             : {
    6120        4428 :     INJECTION_POINT("heap_lock_updated_tuple", NULL);
    6121             : 
    6122             :     /*
    6123             :      * If the tuple has moved into another partition (effectively a delete)
    6124             :      * stop here.
    6125             :      */
    6126        4428 :     if (!ItemPointerIndicatesMovedPartitions(prior_ctid))
    6127             :     {
    6128             :         TransactionId prior_xmax;
    6129             : 
    6130             :         /*
    6131             :          * If this is the first possibly-multixact-able operation in the
    6132             :          * current transaction, set my per-backend OldestMemberMXactId
    6133             :          * setting. We can be certain that the transaction will never become a
    6134             :          * member of any older MultiXactIds than that.  (We have to do this
    6135             :          * even if we end up just using our own TransactionId below, since
    6136             :          * some other backend could incorporate our XID into a MultiXact
    6137             :          * immediately afterwards.)
    6138             :          */
    6139        4424 :         MultiXactIdSetOldestMember();
    6140             : 
    6141        8848 :         prior_xmax = (prior_infomask & HEAP_XMAX_IS_MULTI) ?
    6142        4424 :             MultiXactIdGetUpdateXid(prior_raw_xmax, prior_infomask) : prior_raw_xmax;
    6143        4424 :         return heap_lock_updated_tuple_rec(rel, prior_xmax, prior_ctid, xid, mode);
    6144             :     }
    6145             : 
    6146             :     /* nothing to lock */
    6147           4 :     return TM_Ok;
    6148             : }
    6149             : 
    6150             : /*
    6151             :  *  heap_finish_speculative - mark speculative insertion as successful
    6152             :  *
    6153             :  * To successfully finish a speculative insertion we have to clear the
    6154             :  * speculative token from the tuple.  To do so, the t_ctid field, which will
    6155             :  * contain a speculative token value, is modified in place to point to the
    6156             :  * tuple itself, which is characteristic of a newly inserted ordinary tuple.
    6157             :  *
    6158             :  * NB: It is not ok to commit without either finishing or aborting a
    6159             :  * speculative insertion.  We could treat speculative tuples of committed
    6160             :  * transactions implicitly as completed, but then we would have to be prepared
    6161             :  * to deal with speculative tokens on committed tuples.  That wouldn't be
    6162             :  * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
    6163             :  * but clearing the token at completion isn't very expensive either.
    6164             :  * An explicit confirmation WAL record also makes logical decoding simpler.
    6165             :  */
    6166             : void
    6167        4176 : heap_finish_speculative(Relation relation, const ItemPointerData *tid)
    6168             : {
    6169             :     Buffer      buffer;
    6170             :     Page        page;
    6171             :     OffsetNumber offnum;
    6172             :     ItemId      lp;
    6173             :     HeapTupleHeader htup;
    6174             : 
    6175        4176 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    6176        4176 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6177        4176 :     page = BufferGetPage(buffer);
    6178             : 
    6179        4176 :     offnum = ItemPointerGetOffsetNumber(tid);
    6180        4176 :     if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
    6181           0 :         elog(ERROR, "offnum out of range");
    6182        4176 :     lp = PageGetItemId(page, offnum);
    6183        4176 :     if (!ItemIdIsNormal(lp))
    6184           0 :         elog(ERROR, "invalid lp");
    6185             : 
    6186        4176 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    6187             : 
    6188             :     /* NO EREPORT(ERROR) from here till changes are logged */
    6189        4176 :     START_CRIT_SECTION();
    6190             : 
    6191             :     Assert(HeapTupleHeaderIsSpeculative(htup));
    6192             : 
    6193        4176 :     MarkBufferDirty(buffer);
    6194             : 
    6195             :     /*
    6196             :      * Replace the speculative insertion token with a real t_ctid, pointing to
    6197             :      * itself like it does on regular tuples.
    6198             :      */
    6199        4176 :     htup->t_ctid = *tid;
    6200             : 
    6201             :     /* XLOG stuff */
    6202        4176 :     if (RelationNeedsWAL(relation))
    6203             :     {
    6204             :         xl_heap_confirm xlrec;
    6205             :         XLogRecPtr  recptr;
    6206             : 
    6207        4144 :         xlrec.offnum = ItemPointerGetOffsetNumber(tid);
    6208             : 
    6209        4144 :         XLogBeginInsert();
    6210             : 
    6211             :         /* We want the same filtering on this as on a plain insert */
    6212        4144 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    6213             : 
    6214        4144 :         XLogRegisterData(&xlrec, SizeOfHeapConfirm);
    6215        4144 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6216             : 
    6217        4144 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
    6218             : 
    6219        4144 :         PageSetLSN(page, recptr);
    6220             :     }
    6221             : 
    6222        4176 :     END_CRIT_SECTION();
    6223             : 
    6224        4176 :     UnlockReleaseBuffer(buffer);
    6225        4176 : }
    6226             : 
    6227             : /*
    6228             :  *  heap_abort_speculative - kill a speculatively inserted tuple
    6229             :  *
    6230             :  * Marks as dead a tuple that was speculatively inserted in the same command,
    6231             :  * by setting its xmin to invalid.  That makes it immediately appear as dead
    6232             :  * to all transactions, including our own.  In particular, it makes
    6233             :  * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
    6234             :  * inserting a duplicate key value won't unnecessarily wait for our whole
    6235             :  * transaction to finish (it'll just wait for our speculative insertion to
    6236             :  * finish).
    6237             :  *
    6238             :  * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
    6239             :  * that arise due to a mutual dependency that is not user visible.  By
    6240             :  * definition, unprincipled deadlocks cannot be prevented by the user
    6241             :  * reordering lock acquisition in client code, because the implementation-level
    6242             :  * lock acquisitions are not under the user's direct control.  If speculative
    6243             :  * inserters did not take this precaution, then under high concurrency they
    6244             :  * could deadlock with each other, which would not be acceptable.
    6245             :  *
    6246             :  * This is somewhat redundant with heap_delete, but we prefer to have a
    6247             :  * dedicated routine with stripped down requirements.  Note that this is also
    6248             :  * used to delete the TOAST tuples created during speculative insertion.
    6249             :  *
    6250             :  * This routine does not affect logical decoding, since logical decoding only
    6251             :  * looks at confirmation records.
    6252             :  */
    6253             : void
    6254          32 : heap_abort_speculative(Relation relation, const ItemPointerData *tid)
    6255             : {
    6256          32 :     TransactionId xid = GetCurrentTransactionId();
    6257             :     ItemId      lp;
    6258             :     HeapTupleData tp;
    6259             :     Page        page;
    6260             :     BlockNumber block;
    6261             :     Buffer      buffer;
    6262             : 
    6263             :     Assert(ItemPointerIsValid(tid));
    6264             : 
    6265          32 :     block = ItemPointerGetBlockNumber(tid);
    6266          32 :     buffer = ReadBuffer(relation, block);
    6267          32 :     page = BufferGetPage(buffer);
    6268             : 
    6269          32 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6270             : 
    6271             :     /*
    6272             :      * Page can't be all-visible: we just inserted into it, and are still
    6273             :      * running.
    6274             :      */
    6275             :     Assert(!PageIsAllVisible(page));
    6276             : 
    6277          32 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    6278             :     Assert(ItemIdIsNormal(lp));
    6279             : 
    6280          32 :     tp.t_tableOid = RelationGetRelid(relation);
    6281          32 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    6282          32 :     tp.t_len = ItemIdGetLength(lp);
    6283          32 :     tp.t_self = *tid;
    6284             : 
    6285             :     /*
    6286             :      * Sanity check that the tuple really is a speculatively inserted tuple,
    6287             :      * inserted by us.
    6288             :      */
    6289          32 :     if (tp.t_data->t_choice.t_heap.t_xmin != xid)
    6290           0 :         elog(ERROR, "attempted to kill a tuple inserted by another transaction");
    6291          32 :     if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
    6292           0 :         elog(ERROR, "attempted to kill a non-speculative tuple");
    6293             :     Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
    6294             : 
    6295             :     /*
    6296             :      * No need to check for serializable conflicts here.  There is never a
    6297             :      * need for a combo CID, either.  No need to extract replica identity, or
    6298             :      * do anything special with infomask bits.
    6299             :      */
    6300             : 
    6301          32 :     START_CRIT_SECTION();
    6302             : 
    6303             :     /*
    6304             :      * The tuple will become DEAD immediately.  Flag that this page is a
    6305             :      * candidate for pruning by setting xmin to TransactionXmin. While not
    6306             :      * immediately prunable, it is the oldest xid we can cheaply determine
    6307             :      * that's safe against wraparound / being older than the table's
    6308             :      * relfrozenxid.  To defend against the unlikely case of a new relation
    6309             :      * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
    6310             :      * if so (vacuum can't subsequently move relfrozenxid to beyond
    6311             :      * TransactionXmin, so there's no race here).
    6312             :      */
    6313             :     Assert(TransactionIdIsValid(TransactionXmin));
    6314             :     {
    6315          32 :         TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
    6316             :         TransactionId prune_xid;
    6317             : 
    6318          32 :         if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
    6319           0 :             prune_xid = relfrozenxid;
    6320             :         else
    6321          32 :             prune_xid = TransactionXmin;
    6322          32 :         PageSetPrunable(page, prune_xid);
    6323             :     }
    6324             : 
    6325             :     /* store transaction information of xact deleting the tuple */
    6326          32 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    6327          32 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6328             : 
    6329             :     /*
    6330             :      * Set the tuple header xmin to InvalidTransactionId.  This makes the
    6331             :      * tuple immediately invisible to everyone.  (In particular, to any
    6332             :      * transactions waiting on the speculative token, woken up later.)
    6333             :      */
    6334          32 :     HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
    6335             : 
    6336             :     /* Clear the speculative insertion token too */
    6337          32 :     tp.t_data->t_ctid = tp.t_self;
    6338             : 
    6339          32 :     MarkBufferDirty(buffer);
    6340             : 
    6341             :     /*
    6342             :      * XLOG stuff
    6343             :      *
    6344             :      * The WAL records generated here match heap_delete().  The same recovery
    6345             :      * routines are used.
    6346             :      */
    6347          32 :     if (RelationNeedsWAL(relation))
    6348             :     {
    6349             :         xl_heap_delete xlrec;
    6350             :         XLogRecPtr  recptr;
    6351             : 
    6352          24 :         xlrec.flags = XLH_DELETE_IS_SUPER;
    6353          48 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    6354          24 :                                               tp.t_data->t_infomask2);
    6355          24 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    6356          24 :         xlrec.xmax = xid;
    6357             : 
    6358          24 :         XLogBeginInsert();
    6359          24 :         XLogRegisterData(&xlrec, SizeOfHeapDelete);
    6360          24 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6361             : 
    6362             :         /* No replica identity & replication origin logged */
    6363             : 
    6364          24 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    6365             : 
    6366          24 :         PageSetLSN(page, recptr);
    6367             :     }
    6368             : 
    6369          32 :     END_CRIT_SECTION();
    6370             : 
    6371          32 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6372             : 
    6373          32 :     if (HeapTupleHasExternal(&tp))
    6374             :     {
    6375             :         Assert(!IsToastRelation(relation));
    6376           2 :         heap_toast_delete(relation, &tp, true);
    6377             :     }
    6378             : 
    6379             :     /*
    6380             :      * Never need to mark tuple for invalidation, since catalogs don't support
    6381             :      * speculative insertion
    6382             :      */
    6383             : 
    6384             :     /* Now we can release the buffer */
    6385          32 :     ReleaseBuffer(buffer);
    6386             : 
    6387             :     /* count deletion, as we counted the insertion too */
    6388          32 :     pgstat_count_heap_delete(relation);
    6389          32 : }
    6390             : 
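/*
 * Illustrative sketch (not part of heapam.c): how a caller is expected to
 * drive the speculative-insertion routines above.  It is a rough composite of
 * the executor's INSERT ... ON CONFLICT path and the heapam table AM
 * callbacks; the variable names and exact structure are editorial
 * assumptions, not quotations of that code.
 *
 *     uint32  token = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
 *
 *     HeapTupleHeaderSetSpeculativeToken(tuple->t_data, token);
 *     heap_insert(rel, tuple, cid, options | HEAP_INSERT_SPECULATIVE, bistate);
 *
 *     ... insert into the unique index and detect any conflict ...
 *
 *     if (!conflict)
 *         heap_finish_speculative(rel, &tuple->t_self);   // clear the token
 *     else
 *         heap_abort_speculative(rel, &tuple->t_self);    // kill the tuple
 *
 *     SpeculativeInsertionLockRelease(GetCurrentTransactionId());
 *
 * One of heap_finish_speculative() or heap_abort_speculative() must run
 * before commit, per the NB comment on heap_finish_speculative() above.
 */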
    6391             : /*
    6392             :  * heap_inplace_lock - protect inplace update from concurrent heap_update()
    6393             :  *
    6394             :  * Evaluate whether the tuple's state is compatible with a no-key update.
    6395             :  * Current transaction rowmarks are fine, as is KEY SHARE from any
    6396             :  * transaction.  If compatible, return true with the buffer exclusive-locked,
    6397             :  * and the caller must release that by calling
    6398             :  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
    6399             :  * an error.  Otherwise, call release_callback(arg), wait for blocking
    6400             :  * transactions to end, and return false.
    6401             :  *
    6402             :  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
    6403             :  * DDL, this doesn't guarantee any particular predicate locking.
    6404             :  *
    6405             :  * heap_delete() is a rarer source of blocking transactions (xwait).  We'll
    6406             :  * wait for such a transaction just like for the normal heap_update() case.
    6407             :  * Normal concurrent DROP commands won't cause that, because all inplace
    6408             :  * updaters take some lock that conflicts with DROP.  An explicit SQL "DELETE
    6409             :  * FROM pg_class" can cause it.  By waiting, if the concurrent transaction
    6410             :  * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
    6411             :  * can find the successor tuple.
    6412             :  *
    6413             :  * Readers of inplace-updated fields expect changes to those fields to be
    6414             :  * durable.  For example, vac_truncate_clog() reads datfrozenxid from
    6415             :  * pg_database tuples via catalog snapshots.  A future snapshot must not
    6416             :  * return a lower datfrozenxid for the same database OID (lower in the
    6417             :  * FullTransactionIdPrecedes() sense).  We achieve that since no update of a
    6418             :  * tuple can start while we hold a lock on its buffer.  In cases like
    6419             :  * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
    6420             :  * to this transaction.  ROLLBACK then is one case where it's okay to lose
    6421             :  * inplace updates.  (Restoring relhasindex=false on ROLLBACK is fine, since
    6422             :  * any concurrent CREATE INDEX would have blocked, then inplace-updated the
    6423             :  * committed tuple.)
    6424             :  *
    6425             :  * In principle, we could avoid waiting by overwriting every tuple in the
    6426             :  * updated tuple chain.  Reader expectations permit updating a tuple only if
    6427             :  * it's aborted, is the tail of the chain, or we already updated the tuple
    6428             :  * referenced in its t_ctid.  Hence, we would need to overwrite the tuples in
    6429             :  * order from tail to head.  That would imply either (a) mutating all tuples
    6430             :  * in one critical section or (b) accepting a chance of partial completion.
    6431             :  * Partial completion of a relfrozenxid update would have the weird
    6432             :  * consequence that the table's next VACUUM could see the table's relfrozenxid
    6433             :  * move forward between vacuum_get_cutoffs() and finishing.
    6434             :  */
    6435             : bool
    6436      317364 : heap_inplace_lock(Relation relation,
    6437             :                   HeapTuple oldtup_ptr, Buffer buffer,
    6438             :                   void (*release_callback) (void *), void *arg)
    6439             : {
    6440      317364 :     HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
    6441             :     TM_Result   result;
    6442             :     bool        ret;
    6443             : 
    6444             : #ifdef USE_ASSERT_CHECKING
    6445             :     if (RelationGetRelid(relation) == RelationRelationId)
    6446             :         check_inplace_rel_lock(oldtup_ptr);
    6447             : #endif
    6448             : 
    6449             :     Assert(BufferIsValid(buffer));
    6450             : 
    6451             :     /*
    6452             :      * Register shared cache invals if necessary.  Other sessions may finish
    6453             :      * inplace updates of this tuple between this step and LockTuple().  Since
    6454             :      * inplace updates don't change cache keys, that's harmless.
    6455             :      *
    6456             :      * While it's tempting to register invals only after confirming we can
    6457             :      * return true, the following obstacle precludes reordering steps that
    6458             :      * way.  Registering invals might reach a CatalogCacheInitializeCache()
    6459             :      * that locks "buffer".  That would hang indefinitely if running after our
    6460             :      * own LockBuffer().  Hence, we must register invals before LockBuffer().
    6461             :      */
    6462      317364 :     CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
    6463             : 
    6464      317364 :     LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    6465      317364 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6466             : 
    6467             :     /*----------
    6468             :      * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
    6469             :      *
    6470             :      * - wait unconditionally
    6471             :      * - already locked tuple above, since inplace needs that unconditionally
    6472             :      * - don't recheck header after wait: simpler to defer to next iteration
    6473             :      * - don't try to continue even if the updater aborts: likewise
    6474             :      * - no crosscheck
    6475             :      */
    6476      317364 :     result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
    6477             :                                       buffer);
    6478             : 
    6479      317364 :     if (result == TM_Invisible)
    6480             :     {
    6481             :         /* no known way this can happen */
    6482           0 :         ereport(ERROR,
    6483             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    6484             :                  errmsg_internal("attempted to overwrite invisible tuple")));
    6485             :     }
    6486      317364 :     else if (result == TM_SelfModified)
    6487             :     {
    6488             :         /*
    6489             :          * CREATE INDEX might reach this if an expression is silly enough to
    6490             :          * call e.g. SELECT ... FROM pg_class FOR SHARE.  C code of other SQL
    6491             :          * statements might get here after a heap_update() of the same row, in
    6492             :          * the absence of an intervening CommandCounterIncrement().
    6493             :          */
    6494           0 :         ereport(ERROR,
    6495             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    6496             :                  errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
    6497             :     }
    6498      317364 :     else if (result == TM_BeingModified)
    6499             :     {
    6500             :         TransactionId xwait;
    6501             :         uint16      infomask;
    6502             : 
    6503          34 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    6504          34 :         infomask = oldtup.t_data->t_infomask;
    6505             : 
    6506          34 :         if (infomask & HEAP_XMAX_IS_MULTI)
    6507             :         {
    6508          10 :             LockTupleMode lockmode = LockTupleNoKeyExclusive;
    6509          10 :             MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
    6510             :             int         remain;
    6511             : 
    6512          10 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    6513             :                                         lockmode, NULL))
    6514             :             {
    6515           4 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6516           4 :                 release_callback(arg);
    6517           4 :                 ret = false;
    6518           4 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    6519             :                                 relation, &oldtup.t_self, XLTW_Update,
    6520             :                                 &remain);
    6521             :             }
    6522             :             else
    6523           6 :                 ret = true;
    6524             :         }
    6525          24 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    6526           2 :             ret = true;
    6527          22 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    6528           2 :             ret = true;
    6529             :         else
    6530             :         {
    6531          20 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6532          20 :             release_callback(arg);
    6533          20 :             ret = false;
    6534          20 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    6535             :                               XLTW_Update);
    6536             :         }
    6537             :     }
    6538             :     else
    6539             :     {
    6540      317330 :         ret = (result == TM_Ok);
    6541      317330 :         if (!ret)
    6542             :         {
    6543           0 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6544           0 :             release_callback(arg);
    6545             :         }
    6546             :     }
    6547             : 
    6548             :     /*
    6549             :      * GetCatalogSnapshot() relies on invalidation messages to know when to
    6550             :      * take a new snapshot.  COMMIT of xwait is responsible for sending the
    6551             :      * invalidation.  We're not acquiring heavyweight locks sufficient to
    6552             :      * block if not yet sent, so we must take a new snapshot to ensure a later
    6553             :      * attempt has a fair chance.  While we don't need this if xwait aborted,
    6554             :      * don't bother optimizing that.
    6555             :      */
    6556      317364 :     if (!ret)
    6557             :     {
    6558          24 :         UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    6559          24 :         ForgetInplace_Inval();
    6560          24 :         InvalidateCatalogSnapshot();
    6561             :     }
    6562      317364 :     return ret;
    6563             : }
    6564             : 
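/*
 * Illustrative sketch (not part of heapam.c): the retry loop a caller of
 * heap_inplace_lock() is expected to run, per the contract described in the
 * comment above.  The catalog-level wrappers in genam.c encapsulate this
 * pattern; the shape below is an editorial assumption, not a quotation of
 * that code.
 *
 *     for (;;)
 *     {
 *         ... locate the target tuple and pin its buffer ...
 *
 *         if (heap_inplace_lock(rel, oldtup, buf, release_callback, arg))
 *             break;          // buffer now exclusive-locked; safe to update
 *
 *         // false: callback ran and the blocking xact was waited out; refetch
 *     }
 *
 *     ... build a modified copy of the tuple, keeping its length unchanged ...
 *     heap_inplace_update_and_unlock(rel, oldtup, newtup, buf);
 *     // or back out via heap_inplace_unlock() / ereport(ERROR)
 */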
    6565             : /*
    6566             :  * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
    6567             :  *
    6568             :  * The tuple cannot change size, and therefore its header fields and null
    6569             :  * bitmap (if any) don't change either.
    6570             :  *
    6571             :  * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
    6572             :  */
    6573             : void
    6574      167530 : heap_inplace_update_and_unlock(Relation relation,
    6575             :                                HeapTuple oldtup, HeapTuple tuple,
    6576             :                                Buffer buffer)
    6577             : {
    6578      167530 :     HeapTupleHeader htup = oldtup->t_data;
    6579             :     uint32      oldlen;
    6580             :     uint32      newlen;
    6581             :     char       *dst;
    6582             :     char       *src;
    6583      167530 :     int         nmsgs = 0;
    6584      167530 :     SharedInvalidationMessage *invalMessages = NULL;
    6585      167530 :     bool        RelcacheInitFileInval = false;
    6586             : 
    6587             :     Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
    6588      167530 :     oldlen = oldtup->t_len - htup->t_hoff;
    6589      167530 :     newlen = tuple->t_len - tuple->t_data->t_hoff;
    6590      167530 :     if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
    6591           0 :         elog(ERROR, "wrong tuple length");
    6592             : 
    6593      167530 :     dst = (char *) htup + htup->t_hoff;
    6594      167530 :     src = (char *) tuple->t_data + tuple->t_data->t_hoff;
    6595             : 
    6596             :     /* Like RecordTransactionCommit(), log only if needed */
    6597      167530 :     if (XLogStandbyInfoActive())
    6598      116818 :         nmsgs = inplaceGetInvalidationMessages(&invalMessages,
    6599             :                                                &RelcacheInitFileInval);
    6600             : 
    6601             :     /*
    6602             :      * Unlink relcache init files as needed.  If unlinking, acquire
    6603             :      * RelCacheInitLock until after associated invalidations.  By doing this
    6604             :      * in advance, if we checkpoint and then crash between inplace
    6605             :      * XLogInsert() and inval, we don't rely on StartupXLOG() ->
    6606             :      * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
    6607             :      * neglect to PANIC on EIO.
    6608             :      */
    6609      167530 :     PreInplace_Inval();
    6610             : 
    6611             :     /*----------
    6612             :      * NO EREPORT(ERROR) from here till changes are complete
    6613             :      *
    6614             :      * Our buffer lock won't stop a reader having already pinned and checked
    6615             :      * visibility for this tuple.  Hence, we write WAL first, then mutate the
    6616             :      * buffer.  Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
    6617             :      * checkpoint delay makes that acceptable.  With the usual order of
    6618             :      * changes, a crash after memcpy() and before XLogInsert() could allow
    6619             :      * datfrozenxid to overtake relfrozenxid:
    6620             :      *
    6621             :      * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
    6622             :      * ["R" is a VACUUM tbl]
    6623             :      * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
    6624             :      * D: systable_getnext() returns pg_class tuple of tbl
    6625             :      * R: memcpy() into pg_class tuple of tbl
    6626             :      * D: raise pg_database.datfrozenxid, XLogInsert(), finish
    6627             :      * [crash]
    6628             :      * [recovery restores datfrozenxid w/o relfrozenxid]
    6629             :      *
    6630             :      * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
    6631             :      * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
    6632             :      * The stack copy facilitates a FPI of the post-mutation block before we
    6633             :      * accept other sessions seeing it.  DELAY_CHKPT_START allows us to
    6634             :      * XLogInsert() before MarkBufferDirty().  Since XLogSaveBufferForHint()
    6635             :      * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
    6636             :      * This function, however, likely could avoid it with the following order
    6637             :      * of operations: MarkBufferDirty(), XLogInsert(), memcpy().  Opt to use
    6638             :      * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
    6639             :      * patterns to analyze.  Inplace update isn't so frequent that it should
    6640             :      * pursue the small optimization of skipping DELAY_CHKPT_START.
    6641             :      */
    6642             :     Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
    6643      167530 :     START_CRIT_SECTION();
    6644      167530 :     MyProc->delayChkptFlags |= DELAY_CHKPT_START;
    6645             : 
    6646             :     /* XLOG stuff */
    6647      167530 :     if (RelationNeedsWAL(relation))
    6648             :     {
    6649             :         xl_heap_inplace xlrec;
    6650             :         PGAlignedBlock copied_buffer;
    6651      167522 :         char       *origdata = (char *) BufferGetBlock(buffer);
    6652      167522 :         Page        page = BufferGetPage(buffer);
    6653      167522 :         uint16      lower = ((PageHeader) page)->pd_lower;
    6654      167522 :         uint16      upper = ((PageHeader) page)->pd_upper;
    6655             :         uintptr_t   dst_offset_in_block;
    6656             :         RelFileLocator rlocator;
    6657             :         ForkNumber  forkno;
    6658             :         BlockNumber blkno;
    6659             :         XLogRecPtr  recptr;
    6660             : 
    6661      167522 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    6662      167522 :         xlrec.dbId = MyDatabaseId;
    6663      167522 :         xlrec.tsId = MyDatabaseTableSpace;
    6664      167522 :         xlrec.relcacheInitFileInval = RelcacheInitFileInval;
    6665      167522 :         xlrec.nmsgs = nmsgs;
    6666             : 
    6667      167522 :         XLogBeginInsert();
    6668      167522 :         XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
    6669      167522 :         if (nmsgs != 0)
    6670       86932 :             XLogRegisterData(invalMessages,
    6671             :                              nmsgs * sizeof(SharedInvalidationMessage));
    6672             : 
    6673             :         /* register block matching what buffer will look like after changes */
    6674      167522 :         memcpy(copied_buffer.data, origdata, lower);
    6675      167522 :         memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
    6676      167522 :         dst_offset_in_block = dst - origdata;
    6677      167522 :         memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
    6678      167522 :         BufferGetTag(buffer, &rlocator, &forkno, &blkno);
    6679             :         Assert(forkno == MAIN_FORKNUM);
    6680      167522 :         XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
    6681             :                           REGBUF_STANDARD);
    6682      167522 :         XLogRegisterBufData(0, src, newlen);
    6683             : 
    6684             :         /* inplace updates aren't decoded at the moment, so don't log the origin */
    6685             : 
    6686      167522 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
    6687             : 
    6688      167522 :         PageSetLSN(page, recptr);
    6689             :     }
    6690             : 
    6691      167530 :     memcpy(dst, src, newlen);
    6692             : 
    6693      167530 :     MarkBufferDirty(buffer);
    6694             : 
    6695      167530 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6696             : 
    6697             :     /*
    6698             :      * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
    6699             :      * do this before UnlockTuple().
    6700             :      */
    6701      167530 :     AtInplace_Inval();
    6702             : 
    6703      167530 :     MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
    6704      167530 :     END_CRIT_SECTION();
    6705      167530 :     UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
    6706             : 
    6707      167530 :     AcceptInvalidationMessages();   /* local processing of just-sent inval */
    6708             : 
    6709             :     /*
    6710             :      * Queue a transactional inval, for logical decoding and for third-party
    6711             :      * code that might have been relying on it since long before inplace
    6712             :      * update adopted immediate invalidation.  See README.tuplock section
    6713             :      * "Reading inplace-updated columns" for logical decoding details.
    6714             :      */
    6715      167530 :     if (!IsBootstrapProcessingMode())
    6716      137644 :         CacheInvalidateHeapTuple(relation, tuple, NULL);
    6717      167530 : }
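
Here is a minimal, hedged sketch of how a catalog updater might reach
heap_inplace_update_and_unlock() via the systable_inplace_update_* wrappers
mentioned in the header comment above.  The wrapper signatures, the includes,
and the pg_class helper shown here are assumptions for illustration only; see
genam.c and vacuum.c for authoritative callers.

    #include "postgres.h"

    #include "access/genam.h"
    #include "access/htup_details.h"
    #include "access/stratnum.h"
    #include "catalog/pg_class.h"
    #include "utils/fmgroids.h"
    #include "utils/rel.h"

    /* Hypothetical helper: flip pg_class.relhasindex without a transactional UPDATE */
    static void
    set_relhasindex_inplace(Relation pg_class_rel, Oid reloid, bool hasindex)
    {
        ScanKeyData key[1];
        HeapTuple   ctup;
        void       *inplace_state;

        ScanKeyInit(&key[0], Anum_pg_class_oid, BTEqualStrategyNumber,
                    F_OIDEQ, ObjectIdGetDatum(reloid));

        /* acquires LOCKTAG_TUPLE and returns a copy of the current tuple */
        systable_inplace_update_begin(pg_class_rel, ClassOidIndexId, true,
                                      NULL, 1, key, &ctup, &inplace_state);
        if (!HeapTupleIsValid(ctup))
            elog(ERROR, "pg_class entry for relation %u vanished", reloid);

        ((Form_pg_class) GETSTRUCT(ctup))->relhasindex = hasindex;

        /* ends in heap_inplace_update_and_unlock() */
        systable_inplace_update_finish(inplace_state, ctup);
    }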
    6718             : 
    6719             : /*
    6720             :  * heap_inplace_unlock - reverse of heap_inplace_lock
    6721             :  */
    6722             : void
    6723      149810 : heap_inplace_unlock(Relation relation,
    6724             :                     HeapTuple oldtup, Buffer buffer)
    6725             : {
    6726      149810 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6727      149810 :     UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
    6728      149810 :     ForgetInplace_Inval();
    6729      149810 : }
    6730             : 
    6731             : #define     FRM_NOOP                0x0001
    6732             : #define     FRM_INVALIDATE_XMAX     0x0002
    6733             : #define     FRM_RETURN_IS_XID       0x0004
    6734             : #define     FRM_RETURN_IS_MULTI     0x0008
    6735             : #define     FRM_MARK_COMMITTED      0x0010
    6736             : 
    6737             : /*
    6738             :  * FreezeMultiXactId
    6739             :  *      Determine what to do during freezing when a tuple is marked by a
    6740             :  *      MultiXactId.
    6741             :  *
    6742             :  * "flags" is an output value; it's used to tell caller what to do on return.
    6743             :  * "pagefrz" is an input/output value, used to manage page level freezing.
    6744             :  *
    6745             :  * Possible values that we can set in "flags":
    6746             :  * FRM_NOOP
    6747             :  *      don't do anything -- keep existing Xmax
    6748             :  * FRM_INVALIDATE_XMAX
    6749             :  *      mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
    6750             :  * FRM_RETURN_IS_XID
    6751             :  *      The Xid return value is a single update Xid to set as xmax.
    6752             :  * FRM_MARK_COMMITTED
    6753             :  *      Xmax can be marked as HEAP_XMAX_COMMITTED
    6754             :  * FRM_RETURN_IS_MULTI
    6755             :  *      The return value is a new MultiXactId to set as new Xmax.
    6756             :  *      (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
    6757             :  *
    6758             :  * Caller delegates control of page freezing to us.  In practice we always
    6759             :  * force freezing of caller's page unless FRM_NOOP processing is indicated.
    6760             :  * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
    6761             :  * can never be left behind.  We freely choose when and how to process each
    6762             :  * Multi, without ever violating the cutoff postconditions for freezing.
    6763             :  *
    6764             :  * It's useful to remove Multis on a proactive timeline (relative to freezing
    6765             :  * XIDs) to keep MultiXact member SLRU buffer misses to a minimum.  It can also
    6766             :  * be cheaper in the short run, for us, since we too can avoid SLRU buffer
    6767             :  * misses through eager processing.
    6768             :  *
    6769             :  * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
    6770             :  * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
    6771             :  * This can usually be put off, which is often enough to avoid it altogether.
    6772             :  * Allocating new multis during VACUUM should be avoided on general principle;
    6773             :  * only VACUUM can advance relminmxid, so allocating new Multis here comes with
    6774             :  * its own special risks.
    6775             :  *
    6776             :  * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
    6777             :  * using heap_tuple_should_freeze when we haven't forced page-level freezing.
    6778             :  *
    6779             :  * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
    6780             :  * have already forced page-level freezing, since that might incur the same
    6781             :  * SLRU buffer misses that we specifically intended to avoid by freezing.
    6782             :  */
    6783             : static TransactionId
    6784          12 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
    6785             :                   const struct VacuumCutoffs *cutoffs, uint16 *flags,
    6786             :                   HeapPageFreeze *pagefrz)
    6787             : {
    6788             :     TransactionId newxmax;
    6789             :     MultiXactMember *members;
    6790             :     int         nmembers;
    6791             :     bool        need_replace;
    6792             :     int         nnewmembers;
    6793             :     MultiXactMember *newmembers;
    6794             :     bool        has_lockers;
    6795             :     TransactionId update_xid;
    6796             :     bool        update_committed;
    6797             :     TransactionId FreezePageRelfrozenXid;
    6798             : 
    6799          12 :     *flags = 0;
    6800             : 
    6801             :     /* We should only be called for Multis */
    6802             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    6803             : 
    6804          24 :     if (!MultiXactIdIsValid(multi) ||
    6805          12 :         HEAP_LOCKED_UPGRADED(t_infomask))
    6806             :     {
    6807           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6808           0 :         pagefrz->freeze_required = true;
    6809           0 :         return InvalidTransactionId;
    6810             :     }
    6811          12 :     else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
    6812           0 :         ereport(ERROR,
    6813             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    6814             :                  errmsg_internal("found multixact %u from before relminmxid %u",
    6815             :                                  multi, cutoffs->relminmxid)));
    6816          12 :     else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
    6817             :     {
    6818             :         TransactionId update_xact;
    6819             : 
    6820             :         /*
    6821             :          * This old multi cannot possibly have members still running, but
    6822             :          * verify just in case.  If it was a locker only, it can be removed
    6823             :          * without any further consideration; but if it contained an update,
    6824             :          * we might need to preserve it.
    6825             :          */
    6826           8 :         if (MultiXactIdIsRunning(multi,
    6827           8 :                                  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
    6828           0 :             ereport(ERROR,
    6829             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6830             :                      errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
    6831             :                                      multi, cutoffs->OldestMxact)));
    6832             : 
    6833           8 :         if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
    6834             :         {
    6835           8 :             *flags |= FRM_INVALIDATE_XMAX;
    6836           8 :             pagefrz->freeze_required = true;
    6837           8 :             return InvalidTransactionId;
    6838             :         }
    6839             : 
    6840             :         /* replace multi with single XID for its updater? */
    6841           0 :         update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
    6842           0 :         if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
    6843           0 :             ereport(ERROR,
    6844             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6845             :                      errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
    6846             :                                      multi, update_xact,
    6847             :                                      cutoffs->relfrozenxid)));
    6848           0 :         else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
    6849             :         {
    6850             :             /*
    6851             :              * Updater XID has to have aborted (otherwise the tuple would have
    6852             :              * been pruned away instead, since updater XID is < OldestXmin).
    6853             :              * Just remove xmax.
    6854             :              */
    6855           0 :             if (TransactionIdDidCommit(update_xact))
    6856           0 :                 ereport(ERROR,
    6857             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6858             :                          errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
    6859             :                                          multi, update_xact,
    6860             :                                          cutoffs->OldestXmin)));
    6861           0 :             *flags |= FRM_INVALIDATE_XMAX;
    6862           0 :             pagefrz->freeze_required = true;
    6863           0 :             return InvalidTransactionId;
    6864             :         }
    6865             : 
    6866             :         /* Have to keep updater XID as new xmax */
    6867           0 :         *flags |= FRM_RETURN_IS_XID;
    6868           0 :         pagefrz->freeze_required = true;
    6869           0 :         return update_xact;
    6870             :     }
    6871             : 
    6872             :     /*
    6873             :      * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
    6874             :      * need to walk the whole members array to figure out what to do, if
    6875             :      * anything.
    6876             :      */
    6877             :     nmembers =
    6878           4 :         GetMultiXactIdMembers(multi, &members, false,
    6879           4 :                               HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
    6880           4 :     if (nmembers <= 0)
    6881             :     {
    6882             :         /* Nothing worth keeping */
    6883           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6884           0 :         pagefrz->freeze_required = true;
    6885           0 :         return InvalidTransactionId;
    6886             :     }
    6887             : 
    6888             :     /*
    6889             :      * The FRM_NOOP case is the only case where we might need to ratchet back
    6890             :      * FreezePageRelfrozenXid or FreezePageRelminMxid.  It is also the only
    6891             :      * case where our caller might ratchet back its NoFreezePageRelfrozenXid
    6892             :      * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
    6893             :      * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
    6894             :      * trackers managed by VACUUM being ratcheted back by xmax to the degree
    6895             :      * required to make it safe to leave xmax undisturbed, independent of
    6896             :      * whether or not page freezing is triggered somewhere else.
    6897             :      *
    6898             :      * Our policy is to force freezing in every case other than FRM_NOOP,
    6899             :      * which obviates the need to maintain either set of trackers, anywhere.
    6900             :      * Every other case will reliably execute a freeze plan for xmax that
    6901             :      * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
    6902             :      * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
    6903             :      * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
    6904             :      * OldestXmin/OldestMxact, so later values never need to be tracked here.)
    6905             :      */
    6906           4 :     need_replace = false;
    6907           4 :     FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
    6908           8 :     for (int i = 0; i < nmembers; i++)
    6909             :     {
    6910           6 :         TransactionId xid = members[i].xid;
    6911             : 
    6912             :         Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
    6913             : 
    6914           6 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    6915             :         {
    6916             :             /* Can't violate the FreezeLimit postcondition */
    6917           2 :             need_replace = true;
    6918           2 :             break;
    6919             :         }
    6920           4 :         if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
    6921           0 :             FreezePageRelfrozenXid = xid;
    6922             :     }
    6923             : 
    6924             :     /* Can't violate the MultiXactCutoff postcondition, either */
    6925           4 :     if (!need_replace)
    6926           2 :         need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
    6927             : 
    6928           4 :     if (!need_replace)
    6929             :     {
    6930             :         /*
    6931             :          * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
    6932             :          * both together to make it safe to retain this particular multi after
    6933             :          * freezing its page
    6934             :          */
    6935           2 :         *flags |= FRM_NOOP;
    6936           2 :         pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
    6937           2 :         if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
    6938           0 :             pagefrz->FreezePageRelminMxid = multi;
    6939           2 :         pfree(members);
    6940           2 :         return multi;
    6941             :     }
    6942             : 
    6943             :     /*
    6944             :      * Do a more thorough second pass over the multi to figure out which
    6945             :      * member XIDs actually need to be kept.  Checking the precise status of
    6946             :      * individual members might even show that we don't need to keep anything.
    6947             :      * That is quite possible even though the Multi must be >= OldestMxact,
    6948             :      * since our second pass only keeps member XIDs when it's truly necessary;
    6949             :      * even member XIDs >= OldestXmin often won't be kept by second pass.
    6950             :      */
    6951           2 :     nnewmembers = 0;
    6952           2 :     newmembers = palloc_array(MultiXactMember, nmembers);
    6953           2 :     has_lockers = false;
    6954           2 :     update_xid = InvalidTransactionId;
    6955           2 :     update_committed = false;
    6956             : 
    6957             :     /*
    6958             :      * Determine whether to keep each member xid, or to ignore it instead
    6959             :      */
    6960           6 :     for (int i = 0; i < nmembers; i++)
    6961             :     {
    6962           4 :         TransactionId xid = members[i].xid;
    6963           4 :         MultiXactStatus mstatus = members[i].status;
    6964             : 
    6965             :         Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
    6966             : 
    6967           4 :         if (!ISUPDATE_from_mxstatus(mstatus))
    6968             :         {
    6969             :             /*
    6970             :              * Locker XID (not updater XID).  We only keep lockers that are
    6971             :              * still running.
    6972             :              */
    6973           8 :             if (TransactionIdIsCurrentTransactionId(xid) ||
    6974           4 :                 TransactionIdIsInProgress(xid))
    6975             :             {
    6976           2 :                 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
    6977           0 :                     ereport(ERROR,
    6978             :                             (errcode(ERRCODE_DATA_CORRUPTED),
    6979             :                              errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
    6980             :                                              multi, xid,
    6981             :                                              cutoffs->OldestXmin)));
    6982           2 :                 newmembers[nnewmembers++] = members[i];
    6983           2 :                 has_lockers = true;
    6984             :             }
    6985             : 
    6986           4 :             continue;
    6987             :         }
    6988             : 
    6989             :         /*
    6990             :          * Updater XID (not locker XID).  Should we keep it?
    6991             :          *
    6992             :          * Since the tuple wasn't totally removed when vacuum pruned, the
    6993             :          * update Xid cannot possibly be older than OldestXmin cutoff unless
    6994             :          * the updater XID aborted.  If the updater transaction is known
    6995             :          * aborted or crashed then it's okay to ignore it, otherwise not.
    6996             :          *
    6997             :          * In any case the Multi should never contain two updaters, whatever
    6998             :          * their individual commit status.  Check for that first, in passing.
    6999             :          */
    7000           0 :         if (TransactionIdIsValid(update_xid))
    7001           0 :             ereport(ERROR,
    7002             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7003             :                      errmsg_internal("multixact %u has two or more updating members",
    7004             :                                      multi),
    7005             :                      errdetail_internal("First updater XID=%u second updater XID=%u.",
    7006             :                                         update_xid, xid)));
    7007             : 
    7008             :         /*
    7009             :          * As with all tuple visibility routines, it's critical to test
    7010             :          * TransactionIdIsInProgress before TransactionIdDidCommit, because of
    7011             :          * race conditions explained in detail in heapam_visibility.c.
    7012             :          */
    7013           0 :         if (TransactionIdIsCurrentTransactionId(xid) ||
    7014           0 :             TransactionIdIsInProgress(xid))
    7015           0 :             update_xid = xid;
    7016           0 :         else if (TransactionIdDidCommit(xid))
    7017             :         {
    7018             :             /*
    7019             :              * The transaction committed, so we can tell caller to set
    7020             :              * HEAP_XMAX_COMMITTED.  (We can only do this because we know the
    7021             :              * transaction is not running.)
    7022             :              */
    7023           0 :             update_committed = true;
    7024           0 :             update_xid = xid;
    7025             :         }
    7026             :         else
    7027             :         {
    7028             :             /*
    7029             :              * Not in progress, not committed -- must be aborted or crashed;
    7030             :              * we can ignore it.
    7031             :              */
    7032           0 :             continue;
    7033             :         }
    7034             : 
    7035             :         /*
    7036             :          * We determined that updater must be kept -- add it to pending new
    7037             :          * members list
    7038             :          */
    7039           0 :         if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
    7040           0 :             ereport(ERROR,
    7041             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7042             :                      errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
    7043             :                                      multi, xid, cutoffs->OldestXmin)));
    7044           0 :         newmembers[nnewmembers++] = members[i];
    7045             :     }
    7046             : 
    7047           2 :     pfree(members);
    7048             : 
    7049             :     /*
    7050             :      * Determine what to do with caller's multi based on information gathered
    7051             :      * during our second pass
    7052             :      */
    7053           2 :     if (nnewmembers == 0)
    7054             :     {
    7055             :         /* Nothing worth keeping */
    7056           0 :         *flags |= FRM_INVALIDATE_XMAX;
    7057           0 :         newxmax = InvalidTransactionId;
    7058             :     }
    7059           2 :     else if (TransactionIdIsValid(update_xid) && !has_lockers)
    7060             :     {
    7061             :         /*
    7062             :          * If there's a single member and it's an update, pass it back alone
    7063             :          * without creating a new Multi.  (XXX we could do this when there's a
    7064             :          * single remaining locker, too, but that would complicate the API too
    7065             :          * much; moreover, the case with the single updater is more
    7066             :          * interesting, because those are longer-lived.)
    7067             :          */
    7068             :         Assert(nnewmembers == 1);
    7069           0 :         *flags |= FRM_RETURN_IS_XID;
    7070           0 :         if (update_committed)
    7071           0 :             *flags |= FRM_MARK_COMMITTED;
    7072           0 :         newxmax = update_xid;
    7073             :     }
    7074             :     else
    7075             :     {
    7076             :         /*
    7077             :          * Create a new multixact with the surviving members of the previous
    7078             :          * one, to set as new Xmax in the tuple
    7079             :          */
    7080           2 :         newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
    7081           2 :         *flags |= FRM_RETURN_IS_MULTI;
    7082             :     }
    7083             : 
    7084           2 :     pfree(newmembers);
    7085             : 
    7086           2 :     pagefrz->freeze_required = true;
    7087           2 :     return newxmax;
    7088             : }
    7089             : 
    7090             : /*
    7091             :  * heap_prepare_freeze_tuple
    7092             :  *
    7093             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    7094             :  * are older than the OldestXmin and/or OldestMxact freeze cutoffs.  If so,
    7095             :  * setup enough state (in the *frz output argument) to enable caller to
    7096             :  * set up enough state (in the *frz output argument) to enable caller to
    7097             :  * false if nothing can be changed about the tuple right now.
    7098             :  *
    7099             :  * Also sets *totally_frozen to true if the tuple will be totally frozen once
    7100             :  * caller executes returned freeze plan (or if the tuple was already totally
    7101             :  * frozen by an earlier VACUUM).  This indicates that there are no remaining
    7102             :  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
    7103             :  *
    7104             :  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
    7105             :  * tuple that we returned true for, and then execute freezing.  Caller must
    7106             :  * initialize pagefrz fields for page as a whole before first call here for
    7107             :  * each heap page.
    7108             :  *
    7109             :  * VACUUM caller decides on whether or not to freeze the page as a whole.
    7110             :  * We'll often prepare freeze plans for a page that caller just discards.
    7111             :  * However, VACUUM doesn't always get to make a choice; it must freeze when
    7112             :  * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
    7113             :  * MXIDs < MultiXactCutoff) can never be left behind.  We help to make sure
    7114             :  * that VACUUM always follows that rule.
    7115             :  *
    7116             :  * We sometimes force freezing of xmax MultiXactId values long before it is
    7117             :  * strictly necessary to do so just to ensure the FreezeLimit postcondition.
    7118             :  * It's worth processing MultiXactIds proactively when it is cheap to do so,
    7119             :  * and it's convenient to make that happen by piggy-backing it on the "force
    7120             :  * freezing" mechanism.  Conversely, we sometimes delay freezing MultiXactIds
    7121             :  * because it is expensive right now (though only when it's still possible to
    7122             :  * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
    7123             :  *
    7124             :  * It is assumed that the caller has checked the tuple with
    7125             :  * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
    7126             :  * (else we should be removing the tuple, not freezing it).
    7127             :  *
    7128             :  * NB: This function has side effects: it might allocate a new MultiXactId.
    7129             :  * It will be set as tuple's new xmax when our *frz output is processed within
    7130             :  * heap_execute_freeze_tuple later on.  If the tuple is in a shared buffer
    7131             :  * then caller had better have an exclusive lock on it already.
    7132             :  */
    7133             : bool
    7134    28601634 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
    7135             :                           const struct VacuumCutoffs *cutoffs,
    7136             :                           HeapPageFreeze *pagefrz,
    7137             :                           HeapTupleFreeze *frz, bool *totally_frozen)
    7138             : {
    7139    28601634 :     bool        xmin_already_frozen = false,
    7140    28601634 :                 xmax_already_frozen = false;
    7141    28601634 :     bool        freeze_xmin = false,
    7142    28601634 :                 replace_xvac = false,
    7143    28601634 :                 replace_xmax = false,
    7144    28601634 :                 freeze_xmax = false;
    7145             :     TransactionId xid;
    7146             : 
    7147    28601634 :     frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
    7148    28601634 :     frz->t_infomask2 = tuple->t_infomask2;
    7149    28601634 :     frz->t_infomask = tuple->t_infomask;
    7150    28601634 :     frz->frzflags = 0;
    7151    28601634 :     frz->checkflags = 0;
    7152             : 
    7153             :     /*
    7154             :      * Process xmin, while keeping track of whether it's already frozen, or
    7155             :      * will become frozen iff our freeze plan is executed by caller (could be
    7156             :      * neither).
    7157             :      */
    7158    28601634 :     xid = HeapTupleHeaderGetXmin(tuple);
    7159    28601634 :     if (!TransactionIdIsNormal(xid))
    7160    22750320 :         xmin_already_frozen = true;
    7161             :     else
    7162             :     {
    7163     5851314 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    7164           0 :             ereport(ERROR,
    7165             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7166             :                      errmsg_internal("found xmin %u from before relfrozenxid %u",
    7167             :                                      xid, cutoffs->relfrozenxid)));
    7168             : 
    7169             :         /* Will set freeze_xmin flags in freeze plan below */
    7170     5851314 :         freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    7171             : 
    7172             :         /* Verify that xmin committed if and when freeze plan is executed */
    7173     5851314 :         if (freeze_xmin)
    7174     4602988 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
    7175             :     }
    7176             : 
    7177             :     /*
    7178             :      * Old-style VACUUM FULL is gone, but we have to process xvac for as long
    7179             :      * as we support having MOVED_OFF/MOVED_IN tuples in the database
    7180             :      */
    7181    28601634 :     xid = HeapTupleHeaderGetXvac(tuple);
    7182    28601634 :     if (TransactionIdIsNormal(xid))
    7183             :     {
    7184             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7185             :         Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
    7186             : 
    7187             :         /*
    7188             :          * For Xvac, we always freeze proactively.  This allows totally_frozen
    7189             :          * tracking to ignore xvac.
    7190             :          */
    7191           0 :         replace_xvac = pagefrz->freeze_required = true;
    7192             : 
    7193             :         /* Will set replace_xvac flags in freeze plan below */
    7194             :     }
    7195             : 
    7196             :     /* Now process xmax */
    7197    28601634 :     xid = frz->xmax;
    7198    28601634 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7199             :     {
    7200             :         /* Raw xmax is a MultiXactId */
    7201             :         TransactionId newxmax;
    7202             :         uint16      flags;
    7203             : 
    7204             :         /*
    7205             :          * We will either remove xmax completely (in the "freeze_xmax" path),
    7206             :          * process xmax by replacing it (in the "replace_xmax" path), or
    7207             :          * perform no-op xmax processing.  The only constraint is that the
    7208             :          * FreezeLimit/MultiXactCutoff postcondition must never be violated.
    7209             :          */
    7210          12 :         newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
    7211             :                                     &flags, pagefrz);
    7212             : 
    7213          12 :         if (flags & FRM_NOOP)
    7214             :         {
    7215             :             /*
    7216             :              * xmax is a MultiXactId, and nothing about it changes for now.
    7217             :              * This is the only case where 'freeze_required' won't have been
    7218             :              * set for us by FreezeMultiXactId, as well as the only case where
    7219             :              * neither freeze_xmax nor replace_xmax are set (given a multi).
    7220             :              *
    7221             :              * This is a no-op, but the call to FreezeMultiXactId might have
    7222             :              * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
    7223             :              * for us (the "freeze page" variants, specifically).  That'll
    7224             :              * make it safe for our caller to freeze the page later on, while
    7225             :              * leaving this particular xmax undisturbed.
    7226             :              *
    7227             :              * FreezeMultiXactId is _not_ responsible for the "no freeze"
    7228             :              * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
    7229             :              * job.  A call to heap_tuple_should_freeze for this same tuple
    7230             :              * will take place below if 'freeze_required' isn't set already.
    7231             :              * (This repeats work from FreezeMultiXactId, but allows "no
    7232             :              * freeze" tracker maintenance to happen in only one place.)
    7233             :              */
    7234             :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
    7235             :             Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
    7236             :         }
    7237          10 :         else if (flags & FRM_RETURN_IS_XID)
    7238             :         {
    7239             :             /*
    7240             :              * xmax will become an updater Xid (original MultiXact's updater
    7241             :              * member Xid will be carried forward as a simple Xid in Xmax).
    7242             :              */
    7243             :             Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
    7244             : 
    7245             :             /*
    7246             :              * NB -- some of these transformations are only valid because we
    7247             :              * know the return Xid is a tuple updater (i.e. not merely a
    7248             :              * locker.) Also note that the only reason we don't explicitly
    7249             :              * worry about HEAP_KEYS_UPDATED is because it lives in
    7250             :              * t_infomask2 rather than t_infomask.
    7251             :              */
    7252           0 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    7253           0 :             frz->xmax = newxmax;
    7254           0 :             if (flags & FRM_MARK_COMMITTED)
    7255           0 :                 frz->t_infomask |= HEAP_XMAX_COMMITTED;
    7256           0 :             replace_xmax = true;
    7257             :         }
    7258          10 :         else if (flags & FRM_RETURN_IS_MULTI)
    7259             :         {
    7260             :             uint16      newbits;
    7261             :             uint16      newbits2;
    7262             : 
    7263             :             /*
    7264             :              * xmax is an old MultiXactId that we have to replace with a new
    7265             :              * MultiXactId, to carry forward two or more original member XIDs.
    7266             :              */
    7267             :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
    7268             : 
    7269             :             /*
    7270             :              * We can't use GetMultiXactIdHintBits directly on the new multi
    7271             :              * here; that routine initializes the masks to all zeroes, which
    7272             :              * would lose other bits we need.  Doing it this way ensures all
    7273             :              * unrelated bits remain untouched.
    7274             :              */
    7275           2 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    7276           2 :             frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    7277           2 :             GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
    7278           2 :             frz->t_infomask |= newbits;
    7279           2 :             frz->t_infomask2 |= newbits2;
    7280           2 :             frz->xmax = newxmax;
    7281           2 :             replace_xmax = true;
    7282             :         }
    7283             :         else
    7284             :         {
    7285             :             /*
    7286             :              * Freeze plan for tuple "freezes xmax" in the strictest sense:
    7287             :              * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
    7288             :              */
    7289             :             Assert(flags & FRM_INVALIDATE_XMAX);
    7290             :             Assert(!TransactionIdIsValid(newxmax));
    7291             : 
    7292             :             /* Will set freeze_xmax flags in freeze plan below */
    7293           8 :             freeze_xmax = true;
    7294             :         }
    7295             : 
    7296             :         /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
    7297             :         Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
    7298             :     }
    7299    28601622 :     else if (TransactionIdIsNormal(xid))
    7300             :     {
    7301             :         /* Raw xmax is normal XID */
    7302     6634342 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    7303           0 :             ereport(ERROR,
    7304             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7305             :                      errmsg_internal("found xmax %u from before relfrozenxid %u",
    7306             :                                      xid, cutoffs->relfrozenxid)));
    7307             : 
    7308             :         /* Will set freeze_xmax flags in freeze plan below */
    7309     6634342 :         freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    7310             : 
    7311             :         /*
    7312             :          * Verify that xmax aborted if and when freeze plan is executed,
    7313             :          * provided it's from an update. (A lock-only xmax can be removed
    7314             :          * independent of this, since the lock is released at xact end.)
    7315             :          */
    7316     6634342 :         if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
    7317        2012 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
    7318             :     }
    7319    21967280 :     else if (!TransactionIdIsValid(xid))
    7320             :     {
    7321             :         /* Raw xmax is InvalidTransactionId XID */
    7322             :         Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
    7323    21967280 :         xmax_already_frozen = true;
    7324             :     }
    7325             :     else
    7326           0 :         ereport(ERROR,
    7327             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    7328             :                  errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
    7329             :                                  xid, tuple->t_infomask)));
    7330             : 
    7331    28601634 :     if (freeze_xmin)
    7332             :     {
    7333             :         Assert(!xmin_already_frozen);
    7334             : 
    7335     4602988 :         frz->t_infomask |= HEAP_XMIN_FROZEN;
    7336             :     }
    7337    28601634 :     if (replace_xvac)
    7338             :     {
    7339             :         /*
    7340             :          * If a MOVED_OFF tuple is not dead, the xvac transaction must have
    7341             :          * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
    7342             :          * transaction succeeded.
    7343             :          */
    7344             :         Assert(pagefrz->freeze_required);
    7345           0 :         if (tuple->t_infomask & HEAP_MOVED_OFF)
    7346           0 :             frz->frzflags |= XLH_INVALID_XVAC;
    7347             :         else
    7348           0 :             frz->frzflags |= XLH_FREEZE_XVAC;
    7349             :     }
    7350             :     if (replace_xmax)
    7351             :     {
    7352             :         Assert(!xmax_already_frozen && !freeze_xmax);
    7353             :         Assert(pagefrz->freeze_required);
    7354             : 
    7355             :         /* Already set replace_xmax flags in freeze plan earlier */
    7356             :     }
    7357    28601634 :     if (freeze_xmax)
    7358             :     {
    7359             :         Assert(!xmax_already_frozen && !replace_xmax);
    7360             : 
    7361        3964 :         frz->xmax = InvalidTransactionId;
    7362             : 
    7363             :         /*
    7364             :          * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
    7365             :          * LOCKED.  Normalize to INVALID just to be sure no one gets confused.
    7366             :          * Also get rid of the HEAP_KEYS_UPDATED bit.
    7367             :          */
    7368        3964 :         frz->t_infomask &= ~HEAP_XMAX_BITS;
    7369        3964 :         frz->t_infomask |= HEAP_XMAX_INVALID;
    7370        3964 :         frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
    7371        3964 :         frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    7372             :     }
    7373             : 
    7374             :     /*
    7375             :      * Determine if this tuple is already totally frozen, or will become
    7376             :      * totally frozen (provided caller executes freeze plans for the page)
    7377             :      */
    7378    55950978 :     *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
    7379    27349344 :                        (freeze_xmax || xmax_already_frozen));
    7380             : 
    7381    28601634 :     if (!pagefrz->freeze_required && !(xmin_already_frozen &&
    7382             :                                        xmax_already_frozen))
    7383             :     {
    7384             :         /*
    7385             :          * So far no previous tuple from the page made freezing mandatory.
    7386             :          * Does this tuple force caller to freeze the entire page?
    7387             :          */
    7388     9454150 :         pagefrz->freeze_required =
    7389     9454150 :             heap_tuple_should_freeze(tuple, cutoffs,
    7390             :                                      &pagefrz->NoFreezePageRelfrozenXid,
    7391             :                                      &pagefrz->NoFreezePageRelminMxid);
    7392             :     }
    7393             : 
    7394             :     /* Tell caller if this tuple has a usable freeze plan set in *frz */
    7395    28601634 :     return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
    7396             : }
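
As a usage illustration of the contract just described, the hedged sketch
below shows one way a VACUUM-style caller could collect freeze plans for a
page before deciding whether to execute them.  The pagefrz initialization is
simplified (vacuumlazy.c seeds the trackers from its own NewRelfrozenXid and
NewRelminMxid state), and 'buffer' and 'cutoffs' are assumed to be supplied
by the caller.

    HeapPageFreeze pagefrz;
    HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
    int         nfrozen = 0;
    Page        page = BufferGetPage(buffer);
    OffsetNumber offnum,
                maxoff = PageGetMaxOffsetNumber(page);
    bool        totally_frozen;

    pagefrz.freeze_required = false;
    pagefrz.FreezePageRelfrozenXid = cutoffs->OldestXmin;
    pagefrz.FreezePageRelminMxid = cutoffs->OldestMxact;
    pagefrz.NoFreezePageRelfrozenXid = cutoffs->OldestXmin;
    pagefrz.NoFreezePageRelminMxid = cutoffs->OldestMxact;

    for (offnum = FirstOffsetNumber; offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      itemid = PageGetItemId(page, offnum);
        HeapTupleHeader htup;

        if (!ItemIdIsNormal(itemid))
            continue;
        htup = (HeapTupleHeader) PageGetItem(page, itemid);

        /* remember a freeze plan only when there is something to change */
        if (heap_prepare_freeze_tuple(htup, cutoffs, &pagefrz,
                                      &frozen[nfrozen], &totally_frozen))
            frozen[nfrozen++].offset = offnum;
    }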
    7397             : 
    7398             : /*
    7399             :  * Perform xmin/xmax XID status sanity checks before actually executing freeze
    7400             :  * plans.
    7401             :  *
    7402             :  * heap_prepare_freeze_tuple doesn't perform these checks directly because
    7403             :  * pg_xact lookups are relatively expensive.  They shouldn't be repeated by
    7404             :  * successive VACUUMs that each decide against freezing the same page.
    7405             :  */
    7406             : void
    7407       45544 : heap_pre_freeze_checks(Buffer buffer,
    7408             :                        HeapTupleFreeze *tuples, int ntuples)
    7409             : {
    7410       45544 :     Page        page = BufferGetPage(buffer);
    7411             : 
    7412     1962796 :     for (int i = 0; i < ntuples; i++)
    7413             :     {
    7414     1917252 :         HeapTupleFreeze *frz = tuples + i;
    7415     1917252 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    7416             :         HeapTupleHeader htup;
    7417             : 
    7418     1917252 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    7419             : 
    7420             :         /* Deliberately avoid relying on tuple hint bits here */
    7421     1917252 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
    7422             :         {
    7423     1917250 :             TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
    7424             : 
    7425             :             Assert(!HeapTupleHeaderXminFrozen(htup));
    7426     1917250 :             if (unlikely(!TransactionIdDidCommit(xmin)))
    7427           0 :                 ereport(ERROR,
    7428             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    7429             :                          errmsg_internal("uncommitted xmin %u needs to be frozen",
    7430             :                                          xmin)));
    7431             :         }
    7432             : 
    7433             :         /*
    7434             :          * TransactionIdDidAbort won't work reliably in the presence of XIDs
    7435             :          * left behind by transactions that were in progress during a crash,
    7436             :          * so we can only check that xmax didn't commit
    7437             :          */
    7438     1917252 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
    7439             :         {
    7440         630 :             TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
    7441             : 
    7442             :             Assert(TransactionIdIsNormal(xmax));
    7443         630 :             if (unlikely(TransactionIdDidCommit(xmax)))
    7444           0 :                 ereport(ERROR,
    7445             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    7446             :                          errmsg_internal("cannot freeze committed xmax %u",
    7447             :                                          xmax)));
    7448             :         }
    7449             :     }
    7450       45544 : }
    7451             : 
    7452             : /*
    7453             :  * Helper which executes freezing of one or more heap tuples on a page on
    7454             :  * behalf of caller.  Caller passes an array of tuple plans from
    7455             :  * heap_prepare_freeze_tuple.  Caller must set 'offset' in each plan for us.
    7456             :  * Must be called in a critical section that also marks the buffer dirty and,
    7457             :  * if needed, emits WAL.
    7458             :  */
    7459             : void
    7460       45544 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
    7461             : {
    7462       45544 :     Page        page = BufferGetPage(buffer);
    7463             : 
    7464     1962796 :     for (int i = 0; i < ntuples; i++)
    7465             :     {
    7466     1917252 :         HeapTupleFreeze *frz = tuples + i;
    7467     1917252 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    7468             :         HeapTupleHeader htup;
    7469             : 
    7470     1917252 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    7471     1917252 :         heap_execute_freeze_tuple(htup, frz);
    7472             :     }
    7473       45544 : }
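
Continuing that sketch, the ordering constraints stated in the comments above
(pg_xact checks before the critical section; dirtying and WAL inside it)
could be satisfied roughly as follows.  'rel', 'buffer', 'frozen', and
'nfrozen' come from the preparation loop; the WAL record itself is elided
here because the real VACUUM caller folds freezing into its prune/freeze
record.

    if (nfrozen > 0)
    {
        /* pg_xact lookups may error out, so do them before the critical section */
        heap_pre_freeze_checks(buffer, frozen, nfrozen);

        START_CRIT_SECTION();

        heap_freeze_prepared_tuples(buffer, frozen, nfrozen);
        MarkBufferDirty(buffer);

        if (RelationNeedsWAL(rel))
        {
            /* emit the freeze/prune record and PageSetLSN() here (elided) */
        }

        END_CRIT_SECTION();
    }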
    7474             : 
    7475             : /*
    7476             :  * heap_freeze_tuple
    7477             :  *      Freeze tuple in place, without WAL logging.
    7478             :  *
    7479             :  * Useful for callers like CLUSTER that perform their own WAL logging.
    7480             :  */
    7481             : bool
    7482      736804 : heap_freeze_tuple(HeapTupleHeader tuple,
    7483             :                   TransactionId relfrozenxid, TransactionId relminmxid,
    7484             :                   TransactionId FreezeLimit, TransactionId MultiXactCutoff)
    7485             : {
    7486             :     HeapTupleFreeze frz;
    7487             :     bool        do_freeze;
    7488             :     bool        totally_frozen;
    7489             :     struct VacuumCutoffs cutoffs;
    7490             :     HeapPageFreeze pagefrz;
    7491             : 
    7492      736804 :     cutoffs.relfrozenxid = relfrozenxid;
    7493      736804 :     cutoffs.relminmxid = relminmxid;
    7494      736804 :     cutoffs.OldestXmin = FreezeLimit;
    7495      736804 :     cutoffs.OldestMxact = MultiXactCutoff;
    7496      736804 :     cutoffs.FreezeLimit = FreezeLimit;
    7497      736804 :     cutoffs.MultiXactCutoff = MultiXactCutoff;
    7498             : 
    7499      736804 :     pagefrz.freeze_required = true;
    7500      736804 :     pagefrz.FreezePageRelfrozenXid = FreezeLimit;
    7501      736804 :     pagefrz.FreezePageRelminMxid = MultiXactCutoff;
    7502      736804 :     pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
    7503      736804 :     pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
    7504             : 
    7505      736804 :     do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
    7506             :                                           &pagefrz, &frz, &totally_frozen);
    7507             : 
    7508             :     /*
    7509             :      * Note that because this is not a WAL-logged operation, we don't need to
    7510             :      * fill in the offset in the freeze record.
    7511             :      */
    7512             : 
    7513      736804 :     if (do_freeze)
    7514      506538 :         heap_execute_freeze_tuple(tuple, &frz);
    7515      736804 :     return do_freeze;
    7516             : }
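
A minimal usage sketch (editor's addition, not from heapam.c), assuming a
CLUSTER-like rewrite where "OldHeap", "copied_tuple", "FreezeXid", and
"MultiXactCutoff" are the caller's own values; the rewrite WAL-logs the
destination page itself, so no record is emitted here.

    if (heap_freeze_tuple(copied_tuple->t_data,
                          OldHeap->rd_rel->relfrozenxid,
                          OldHeap->rd_rel->relminmxid,
                          FreezeXid, MultiXactCutoff))
    {
        /* header was adjusted in place; the page is logged by the rewrite */
    }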
    7517             : 
    7518             : /*
    7519             :  * For a given MultiXactId, return the hint bits that should be set in the
    7520             :  * tuple's infomask.
    7521             :  *
    7522             :  * Normally this should be called for a multixact that was just created, and
    7523             :  * so is in our local cache, so the GetMultiXactIdMembers call is fast.
    7524             :  */
    7525             : static void
    7526      153602 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
    7527             :                        uint16 *new_infomask2)
    7528             : {
    7529             :     int         nmembers;
    7530             :     MultiXactMember *members;
    7531             :     int         i;
    7532      153602 :     uint16      bits = HEAP_XMAX_IS_MULTI;
    7533      153602 :     uint16      bits2 = 0;
    7534      153602 :     bool        has_update = false;
    7535      153602 :     LockTupleMode strongest = LockTupleKeyShare;
    7536             : 
    7537             :     /*
    7538             :      * We only use this in multis we just created, so they cannot be values
    7539             :      * pre-pg_upgrade.
    7540             :      */
    7541      153602 :     nmembers = GetMultiXactIdMembers(multi, &members, false, false);
    7542             : 
    7543     2945222 :     for (i = 0; i < nmembers; i++)
    7544             :     {
    7545             :         LockTupleMode mode;
    7546             : 
    7547             :         /*
    7548             :          * Remember the strongest lock mode held by any member of the
    7549             :          * multixact.
    7550             :          */
    7551     2791620 :         mode = TUPLOCK_from_mxstatus(members[i].status);
    7552     2791620 :         if (mode > strongest)
    7553        5776 :             strongest = mode;
    7554             : 
    7555             :         /* See what other bits we need */
    7556     2791620 :         switch (members[i].status)
    7557             :         {
    7558     2786784 :             case MultiXactStatusForKeyShare:
    7559             :             case MultiXactStatusForShare:
    7560             :             case MultiXactStatusForNoKeyUpdate:
    7561     2786784 :                 break;
    7562             : 
    7563         104 :             case MultiXactStatusForUpdate:
    7564         104 :                 bits2 |= HEAP_KEYS_UPDATED;
    7565         104 :                 break;
    7566             : 
    7567        4712 :             case MultiXactStatusNoKeyUpdate:
    7568        4712 :                 has_update = true;
    7569        4712 :                 break;
    7570             : 
    7571          20 :             case MultiXactStatusUpdate:
    7572          20 :                 bits2 |= HEAP_KEYS_UPDATED;
    7573          20 :                 has_update = true;
    7574          20 :                 break;
    7575             :         }
    7576             :     }
    7577             : 
    7578      153602 :     if (strongest == LockTupleExclusive ||
    7579             :         strongest == LockTupleNoKeyExclusive)
    7580        4892 :         bits |= HEAP_XMAX_EXCL_LOCK;
    7581      148710 :     else if (strongest == LockTupleShare)
    7582         878 :         bits |= HEAP_XMAX_SHR_LOCK;
    7583      147832 :     else if (strongest == LockTupleKeyShare)
    7584      147832 :         bits |= HEAP_XMAX_KEYSHR_LOCK;
    7585             : 
    7586      153602 :     if (!has_update)
    7587      148870 :         bits |= HEAP_XMAX_LOCK_ONLY;
    7588             : 
    7589      153602 :     if (nmembers > 0)
    7590      153602 :         pfree(members);
    7591             : 
    7592      153602 :     *new_infomask = bits;
    7593      153602 :     *new_infomask2 = bits2;
    7594      153602 : }
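
A worked example derived from the logic above (illustrative only): for a
multixact whose members are one ForKeyShare locker and one NoKeyUpdate
updater, the strongest mode is LockTupleNoKeyExclusive and has_update is
true, so the result is

    *new_infomask  = HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK;
    *new_infomask2 = 0;         /* no HEAP_KEYS_UPDATED member present */

and HEAP_XMAX_LOCK_ONLY is deliberately left unset.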
    7595             : 
    7596             : /*
    7597             :  * MultiXactIdGetUpdateXid
    7598             :  *
    7599             :  * Given a multixact Xmax and corresponding infomask, which does not have the
    7600             :  * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
    7601             :  * transaction.
    7602             :  *
    7603             :  * Caller is expected to check the status of the updating transaction, if
    7604             :  * necessary.
    7605             :  */
    7606             : static TransactionId
    7607      324038 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
    7608             : {
    7609      324038 :     TransactionId update_xact = InvalidTransactionId;
    7610             :     MultiXactMember *members;
    7611             :     int         nmembers;
    7612             : 
    7613             :     Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
    7614             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    7615             : 
    7616             :     /*
    7617             :      * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
    7618             :      * pre-pg_upgrade.
    7619             :      */
    7620      324038 :     nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
    7621             : 
    7622      324038 :     if (nmembers > 0)
    7623             :     {
    7624             :         int         i;
    7625             : 
    7626      491560 :         for (i = 0; i < nmembers; i++)
    7627             :         {
    7628             :             /* Ignore lockers */
    7629      491560 :             if (!ISUPDATE_from_mxstatus(members[i].status))
    7630      167522 :                 continue;
    7631             : 
    7632             :             /* there can be at most one updater */
    7633             :             Assert(update_xact == InvalidTransactionId);
    7634      324038 :             update_xact = members[i].xid;
    7635             : #ifndef USE_ASSERT_CHECKING
    7636             : 
    7637             :             /*
    7638             :              * In an assert-enabled build, keep walking the whole array to
    7639             :              * verify there's no other updater; otherwise stop at the first.
    7640             :              */
    7641      324038 :             break;
    7642             : #endif
    7643             :         }
    7644             : 
    7645      324038 :         pfree(members);
    7646             :     }
    7647             : 
    7648      324038 :     return update_xact;
    7649             : }
    7650             : 
    7651             : /*
    7652             :  * HeapTupleGetUpdateXid
    7653             :  *      As above, but use a HeapTupleHeader
    7654             :  *
    7655             :  * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
    7656             :  * checking the hint bits.
    7657             :  */
    7658             : TransactionId
    7659      319762 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
    7660             : {
    7661      319762 :     return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
    7662      319762 :                                    tup->t_infomask);
    7663             : }
    7664             : 
    7665             : /*
    7666             :  * Does the given multixact conflict with the current transaction grabbing a
    7667             :  * tuple lock of the given strength?
    7668             :  *
    7669             :  * The passed infomask pairs up with the given multixact in the tuple header.
    7670             :  *
    7671             :  * If current_is_member is not NULL, it is set to 'true' if the current
    7672             :  * transaction is a member of the given multixact.
    7673             :  */
    7674             : static bool
    7675         436 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
    7676             :                         LockTupleMode lockmode, bool *current_is_member)
    7677             : {
    7678             :     int         nmembers;
    7679             :     MultiXactMember *members;
    7680         436 :     bool        result = false;
    7681         436 :     LOCKMODE    wanted = tupleLockExtraInfo[lockmode].hwlock;
    7682             : 
    7683         436 :     if (HEAP_LOCKED_UPGRADED(infomask))
    7684           0 :         return false;
    7685             : 
    7686         436 :     nmembers = GetMultiXactIdMembers(multi, &members, false,
    7687         436 :                                      HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    7688         436 :     if (nmembers >= 0)
    7689             :     {
    7690             :         int         i;
    7691             : 
    7692        5364 :         for (i = 0; i < nmembers; i++)
    7693             :         {
    7694             :             TransactionId memxid;
    7695             :             LOCKMODE    memlockmode;
    7696             : 
    7697        4942 :             if (result && (current_is_member == NULL || *current_is_member))
    7698             :                 break;
    7699             : 
    7700        4928 :             memlockmode = LOCKMODE_from_mxstatus(members[i].status);
    7701             : 
    7702             :             /* ignore members from current xact (but track their presence) */
    7703        4928 :             memxid = members[i].xid;
    7704        4928 :             if (TransactionIdIsCurrentTransactionId(memxid))
    7705             :             {
    7706         184 :                 if (current_is_member != NULL)
    7707         156 :                     *current_is_member = true;
    7708         184 :                 continue;
    7709             :             }
    7710        4744 :             else if (result)
    7711          16 :                 continue;
    7712             : 
    7713             :             /* ignore members that don't conflict with the lock we want */
    7714        4728 :             if (!DoLockModesConflict(memlockmode, wanted))
    7715        4650 :                 continue;
    7716             : 
    7717          78 :             if (ISUPDATE_from_mxstatus(members[i].status))
    7718             :             {
    7719             :                 /* ignore aborted updaters */
    7720          34 :                 if (TransactionIdDidAbort(memxid))
    7721           2 :                     continue;
    7722             :             }
    7723             :             else
    7724             :             {
    7725             :                 /* ignore lockers-only that are no longer in progress */
    7726          44 :                 if (!TransactionIdIsInProgress(memxid))
    7727          14 :                     continue;
    7728             :             }
    7729             : 
    7730             :             /*
    7731             :              * Whatever remains are either live lockers that conflict with our
    7732             :              * wanted lock, or updaters that are not aborted.  Those conflict
    7733             :              * with what we want.  Set up to return true, but keep going to
    7734             :              * look for the current transaction among the multixact members,
    7735             :              * if needed.
    7736             :              */
    7737          62 :             result = true;
    7738             :         }
    7739         436 :         pfree(members);
    7740             :     }
    7741             : 
    7742         436 :     return result;
    7743             : }
    7744             : 
    7745             : /*
    7746             :  * Do_MultiXactIdWait
    7747             :  *      Actual implementation for the two functions below.
    7748             :  *
    7749             :  * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
    7750             :  * needed to ensure we only sleep on conflicting members, and the infomask is
    7751             :  * used to optimize multixact access in case it's a lock-only multi); 'nowait'
    7752             :  * indicates whether to use conditional lock acquisition, to allow callers to
    7753             :  * fail if lock is unavailable.  'rel', 'ctid' and 'oper' are used to set up
    7754             :  * context information for error messages.  'remaining', if not NULL, receives
    7755             :  * the number of members that are still running, including any (non-aborted)
    7756             :  * subtransactions of our own transaction.  'logLockFailure' indicates whether
    7757             :  * to log details when a lock acquisition fails with 'nowait' enabled.
    7758             :  *
    7759             :  * We do this by sleeping on each member using XactLockTableWait.  Any
    7760             :  * members that belong to the current backend are *not* waited for, however;
    7761             :  * this would not merely be useless but would lead to Assert failure inside
    7762             :  * XactLockTableWait.  By the time this returns, it is certain that all
    7763             :  * transactions *of other backends* that were members of the MultiXactId
    7764             :  * that conflict with the requested status are dead (and no new ones can have
    7765             :  * been added, since it is not legal to add members to an existing
    7766             :  * MultiXactId).
    7767             :  *
    7768             :  * But by the time we finish sleeping, someone else may have changed the Xmax
    7769             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7770             :  *
    7771             :  * Note that in case we return false, the number of remaining members is
    7772             :  * not to be trusted.
    7773             :  */
    7774             : static bool
    7775         116 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    7776             :                    uint16 infomask, bool nowait,
    7777             :                    Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
    7778             :                    int *remaining, bool logLockFailure)
    7779             : {
    7780         116 :     bool        result = true;
    7781             :     MultiXactMember *members;
    7782             :     int         nmembers;
    7783         116 :     int         remain = 0;
    7784             : 
    7785             :     /* for pre-pg_upgrade tuples, no need to sleep at all */
    7786         116 :     nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
    7787         116 :         GetMultiXactIdMembers(multi, &members, false,
    7788         116 :                               HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    7789             : 
    7790         116 :     if (nmembers >= 0)
    7791             :     {
    7792             :         int         i;
    7793             : 
    7794         374 :         for (i = 0; i < nmembers; i++)
    7795             :         {
    7796         266 :             TransactionId memxid = members[i].xid;
    7797         266 :             MultiXactStatus memstatus = members[i].status;
    7798             : 
    7799         266 :             if (TransactionIdIsCurrentTransactionId(memxid))
    7800             :             {
    7801          48 :                 remain++;
    7802          48 :                 continue;
    7803             :             }
    7804             : 
    7805         218 :             if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
    7806         218 :                                      LOCKMODE_from_mxstatus(status)))
    7807             :             {
    7808          44 :                 if (remaining && TransactionIdIsInProgress(memxid))
    7809          16 :                     remain++;
    7810          44 :                 continue;
    7811             :             }
    7812             : 
    7813             :             /*
    7814             :              * This member conflicts with our multi, so we have to sleep (or
    7815             :              * return failure, if asked to avoid waiting.)
    7816             :              *
    7817             :              * Note that we don't set up an error context callback ourselves,
    7818             :              * but instead we pass the info down to XactLockTableWait.  This
    7819             :              * might seem a bit wasteful because the context is set up and
    7820             :              * torn down for each member of the multixact, but in reality it
    7821             :              * should be barely noticeable, and it avoids duplicate code.
    7822             :              */
    7823         174 :             if (nowait)
    7824             :             {
    7825           8 :                 result = ConditionalXactLockTableWait(memxid, logLockFailure);
    7826           8 :                 if (!result)
    7827           8 :                     break;
    7828             :             }
    7829             :             else
    7830         166 :                 XactLockTableWait(memxid, rel, ctid, oper);
    7831             :         }
    7832             : 
    7833         116 :         pfree(members);
    7834             :     }
    7835             : 
    7836         116 :     if (remaining)
    7837          20 :         *remaining = remain;
    7838             : 
    7839         116 :     return result;
    7840             : }
    7841             : 
    7842             : /*
    7843             :  * MultiXactIdWait
    7844             :  *      Sleep on a MultiXactId.
    7845             :  *
    7846             :  * By the time we finish sleeping, someone else may have changed the Xmax
    7847             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7848             :  *
    7849             :  * We return (in *remaining, if not NULL) the number of members that are still
    7850             :  * running, including any (non-aborted) subtransactions of our own transaction.
    7851             :  */
    7852             : static void
    7853         108 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
    7854             :                 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
    7855             :                 int *remaining)
    7856             : {
    7857         108 :     (void) Do_MultiXactIdWait(multi, status, infomask, false,
    7858             :                               rel, ctid, oper, remaining, false);
    7859         108 : }
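
A hedged sketch of a typical call site (editor's addition): a deleter that
found a multixact xmax might wait for conflicting members like this, where
"xwait", "infomask", "relation", and "tp" are the caller's own state.

    MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
                    relation, &tp.t_self, XLTW_Delete, NULL);
    /* xmax may have changed while sleeping; caller re-checks and loops */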
    7860             : 
    7861             : /*
    7862             :  * ConditionalMultiXactIdWait
    7863             :  *      As above, but only lock if we can get the lock without blocking.
    7864             :  *
    7865             :  * By the time we finish sleeping, someone else may have changed the Xmax
    7866             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7867             :  *
    7868             :  * If the multixact is now all gone, return true.  Returns false if some
    7869             :  * transactions might still be running.
    7870             :  *
    7871             :  * We return (in *remaining, if not NULL) the number of members that are still
    7872             :  * running, including any (non-aborted) subtransactions of our own transaction.
    7873             :  */
    7874             : static bool
    7875           8 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    7876             :                            uint16 infomask, Relation rel, int *remaining,
    7877             :                            bool logLockFailure)
    7878             : {
    7879           8 :     return Do_MultiXactIdWait(multi, status, infomask, true,
    7880             :                               rel, NULL, XLTW_None, remaining, logLockFailure);
    7881             : }
    7882             : 
    7883             : /*
    7884             :  * heap_tuple_needs_eventual_freeze
    7885             :  *
    7886             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    7887             :  * will eventually require freezing (if tuple isn't removed by pruning first).
    7888             :  */
    7889             : bool
    7890      274804 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
    7891             : {
    7892             :     TransactionId xid;
    7893             : 
    7894             :     /*
    7895             :      * If xmin is a normal transaction ID, this tuple is definitely not
    7896             :      * frozen.
    7897             :      */
    7898      274804 :     xid = HeapTupleHeaderGetXmin(tuple);
    7899      274804 :     if (TransactionIdIsNormal(xid))
    7900        5470 :         return true;
    7901             : 
    7902             :     /*
    7903             :      * If xmax is a valid xact or multixact, this tuple is also not frozen.
    7904             :      */
    7905      269334 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7906             :     {
    7907             :         MultiXactId multi;
    7908             : 
    7909           0 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    7910           0 :         if (MultiXactIdIsValid(multi))
    7911           0 :             return true;
    7912             :     }
    7913             :     else
    7914             :     {
    7915      269334 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    7916      269334 :         if (TransactionIdIsNormal(xid))
    7917          14 :             return true;
    7918             :     }
    7919             : 
    7920      269320 :     if (tuple->t_infomask & HEAP_MOVED)
    7921             :     {
    7922           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    7923           0 :         if (TransactionIdIsNormal(xid))
    7924           0 :             return true;
    7925             :     }
    7926             : 
    7927      269320 :     return false;
    7928             : }
    7929             : 
    7930             : /*
    7931             :  * heap_tuple_should_freeze
    7932             :  *
    7933             :  * Return value indicates if heap_prepare_freeze_tuple sibling function would
    7934             :  * (or should) force freezing of the heap page that contains caller's tuple.
    7935             :  * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
    7936             :  * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
    7937             :  *
    7938             :  * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
    7939             :  * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
    7940             :  * Our working assumption is that caller won't decide to freeze this tuple.
    7941             :  * It's up to caller to only ratchet back its own top-level trackers after the
    7942             :  * point that it fully commits to not freezing the tuple/page in question.
    7943             :  */
    7944             : bool
    7945     9457146 : heap_tuple_should_freeze(HeapTupleHeader tuple,
    7946             :                          const struct VacuumCutoffs *cutoffs,
    7947             :                          TransactionId *NoFreezePageRelfrozenXid,
    7948             :                          MultiXactId *NoFreezePageRelminMxid)
    7949             : {
    7950             :     TransactionId xid;
    7951             :     MultiXactId multi;
    7952     9457146 :     bool        freeze = false;
    7953             : 
    7954             :     /* First deal with xmin */
    7955     9457146 :     xid = HeapTupleHeaderGetXmin(tuple);
    7956     9457146 :     if (TransactionIdIsNormal(xid))
    7957             :     {
    7958             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7959     3465136 :         if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7960       45904 :             *NoFreezePageRelfrozenXid = xid;
    7961     3465136 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    7962       42758 :             freeze = true;
    7963             :     }
    7964             : 
    7965             :     /* Now deal with xmax */
    7966     9457146 :     xid = InvalidTransactionId;
    7967     9457146 :     multi = InvalidMultiXactId;
    7968     9457146 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7969           4 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    7970             :     else
    7971     9457142 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    7972             : 
    7973     9457146 :     if (TransactionIdIsNormal(xid))
    7974             :     {
    7975             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7976             :         /* xmax is a non-permanent XID */
    7977     6482022 :         if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7978           8 :             *NoFreezePageRelfrozenXid = xid;
    7979     6482022 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    7980          54 :             freeze = true;
    7981             :     }
    7982     2975124 :     else if (!MultiXactIdIsValid(multi))
    7983             :     {
    7984             :         /* xmax is a permanent XID or invalid MultiXactId/XID */
    7985             :     }
    7986           4 :     else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
    7987             :     {
    7988             :         /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
    7989           0 :         if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
    7990           0 :             *NoFreezePageRelminMxid = multi;
    7991             :         /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
    7992           0 :         freeze = true;
    7993             :     }
    7994             :     else
    7995             :     {
    7996             :         /* xmax is a MultiXactId that may have an updater XID */
    7997             :         MultiXactMember *members;
    7998             :         int         nmembers;
    7999             : 
    8000             :         Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
    8001           4 :         if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
    8002           4 :             *NoFreezePageRelminMxid = multi;
    8003           4 :         if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
    8004           4 :             freeze = true;
    8005             : 
    8006             :         /* need to check whether any member of the mxact is old */
    8007           4 :         nmembers = GetMultiXactIdMembers(multi, &members, false,
    8008           4 :                                          HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
    8009             : 
    8010          10 :         for (int i = 0; i < nmembers; i++)
    8011             :         {
    8012           6 :             xid = members[i].xid;
    8013             :             Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    8014           6 :             if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    8015           0 :                 *NoFreezePageRelfrozenXid = xid;
    8016           6 :             if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    8017           0 :                 freeze = true;
    8018             :         }
    8019           4 :         if (nmembers > 0)
    8020           2 :             pfree(members);
    8021             :     }
    8022             : 
    8023     9457146 :     if (tuple->t_infomask & HEAP_MOVED)
    8024             :     {
    8025           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    8026           0 :         if (TransactionIdIsNormal(xid))
    8027             :         {
    8028             :             Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    8029           0 :             if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    8030           0 :                 *NoFreezePageRelfrozenXid = xid;
    8031             :             /* heap_prepare_freeze_tuple forces xvac freezing */
    8032           0 :             freeze = true;
    8033             :         }
    8034             :     }
    8035             : 
    8036     9457146 :     return freeze;
    8037             : }
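
A brief caller-side sketch (editor's addition, hypothetical names): a
VACUUM-style caller that opts not to freeze right away can still use this
function to learn whether freezing would be required, while ratcheting its
"no freeze" trackers, which are assumed to already hold its current
candidate relfrozenxid/relminmxid values.

    if (heap_tuple_should_freeze(tupleheader, &cutoffs,
                                 &NoFreezePageRelfrozenXid,
                                 &NoFreezePageRelminMxid))
        freeze_required = true;     /* caller will freeze the whole page */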
    8038             : 
    8039             : /*
    8040             :  * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
    8041             :  * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
    8042             :  * that caller is in the process of physically removing, e.g. via HOT pruning
    8043             :  * or index deletion.
    8044             :  *
    8045             :  * Caller must initialize its value to InvalidTransactionId, which is
    8046             :  * generally interpreted as "definitely no need for a recovery conflict".
    8047             :  * Final value must reflect all heap tuples that caller will physically remove
    8048             :  * (or remove TID references to) via its ongoing pruning/deletion operation.
    8049             :  * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
    8050             :  * caller's WAL record) by REDO routine when it replays caller's operation.
    8051             :  */
    8052             : void
    8053     3148698 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
    8054             :                                       TransactionId *snapshotConflictHorizon)
    8055             : {
    8056     3148698 :     TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
    8057     3148698 :     TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
    8058     3148698 :     TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
    8059             : 
    8060     3148698 :     if (tuple->t_infomask & HEAP_MOVED)
    8061             :     {
    8062           0 :         if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
    8063           0 :             *snapshotConflictHorizon = xvac;
    8064             :     }
    8065             : 
    8066             :     /*
    8067             :      * Ignore tuples inserted by an aborted transaction or if the tuple was
    8068             :      * updated/deleted by the inserting transaction.
    8069             :      *
    8070             :      * Look for a committed hint bit, or if no xmin bit is set, check clog.
    8071             :      */
    8072     3148698 :     if (HeapTupleHeaderXminCommitted(tuple) ||
    8073      187214 :         (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
    8074             :     {
    8075     5662168 :         if (xmax != xmin &&
    8076     2671774 :             TransactionIdFollows(xmax, *snapshotConflictHorizon))
    8077      200946 :             *snapshotConflictHorizon = xmax;
    8078             :     }
    8079     3148698 : }
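
A short sketch of the calling convention described above (editor's
addition): a pruning-style caller starts from InvalidTransactionId and
ratchets the horizon across every tuple it will remove; "victims" and
"nvictims" are hypothetical.

    TransactionId snapshotConflictHorizon = InvalidTransactionId;

    for (int i = 0; i < nvictims; i++)
        HeapTupleHeaderAdvanceConflictHorizon(victims[i],
                                              &snapshotConflictHorizon);

    /* the final value is carried in the caller's WAL record */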
    8080             : 
    8081             : #ifdef USE_PREFETCH
    8082             : /*
    8083             :  * Helper function for heap_index_delete_tuples.  Issues prefetch requests for
    8084             :  * prefetch_count buffers.  The prefetch_state keeps track of all the buffers
    8085             :  * we can prefetch, and which have already been prefetched; each call to this
    8086             :  * function picks up where the previous call left off.
    8087             :  *
    8088             :  * Note: we expect the deltids array to be sorted in an order that groups TIDs
    8089             :  * by heap block, with all TIDs for each block appearing together in exactly
    8090             :  * one group.
    8091             :  */
    8092             : static void
    8093       39442 : index_delete_prefetch_buffer(Relation rel,
    8094             :                              IndexDeletePrefetchState *prefetch_state,
    8095             :                              int prefetch_count)
    8096             : {
    8097       39442 :     BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
    8098       39442 :     int         count = 0;
    8099             :     int         i;
    8100       39442 :     int         ndeltids = prefetch_state->ndeltids;
    8101       39442 :     TM_IndexDelete *deltids = prefetch_state->deltids;
    8102             : 
    8103       39442 :     for (i = prefetch_state->next_item;
    8104     1372692 :          i < ndeltids && count < prefetch_count;
    8105     1333250 :          i++)
    8106             :     {
    8107     1333250 :         ItemPointer htid = &deltids[i].tid;
    8108             : 
    8109     2654674 :         if (cur_hblkno == InvalidBlockNumber ||
    8110     1321424 :             ItemPointerGetBlockNumber(htid) != cur_hblkno)
    8111             :         {
    8112       36186 :             cur_hblkno = ItemPointerGetBlockNumber(htid);
    8113       36186 :             PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
    8114       36186 :             count++;
    8115             :         }
    8116             :     }
    8117             : 
    8118             :     /*
    8119             :      * Save the prefetch position so that next time we can continue from that
    8120             :      * position.
    8121             :      */
    8122       39442 :     prefetch_state->next_item = i;
    8123       39442 :     prefetch_state->cur_hblkno = cur_hblkno;
    8124       39442 : }
    8125             : #endif
    8126             : 
    8127             : /*
    8128             :  * Helper function for heap_index_delete_tuples.  Checks for index corruption
    8129             :  * involving an invalid TID in index AM caller's index page.
    8130             :  *
    8131             :  * This is an ideal place for these checks.  The index AM must hold a buffer
    8132             :  * lock on the index page containing the TIDs we examine here, so we don't
    8133             :  * have to worry about concurrent VACUUMs at all.  We can be sure that the
    8134             :  * index is corrupt when htid points directly to an LP_UNUSED item or
    8135             :  * heap-only tuple, which is not the case during standard index scans.
    8136             :  */
    8137             : static inline void
    8138     1100810 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
    8139             :                         Page page, OffsetNumber maxoff,
    8140             :                         const ItemPointerData *htid, TM_IndexStatus *istatus)
    8141             : {
    8142     1100810 :     OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
    8143             :     ItemId      iid;
    8144             : 
    8145             :     Assert(OffsetNumberIsValid(istatus->idxoffnum));
    8146             : 
    8147     1100810 :     if (unlikely(indexpagehoffnum > maxoff))
    8148           0 :         ereport(ERROR,
    8149             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    8150             :                  errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
    8151             :                                  ItemPointerGetBlockNumber(htid),
    8152             :                                  indexpagehoffnum,
    8153             :                                  istatus->idxoffnum, delstate->iblknum,
    8154             :                                  RelationGetRelationName(delstate->irel))));
    8155             : 
    8156     1100810 :     iid = PageGetItemId(page, indexpagehoffnum);
    8157     1100810 :     if (unlikely(!ItemIdIsUsed(iid)))
    8158           0 :         ereport(ERROR,
    8159             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    8160             :                  errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
    8161             :                                  ItemPointerGetBlockNumber(htid),
    8162             :                                  indexpagehoffnum,
    8163             :                                  istatus->idxoffnum, delstate->iblknum,
    8164             :                                  RelationGetRelationName(delstate->irel))));
    8165             : 
    8166     1100810 :     if (ItemIdHasStorage(iid))
    8167             :     {
    8168             :         HeapTupleHeader htup;
    8169             : 
    8170             :         Assert(ItemIdIsNormal(iid));
    8171      649158 :         htup = (HeapTupleHeader) PageGetItem(page, iid);
    8172             : 
    8173      649158 :         if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
    8174           0 :             ereport(ERROR,
    8175             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    8176             :                      errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
    8177             :                                      ItemPointerGetBlockNumber(htid),
    8178             :                                      indexpagehoffnum,
    8179             :                                      istatus->idxoffnum, delstate->iblknum,
    8180             :                                      RelationGetRelationName(delstate->irel))));
    8181             :     }
    8182     1100810 : }
    8183             : 
    8184             : /*
    8185             :  * heapam implementation of tableam's index_delete_tuples interface.
    8186             :  *
    8187             :  * This helper function is called by index AMs during index tuple deletion.
    8188             :  * See tableam header comments for an explanation of the interface implemented
    8189             :  * here and a general theory of operation.  Note that each call here is either
    8190             :  * a simple index deletion call, or a bottom-up index deletion call.
    8191             :  *
    8192             :  * It's possible for this to generate a fair amount of I/O, since we may be
    8193             :  * deleting hundreds of tuples from a single index block.  To amortize that
    8194             :  * cost to some degree, this uses prefetching and combines repeat accesses to
    8195             :  * the same heap block.
    8196             :  */
    8197             : TransactionId
    8198       11826 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
    8199             : {
    8200             :     /* Initial assumption is that earlier pruning took care of conflict */
    8201       11826 :     TransactionId snapshotConflictHorizon = InvalidTransactionId;
    8202       11826 :     BlockNumber blkno = InvalidBlockNumber;
    8203       11826 :     Buffer      buf = InvalidBuffer;
    8204       11826 :     Page        page = NULL;
    8205       11826 :     OffsetNumber maxoff = InvalidOffsetNumber;
    8206             :     TransactionId priorXmax;
    8207             : #ifdef USE_PREFETCH
    8208             :     IndexDeletePrefetchState prefetch_state;
    8209             :     int         prefetch_distance;
    8210             : #endif
    8211             :     SnapshotData SnapshotNonVacuumable;
    8212       11826 :     int         finalndeltids = 0,
    8213       11826 :                 nblocksaccessed = 0;
    8214             : 
    8215             :     /* State that's only used in bottom-up index deletion case */
    8216       11826 :     int         nblocksfavorable = 0;
    8217       11826 :     int         curtargetfreespace = delstate->bottomupfreespace,
    8218       11826 :                 lastfreespace = 0,
    8219       11826 :                 actualfreespace = 0;
    8220       11826 :     bool        bottomup_final_block = false;
    8221             : 
    8222       11826 :     InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
    8223             : 
    8224             :     /* Sort caller's deltids array by TID for further processing */
    8225       11826 :     index_delete_sort(delstate);
    8226             : 
    8227             :     /*
    8228             :      * Bottom-up case: resort deltids array in an order attuned to where the
    8229             :      * greatest number of promising TIDs are to be found, and determine how
    8230             :      * many blocks from the start of sorted array should be considered
    8231             :      * favorable.  This will also shrink the deltids array in order to
    8232             :      * eliminate completely unfavorable blocks up front.
    8233             :      */
    8234       11826 :     if (delstate->bottomup)
    8235        4138 :         nblocksfavorable = bottomup_sort_and_shrink(delstate);
    8236             : 
    8237             : #ifdef USE_PREFETCH
    8238             :     /* Initialize prefetch state. */
    8239       11826 :     prefetch_state.cur_hblkno = InvalidBlockNumber;
    8240       11826 :     prefetch_state.next_item = 0;
    8241       11826 :     prefetch_state.ndeltids = delstate->ndeltids;
    8242       11826 :     prefetch_state.deltids = delstate->deltids;
    8243             : 
    8244             :     /*
    8245             :      * Determine the prefetch distance that we will attempt to maintain.
    8246             :      *
    8247             :      * Since the caller holds a buffer lock somewhere in rel, we'd better make
    8248             :      * sure that isn't a catalog relation before we call code that does
    8249             :      * syscache lookups, to avoid risk of deadlock.
    8250             :      */
    8251       11826 :     if (IsCatalogRelation(rel))
    8252        8634 :         prefetch_distance = maintenance_io_concurrency;
    8253             :     else
    8254             :         prefetch_distance =
    8255        3192 :             get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
    8256             : 
    8257             :     /* Cap initial prefetch distance for bottom-up deletion caller */
    8258       11826 :     if (delstate->bottomup)
    8259             :     {
    8260             :         Assert(nblocksfavorable >= 1);
    8261             :         Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
    8262        4138 :         prefetch_distance = Min(prefetch_distance, nblocksfavorable);
    8263             :     }
    8264             : 
    8265             :     /* Start prefetching. */
    8266       11826 :     index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
    8267             : #endif
    8268             : 
    8269             :     /* Iterate over deltids, determine which to delete, check their horizon */
    8270             :     Assert(delstate->ndeltids > 0);
    8271     1112636 :     for (int i = 0; i < delstate->ndeltids; i++)
    8272             :     {
    8273     1104948 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    8274     1104948 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    8275     1104948 :         ItemPointer htid = &ideltid->tid;
    8276             :         OffsetNumber offnum;
    8277             : 
    8278             :         /*
    8279             :          * Read buffer, and perform required extra steps each time a new block
    8280             :          * is encountered.  Avoid refetching if it's the same block as the one
    8281             :          * from the last htid.
    8282             :          */
    8283     2198070 :         if (blkno == InvalidBlockNumber ||
    8284     1093122 :             ItemPointerGetBlockNumber(htid) != blkno)
    8285             :         {
    8286             :             /*
    8287             :              * Consider giving up early for bottom-up index deletion caller
    8288             :              * first. (Only prefetch next-next block afterwards, when it
    8289             :              * becomes clear that we're at least going to access the next
    8290             :              * block in line.)
    8291             :              *
    8292             :              * Sometimes the first block frees so much space for bottom-up
    8293             :              * caller that the deletion process can end without accessing any
    8294             :              * more blocks.  It is usually necessary to access 2 or 3 blocks
    8295             :              * per bottom-up deletion operation, though.
    8296             :              */
    8297       31754 :             if (delstate->bottomup)
    8298             :             {
    8299             :                 /*
    8300             :                  * We often allow caller to delete a few additional items
    8301             :                  * whose entries we reached after the point that space target
    8302             :                  * from caller was satisfied.  The cost of accessing the page
    8303             :                  * was already paid at that point, so it made sense to finish
    8304             :                  * it off.  When that happened, we finalize everything here
    8305             :                  * (by finishing off the whole bottom-up deletion operation
    8306             :                  * without needlessly paying the cost of accessing any more
    8307             :                  * blocks).
    8308             :                  */
    8309        9274 :                 if (bottomup_final_block)
    8310         326 :                     break;
    8311             : 
    8312             :                 /*
    8313             :                  * Give up when we didn't enable our caller to free any
    8314             :                  * additional space as a result of processing the page that we
    8315             :                  * just finished up with.  This rule is the main way in which
    8316             :                  * we keep the cost of bottom-up deletion under control.
    8317             :                  */
    8318        8948 :                 if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
    8319        3812 :                     break;
    8320        5136 :                 lastfreespace = actualfreespace;    /* for next time */
    8321             : 
    8322             :                 /*
    8323             :                  * Deletion operation (which is bottom-up) will definitely
    8324             :                  * access the next block in line.  Prepare for that now.
    8325             :                  *
    8326             :                  * Decay target free space so that we don't hang on for too
    8327             :                  * long with a marginal case. (Space target is only truly
    8328             :                  * helpful when it allows us to recognize that we don't need
    8329             :                  * to access more than 1 or 2 blocks to satisfy caller due to
    8330             :                  * agreeable workload characteristics.)
    8331             :                  *
    8332             :                  * We are a bit more patient when we encounter contiguous
    8333             :                  * blocks, though: these are treated as favorable blocks.  The
    8334             :                  * decay process is only applied when the next block in line
    8335             :                  * is not a favorable/contiguous block.  This is not an
    8336             :                  * exception to the general rule; we still insist on finding
    8337             :                  * at least one deletable item per block accessed.  See
    8338             :                  * bottomup_nblocksfavorable() for full details of the theory
    8339             :                  * behind favorable blocks and heap block locality in general.
    8340             :                  *
    8341             :                  * Note: The first block in line is always treated as a
    8342             :                  * favorable block, so the earliest possible point that the
    8343             :                  * decay can be applied is just before we access the second
    8344             :                  * block in line.  The Assert() verifies this for us.
    8345             :                  */
    8346             :                 Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
    8347        5136 :                 if (nblocksfavorable > 0)
    8348        4636 :                     nblocksfavorable--;
    8349             :                 else
    8350         500 :                     curtargetfreespace /= 2;
    8351             :             }
    8352             : 
    8353             :             /* release old buffer */
    8354       27616 :             if (BufferIsValid(buf))
    8355       15790 :                 UnlockReleaseBuffer(buf);
    8356             : 
    8357       27616 :             blkno = ItemPointerGetBlockNumber(htid);
    8358       27616 :             buf = ReadBuffer(rel, blkno);
    8359       27616 :             nblocksaccessed++;
    8360             :             Assert(!delstate->bottomup ||
    8361             :                    nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
    8362             : 
    8363             : #ifdef USE_PREFETCH
    8364             : 
    8365             :             /*
    8366             :              * To maintain the prefetch distance, prefetch one more page for
    8367             :              * each page we read.
    8368             :              */
    8369       27616 :             index_delete_prefetch_buffer(rel, &prefetch_state, 1);
    8370             : #endif
    8371             : 
    8372       27616 :             LockBuffer(buf, BUFFER_LOCK_SHARE);
    8373             : 
    8374       27616 :             page = BufferGetPage(buf);
    8375       27616 :             maxoff = PageGetMaxOffsetNumber(page);
    8376             :         }
    8377             : 
    8378             :         /*
    8379             :          * In passing, detect index corruption involving an index page with a
    8380             :          * TID that points to a location in the heap that couldn't possibly be
    8381             :          * correct.  We only do this with actual TIDs from caller's index page
    8382             :          * (not items reached by traversing through a HOT chain).
    8383             :          */
    8384     1100810 :         index_delete_check_htid(delstate, page, maxoff, htid, istatus);
    8385             : 
    8386     1100810 :         if (istatus->knowndeletable)
    8387             :             Assert(!delstate->bottomup && !istatus->promising);
    8388             :         else
    8389             :         {
    8390      830350 :             ItemPointerData tmp = *htid;
    8391             :             HeapTupleData heapTuple;
    8392             : 
    8393             :             /* Are any tuples from this HOT chain non-vacuumable? */
    8394      830350 :             if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
    8395             :                                        &heapTuple, NULL, true))
    8396      498002 :                 continue;       /* can't delete entry */
    8397             : 
    8398             :             /* Caller will delete, since whole HOT chain is vacuumable */
    8399      332348 :             istatus->knowndeletable = true;
    8400             : 
    8401             :             /* Maintain index free space info for bottom-up deletion case */
    8402      332348 :             if (delstate->bottomup)
    8403             :             {
    8404             :                 Assert(istatus->freespace > 0);
    8405       18618 :                 actualfreespace += istatus->freespace;
    8406       18618 :                 if (actualfreespace >= curtargetfreespace)
    8407        4542 :                     bottomup_final_block = true;
    8408             :             }
    8409             :         }
    8410             : 
    8411             :         /*
    8412             :          * Maintain snapshotConflictHorizon value for deletion operation as a
    8413             :          * whole by advancing current value using heap tuple headers.  This is
    8414             :          * loosely based on the logic for pruning a HOT chain.
    8415             :          */
    8416      602808 :         offnum = ItemPointerGetOffsetNumber(htid);
    8417      602808 :         priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    8418             :         for (;;)
    8419       41262 :         {
    8420             :             ItemId      lp;
    8421             :             HeapTupleHeader htup;
    8422             : 
    8423             :             /* Sanity check (pure paranoia) */
    8424      644070 :             if (offnum < FirstOffsetNumber)
    8425           0 :                 break;
    8426             : 
    8427             :             /*
    8428             :              * An offset past the end of the page's line pointer array is possible
    8429             :              * when the array was truncated
    8430             :              */
    8431      644070 :             if (offnum > maxoff)
    8432           0 :                 break;
    8433             : 
    8434      644070 :             lp = PageGetItemId(page, offnum);
    8435      644070 :             if (ItemIdIsRedirected(lp))
    8436             :             {
    8437       18928 :                 offnum = ItemIdGetRedirect(lp);
    8438       18928 :                 continue;
    8439             :             }
    8440             : 
    8441             :             /*
    8442             :              * We'll often encounter LP_DEAD line pointers (especially with an
    8443             :              * entry marked knowndeletable by our caller up front).  No heap
    8444             :              * tuple headers get examined for an htid that leads us to an
    8445             :              * LP_DEAD item.  This is okay because the earlier pruning
    8446             :              * operation that made the line pointer LP_DEAD in the first place
    8447             :              * must have considered the original tuple header as part of
    8448             :              * generating its own snapshotConflictHorizon value.
    8449             :              *
    8450             :              * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
    8451             :              * the same strategy that index vacuuming uses in all cases. Index
    8452             :              * VACUUM WAL records don't even have a snapshotConflictHorizon
    8453             :              * field of their own for this reason.
    8454             :              */
    8455      625142 :             if (!ItemIdIsNormal(lp))
    8456      401434 :                 break;
    8457             : 
    8458      223708 :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    8459             : 
    8460             :             /*
    8461             :              * Check the tuple XMIN against prior XMAX, if any
    8462             :              */
    8463      246042 :             if (TransactionIdIsValid(priorXmax) &&
    8464       22334 :                 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
    8465           0 :                 break;
    8466             : 
    8467      223708 :             HeapTupleHeaderAdvanceConflictHorizon(htup,
    8468             :                                                   &snapshotConflictHorizon);
    8469             : 
    8470             :             /*
    8471             :              * If the tuple is not HOT-updated, then we are at the end of this
    8472             :              * HOT-chain.  No need to visit later tuples from the same update
    8473             :              * chain (they get their own index entries) -- just move on to
    8474             :              * next htid from index AM caller.
    8475             :              */
    8476      223708 :             if (!HeapTupleHeaderIsHotUpdated(htup))
    8477      201374 :                 break;
    8478             : 
    8479             :             /* Advance to next HOT chain member */
    8480             :             Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
    8481       22334 :             offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    8482       22334 :             priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    8483             :         }
    8484             : 
    8485             :         /* Enable further/final shrinking of deltids for caller */
    8486      602808 :         finalndeltids = i + 1;
    8487             :     }
    8488             : 
    8489       11826 :     UnlockReleaseBuffer(buf);
    8490             : 
    8491             :     /*
    8492             :      * Shrink deltids array to exclude non-deletable entries at the end.  This
    8493             :      * is not just a minor optimization.  Final deltids array size might be
    8494             :      * zero for a bottom-up caller.  Index AM is explicitly allowed to rely on
    8495             :      * ndeltids being zero in all cases with zero total deletable entries.
    8496             :      */
    8497             :     Assert(finalndeltids > 0 || delstate->bottomup);
    8498       11826 :     delstate->ndeltids = finalndeltids;
    8499             : 
    8500       11826 :     return snapshotConflictHorizon;
    8501             : }
    8502             : 
    8503             : /*
    8504             :  * Specialized inlineable comparison function for index_delete_sort()
    8505             :  */
    8506             : static inline int
    8507    26455812 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
    8508             : {
    8509    26455812 :     ItemPointer tid1 = &deltid1->tid;
    8510    26455812 :     ItemPointer tid2 = &deltid2->tid;
    8511             : 
    8512             :     {
    8513    26455812 :         BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
    8514    26455812 :         BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
    8515             : 
    8516    26455812 :         if (blk1 != blk2)
    8517    10886544 :             return (blk1 < blk2) ? -1 : 1;
    8518             :     }
    8519             :     {
    8520    15569268 :         OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
    8521    15569268 :         OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
    8522             : 
    8523    15569268 :         if (pos1 != pos2)
    8524    15569268 :             return (pos1 < pos2) ? -1 : 1;
    8525             :     }
    8526             : 
    8527             :     Assert(false);
    8528             : 
    8529           0 :     return 0;
    8530             : }
    8531             : 
    8532             : /*
    8533             :  * Sort deltids array from delstate by TID.  This prepares it for further
    8534             :  * processing by heap_index_delete_tuples().
    8535             :  *
    8536             :  * This operation becomes a noticeable consumer of CPU cycles with some
    8537             :  * workloads, so we go to the trouble of specialization/micro-optimization.
    8538             :  * We use shellsort for this because it's easy to specialize, compiles to
    8539             :  * relatively few instructions, and is adaptive to presorted inputs/subsets
    8540             :  * (which are typical here).
    8541             :  */
    8542             : static void
    8543       11826 : index_delete_sort(TM_IndexDeleteOp *delstate)
    8544             : {
    8545       11826 :     TM_IndexDelete *deltids = delstate->deltids;
    8546       11826 :     int         ndeltids = delstate->ndeltids;
    8547             : 
    8548             :     /*
    8549             :      * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
    8550             :      *
    8551             :      * This implementation is fast with array sizes up to ~4500.  This covers
    8552             :      * all supported BLCKSZ values.
    8553             :      */
    8554       11826 :     const int   gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
    8555             : 
    8556             :     /* Think carefully before changing anything here -- keep swaps cheap */
    8557             :     StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
    8558             :                      "element size exceeds 8 bytes");
    8559             : 
    8560      118260 :     for (int g = 0; g < lengthof(gaps); g++)
    8561             :     {
    8562    15735374 :         for (int hi = gaps[g], i = hi; i < ndeltids; i++)
    8563             :         {
    8564    15628940 :             TM_IndexDelete d = deltids[i];
    8565    15628940 :             int         j = i;
    8566             : 
    8567    27237124 :             while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
    8568             :             {
    8569    11608184 :                 deltids[j] = deltids[j - hi];
    8570    11608184 :                 j -= hi;
    8571             :             }
    8572    15628940 :             deltids[j] = d;
    8573             :         }
    8574             :     }
    8575       11826 : }
    8576             : 
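For illustration, a minimal standalone sketch of the same gap-driven insertion sort (the DemoTid type and the demo_* names are hypothetical, not part of heapam.c): it applies the gap sequence above to a toy array of (block, offset) pairs and prints them in the block-then-offset order that heap_index_delete_tuples() expects.

#include <stdio.h>

typedef struct DemoTid
{
    unsigned int block;         /* analogous to BlockNumber */
    unsigned int offset;        /* analogous to OffsetNumber */
} DemoTid;

/* Order by block number, then by offset, mirroring index_delete_sort_cmp() */
static int
demo_cmp(const DemoTid *a, const DemoTid *b)
{
    if (a->block != b->block)
        return (a->block < b->block) ? -1 : 1;
    if (a->offset != b->offset)
        return (a->offset < b->offset) ? -1 : 1;
    return 0;
}

static void
demo_shellsort(DemoTid *tids, int n)
{
    /* Same gap sequence as index_delete_sort() above */
    const int   gaps[] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};

    for (int g = 0; g < (int) (sizeof(gaps) / sizeof(gaps[0])); g++)
    {
        for (int hi = gaps[g], i = hi; i < n; i++)
        {
            DemoTid     d = tids[i];
            int         j = i;

            while (j >= hi && demo_cmp(&tids[j - hi], &d) >= 0)
            {
                tids[j] = tids[j - hi];
                j -= hi;
            }
            tids[j] = d;
        }
    }
}

int
main(void)
{
    DemoTid     tids[] = {{7, 2}, {3, 5}, {3, 1}, {7, 1}, {1, 9}};

    demo_shellsort(tids, 5);
    for (int i = 0; i < 5; i++)
        printf("(%u,%u)\n", tids[i].block, tids[i].offset);    /* (1,9) (3,1) ... */
    return 0;
}
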
    8577             : /*
    8578             :  * Returns how many blocks should be considered favorable/contiguous for a
    8579             :  * bottom-up index deletion pass.  This is a number of heap blocks that starts
    8580             :  * from and includes the first block in line.
    8581             :  *
    8582             :  * There is always at least one favorable block during bottom-up index
    8583             :  * deletion.  In the worst case (i.e. with totally random heap blocks) the
    8584             :  * first block in line (the only favorable block) can be thought of as a
    8585             :  * degenerate array of contiguous blocks that consists of a single block.
    8586             :  * heap_index_delete_tuples() will expect this.
    8587             :  *
    8588             :  * Caller passes blockgroups, a description of the final order that deltids
    8589             :  * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
    8590             :  * processing.  Note that deltids need not actually be sorted just yet (caller
    8591             :  * only passes deltids to us so that we can interpret blockgroups).
    8592             :  *
    8593             :  * You might guess that the existence of contiguous blocks cannot matter much,
    8594             :  * since in general the main factor that determines which blocks we visit is
    8595             :  * the number of promising TIDs, which is a fixed hint from the index AM.
    8596             :  * We're not really targeting the general case, though -- the actual goal is
    8597             :  * to adapt our behavior to a wide variety of naturally occurring conditions.
    8598             :  * The effects of most of the heuristics we apply are only noticeable in the
    8599             :  * aggregate, over time and across many _related_ bottom-up index deletion
    8600             :  * passes.
    8601             :  *
    8602             :  * Deeming certain blocks favorable allows heapam to recognize and adapt to
    8603             :  * workloads where heap blocks visited during bottom-up index deletion can be
    8604             :  * accessed contiguously, in the sense that each newly visited block is the
    8605             :  * neighbor of the block that bottom-up deletion just finished processing (or
    8606             :  * close enough to it).  It will likely be cheaper to access more favorable
    8607             :  * blocks sooner rather than later (e.g. in this pass, not across a series of
    8608             :  * related bottom-up passes).  Either way it is probably only a matter of time
    8609             :  * (or a matter of further correlated version churn) before all blocks that
    8610             :  * appear together as a single large batch of favorable blocks get accessed by
    8611             :  * _some_ bottom-up pass.  Large batches of favorable blocks tend to either
    8612             :  * appear almost constantly or not even once (it all depends on per-index
    8613             :  * workload characteristics).
    8614             :  *
    8615             :  * Note that the blockgroups sort order applies a power-of-two bucketing
    8616             :  * scheme that creates opportunities for contiguous groups of blocks to get
    8617             :  * batched together, at least with workloads that are naturally amenable to
    8618             :  * being driven by heap block locality.  This doesn't just enhance the spatial
    8619             :  * locality of bottom-up heap block processing in the obvious way.  It also
    8620             :  * enables temporal locality of access, since sorting by heap block number
    8621             :  * naturally tends to make the bottom-up processing order deterministic.
    8622             :  *
    8623             :  * Consider the following example to get a sense of how temporal locality
    8624             :  * might matter: There is a heap relation with several indexes, each of which
    8625             :  * is low to medium cardinality.  It is subject to constant non-HOT updates.
    8626             :  * The updates are skewed (in one part of the primary key, perhaps).  None of
    8627             :  * the indexes are logically modified by the UPDATE statements (if they were
    8628             :  * then bottom-up index deletion would not be triggered in the first place).
    8629             :  * Naturally, each new round of index tuples (for each heap tuple that gets a
    8630             :  * heap_update() call) will have the same heap TID in each and every index.
    8631             :  * Since these indexes are low cardinality and never get logically modified,
    8632             :  * heapam processing during bottom-up deletion passes will access heap blocks
    8633             :  * in approximately sequential order.  Temporal locality of access occurs due
    8634             :  * to bottom-up deletion passes behaving very similarly across each of the
    8635             :  * indexes at any given moment.  This keeps the number of buffer misses needed
    8636             :  * to visit heap blocks to a minimum.
    8637             :  */
    8638             : static int
    8639        4138 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
    8640             :                           TM_IndexDelete *deltids)
    8641             : {
    8642        4138 :     int64       lastblock = -1;
    8643        4138 :     int         nblocksfavorable = 0;
    8644             : 
    8645             :     Assert(nblockgroups >= 1);
    8646             :     Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
    8647             : 
    8648             :     /*
    8649             :      * We tolerate heap blocks that will be accessed only slightly out of
    8650             :      * physical order.  Small blips occur when a pair of almost-contiguous
    8651             :      * blocks happen to fall into different buckets (perhaps due only to a
    8652             :      * small difference in npromisingtids that the bucketing scheme didn't
    8653             :      * quite manage to ignore).  We effectively ignore these blips by applying
    8654             :      * a small tolerance.  The precise tolerance we use is a little arbitrary,
    8655             :      * but it works well enough in practice.
    8656             :      */
    8657       13438 :     for (int b = 0; b < nblockgroups; b++)
    8658             :     {
    8659       12802 :         IndexDeleteCounts *group = blockgroups + b;
    8660       12802 :         TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
    8661       12802 :         BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
    8662             : 
    8663       12802 :         if (lastblock != -1 &&
    8664        8664 :             ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
    8665        7666 :              (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
    8666             :             break;
    8667             : 
    8668        9300 :         nblocksfavorable++;
    8669        9300 :         lastblock = block;
    8670             :     }
    8671             : 
    8672             :     /* Always indicate that there is at least 1 favorable block */
    8673             :     Assert(nblocksfavorable >= 1);
    8674             : 
    8675        4138 :     return nblocksfavorable;
    8676             : }
    8677             : 
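A minimal sketch of the same tolerance-based counting, assuming a hypothetical tolerance of 3 blocks (the demo_* names are not part of heapam.c): the walk stops at the first leading block that is not within the tolerance of the previously visited block.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TOLERANCE_NBLOCKS 3    /* hypothetical tolerance for this sketch */

static int
demo_nblocksfavorable(const uint32_t *blocks, int nblocks)
{
    int64_t     lastblock = -1;
    int         nfavorable = 0;

    for (int b = 0; b < nblocks; b++)
    {
        /* Stop at the first block that is too far from the last one visited */
        if (lastblock != -1 &&
            ((int64_t) blocks[b] < lastblock - DEMO_TOLERANCE_NBLOCKS ||
             (int64_t) blocks[b] > lastblock + DEMO_TOLERANCE_NBLOCKS))
            break;
        nfavorable++;
        lastblock = blocks[b];
    }
    return nfavorable;
}

int
main(void)
{
    /* Blocks 10..13 are near-contiguous; block 200 breaks the run */
    uint32_t    blocks[] = {10, 12, 11, 13, 200, 201};

    printf("%d\n", demo_nblocksfavorable(blocks, 6));   /* prints 4 */
    return 0;
}
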
    8678             : /*
    8679             :  * qsort comparison function for bottomup_sort_and_shrink()
    8680             :  */
    8681             : static int
    8682      415150 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
    8683             : {
    8684      415150 :     const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
    8685      415150 :     const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
    8686             : 
    8687             :     /*
    8688             :      * Most significant field is npromisingtids (which we invert the order of
    8689             :      * so as to sort in desc order).
    8690             :      *
    8691             :      * Caller should have already normalized npromisingtids fields into
    8692             :      * power-of-two values (buckets).
    8693             :      */
    8694      415150 :     if (group1->npromisingtids > group2->npromisingtids)
    8695       19710 :         return -1;
    8696      395440 :     if (group1->npromisingtids < group2->npromisingtids)
    8697       21192 :         return 1;
    8698             : 
    8699             :     /*
    8700             :      * Tiebreak: desc ntids sort order.
    8701             :      *
    8702             :      * We cannot expect power-of-two values for ntids fields.  We should
    8703             :      * behave as if they were already rounded up for us instead.
    8704             :      */
    8705      374248 :     if (group1->ntids != group2->ntids)
    8706             :     {
    8707      269486 :         uint32      ntids1 = pg_nextpower2_32((uint32) group1->ntids);
    8708      269486 :         uint32      ntids2 = pg_nextpower2_32((uint32) group2->ntids);
    8709             : 
    8710      269486 :         if (ntids1 > ntids2)
    8711       42298 :             return -1;
    8712      227188 :         if (ntids1 < ntids2)
    8713       52322 :             return 1;
    8714             :     }
    8715             : 
    8716             :     /*
    8717             :      * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
    8718             :      * block in deltids array) order.
    8719             :      *
    8720             :      * This is equivalent to sorting in ascending heap block number order
    8721             :      * (among otherwise equal subsets of the array).  This approach allows us
    8722             :      * to avoid accessing the out-of-line TID.  (We rely on the assumption
    8723             :      * that the deltids array was sorted in ascending heap TID order when
    8724             :      * these offsets to the first TID from each heap block group were formed.)
    8725             :      */
    8726      279628 :     if (group1->ifirsttid > group2->ifirsttid)
    8727      136636 :         return 1;
    8728      142992 :     if (group1->ifirsttid < group2->ifirsttid)
    8729      142992 :         return -1;
    8730             : 
    8731           0 :     pg_unreachable();
    8732             : 
    8733             :     return 0;
    8734             : }
    8735             : 
    8736             : /*
    8737             :  * heap_index_delete_tuples() helper function for bottom-up deletion callers.
    8738             :  *
    8739             :  * Sorts deltids array in the order needed for useful processing by bottom-up
    8740             :  * deletion.  The array should already be sorted in TID order when we're
    8741             :  * called.  The sort process groups heap TIDs from deltids into heap block
    8742             :  * groupings.  Earlier/more-promising groups/blocks are usually those that are
    8743             :  * known to have the most "promising" TIDs.
    8744             :  *
    8745             :  * Sets new size of deltids array (ndeltids) in state.  deltids will only have
    8746             :  * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
    8747             :  * return.  This often means that deltids will be shrunk to a small fraction
    8748             :  * of its original size (we eliminate many heap blocks from consideration for
    8749             :  * caller up front).
    8750             :  *
    8751             :  * Returns the number of "favorable" blocks.  See bottomup_nblocksfavorable()
    8752             :  * for a definition and full details.
    8753             :  */
    8754             : static int
    8755        4138 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
    8756             : {
    8757             :     IndexDeleteCounts *blockgroups;
    8758             :     TM_IndexDelete *reordereddeltids;
    8759        4138 :     BlockNumber curblock = InvalidBlockNumber;
    8760        4138 :     int         nblockgroups = 0;
    8761        4138 :     int         ncopied = 0;
    8762        4138 :     int         nblocksfavorable = 0;
    8763             : 
    8764             :     Assert(delstate->bottomup);
    8765             :     Assert(delstate->ndeltids > 0);
    8766             : 
    8767             :     /* Calculate per-heap-block count of TIDs */
    8768        4138 :     blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
    8769     1972606 :     for (int i = 0; i < delstate->ndeltids; i++)
    8770             :     {
    8771     1968468 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    8772     1968468 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    8773     1968468 :         ItemPointer htid = &ideltid->tid;
    8774     1968468 :         bool        promising = istatus->promising;
    8775             : 
    8776     1968468 :         if (curblock != ItemPointerGetBlockNumber(htid))
    8777             :         {
    8778             :             /* New block group */
    8779       81752 :             nblockgroups++;
    8780             : 
    8781             :             Assert(curblock < ItemPointerGetBlockNumber(htid) ||
    8782             :                    !BlockNumberIsValid(curblock));
    8783             : 
    8784       81752 :             curblock = ItemPointerGetBlockNumber(htid);
    8785       81752 :             blockgroups[nblockgroups - 1].ifirsttid = i;
    8786       81752 :             blockgroups[nblockgroups - 1].ntids = 1;
    8787       81752 :             blockgroups[nblockgroups - 1].npromisingtids = 0;
    8788             :         }
    8789             :         else
    8790             :         {
    8791     1886716 :             blockgroups[nblockgroups - 1].ntids++;
    8792             :         }
    8793             : 
    8794     1968468 :         if (promising)
    8795      247944 :             blockgroups[nblockgroups - 1].npromisingtids++;
    8796             :     }
    8797             : 
    8798             :     /*
    8799             :      * We're about ready to sort block groups to determine the optimal order
    8800             :      * for visiting heap blocks.  But before we do, round the number of
    8801             :      * promising tuples for each block group up to the next power-of-two,
    8802             :      * unless it is very low (less than 4), in which case we round up to 4.
    8803             :      * npromisingtids is far too noisy to trust when choosing between a pair
    8804             :      * of block groups that both have very low values.
    8805             :      *
    8806             :      * This scheme divides heap blocks/block groups into buckets.  Each bucket
    8807             :      * contains blocks that have _approximately_ the same number of promising
    8808             :      * TIDs as each other.  The goal is to ignore relatively small differences
    8809             :      * in the total number of promising entries, so that the whole process can
    8810             :      * give a little weight to heapam factors (like heap block locality)
    8811             :      * instead.  This isn't a trade-off, really -- we have nothing to lose. It
    8812             :      * would be foolish to interpret small differences in npromisingtids
    8813             :      * values as anything more than noise.
    8814             :      *
    8815             :      * We tiebreak on nhtids when sorting block group subsets that have the
    8816             :      * same npromisingtids, but this has the same issues as npromisingtids,
    8817             :      * and so nhtids is subject to the same power-of-two bucketing scheme. The
    8818             :      * only reason that we don't fix nhtids in the same way here too is that
    8819             :      * we'll need accurate nhtids values after the sort.  We handle nhtids
    8820             :      * bucketization dynamically instead (in the sort comparator).
    8821             :      *
    8822             :      * See bottomup_nblocksfavorable() for a full explanation of when and how
    8823             :      * heap locality/favorable blocks can significantly influence when and how
    8824             :      * heap blocks are accessed.
    8825             :      */
    8826       85890 :     for (int b = 0; b < nblockgroups; b++)
    8827             :     {
    8828       81752 :         IndexDeleteCounts *group = blockgroups + b;
    8829             : 
    8830             :         /* Better off falling back on nhtids with low npromisingtids */
    8831       81752 :         if (group->npromisingtids <= 4)
    8832       70324 :             group->npromisingtids = 4;
    8833             :         else
    8834       11428 :             group->npromisingtids =
    8835       11428 :                 pg_nextpower2_32((uint32) group->npromisingtids);
    8836             :     }
    8837             : 
    8838             :     /* Sort groups and rearrange caller's deltids array */
    8839        4138 :     qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
    8840             :           bottomup_sort_and_shrink_cmp);
    8841        4138 :     reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
    8842             : 
    8843        4138 :     nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
    8844             :     /* Determine number of favorable blocks at the start of final deltids */
    8845        4138 :     nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
    8846             :                                                  delstate->deltids);
    8847             : 
    8848       27748 :     for (int b = 0; b < nblockgroups; b++)
    8849             :     {
    8850       23610 :         IndexDeleteCounts *group = blockgroups + b;
    8851       23610 :         TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
    8852             : 
    8853       23610 :         memcpy(reordereddeltids + ncopied, firstdtid,
    8854       23610 :                sizeof(TM_IndexDelete) * group->ntids);
    8855       23610 :         ncopied += group->ntids;
    8856             :     }
    8857             : 
    8858             :     /* Copy final grouped and sorted TIDs back into start of caller's array */
    8859        4138 :     memcpy(delstate->deltids, reordereddeltids,
    8860             :            sizeof(TM_IndexDelete) * ncopied);
    8861        4138 :     delstate->ndeltids = ncopied;
    8862             : 
    8863        4138 :     pfree(reordereddeltids);
    8864        4138 :     pfree(blockgroups);
    8865             : 
    8866        4138 :     return nblocksfavorable;
    8867             : }
    8868             : 
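A minimal sketch of the bucketing step above, assuming pg_nextpower2_32() returns the smallest power of two greater than or equal to its argument (the demo_* helpers below are stand-ins, not part of PostgreSQL): counts of at most 4 promising TIDs collapse into a single bucket, while larger counts round up to a power of two, so only coarse differences survive into the sort.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for pg_nextpower2_32(): smallest power of two >= v, for v > 0 */
static uint32_t
demo_nextpower2(uint32_t v)
{
    uint32_t    p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

/* Bucket a promising-TID count the way bottomup_sort_and_shrink() does */
static uint32_t
demo_bucket(uint32_t npromisingtids)
{
    if (npromisingtids <= 4)
        return 4;               /* too noisy to trust -- clamp to 4 */
    return demo_nextpower2(npromisingtids);
}

int
main(void)
{
    uint32_t    counts[] = {0, 1, 3, 4, 5, 6, 9, 17, 33};

    /* 0..4 -> 4, 5..6 -> 8, 9 -> 16, 17 -> 32, 33 -> 64 */
    for (int i = 0; i < 9; i++)
        printf("%u -> %u\n", counts[i], demo_bucket(counts[i]));
    return 0;
}

Blocks that land in the same bucket are then ordered by their (power-of-two-rounded) total TID count, and finally by their position in the TID-sorted deltids array, which is effectively ascending heap block number, as bottomup_sort_and_shrink_cmp() does.
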
    8869             : /*
    8870             :  * Perform XLogInsert for a heap-visible operation.  'block' is the block
    8871             :  * being marked all-visible, and vm_buffer is the buffer containing the
    8872             :  * corresponding visibility map block.  Both should have already been modified
    8873             :  * and dirtied.
    8874             :  *
    8875             :  * snapshotConflictHorizon comes from the largest xmin on the page being
    8876             :  * marked all-visible.  The REDO routine uses it to generate recovery conflicts.
    8877             :  *
    8878             :  * If checksums or wal_log_hints are enabled, we may also generate a full-page
    8879             :  * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
    8880             :  * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
    8881             :  * update the heap page's LSN.
    8882             :  */
    8883             : XLogRecPtr
    8884       68416 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
    8885             :                  TransactionId snapshotConflictHorizon, uint8 vmflags)
    8886             : {
    8887             :     xl_heap_visible xlrec;
    8888             :     XLogRecPtr  recptr;
    8889             :     uint8       flags;
    8890             : 
    8891             :     Assert(BufferIsValid(heap_buffer));
    8892             :     Assert(BufferIsValid(vm_buffer));
    8893             : 
    8894       68416 :     xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
    8895       68416 :     xlrec.flags = vmflags;
    8896       68416 :     if (RelationIsAccessibleInLogicalDecoding(rel))
    8897         112 :         xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
    8898       68416 :     XLogBeginInsert();
    8899       68416 :     XLogRegisterData(&xlrec, SizeOfHeapVisible);
    8900             : 
    8901       68416 :     XLogRegisterBuffer(0, vm_buffer, 0);
    8902             : 
    8903       68416 :     flags = REGBUF_STANDARD;
    8904       68416 :     if (!XLogHintBitIsNeeded())
    8905        6170 :         flags |= REGBUF_NO_IMAGE;
    8906       68416 :     XLogRegisterBuffer(1, heap_buffer, flags);
    8907             : 
    8908       68416 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
    8909             : 
    8910       68416 :     return recptr;
    8911             : }
    8912             : 
    8913             : /*
    8914             :  * Perform XLogInsert for a heap-update operation.  Caller must already
    8915             :  * have modified the buffer(s) and marked them dirty.
    8916             :  */
    8917             : static XLogRecPtr
    8918      601206 : log_heap_update(Relation reln, Buffer oldbuf,
    8919             :                 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
    8920             :                 HeapTuple old_key_tuple,
    8921             :                 bool all_visible_cleared, bool new_all_visible_cleared)
    8922             : {
    8923             :     xl_heap_update xlrec;
    8924             :     xl_heap_header xlhdr;
    8925             :     xl_heap_header xlhdr_idx;
    8926             :     uint8       info;
    8927             :     uint16      prefix_suffix[2];
    8928      601206 :     uint16      prefixlen = 0,
    8929      601206 :                 suffixlen = 0;
    8930             :     XLogRecPtr  recptr;
    8931      601206 :     Page        page = BufferGetPage(newbuf);
    8932      601206 :     bool        need_tuple_data = RelationIsLogicallyLogged(reln);
    8933             :     bool        init;
    8934             :     int         bufflags;
    8935             : 
    8936             :     /* Caller should not call me on a non-WAL-logged relation */
    8937             :     Assert(RelationNeedsWAL(reln));
    8938             : 
    8939      601206 :     XLogBeginInsert();
    8940             : 
    8941      601206 :     if (HeapTupleIsHeapOnly(newtup))
    8942      294274 :         info = XLOG_HEAP_HOT_UPDATE;
    8943             :     else
    8944      306932 :         info = XLOG_HEAP_UPDATE;
    8945             : 
    8946             :     /*
    8947             :      * If the old and new tuple are on the same page, we only need to log the
    8948             :      * parts of the new tuple that were changed.  That saves on the amount of
    8949             :      * WAL we need to write.  Currently, we just count any unchanged bytes in
    8950             :      * the beginning and end of the tuple.  That's quick to check, and
    8951             :      * perfectly covers the common case that only one field is updated.
    8952             :      *
    8953             :      * We could do this even if the old and new tuple are on different pages,
    8954             :      * but only if we don't make a full-page image of the old page, which is
    8955             :      * difficult to know in advance.  Also, if the old tuple is corrupt for
    8956             :      * some reason, it would allow the corruption to propagate to the new
    8957             :      * page, so it seems best to avoid that.  Under the general assumption
    8958             :      * that most updates tend to create the new tuple version on the same
    8959             :      * page, there isn't much to be gained by doing this across pages anyway.
    8960             :      *
    8961             :      * Skip this if we're taking a full-page image of the new page, as we
    8962             :      * don't include the new tuple in the WAL record in that case.  Also
    8963             :      * disable if effective_wal_level='logical', as logical decoding needs to
    8964             :      * be able to read the whole new tuple from the WAL record alone.
    8965             :      */
    8966      601206 :     if (oldbuf == newbuf && !need_tuple_data &&
    8967      294928 :         !XLogCheckBufferNeedsBackup(newbuf))
    8968             :     {
    8969      293666 :         char       *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
    8970      293666 :         char       *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
    8971      293666 :         int         oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
    8972      293666 :         int         newlen = newtup->t_len - newtup->t_data->t_hoff;
    8973             : 
    8974             :         /* Check for common prefix between old and new tuple */
    8975    24665908 :         for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
    8976             :         {
    8977    24613916 :             if (newp[prefixlen] != oldp[prefixlen])
    8978      241674 :                 break;
    8979             :         }
    8980             : 
    8981             :         /*
    8982             :          * Storing the length of the prefix takes 2 bytes, so we need to save
    8983             :          * at least 3 bytes or there's no point.
    8984             :          */
    8985      293666 :         if (prefixlen < 3)
    8986       44210 :             prefixlen = 0;
    8987             : 
    8988             :         /* Same for suffix */
    8989     9599876 :         for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
    8990             :         {
    8991     9547376 :             if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
    8992      241166 :                 break;
    8993             :         }
    8994      293666 :         if (suffixlen < 3)
    8995       73668 :             suffixlen = 0;
    8996             :     }
    8997             : 
    8998             :     /* Prepare main WAL data chain */
    8999      601206 :     xlrec.flags = 0;
    9000      601206 :     if (all_visible_cleared)
    9001        3054 :         xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
    9002      601206 :     if (new_all_visible_cleared)
    9003        1616 :         xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
    9004      601206 :     if (prefixlen > 0)
    9005      249456 :         xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
    9006      601206 :     if (suffixlen > 0)
    9007      219998 :         xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
    9008      601206 :     if (need_tuple_data)
    9009             :     {
    9010       94044 :         xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
    9011       94044 :         if (old_key_tuple)
    9012             :         {
    9013         292 :             if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    9014         130 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
    9015             :             else
    9016         162 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
    9017             :         }
    9018             :     }
    9019             : 
    9020             :     /* If new tuple is the single and first tuple on page... */
    9021      608452 :     if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
    9022        7246 :         PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    9023             :     {
    9024        6868 :         info |= XLOG_HEAP_INIT_PAGE;
    9025        6868 :         init = true;
    9026             :     }
    9027             :     else
    9028      594338 :         init = false;
    9029             : 
    9030             :     /* Prepare WAL data for the old page */
    9031      601206 :     xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
    9032      601206 :     xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
    9033     1202412 :     xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
    9034      601206 :                                               oldtup->t_data->t_infomask2);
    9035             : 
    9036             :     /* Prepare WAL data for the new page */
    9037      601206 :     xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
    9038      601206 :     xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
    9039             : 
    9040      601206 :     bufflags = REGBUF_STANDARD;
    9041      601206 :     if (init)
    9042        6868 :         bufflags |= REGBUF_WILL_INIT;
    9043      601206 :     if (need_tuple_data)
    9044       94044 :         bufflags |= REGBUF_KEEP_DATA;
    9045             : 
    9046      601206 :     XLogRegisterBuffer(0, newbuf, bufflags);
    9047      601206 :     if (oldbuf != newbuf)
    9048      282394 :         XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
    9049             : 
    9050      601206 :     XLogRegisterData(&xlrec, SizeOfHeapUpdate);
    9051             : 
    9052             :     /*
    9053             :      * Prepare WAL data for the new tuple.
    9054             :      */
    9055      601206 :     if (prefixlen > 0 || suffixlen > 0)
    9056             :     {
    9057      292738 :         if (prefixlen > 0 && suffixlen > 0)
    9058             :         {
    9059      176716 :             prefix_suffix[0] = prefixlen;
    9060      176716 :             prefix_suffix[1] = suffixlen;
    9061      176716 :             XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
    9062             :         }
    9063      116022 :         else if (prefixlen > 0)
    9064             :         {
    9065       72740 :             XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
    9066             :         }
    9067             :         else
    9068             :         {
    9069       43282 :             XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
    9070             :         }
    9071             :     }
    9072             : 
    9073      601206 :     xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
    9074      601206 :     xlhdr.t_infomask = newtup->t_data->t_infomask;
    9075      601206 :     xlhdr.t_hoff = newtup->t_data->t_hoff;
    9076             :     Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
    9077             : 
    9078             :     /*
    9079             :      * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
    9080             :      *
    9081             :      * The 'data' doesn't include the common prefix or suffix.
    9082             :      */
    9083      601206 :     XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
    9084      601206 :     if (prefixlen == 0)
    9085             :     {
    9086      351750 :         XLogRegisterBufData(0,
    9087      351750 :                             (char *) newtup->t_data + SizeofHeapTupleHeader,
    9088      351750 :                             newtup->t_len - SizeofHeapTupleHeader - suffixlen);
    9089             :     }
    9090             :     else
    9091             :     {
    9092             :         /*
    9093             :          * Have to write the null bitmap and data after the common prefix as
    9094             :          * two separate rdata entries.
    9095             :          */
    9096             :         /* bitmap [+ padding] [+ oid] */
    9097      249456 :         if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
    9098             :         {
    9099      249456 :             XLogRegisterBufData(0,
    9100      249456 :                                 (char *) newtup->t_data + SizeofHeapTupleHeader,
    9101      249456 :                                 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
    9102             :         }
    9103             : 
    9104             :         /* data after common prefix */
    9105      249456 :         XLogRegisterBufData(0,
    9106      249456 :                             (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
    9107      249456 :                             newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
    9108             :     }
    9109             : 
    9110             :     /* We need to log a tuple identity */
    9111      601206 :     if (need_tuple_data && old_key_tuple)
    9112             :     {
    9113             :         /* don't really need this, but it's more convenient to decode */
    9114         292 :         xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    9115         292 :         xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
    9116         292 :         xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
    9117             : 
    9118         292 :         XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
    9119             : 
    9120             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    9121         292 :         XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
    9122         292 :                          old_key_tuple->t_len - SizeofHeapTupleHeader);
    9123             :     }
    9124             : 
    9125             :     /* filtering by origin on a row level is much more efficient */
    9126      601206 :     XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    9127             : 
    9128      601206 :     recptr = XLogInsert(RM_HEAP_ID, info);
    9129             : 
    9130      601206 :     return recptr;
    9131             : }
    9132             : 
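The prefix/suffix optimization can also be shown in isolation.  This standalone sketch (demo_prefix_suffix and the sample strings are hypothetical, not part of heapam.c) computes how many leading and trailing bytes the old and new tuple payloads share; only the remaining middle of the new image, plus the small length fields, would need to go into the WAL record.

#include <stdio.h>
#include <string.h>

/*
 * Compute the length of the common prefix and suffix of two byte buffers,
 * mirroring the delta computation in log_heap_update().
 */
static void
demo_prefix_suffix(const char *oldp, int oldlen,
                   const char *newp, int newlen,
                   int *prefixlen, int *suffixlen)
{
    int         minlen = (oldlen < newlen) ? oldlen : newlen;
    int         p = 0;
    int         s = 0;

    while (p < minlen && oldp[p] == newp[p])
        p++;
    /* Storing the prefix length costs 2 bytes, so require a saving of >= 3 */
    if (p < 3)
        p = 0;

    while (s < minlen - p && oldp[oldlen - s - 1] == newp[newlen - s - 1])
        s++;
    if (s < 3)
        s = 0;

    *prefixlen = p;
    *suffixlen = s;
}

int
main(void)
{
    const char *oldtup = "id=42|name=alice|balance=100|flags=0";
    const char *newtup = "id=42|name=alice|balance=175|flags=0";
    int         prefixlen, suffixlen;

    demo_prefix_suffix(oldtup, (int) strlen(oldtup),
                       newtup, (int) strlen(newtup),
                       &prefixlen, &suffixlen);

    /* Only the changed middle bytes of the new tuple would go into WAL */
    printf("prefix=%d suffix=%d logged=%d of %d bytes\n",
           prefixlen, suffixlen,
           (int) strlen(newtup) - prefixlen - suffixlen,
           (int) strlen(newtup));
    return 0;
}
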
    9133             : /*
    9134             :  * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
    9135             :  *
    9136             :  * This is only used when effective_wal_level is logical, and only for
    9137             :  * catalog tuples.
    9138             :  */
    9139             : static XLogRecPtr
    9140       49322 : log_heap_new_cid(Relation relation, HeapTuple tup)
    9141             : {
    9142             :     xl_heap_new_cid xlrec;
    9143             : 
    9144             :     XLogRecPtr  recptr;
    9145       49322 :     HeapTupleHeader hdr = tup->t_data;
    9146             : 
    9147             :     Assert(ItemPointerIsValid(&tup->t_self));
    9148             :     Assert(tup->t_tableOid != InvalidOid);
    9149             : 
    9150       49322 :     xlrec.top_xid = GetTopTransactionId();
    9151       49322 :     xlrec.target_locator = relation->rd_locator;
    9152       49322 :     xlrec.target_tid = tup->t_self;
    9153             : 
    9154             :     /*
    9155             :      * If the tuple got inserted and deleted in the same transaction, we
    9156             :      * definitely have a combo CID, so set both cmin and cmax.
    9157             :      */
    9158       49322 :     if (hdr->t_infomask & HEAP_COMBOCID)
    9159             :     {
    9160             :         Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
    9161             :         Assert(!HeapTupleHeaderXminInvalid(hdr));
    9162        4048 :         xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
    9163        4048 :         xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
    9164        4048 :         xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
    9165             :     }
    9166             :     /* No combo CID, so only cmin or cmax can be set by this TX */
    9167             :     else
    9168             :     {
    9169             :         /*
    9170             :          * Tuple inserted.
    9171             :          *
    9172             :          * We need to check for LOCK ONLY because multixacts might be
    9173             :          * transferred to the new tuple in case of FOR KEY SHARE updates in
    9174             :          * which case there will be an xmax, although the tuple just got
    9175             :          * inserted.
    9176             :          */
    9177       58946 :         if (hdr->t_infomask & HEAP_XMAX_INVALID ||
    9178       13672 :             HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
    9179             :         {
    9180       31604 :             xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
    9181       31604 :             xlrec.cmax = InvalidCommandId;
    9182             :         }
    9183             :         /* Tuple from a different tx updated or deleted. */
    9184             :         else
    9185             :         {
    9186       13670 :             xlrec.cmin = InvalidCommandId;
    9187       13670 :             xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
    9188             :         }
    9189       45274 :         xlrec.combocid = InvalidCommandId;
    9190             :     }
    9191             : 
    9192             :     /*
    9193             :      * Note that we don't need to register the buffer here, because this
    9194             :      * operation does not modify the page. The insert/update/delete that
    9195             :      * called us certainly did, but that's WAL-logged separately.
    9196             :      */
    9197       49322 :     XLogBeginInsert();
    9198       49322 :     XLogRegisterData(&xlrec, SizeOfHeapNewCid);
    9199             : 
    9200             :     /* will be looked at irrespective of origin */
    9201             : 
    9202       49322 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
    9203             : 
    9204       49322 :     return recptr;
    9205             : }
    9206             : 
    9207             : /*
    9208             :  * Build a heap tuple representing the configured REPLICA IDENTITY to represent
    9209             :  * the old tuple in an UPDATE or DELETE.
    9210             :  *
    9211             :  * Returns NULL if there's no need to log an identity or if there's no suitable
    9212             :  * key defined.
    9213             :  *
    9214             :  * Pass key_required true if any replica identity columns changed value, or if
    9215             :  * any of them have any external data.  Delete must always pass true.
    9216             :  *
    9217             :  * *copy is set to true if the returned tuple is a modified copy rather than
    9218             :  * the same tuple that was passed in.
    9219             :  */
    9220             : static HeapTuple
    9221     3675776 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
    9222             :                        bool *copy)
    9223             : {
    9224     3675776 :     TupleDesc   desc = RelationGetDescr(relation);
    9225     3675776 :     char        replident = relation->rd_rel->relreplident;
    9226             :     Bitmapset  *idattrs;
    9227             :     HeapTuple   key_tuple;
    9228             :     bool        nulls[MaxHeapAttributeNumber];
    9229             :     Datum       values[MaxHeapAttributeNumber];
    9230             : 
    9231     3675776 :     *copy = false;
    9232             : 
    9233     3675776 :     if (!RelationIsLogicallyLogged(relation))
    9234     3475192 :         return NULL;
    9235             : 
    9236      200584 :     if (replident == REPLICA_IDENTITY_NOTHING)
    9237         462 :         return NULL;
    9238             : 
    9239      200122 :     if (replident == REPLICA_IDENTITY_FULL)
    9240             :     {
    9241             :         /*
    9242             :          * When logging the entire old tuple, it very well could contain
    9243             :          * toasted columns. If so, force them to be inlined.
    9244             :          */
    9245         394 :         if (HeapTupleHasExternal(tp))
    9246             :         {
    9247           8 :             *copy = true;
    9248           8 :             tp = toast_flatten_tuple(tp, desc);
    9249             :         }
    9250         394 :         return tp;
    9251             :     }
    9252             : 
    9253             :     /* if the key isn't required and we're only logging the key, we're done */
    9254      199728 :     if (!key_required)
    9255       93752 :         return NULL;
    9256             : 
    9257             :     /* find out the replica identity columns */
    9258      105976 :     idattrs = RelationGetIndexAttrBitmap(relation,
    9259             :                                          INDEX_ATTR_BITMAP_IDENTITY_KEY);
    9260             : 
    9261             :     /*
    9262             :      * If there's no defined replica identity columns, treat as !key_required.
    9263             :      * (This case should not be reachable from heap_update, since that should
    9264             :      * calculate key_required accurately.  But heap_delete just passes
    9265             :      * constant true for key_required, so we can hit this case in deletes.)
    9266             :      */
    9267      105976 :     if (bms_is_empty(idattrs))
    9268       12042 :         return NULL;
    9269             : 
    9270             :     /*
    9271             :      * Construct a new tuple containing only the replica identity columns,
    9272             :      * with nulls elsewhere.  While we're at it, assert that the replica
    9273             :      * identity columns aren't null.
    9274             :      */
    9275       93934 :     heap_deform_tuple(tp, desc, values, nulls);
    9276             : 
    9277      301790 :     for (int i = 0; i < desc->natts; i++)
    9278             :     {
    9279      207856 :         if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
    9280             :                           idattrs))
    9281             :             Assert(!nulls[i]);
    9282             :         else
    9283      113898 :             nulls[i] = true;
    9284             :     }
    9285             : 
    9286       93934 :     key_tuple = heap_form_tuple(desc, values, nulls);
    9287       93934 :     *copy = true;
    9288             : 
    9289       93934 :     bms_free(idattrs);
    9290             : 
    9291             :     /*
    9292             :      * If the tuple, which at this point contains only indexed columns, still
    9293             :      * has toasted columns, force them to be inlined.  This is somewhat
    9294             :      * unlikely since there are limits on the size of indexed columns, so we
    9295             :      * don't duplicate toast_flatten_tuple()'s functionality in the above loop
    9296             :      * over the indexed columns, even if it would be more efficient.
    9297             :      */
    9298       93934 :     if (HeapTupleHasExternal(key_tuple))
    9299             :     {
    9300           8 :         HeapTuple   oldtup = key_tuple;
    9301             : 
    9302           8 :         key_tuple = toast_flatten_tuple(oldtup, desc);
    9303           8 :         heap_freetuple(oldtup);
    9304             :     }
    9305             : 
    9306       93934 :     return key_tuple;
    9307             : }
    9308             : 
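A small illustration of the projection performed above, with a hypothetical four-column row (not part of heapam.c): every attribute outside the replica identity bitmap is simply overwritten with a NULL before the key-only tuple is formed.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical four-column row; only column 0 is in the identity key */
    const char *values[]      = {"42", "alice", "100", "f"};
    bool        nulls[]       = {false, false, false, false};
    bool        is_identity[] = {true, false, false, false};

    /* Null out everything outside the identity key, as in the loop above */
    for (int i = 0; i < 4; i++)
        if (!is_identity[i])
            nulls[i] = true;

    for (int i = 0; i < 4; i++)
        printf("col%d = %s\n", i, nulls[i] ? "NULL" : values[i]);
    return 0;
}
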
    9309             : /*
    9310             :  * HeapCheckForSerializableConflictOut
    9311             :  *      We are reading a tuple.  If it's not visible, there may be a
    9312             :  *      rw-conflict out with the inserter.  Otherwise, if it is visible to us
    9313             :  *      but has been deleted, there may be a rw-conflict out with the deleter.
    9314             :  *
    9315             :  * We will determine the top level xid of the writing transaction with which
    9316             :  * we may be in conflict, and ask CheckForSerializableConflictOut() to check
    9317             :  * for overlap with our own transaction.
    9318             :  *
    9319             :  * This function should be called just about anywhere in heapam.c where a
    9320             :  * tuple has been read. The caller must hold at least a shared lock on the
    9321             :  * buffer, because this function might set hint bits on the tuple. There is
    9322             :  * currently no known reason to call this function from an index AM.
    9323             :  */
    9324             : void
    9325    63037244 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
    9326             :                                     HeapTuple tuple, Buffer buffer,
    9327             :                                     Snapshot snapshot)
    9328             : {
    9329             :     TransactionId xid;
    9330             :     HTSV_Result htsvResult;
    9331             : 
    9332    63037244 :     if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
    9333    62986496 :         return;
    9334             : 
    9335             :     /*
    9336             :      * Check to see whether the tuple has been written to by a concurrent
    9337             :      * transaction, either to create it not visible to us, or to delete it
    9338             :      * while it is visible to us.  The "visible" bool indicates whether the
    9339             :      * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
    9340             :      * is going on with it.
    9341             :      *
    9342             :      * In the event of a concurrently inserted tuple that also happens to have
    9343             :      * been concurrently updated (by a separate transaction), the xmin of the
    9344             :      * tuple will be used -- not the updater's xid.
    9345             :      */
    9346       50748 :     htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
    9347       50748 :     switch (htsvResult)
    9348             :     {
    9349       49122 :         case HEAPTUPLE_LIVE:
    9350       49122 :             if (visible)
    9351       49096 :                 return;
    9352          26 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9353          26 :             break;
    9354         722 :         case HEAPTUPLE_RECENTLY_DEAD:
    9355             :         case HEAPTUPLE_DELETE_IN_PROGRESS:
    9356         722 :             if (visible)
    9357         570 :                 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    9358             :             else
    9359         152 :                 xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9360             : 
    9361         722 :             if (TransactionIdPrecedes(xid, TransactionXmin))
    9362             :             {
    9363             :                 /* This is like the HEAPTUPLE_DEAD case */
    9364             :                 Assert(!visible);
    9365         134 :                 return;
    9366             :             }
    9367         588 :             break;
    9368         656 :         case HEAPTUPLE_INSERT_IN_PROGRESS:
    9369         656 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9370         656 :             break;
    9371         248 :         case HEAPTUPLE_DEAD:
    9372             :             Assert(!visible);
    9373         248 :             return;
    9374           0 :         default:
    9375             : 
    9376             :             /*
    9377             :              * The only way to get to this default clause is if a new value is
    9378             :              * added to the enum type without adding it to this switch
    9379             :              * statement.  That's a bug, so elog.
    9380             :              */
    9381           0 :             elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
    9382             : 
    9383             :             /*
    9384             :              * In spite of having all enum values covered and calling elog on
    9385             :              * this default, some compilers think this is a code path which
    9386             :              * allows xid to be used below without initialization. Silence
    9387             :              * that warning.
    9388             :              */
    9389             :             xid = InvalidTransactionId;
    9390             :     }
    9391             : 
    9392             :     Assert(TransactionIdIsValid(xid));
    9393             :     Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
    9394             : 
    9395             :     /*
    9396             :      * Find top level xid.  Bail out if xid is too early to be a conflict, or
    9397             :      * if it's our own xid.
    9398             :      */
    9399        1270 :     if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
    9400         128 :         return;
    9401        1142 :     xid = SubTransGetTopmostTransaction(xid);
    9402        1142 :     if (TransactionIdPrecedes(xid, TransactionXmin))
    9403           0 :         return;
    9404             : 
    9405        1142 :     CheckForSerializableConflictOut(relation, xid, snapshot);
    9406             : }

Generated by: LCOV version 1.16